text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
#!/usr/bin/env python
from __future__ import print_function
## Author: Chris Wymant, c.wymant@imperial.ac.uk
## Acknowledgement: I wrote this while funded by ERC Advanced Grant PBDR-339251
##
## Overview:
# Long help text shown by argparse --help; doubles as the script's overview.
# (Runtime string: content left untouched.)
ExplanatoryMessage = '''For each bam file in the list given as input, this
script does the following. The distribution of read lengths, and insert sizes if
reads are found to be paired, is calculated. (Here, length means length of the
mapping reference covered by the read, which will not be the same as the true
read length if there are insertions or deletions.) We then estimate the number
of reads and inserts expected to fully span a window of width W by assuming that
reads are distributed randomly over the genome (i.e. ignoring the actual
location information in the bam). We output this count for each bam file as a
function of W.'''
import os
import sys
import argparse
import pysam
import phyloscanner_funcs as pf
import collections
import numpy as np
# Define a function to check files exist, as a type for the argparse.
def File(MyFile):
  '''Argparse type checker: the argument must name an existing file.

  Returns the path unchanged if it exists; otherwise raises
  argparse.ArgumentTypeError so argparse reports a clean usage error.
  '''
  if os.path.isfile(MyFile):
    return MyFile
  raise argparse.ArgumentTypeError(MyFile+' does not exist or is not a file.')
# A help formatter that preserves explicit newlines in argument help text.
class SmartFormatter(argparse.HelpFormatter):
  '''argparse formatter: help strings starting with 'R|' keep their own
  line breaks instead of being re-wrapped to the terminal width.'''
  def _split_lines(self, text, width):
    # Anything without the 'R|' marker gets the default wrapping behaviour.
    if not text.startswith('R|'):
      return argparse.HelpFormatter._split_lines(self, text, width)
    # Strip the marker and honour the author's line breaks verbatim.
    return text[2:].splitlines()
# Set up the arguments for this script
parser = argparse.ArgumentParser(description=ExplanatoryMessage,
  formatter_class=SmartFormatter)
# Positional args
parser.add_argument('BamAndRefList', type=File,
  help='''R|A csv-format file listing the bam and reference files
(i.e. the fasta-format file containing the sequence to
which the reads were mapped). The first column should
be the bam file, the second column the corresponding
reference file, with a comma separating the two. An
optional third column, if present, will be used to
rename the bam files in all output. For example:
PatientA.bam,PatientA_ref.fasta,A
PatientB.bam,PatientB_ref.fasta,B''')
# Optional args.
parser.add_argument('-N', '--normalise', action='store_true', help='''Normalise
the counts for each bam to the value at a window width of zero, making it easier
to compare the relative decline in number of reads with growing window size
between different bams with different total numbers of reads.''')
parser.add_argument('-O', '--out-filename', help="We'll append '.csv' for the "
  "output data file, and '.pdf' for the plot. The default is "
  "'EstimatedReadCountsPerWindow'.", default='EstimatedReadCountsPerWindow')
parser.add_argument('-OIS', '--overlapping-insert-sizes', action='store_true',
  help='''Just record the insert size distribution for each bam, restricted to
inserts where the mates overlap.''')
parser.add_argument('-DB', '--dont-plot', action='store_true',
  help="Don't plot the results.")
parser.add_argument('-MC', '--min-read-count', type=float, help='''Used to
specify a positive number: we'll truncate the x axis when the window width
becomes so large that all bams have a read count per window below this
value. The default is 1.''', default=1)
# Plot appearance options.
parser.add_argument('-AS', '--axis-font-size', type=int,
  help='For the plot. The default is 15.', default=15)
parser.add_argument('-TS', '--title-font-size', type=int,
  help='For the plot. The default is 15.', default=15)
parser.add_argument('-LS', '--legend-font-size', type=int,
  help='For the plot. The default is 7.', default=7)
parser.add_argument('-LL', '--legend-location',
  help='''For the plot. The default is 'lower left'. The other options are:
'best', 'upper right', 'upper left', 'lower right', 'right', 'center left',
'center right', 'lower center',' upper center', 'center' ''',
  default='lower left')
parser.add_argument('-LY', '--linear-y-axis',
  help='For the plot. The default is logarithmic.', action='store_true')
parser.add_argument('-XM', '--x-min-max', help='The minimum and maximum for '\
  'the x axis in the plot, specified together as a comma-separated pair of '\
  'numbers.')
parser.add_argument('-YM', '--y-min-max', help='The minimum and maximum for '\
  'the y axis in the plot, specified together as a comma-separated pair of '\
  'numbers.')
parser.add_argument('--x-samtools', default='samtools', help=\
  'Used to specify the command required to run samtools, if it is needed to index'
  ' the bam files (by default: samtools).')
args = parser.parse_args()
# Short alias for the flag: when set we only output insert size distributions.
InsertSizesOnly = args.overlapping_insert_sizes
def GetIntPair(arg, ArgName):
  '''Parse a comma-separated pair of numbers from a command-line argument.

  Exits the program with a message on stderr if the argument is malformed.
  Returns the two parsed values ordered as (smaller, larger).
  (Despite the name, values are parsed as floats, matching the original.)
  '''
  pieces = arg.split(',')
  try:
    if len(pieces) != 2:
      raise ValueError
    first = float(pieces[0])
    second = float(pieces[1])
  except ValueError:
    # Same diagnostic for a wrong field count and for non-numeric fields.
    print(ArgName, 'should be used to specify a comma-separated pair of',
    'numbers. Quitting.', file=sys.stderr)
    exit(1)
  return min(first, second), max(first, second)
# Get plot limits, if the user supplied them (each pair is returned ordered
# as (min, max) by GetIntPair).
if args.x_min_max:
  Xmin, Xmax = GetIntPair(args.x_min_max, '--x-min-max')
if args.y_min_max:
  Ymin, Ymax = GetIntPair(args.y_min_max, '--y-min-max')
# Read in the input bam and ref files
BamFiles, RefFiles, aliases, BamFileBasenames = \
pf.ReadInputCSVfile(args.BamAndRefList)
NumBams = len(BamFiles)
# Make index files for the bam files if needed.
pf.MakeBamIndices(BamFiles, args.x_samtools)
def FindReadCountAsFuncOfWindowWidth(ReadSizeCountDict, RefLength,
normalise=None):
  '''Estimate the expected number of reads wholly spanning each window width.

  ReadSizeCountDict maps a read (or insert) length to the number of reads of
  that length; RefLength is the length of the mapping reference. Assuming
  reads are placed uniformly at random on the reference, a read of length L
  spans a given window of width W with probability
  (L - W + 1) / (RefLength - W + 1), so the expected count for width W is the
  sum of that probability over all reads.

  Returns a numpy array whose (W-1)th element is the expected count for
  window width W, for W = 1 up to the largest read length; an empty array if
  there are no reads. If normalise is true, counts are divided by the count
  at width 1; normalise defaults to the --normalise command-line flag,
  preserving the original behaviour while making the function reusable.
  '''
  if normalise is None:
    normalise = args.normalise
  # Return an empty array if there are no reads
  if len(ReadSizeCountDict) == 0:
    return np.zeros(0)
  LargestReadLength = max(ReadSizeCountDict.keys())
  RefLengthPlus1 = RefLength + 1
  # The nth element of this array will eventually contain the number of reads
  # expected to span a window of width n+1 (array is zero-based).
  ReadsCountByWindowWidth = np.zeros(LargestReadLength)
  for ReadLength, count in ReadSizeCountDict.items():
    ReadLengthPlus1 = ReadLength + 1
    # The number of positions at which we could place a window of width W is
    # RefLength - W + 1; the number of positions wholly inside a read of this
    # length is ReadLength - W + 1. Their ratio is the spanning probability.
    for W in range(1, ReadLengthPlus1):
      ReadsCountByWindowWidth[W-1] += \
      count * float(ReadLengthPlus1 - W) / (RefLengthPlus1 - W)
  if normalise:
    # Vectorised division; also fixes the original's inconsistency of
    # returning a Python list (instead of a numpy array) when normalising.
    ReadsCountByWindowWidth = ReadsCountByWindowWidth / ReadsCountByWindowWidth[0]
  return ReadsCountByWindowWidth
# Per-bam results, keyed by the bam's alias, in input order.
ReadLengthCountsByBam = collections.OrderedDict()
InsertSizeCountsByBam = collections.OrderedDict()
InsertSizesOnlyByBam = collections.OrderedDict()
for i, BamFileName in enumerate(BamFiles):
  alias = aliases[i]
  print('Now counting read and insert sizes for', alias)
  bam = pysam.AlignmentFile(BamFileName, "rb")
  # Find the reference in the bam file; there should only be one.
  AllRefs = bam.references
  if len(AllRefs) != 1:
    print('Expected exactly one reference in', BamFileName + '; found',\
    str(len(AllRefs)) + '.Quitting.', file=sys.stderr)
    exit(1)
  RefName = AllRefs[0]
  # Get the length of the reference.
  AllRefLengths = bam.lengths
  if len(AllRefLengths) != 1:
    print('Pysam error: found one reference but', len(AllRefLengths),
    'reference lengths. Quitting.', file=sys.stderr)
    exit(1)
  RefLength = AllRefLengths[0]
  # query name -> [start, end, whether the mate has been seen yet]
  PairedReadCoords = {}
  # read length -> count of reads with that length
  ReadLengthCounts = {}
  # insert size -> count of inserts with that size
  InsertSizeCounts = {}
  TotalReadCount = 0
  # Iterate through the reads
  for read in bam.fetch(RefName):
    MappedPositions = read.get_reference_positions(full_length=False)
    # Skip unmapped reads
    if not MappedPositions:
      continue
    TotalReadCount += 1
    # First/last mapped reference positions, orientation-independent.
    start = min(MappedPositions[0], MappedPositions[-1])
    end = max(MappedPositions[0], MappedPositions[-1])
    # NOTE(review): end - start is one less than the number of reference
    # positions covered; confirm whether this off-by-one is intended.
    ReadLength = end - start
    try:
      ReadLengthCounts[ReadLength] += 1
    except KeyError:
      ReadLengthCounts[ReadLength] = 1
    # The first time we encounter a mate from a pair, record its start and end.
    # When we encounter its mate, if they overlap, record the insert size; if
    # they don't overlap, record their separate lengths as though they are two
    # different inserts (because phyloscanner won't merge them - they are
    # effectively two separate inserts from the point of view of merging).
    if read.is_paired:
      if read.query_name in PairedReadCoords:
        MateStart, MateEnd, MateFoundBool = PairedReadCoords[read.query_name]
        # Mark the pair as complete so it isn't counted as mate-less below.
        PairedReadCoords[read.query_name][2] = True
        if start <= MateStart <= end:
          # Mate starts inside this read: merged insert from start to the
          # rightmost end of either read.
          InsertSize = max(end, MateEnd) - start
          try:
            InsertSizeCounts[InsertSize] += 1
          except KeyError:
            InsertSizeCounts[InsertSize] = 1
        elif MateStart <= start <= MateEnd:
          # This read starts inside the mate: merged insert from the mate's
          # start to the rightmost end.
          InsertSize = max(end, MateEnd) - MateStart
          try:
            InsertSizeCounts[InsertSize] += 1
          except KeyError:
            InsertSizeCounts[InsertSize] = 1
        else:
          # No overlap: record both reads' lengths as separate "inserts".
          try:
            InsertSizeCounts[ReadLength] += 1
          except KeyError:
            InsertSizeCounts[ReadLength] = 1
          MateLength = MateEnd - MateStart
          try:
            InsertSizeCounts[MateLength] += 1
          except KeyError:
            InsertSizeCounts[MateLength] = 1
      else:
        PairedReadCoords[read.query_name] = [start, end, False]
  # For paired reads for which we didn't find a mate, add just the read length
  # to the insert size distribution.
  NumMissingMates = 0
  for start, end, MateFound in PairedReadCoords.values():
    if not MateFound:
      NumMissingMates += 1
      ReadLength = end - start
      try:
        InsertSizeCounts[ReadLength] += 1
      except KeyError:
        InsertSizeCounts[ReadLength] = 1
  if NumMissingMates > 0:
    print('Info:', NumMissingMates, 'of', TotalReadCount, 'reads in',
    BamFileName, "are flagged as being paired but don't have a mate present.")
  # Skip empty bams
  if TotalReadCount == 0:
    print('Warning: no reads found in', BamFileName + '. Skipping.')
    continue
  if InsertSizesOnly:
    # Keep the raw size distribution for the --overlapping-insert-sizes output.
    InsertSizesOnlyByBam[alias] = InsertSizeCounts
  # Convert raw size distributions into expected counts per window width.
  ReadLengthCountsByBam[alias] = \
  FindReadCountAsFuncOfWindowWidth(ReadLengthCounts, RefLength)
  InsertSizeCountsByBam[alias] = \
  FindReadCountAsFuncOfWindowWidth(InsertSizeCounts, RefLength)
# With --overlapping-insert-sizes, just write the raw distributions and stop.
if InsertSizesOnly:
  with open(args.out_filename + '.csv', 'w') as f:
    f.write('Bam file,Size of overlapping read pair or length of read in ' + \
    'non-overlapping pair,Count\n')
    # NOTE(review): the loop variable below shadows the InsertSizesOnly flag;
    # harmless only because the script exits immediately afterwards.
    for alias, InsertSizesOnly in InsertSizesOnlyByBam.items():
      for size, count in sorted(InsertSizesOnly.items(), key=lambda x:x[0]):
        f.write(alias + ',' + str(size) + ',' + str(count) + '\n')
  exit(0)
# Make a matrix for which the first column is every window size we need to
# consider, in order, and subsequent columns list the number of reads (and
# inserts, if reads are paired) expected to fully span a window of that size,
# for each different bam.
# (The per-bam values here are the arrays returned by
# FindReadCountAsFuncOfWindowWidth, indexed by window width - 1.)
MaxInsertSize = max(len(list_) for list_ in InsertSizeCountsByBam.values())
# If no reads were paired, every insert-size array is empty.
SomeDataIsPaired = MaxInsertSize > 0
MaxReadOrInsertSize = max(MaxInsertSize,
max(len(list_) for list_ in ReadLengthCountsByBam.values()))
if SomeDataIsPaired:
  # One read-count column plus one insert-count column per bam.
  matrix = np.zeros((MaxReadOrInsertSize, 2 * NumBams + 1))
else:
  matrix = np.zeros((MaxReadOrInsertSize, NumBams + 1))
# First column: the window widths themselves (1-based).
matrix[:, 0] = np.arange(1, MaxReadOrInsertSize + 1)
header = 'window width'
if SomeDataIsPaired:
  for alias in aliases:
    header += ',' + 'read count in ' + alias + ',insert size count in ' + alias
  # Column 2i+1 holds read counts and column 2i+2 insert counts for bam i;
  # shorter arrays leave trailing zeros in their column.
  for i, ReadLengthCounts in enumerate(ReadLengthCountsByBam.values()):
    matrix[:len(ReadLengthCounts), 2 * i + 1] = ReadLengthCounts
  for i, InsertSizeCounts in enumerate(InsertSizeCountsByBam.values()):
    matrix[:len(InsertSizeCounts), 2 * i + 2] = InsertSizeCounts
else:
  for alias in aliases:
    header += ',' + 'read count in ' + alias
  for i, ReadLengthCounts in enumerate(ReadLengthCountsByBam.values()):
    matrix[:len(ReadLengthCounts), i + 1] = ReadLengthCounts
# Write the matrix to a csv file.
with open(args.out_filename + '.csv', 'w') as f:
  np.savetxt(f, matrix, delimiter=',', header=header, fmt='%.1f')
# Everything below is plotting; skip it if the user asked us to.
if args.dont_plot:
  exit(0)
try:
  import matplotlib.pyplot as plt
except ImportError:
  print("The python library matplotlib does not seem to be installed: you'll "
  "need to plot", args.out_filename + '.csv yourself.' )
  exit(1)
# For plotting: cut off the tail end of the matrix where read counts are too
# small.
LastDesiredRow = 0
for row in range(MaxReadOrInsertSize - 1, -1, -1):
  if max(matrix[row, 1:]) >= args.min_read_count:
    LastDesiredRow = row
    break
if LastDesiredRow == 0:
  print('Warning: no bam has', args.min_read_count, 'reads per window',
  'regardless how small the window is. Ignoring the --min-read-count value.')
  LastDesiredRow = MaxReadOrInsertSize - 1
matrix = matrix[:LastDesiredRow + 1, :]
ax = plt.figure().add_subplot(111)
# Positional limits instead of the xmin=/xmax= keywords, which were removed
# from matplotlib's set_xlim/set_ylim in version 3.0.
if args.x_min_max:
  ax.set_xlim(Xmin, Xmax)
if args.y_min_max:
  ax.set_ylim(Ymin, Ymax)
for i in range(1, matrix.shape[1]):
  if SomeDataIsPaired:
    # Columns alternate per bam: column 2j+1 is reads, 2j+2 is inserts for
    # bam j. Bug fix: floor division, since a float index into the aliases
    # list raises a TypeError under Python 3.
    alias = aliases[(i - 1) // 2]
    if i % 2 == 0:
      label = 'read pairs, ' + alias
      linestyle = '--'
    else:
      label = 'reads, ' + alias
      linestyle = '-'
  else:
    label = aliases[i - 1]
    linestyle = '-'
  plt.plot(matrix[:, 0], matrix[:, i], label=label, linestyle=linestyle)
plt.xlabel('window width', fontsize=args.axis_font_size)
YaxisLabel = 'number of reads'
if args.normalise:
  # Bug fix: the original label read 'relative to when the window width of
  # zero', which is ungrammatical.
  YaxisLabel += ' relative to\nthe value at a window width of zero'
if SomeDataIsPaired:
  title = \
  'Estimating the number of unpaired reads and paired reads (merging\n' + \
  'read in a pair when they overlap) spanning each window, assuming\n' + \
  'reads are randomly distributed over the whole genome'
else:
  title = \
  'Estimating the number of reads spanning each window, assuming\n' + \
  'they are randomly distributed over the whole genome'
plt.ylabel(YaxisLabel, fontsize=args.axis_font_size)
plt.title(title, fontsize=args.title_font_size)
ax.tick_params(axis='both', which='major', labelsize=args.axis_font_size)
if not args.linear_y_axis:
  ax.set_yscale('log')
# Bug fix: only apply the default x range when the user did not supply
# --x-min-max; the original overwrote the user's limits unconditionally here.
if not args.x_min_max:
  ax.set_xlim(0, LastDesiredRow)
plt.legend(loc=args.legend_location, fontsize=args.legend_font_size)
plt.tight_layout()
plt.savefig(args.out_filename + '.pdf')
|
BDI-pathogens/phyloscanner
|
tools/EstimateReadCountPerWindow.py
|
Python
|
gpl-3.0
| 14,576
|
[
"pysam"
] |
a88bd0e45e4c32af737cc7b9936d107dd92d9e7707dce5e2fe95da74b9ddc0dd
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.workflows.executions_v1.services.executions import (
ExecutionsAsyncClient,
)
from google.cloud.workflows.executions_v1.services.executions import ExecutionsClient
from google.cloud.workflows.executions_v1.services.executions import pagers
from google.cloud.workflows.executions_v1.services.executions import transports
from google.cloud.workflows.executions_v1.types import executions
from google.oauth2 import service_account
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Dummy mTLS client cert source returning placeholder (cert, key) bytes."""
    cert = b"cert bytes"
    key = b"key bytes"
    return cert, key
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a non-localhost stand-in endpoint when the client's default
    endpoint is localhost; otherwise return the default endpoint unchanged."""
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint inserts '.mtls' into googleapis.com hosts,
    is idempotent on already-mtls hosts, and passes through None and
    non-Google endpoints unchanged."""
    cases = [
        (None, None),
        ("example.googleapis.com", "example.mtls.googleapis.com"),
        ("example.mtls.googleapis.com", "example.mtls.googleapis.com"),
        ("example.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("example.mtls.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("api.example.com", "api.example.com"),
    ]
    for endpoint, expected in cases:
        assert ExecutionsClient._get_default_mtls_endpoint(endpoint) == expected
@pytest.mark.parametrize("client_class", [ExecutionsClient, ExecutionsAsyncClient,])
def test_executions_client_from_service_account_info(client_class):
    """from_service_account_info builds a client whose transport holds the
    credentials produced by the factory and targets the default host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "workflowexecutions.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.ExecutionsGrpcTransport, "grpc"),
        (transports.ExecutionsGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_executions_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """Transports call with_always_use_jwt_access(True) on service-account
    credentials exactly when always_use_jwt_access=True, and not otherwise."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [ExecutionsClient, ExecutionsAsyncClient,])
def test_executions_client_from_service_account_file(client_class):
    """Both from_service_account_file and its from_service_account_json alias
    build clients carrying the factory's credentials and the default host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "workflowexecutions.googleapis.com:443"
def test_executions_client_get_transport_class():
    """get_transport_class returns the gRPC transport both by default and
    when requested by name."""
    default_transport = ExecutionsClient.get_transport_class()
    expected_transports = [
        transports.ExecutionsGrpcTransport,
    ]
    assert default_transport in expected_transports
    named_transport = ExecutionsClient.get_transport_class("grpc")
    assert named_transport == transports.ExecutionsGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (ExecutionsClient, transports.ExecutionsGrpcTransport, "grpc"),
        (
            ExecutionsAsyncClient,
            transports.ExecutionsGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    ExecutionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ExecutionsClient)
)
@mock.patch.object(
    ExecutionsAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(ExecutionsAsyncClient),
)
def test_executions_client_client_options(
    client_class, transport_class, transport_name
):
    """Constructor honours transport instances/names, explicit api_endpoint,
    the GOOGLE_API_USE_MTLS_ENDPOINT / GOOGLE_API_USE_CLIENT_CERTIFICATE
    environment variables, and quota_project_id."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(ExecutionsClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(ExecutionsClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (ExecutionsClient, transports.ExecutionsGrpcTransport, "grpc", "true"),
        (
            ExecutionsAsyncClient,
            transports.ExecutionsGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (ExecutionsClient, transports.ExecutionsGrpcTransport, "grpc", "false"),
        (
            ExecutionsAsyncClient,
            transports.ExecutionsGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    ExecutionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ExecutionsClient)
)
@mock.patch.object(
    ExecutionsAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(ExecutionsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_executions_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the client switches to the mTLS
    endpoint iff a client cert (explicit or ADC-provided) is available and
    GOOGLE_API_USE_CLIENT_CERTIFICATE is "true"."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize("client_class", [ExecutionsClient, ExecutionsAsyncClient])
@mock.patch.object(
    ExecutionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ExecutionsClient)
)
@mock.patch.object(
    ExecutionsAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(ExecutionsAsyncClient),
)
def test_executions_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source resolves the (endpoint, cert source)
    pair for every combination of the mTLS environment variables and the
    presence/absence of a default client certificate."""
    mock_client_cert_source = mock.Mock()
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (ExecutionsClient, transports.ExecutionsGrpcTransport, "grpc"),
        (
            ExecutionsAsyncClient,
            transports.ExecutionsGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_executions_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes given via client options are forwarded to the transport."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (ExecutionsClient, transports.ExecutionsGrpcTransport, "grpc", grpc_helpers),
        (
            ExecutionsAsyncClient,
            transports.ExecutionsGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_executions_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials_file client option is forwarded to the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_executions_client_client_options_from_dict():
    """Client options passed as a plain dict (not ClientOptions) also reach
    the transport, here setting a custom api_endpoint."""
    with mock.patch(
        "google.cloud.workflows.executions_v1.services.executions.transports.ExecutionsGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = ExecutionsClient(client_options={"api_endpoint": "squid.clam.whelk"})
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (ExecutionsClient, transports.ExecutionsGrpcTransport, "grpc", grpc_helpers),
        (
            ExecutionsAsyncClient,
            transports.ExecutionsGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_executions_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """Credentials loaded from a file must be the ones used to create the gRPC channel."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # test that the credentials from file are saved and used as the credentials.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        # The file-loaded credentials (not ADC) must reach create_channel.
        create_channel.assert_called_with(
            "workflowexecutions.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=None,
            default_host="workflowexecutions.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize("request_type", [executions.ListExecutionsRequest, dict,])
def test_list_executions(request_type, transport: str = "grpc"):
    """list_executions must send the request and wrap the response in a sync pager."""
    client = ExecutionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_executions), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = executions.ListExecutionsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_executions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == executions.ListExecutionsRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, pagers.ListExecutionsPager)
        assert response.next_page_token == "next_page_token_value"
def test_list_executions_empty_call():
    """Calling list_executions with no arguments must still send a default request."""
    # Coverage failsafe: request == None and no flattened fields must work.
    client = ExecutionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    stub_type = type(client.transport.list_executions)
    with mock.patch.object(stub_type, "__call__") as mocked:
        client.list_executions()
        mocked.assert_called()
        sent_request = mocked.mock_calls[0][1][0]
        assert sent_request == executions.ListExecutionsRequest()
@pytest.mark.asyncio
async def test_list_executions_async(
    transport: str = "grpc_asyncio", request_type=executions.ListExecutionsRequest
):
    """Async list_executions must send the request and return an async pager."""
    client = ExecutionsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_executions), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            executions.ListExecutionsResponse(next_page_token="next_page_token_value",)
        )
        response = await client.list_executions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == executions.ListExecutionsRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, pagers.ListExecutionsAsyncPager)
        assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_executions_async_from_dict():
    """The async path must also accept a plain-dict request."""
    await test_list_executions_async(request_type=dict)
def test_list_executions_field_headers():
    """URI-bound request fields must be sent as x-goog-request-params metadata."""
    client = ExecutionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = executions.ListExecutionsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_executions), "__call__") as call:
        call.return_value = executions.ListExecutionsResponse()
        client.list_executions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_executions_field_headers_async():
    """Async variant: URI-bound request fields must be sent as routing metadata."""
    client = ExecutionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = executions.ListExecutionsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_executions), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            executions.ListExecutionsResponse()
        )
        await client.list_executions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_executions_flattened():
    """A flattened ``parent`` keyword must be copied into the request message."""
    client = ExecutionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(
        type(client.transport.list_executions), "__call__"
    ) as mocked:
        mocked.return_value = executions.ListExecutionsResponse()
        # Invoke with a truthy value for each flattened field.
        client.list_executions(parent="parent_value",)
        # Exactly one RPC, carrying the flattened value in the request.
        assert len(mocked.mock_calls) == 1
        sent_request = mocked.mock_calls[0][1][0]
        assert sent_request.parent == "parent_value"
def test_list_executions_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = ExecutionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        client.list_executions(
            executions.ListExecutionsRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_executions_flattened_async():
    """Async variant: a flattened ``parent`` keyword is copied into the request."""
    client = ExecutionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_executions), "__call__") as call:
        # Designate an appropriate return value for the call.  (A plain
        # response assignment previously preceded this line but was dead code,
        # immediately overwritten by the awaitable wrapper below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            executions.ListExecutionsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_executions(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_executions_flattened_error_async():
    """The async client also rejects a request object mixed with flattened fields."""
    client = ExecutionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        await client.list_executions(
            executions.ListExecutionsRequest(), parent="parent_value",
        )
def test_list_executions_pager(transport_name: str = "grpc"):
    """Iterating the sync pager must yield every Execution across all pages."""
    client = ExecutionsClient(
        # Fix: pass a credentials *instance*; the class object was passed before.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_executions), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            executions.ListExecutionsResponse(
                executions=[
                    executions.Execution(),
                    executions.Execution(),
                    executions.Execution(),
                ],
                next_page_token="abc",
            ),
            executions.ListExecutionsResponse(executions=[], next_page_token="def",),
            executions.ListExecutionsResponse(
                executions=[executions.Execution(),], next_page_token="ghi",
            ),
            executions.ListExecutionsResponse(
                executions=[executions.Execution(), executions.Execution(),],
            ),
            RuntimeError,
        )
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_executions(request={})
        assert pager._metadata == metadata
        # 3 + 0 + 1 + 2 Executions spread over the four pages above.
        results = [i for i in pager]
        assert len(results) == 6
        assert all(isinstance(i, executions.Execution) for i in results)
def test_list_executions_pages(transport_name: str = "grpc"):
    """Paging through raw pages must expose each page's next_page_token."""
    client = ExecutionsClient(
        # Fix: pass a credentials *instance*; the class object was passed before.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_executions), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            executions.ListExecutionsResponse(
                executions=[
                    executions.Execution(),
                    executions.Execution(),
                    executions.Execution(),
                ],
                next_page_token="abc",
            ),
            executions.ListExecutionsResponse(executions=[], next_page_token="def",),
            executions.ListExecutionsResponse(
                executions=[executions.Execution(),], next_page_token="ghi",
            ),
            executions.ListExecutionsResponse(
                executions=[executions.Execution(), executions.Execution(),],
            ),
            RuntimeError,
        )
        pages = list(client.list_executions(request={}).pages)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_executions_async_pager():
    """Async-iterating the pager must yield every Execution across all pages."""
    # Fix: pass a credentials *instance*; the class object was passed before.
    client = ExecutionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_executions), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            executions.ListExecutionsResponse(
                executions=[
                    executions.Execution(),
                    executions.Execution(),
                    executions.Execution(),
                ],
                next_page_token="abc",
            ),
            executions.ListExecutionsResponse(executions=[], next_page_token="def",),
            executions.ListExecutionsResponse(
                executions=[executions.Execution(),], next_page_token="ghi",
            ),
            executions.ListExecutionsResponse(
                executions=[executions.Execution(), executions.Execution(),],
            ),
            RuntimeError,
        )
        async_pager = await client.list_executions(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        # 3 + 0 + 1 + 2 Executions spread over the four pages above.
        assert len(responses) == 6
        assert all(isinstance(i, executions.Execution) for i in responses)
@pytest.mark.asyncio
async def test_list_executions_async_pages():
    """Async raw-page iteration must expose each page's next_page_token."""
    # Fix: pass a credentials *instance*; the class object was passed before.
    client = ExecutionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_executions), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            executions.ListExecutionsResponse(
                executions=[
                    executions.Execution(),
                    executions.Execution(),
                    executions.Execution(),
                ],
                next_page_token="abc",
            ),
            executions.ListExecutionsResponse(executions=[], next_page_token="def",),
            executions.ListExecutionsResponse(
                executions=[executions.Execution(),], next_page_token="ghi",
            ),
            executions.ListExecutionsResponse(
                executions=[executions.Execution(), executions.Execution(),],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_executions(request={})).pages:
            pages.append(page_)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [executions.CreateExecutionRequest, dict,])
def test_create_execution(request_type, transport: str = "grpc"):
    """create_execution must forward the request and return the Execution message."""
    client = ExecutionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_execution), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = executions.Execution(
            name="name_value",
            state=executions.Execution.State.ACTIVE,
            argument="argument_value",
            result="result_value",
            workflow_revision_id="workflow_revision_id_value",
            call_log_level=executions.Execution.CallLogLevel.LOG_ALL_CALLS,
        )
        response = client.create_execution(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == executions.CreateExecutionRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, executions.Execution)
        assert response.name == "name_value"
        assert response.state == executions.Execution.State.ACTIVE
        assert response.argument == "argument_value"
        assert response.result == "result_value"
        assert response.workflow_revision_id == "workflow_revision_id_value"
        assert response.call_log_level == executions.Execution.CallLogLevel.LOG_ALL_CALLS
def test_create_execution_empty_call():
    """Calling create_execution with no arguments must still send a default request."""
    # Coverage failsafe: request == None and no flattened fields must work.
    client = ExecutionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    stub_type = type(client.transport.create_execution)
    with mock.patch.object(stub_type, "__call__") as mocked:
        client.create_execution()
        mocked.assert_called()
        sent_request = mocked.mock_calls[0][1][0]
        assert sent_request == executions.CreateExecutionRequest()
@pytest.mark.asyncio
async def test_create_execution_async(
    transport: str = "grpc_asyncio", request_type=executions.CreateExecutionRequest
):
    """Async create_execution must forward the request and return the Execution."""
    client = ExecutionsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_execution), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            executions.Execution(
                name="name_value",
                state=executions.Execution.State.ACTIVE,
                argument="argument_value",
                result="result_value",
                workflow_revision_id="workflow_revision_id_value",
                call_log_level=executions.Execution.CallLogLevel.LOG_ALL_CALLS,
            )
        )
        response = await client.create_execution(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == executions.CreateExecutionRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, executions.Execution)
        assert response.name == "name_value"
        assert response.state == executions.Execution.State.ACTIVE
        assert response.argument == "argument_value"
        assert response.result == "result_value"
        assert response.workflow_revision_id == "workflow_revision_id_value"
        assert response.call_log_level == executions.Execution.CallLogLevel.LOG_ALL_CALLS
@pytest.mark.asyncio
async def test_create_execution_async_from_dict():
    """The async path must also accept a plain-dict request."""
    await test_create_execution_async(request_type=dict)
def test_create_execution_field_headers():
    """URI-bound request fields must be sent as x-goog-request-params metadata."""
    client = ExecutionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = executions.CreateExecutionRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_execution), "__call__") as call:
        call.return_value = executions.Execution()
        client.create_execution(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_execution_field_headers_async():
    """Async variant: URI-bound request fields must be sent as routing metadata."""
    client = ExecutionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = executions.CreateExecutionRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_execution), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            executions.Execution()
        )
        await client.create_execution(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_execution_flattened():
    """Flattened ``parent`` and ``execution`` keywords must be copied into the request."""
    client = ExecutionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_execution), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = executions.Execution()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_execution(
            parent="parent_value", execution=executions.Execution(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].execution
        mock_val = executions.Execution(name="name_value")
        assert arg == mock_val
def test_create_execution_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = ExecutionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        client.create_execution(
            executions.CreateExecutionRequest(),
            parent="parent_value",
            execution=executions.Execution(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_execution_flattened_async():
    """Async variant: flattened ``parent``/``execution`` are copied into the request."""
    client = ExecutionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_execution), "__call__") as call:
        # Designate an appropriate return value for the call.  (A plain
        # response assignment previously preceded this line but was dead code,
        # immediately overwritten by the awaitable wrapper below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            executions.Execution()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_execution(
            parent="parent_value", execution=executions.Execution(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].execution
        mock_val = executions.Execution(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_execution_flattened_error_async():
    """The async client also rejects a request object mixed with flattened fields."""
    client = ExecutionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        await client.create_execution(
            executions.CreateExecutionRequest(),
            parent="parent_value",
            execution=executions.Execution(name="name_value"),
        )
@pytest.mark.parametrize("request_type", [executions.GetExecutionRequest, dict,])
def test_get_execution(request_type, transport: str = "grpc"):
    """get_execution must forward the request and return the Execution message."""
    client = ExecutionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_execution), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = executions.Execution(
            name="name_value",
            state=executions.Execution.State.ACTIVE,
            argument="argument_value",
            result="result_value",
            workflow_revision_id="workflow_revision_id_value",
            call_log_level=executions.Execution.CallLogLevel.LOG_ALL_CALLS,
        )
        response = client.get_execution(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == executions.GetExecutionRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, executions.Execution)
        assert response.name == "name_value"
        assert response.state == executions.Execution.State.ACTIVE
        assert response.argument == "argument_value"
        assert response.result == "result_value"
        assert response.workflow_revision_id == "workflow_revision_id_value"
        assert response.call_log_level == executions.Execution.CallLogLevel.LOG_ALL_CALLS
def test_get_execution_empty_call():
    """Calling get_execution with no arguments must still send a default request."""
    # Coverage failsafe: request == None and no flattened fields must work.
    client = ExecutionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    stub_type = type(client.transport.get_execution)
    with mock.patch.object(stub_type, "__call__") as mocked:
        client.get_execution()
        mocked.assert_called()
        sent_request = mocked.mock_calls[0][1][0]
        assert sent_request == executions.GetExecutionRequest()
@pytest.mark.asyncio
async def test_get_execution_async(
    transport: str = "grpc_asyncio", request_type=executions.GetExecutionRequest
):
    """Async get_execution must forward the request and return the Execution."""
    client = ExecutionsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_execution), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            executions.Execution(
                name="name_value",
                state=executions.Execution.State.ACTIVE,
                argument="argument_value",
                result="result_value",
                workflow_revision_id="workflow_revision_id_value",
                call_log_level=executions.Execution.CallLogLevel.LOG_ALL_CALLS,
            )
        )
        response = await client.get_execution(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == executions.GetExecutionRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, executions.Execution)
        assert response.name == "name_value"
        assert response.state == executions.Execution.State.ACTIVE
        assert response.argument == "argument_value"
        assert response.result == "result_value"
        assert response.workflow_revision_id == "workflow_revision_id_value"
        assert response.call_log_level == executions.Execution.CallLogLevel.LOG_ALL_CALLS
@pytest.mark.asyncio
async def test_get_execution_async_from_dict():
    """The async path must also accept a plain-dict request."""
    await test_get_execution_async(request_type=dict)
def test_get_execution_field_headers():
    """URI-bound request fields must be sent as x-goog-request-params metadata."""
    client = ExecutionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = executions.GetExecutionRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_execution), "__call__") as call:
        call.return_value = executions.Execution()
        client.get_execution(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_execution_field_headers_async():
    """Async variant: URI-bound request fields must be sent as routing metadata."""
    client = ExecutionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = executions.GetExecutionRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_execution), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            executions.Execution()
        )
        await client.get_execution(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_execution_flattened():
    """A flattened ``name`` keyword must be copied into the request message."""
    client = ExecutionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(
        type(client.transport.get_execution), "__call__"
    ) as mocked:
        mocked.return_value = executions.Execution()
        # Invoke with a truthy value for each flattened field.
        client.get_execution(name="name_value",)
        # Exactly one RPC, carrying the flattened value in the request.
        assert len(mocked.mock_calls) == 1
        sent_request = mocked.mock_calls[0][1][0]
        assert sent_request.name == "name_value"
def test_get_execution_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = ExecutionsClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        client.get_execution(
            executions.GetExecutionRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_get_execution_flattened_async():
    """Async variant: a flattened ``name`` keyword is copied into the request."""
    client = ExecutionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_execution), "__call__") as call:
        # Designate an appropriate return value for the call.  (A plain
        # response assignment previously preceded this line but was dead code,
        # immediately overwritten by the awaitable wrapper below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            executions.Execution()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_execution(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_execution_flattened_error_async():
    """The async client also rejects a request object mixed with flattened fields."""
    client = ExecutionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        await client.get_execution(
            executions.GetExecutionRequest(), name="name_value",
        )
@pytest.mark.parametrize("request_type", [executions.CancelExecutionRequest, dict,])
def test_cancel_execution(request_type, transport: str = "grpc"):
    """cancel_execution must forward the request and return the Execution message."""
    client = ExecutionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.cancel_execution), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = executions.Execution(
            name="name_value",
            state=executions.Execution.State.ACTIVE,
            argument="argument_value",
            result="result_value",
            workflow_revision_id="workflow_revision_id_value",
            call_log_level=executions.Execution.CallLogLevel.LOG_ALL_CALLS,
        )
        response = client.cancel_execution(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == executions.CancelExecutionRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, executions.Execution)
        assert response.name == "name_value"
        assert response.state == executions.Execution.State.ACTIVE
        assert response.argument == "argument_value"
        assert response.result == "result_value"
        assert response.workflow_revision_id == "workflow_revision_id_value"
        assert response.call_log_level == executions.Execution.CallLogLevel.LOG_ALL_CALLS
def test_cancel_execution_empty_call():
    """Calling cancel_execution with no arguments must still send a default request."""
    # Coverage failsafe: request == None and no flattened fields must work.
    client = ExecutionsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    stub_type = type(client.transport.cancel_execution)
    with mock.patch.object(stub_type, "__call__") as mocked:
        client.cancel_execution()
        mocked.assert_called()
        sent_request = mocked.mock_calls[0][1][0]
        assert sent_request == executions.CancelExecutionRequest()
@pytest.mark.asyncio
async def test_cancel_execution_async(
    transport: str = "grpc_asyncio", request_type=executions.CancelExecutionRequest
):
    """Async cancel_execution must forward the request and return the Execution."""
    client = ExecutionsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.cancel_execution), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            executions.Execution(
                name="name_value",
                state=executions.Execution.State.ACTIVE,
                argument="argument_value",
                result="result_value",
                workflow_revision_id="workflow_revision_id_value",
                call_log_level=executions.Execution.CallLogLevel.LOG_ALL_CALLS,
            )
        )
        response = await client.cancel_execution(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == executions.CancelExecutionRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, executions.Execution)
        assert response.name == "name_value"
        assert response.state == executions.Execution.State.ACTIVE
        assert response.argument == "argument_value"
        assert response.result == "result_value"
        assert response.workflow_revision_id == "workflow_revision_id_value"
        assert response.call_log_level == executions.Execution.CallLogLevel.LOG_ALL_CALLS
@pytest.mark.asyncio
async def test_cancel_execution_async_from_dict():
await test_cancel_execution_async(request_type=dict)
def test_cancel_execution_field_headers():
client = ExecutionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = executions.CancelExecutionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.cancel_execution), "__call__") as call:
call.return_value = executions.Execution()
client.cancel_execution(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_cancel_execution_field_headers_async():
client = ExecutionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = executions.CancelExecutionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.cancel_execution), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
executions.Execution()
)
await client.cancel_execution(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_cancel_execution_flattened():
client = ExecutionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.cancel_execution), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = executions.Execution()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.cancel_execution(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_cancel_execution_flattened_error():
client = ExecutionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.cancel_execution(
executions.CancelExecutionRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_cancel_execution_flattened_async():
client = ExecutionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.cancel_execution), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = executions.Execution()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
executions.Execution()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.cancel_execution(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_cancel_execution_flattened_error_async():
client = ExecutionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.cancel_execution(
executions.CancelExecutionRequest(), name="name_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.ExecutionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ExecutionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.ExecutionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ExecutionsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.ExecutionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = ExecutionsClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = ExecutionsClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.ExecutionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = ExecutionsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.ExecutionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = ExecutionsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.ExecutionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.ExecutionsGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[transports.ExecutionsGrpcTransport, transports.ExecutionsGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = ExecutionsClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.ExecutionsGrpcTransport,)
def test_executions_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.ExecutionsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_executions_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.workflows.executions_v1.services.executions.transports.ExecutionsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.ExecutionsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"list_executions",
"create_execution",
"get_execution",
"cancel_execution",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_executions_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.workflows.executions_v1.services.executions.transports.ExecutionsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ExecutionsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_executions_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.workflows.executions_v1.services.executions.transports.ExecutionsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.ExecutionsTransport()
adc.assert_called_once()
def test_executions_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
ExecutionsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[transports.ExecutionsGrpcTransport, transports.ExecutionsGrpcAsyncIOTransport,],
)
def test_executions_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.ExecutionsGrpcTransport, grpc_helpers),
(transports.ExecutionsGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_executions_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"workflowexecutions.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="workflowexecutions.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.ExecutionsGrpcTransport, transports.ExecutionsGrpcAsyncIOTransport],
)
def test_executions_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_executions_host_no_port():
client = ExecutionsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="workflowexecutions.googleapis.com"
),
)
assert client.transport._host == "workflowexecutions.googleapis.com:443"
def test_executions_host_with_port():
client = ExecutionsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="workflowexecutions.googleapis.com:8000"
),
)
assert client.transport._host == "workflowexecutions.googleapis.com:8000"
def test_executions_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.ExecutionsGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
def test_executions_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.ExecutionsGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.ExecutionsGrpcTransport, transports.ExecutionsGrpcAsyncIOTransport],
)
def test_executions_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.ExecutionsGrpcTransport, transports.ExecutionsGrpcAsyncIOTransport],
)
def test_executions_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_execution_path():
project = "squid"
location = "clam"
workflow = "whelk"
execution = "octopus"
expected = "projects/{project}/locations/{location}/workflows/{workflow}/executions/{execution}".format(
project=project, location=location, workflow=workflow, execution=execution,
)
actual = ExecutionsClient.execution_path(project, location, workflow, execution)
assert expected == actual
def test_parse_execution_path():
expected = {
"project": "oyster",
"location": "nudibranch",
"workflow": "cuttlefish",
"execution": "mussel",
}
path = ExecutionsClient.execution_path(**expected)
# Check that the path construction is reversible.
actual = ExecutionsClient.parse_execution_path(path)
assert expected == actual
def test_workflow_path():
project = "winkle"
location = "nautilus"
workflow = "scallop"
expected = "projects/{project}/locations/{location}/workflows/{workflow}".format(
project=project, location=location, workflow=workflow,
)
actual = ExecutionsClient.workflow_path(project, location, workflow)
assert expected == actual
def test_parse_workflow_path():
expected = {
"project": "abalone",
"location": "squid",
"workflow": "clam",
}
path = ExecutionsClient.workflow_path(**expected)
# Check that the path construction is reversible.
actual = ExecutionsClient.parse_workflow_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "whelk"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = ExecutionsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "octopus",
}
path = ExecutionsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = ExecutionsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "oyster"
expected = "folders/{folder}".format(folder=folder,)
actual = ExecutionsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nudibranch",
}
path = ExecutionsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = ExecutionsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "cuttlefish"
expected = "organizations/{organization}".format(organization=organization,)
actual = ExecutionsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "mussel",
}
path = ExecutionsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = ExecutionsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "winkle"
expected = "projects/{project}".format(project=project,)
actual = ExecutionsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "nautilus",
}
path = ExecutionsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = ExecutionsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "scallop"
location = "abalone"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = ExecutionsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "squid",
"location": "clam",
}
path = ExecutionsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = ExecutionsClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.ExecutionsTransport, "_prep_wrapped_messages"
) as prep:
client = ExecutionsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.ExecutionsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = ExecutionsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = ExecutionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = ExecutionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = ExecutionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(ExecutionsClient, transports.ExecutionsGrpcTransport),
(ExecutionsAsyncClient, transports.ExecutionsGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
googleapis/python-workflows
|
tests/unit/gapic/executions_v1/test_executions.py
|
Python
|
apache-2.0
| 88,445
|
[
"Octopus"
] |
2cac194953aefcf76cf2753d0e2bda04bb1d40eec4d2f94480aede087897c059
|
__author__ = 'stephen'
# ===============================================================================
# GLOBAL IMPORTS:
import os,sys
import numpy as np
import argparse
# ===============================================================================
# LOCAL IMPORTS:
HK_DataMiner_Path = os.path.relpath(os.pardir)
#HK_DataMiner_Path = os.path.abspath("/home/stephen/Dropbox/projects/work-2015.5/HK_DataMiner/")
print HK_DataMiner_Path
sys.path.append(HK_DataMiner_Path)
from cluster import DBSCAN
from utils import XTCReader, plot_cluster
# ===============================================================================
cli = argparse.ArgumentParser()
cli.add_argument('-t', '--trajListFns', default = 'trajlist',
help='List of trajectory files to read in, separated by spaces.')
cli.add_argument('-a', '--atomListFns', default='atom_indices',
help='List of atom index files to read in, separated by spaces.')
cli.add_argument('-g', '--topology', default='native.pdb', help='topology file.')
cli.add_argument('-o', '--homedir', help='Home dir.', default=".", type=str)
cli.add_argument('-e', '--iext', help='''The file extension of input trajectory
files. Must be a filetype that mdtraj.load() can recognize.''',
default="xtc", type=str)
cli.add_argument('-n', '--n_clusters', help='''n_clusters.''',
default=100, type=int)
cli.add_argument('-m', '--n_macro_states', help='''n_macro_states.''',
default=6, type=int)
cli.add_argument('-s', '--stride', help='stride.',
default=None, type=int)
args = cli.parse_args()
trajlistname = args.trajListFns
atom_indicesname = args.atomListFns
trajext = args.iext
File_TOP = args.topology
homedir = args.homedir
n_clusters = args.n_clusters
n_macro_states = args.n_macro_states
stride = args.stride
# ===========================================================================
# Reading Trajs from XTC files
#print "stride:", stride
#trajreader = XTCReader(trajlistname, atom_indicesname, homedir, trajext, File_TOP, nSubSample=stride)
#trajs = trajreader.trajs
#print trajs
#traj_len = trajreader.traj_len
#np.savetxt("./traj_len.txt", traj_len, fmt="%d")
if os.path.isfile("./phi_angles.txt") and os.path.isfile("./psi_angles.txt") is True:
phi_angles = np.loadtxt("./phi_angles.txt", dtype=np.float32)
psi_angles = np.loadtxt("./psi_angles.txt", dtype=np.float32)
else:
phi_angles, psi_angles = trajreader.get_phipsi(trajs, psi=[6, 8, 14, 16], phi=[4, 6, 8, 14])
#phi_angles, psi_angles = trajreader.get_phipsi(trajs, psi=[5, 7, 13, 15], phi=[3, 5, 7, 13])
np.savetxt("./phi_angles.txt", phi_angles, fmt="%f")
np.savetxt("./psi_angles.txt", psi_angles, fmt="%f")
phi_psi=np.column_stack((phi_angles, psi_angles))
print phi_psi
# ===========================================================================
# do Clustering using DBSCAN method
cluster = DBSCAN(eps=10.0, min_samples=10, metric="euclidean")
print cluster
cluster.fit(phi_psi)
#cluster.fit(trajs)
labels = cluster.labels_
print labels
n_microstates = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_microstates)
#cluster_centers_ = cluster.cluster_centers_
# plot micro states
clustering_name = "dbscan_n_" + str(n_microstates)
np.savetxt("assignments_"+clustering_name+".txt", labels, fmt="%d")
#np.savetxt("cluster_centers_"+clustering_name+".txt", cluster_centers_, fmt="%d")
plot_cluster(labels=labels, phi_angles=phi_angles, psi_angles=psi_angles, name=clustering_name)
|
stephenliu1989/HK_DataMiner
|
hkdataminer/scripts/test_dbscan.py
|
Python
|
apache-2.0
| 3,593
|
[
"MDTraj"
] |
4f8f4b5d124136dfb812c371b30844b2e5fba3d490f787087fb75e2397329935
|
"""
The Job Path Agent determines the chain of Optimizing Agents that must
work on the job prior to the scheduling decision.
Initially this takes jobs in the received state and starts the jobs on the
optimizer chain. The next development will be to explicitly specify the
path through the optimizers.
"""
__RCSID__ = "$Id$"
import types
from DIRAC import S_OK, S_ERROR, List
from DIRAC.WorkloadManagementSystem.Executor.Base.OptimizerExecutor import OptimizerExecutor
class JobPath( OptimizerExecutor ):
"""
The specific Optimizer must provide the following methods:
- optimizeJob() - the main method called for each job
and it can provide:
- initializeOptimizer() before each execution cycle
"""
@classmethod
def initializeOptimizer( cls ):
cls.__voPlugins = {}
return S_OK()
def __setOptimizerChain( self, jobState, opChain ):
if type( opChain ) not in types.StringTypes:
opChain = ",".join( opChain )
result = jobState.setOptParameter( "OptimizerChain", opChain )
if not result[ 'OK' ]:
return result
return jobState.setParameter( "JobPath", opChain )
def __executeVOPlugin( self, voPlugin, jobState ):
if voPlugin not in self.__voPlugins:
modName = List.fromChar( voPlugin, "." )[-1]
try:
module = __import__( voPlugin, globals(), locals(), [ modName ] )
except ImportError, excp:
self.jobLog.exception( "Could not import VO plugin %s" % voPlugin )
return S_ERROR( "Could not import VO plugin %s: %s" % ( voPlugin, excp ) )
try:
self.__voPlugins[ voPlugin ] = getattr( module, modName )
except AttributeError, excp:
return S_ERROR( "Could not get plugin %s from module %s: %s" % ( modName, voPlugin, str( excp ) ) )
argsDict = { 'JobID': jobState.jid,
'JobState' : jobState,
'ConfigPath':self.ex_getProperty( "section" ) }
try:
modInstance = self.__voPlugins[ voPlugin ]( argsDict )
result = modInstance.execute()
except Exception, excp:
self.jobLog.exception( "Excp while executing %s" % voPlugin )
return S_ERROR( "Could not execute VO plugin %s: %s" % ( voPlugin, excp ) )
if not result['OK']:
return result
extraPath = result[ 'Value' ]
if type( extraPath ) in types.StringTypes:
extraPath = List.fromChar( result['Value'] )
return S_OK( extraPath )
def optimizeJob( self, jid, jobState ):
result = jobState.getManifest()
if not result[ 'OK' ]:
return result
jobManifest = result[ 'Value' ]
opChain = jobManifest.getOption( "JobPath", [] )
if opChain:
self.jobLog.info( 'Job defines its own optimizer chain %s' % opChain )
return self.__setOptimizerChain( jobState, opChain )
#Construct path
opPath = self.ex_getOption( 'BasePath', ['JobPath', 'JobSanity'] )
voPlugin = self.ex_getOption( 'VOPlugin', '' )
#Specific VO path
if voPlugin:
result = self.__executeVOPlugin( voPlugin, jobState )
if not result[ 'OK' ]:
return result
extraPath = result[ 'Value' ]
if extraPath:
opPath.extend( extraPath )
self.jobLog.verbose( 'Adding extra VO specific optimizers to path: %s' % ( extraPath ) )
else:
#Generic path: Should only rely on an input data setting in absence of VO plugin
self.jobLog.verbose( 'No VO specific plugin module specified' )
result = jobState.getInputData()
if not result['OK']:
return result
if result['Value']:
# if the returned tuple is not empty it will evaluate true
self.jobLog.info( 'Input data requirement found' )
opPath.extend( self.ex_getOption( 'InputData', ['InputData'] ) )
else:
self.jobLog.info( 'No input data requirement' )
#End of path
opPath.extend( self.ex_getOption( 'EndPath', ['JobScheduling'] ) )
uPath = []
for opN in opPath:
if opN not in uPath:
uPath.append( opN )
opPath = uPath
self.jobLog.info( 'Constructed path is: %s' % "->".join( opPath ) )
result = self.__setOptimizerChain( jobState, opPath )
if not result['OK']:
return result
return self.setNextOptimizer( jobState )
|
marcelovilaca/DIRAC
|
WorkloadManagementSystem/Executor/JobPath.py
|
Python
|
gpl-3.0
| 4,241
|
[
"DIRAC"
] |
e68edc4c90c6033983b3456d3337918eb650ac9403990096baecdff0f32a2515
|
import pytest
import numpy as np
from cplpy import run_test, prepare_config
import os
# -----Velocities TESTS-----
# EXPLANATION:
MD_FNAME = "lammps_vels.in"
MD_ARGS = "-in " + MD_FNAME
MD_EXEC = "lmp_cpl"
CFD_FNAME = "dummyCFD.py"
CFD_ARGS = CFD_FNAME
CFD_EXEC = "python"
TEST_TEMPLATE_DIR = os.path.join(os.environ["CPL_PATH"], "test/templates")
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
@pytest.fixture()
def prepare_config_fix(tmpdir):
    # Copy the MD (LAMMPS input) and CFD (dummy python script) templates
    # into a fresh pytest temporary directory before each test runs.
    prepare_config(tmpdir, TEST_DIR, MD_FNAME, CFD_FNAME)
def compare_vels(tol, lammps_fname="lammps_vels.dat",
                 cfd_fname="cfd_vels.dat"):
    """Compare per-cell velocities written by the CFD and LAMMPS sides.

    Both files are parsed into {(x, y, z): array([vx, vy, vz])} maps and
    every CFD cell must have a matching LAMMPS cell whose velocity agrees
    within ``tol``; a disagreement or a missing cell fails the surrounding
    test via ``assert False``.
    """
    # Line format of CFD script file -- > x y z vx vy vz
    with open(cfd_fname, "r") as cfd_file:
        cfd_lines = cfd_file.readlines()
    cfd_lines = [l[:-1].split(" ") for l in cfd_lines]
    cfd_cells = {}
    for l in cfd_lines:
        cfd_cells[(float(l[0]), float(l[1]), float(l[2]))] = np.array([float(l[3]),
                                                                       float(l[4]),
                                                                       float(l[5])])
    # Line format of LAMMPS file -- > chunk x y z ncount vx vy vz
    with open(lammps_fname, "r") as lammps_file:
        lammps_lines = lammps_file.readlines()
    # The 4th header line carries the number of chunk rows that follow
    skip = int(lammps_lines[3].split(" ")[1])
    lammps_lines = lammps_lines[4:]
    lammps_lines = lammps_lines[:skip]
    lammps_lines = [l[:-1].split(" ") for l in lammps_lines]
    lammps_cells = {}
    for l in lammps_lines:
        # list(filter(...)) drops empty tokens AND stays subscriptable on
        # Python 3, where a bare filter() returns a lazy iterator.
        l = list(filter(None, l))
        lammps_cells[(float(l[1]), float(l[2]), float(l[3]))] = np.array([float(l[5]),
                                                                          float(l[6]),
                                                                          float(l[7])])
    # Compare each cell velocity up to a certain tolerance.
    # print() with a single argument behaves identically on Python 2 and 3,
    # unlike the original py2-only print statements.
    for cell in cfd_cells:
        try:
            diff_vel = abs(cfd_cells[cell] - lammps_cells[cell])
            if np.any(diff_vel > tol):
                print("Cell value disagreement:")
                print(cfd_cells[cell])
                print(lammps_cells[cell])
                assert False
        except KeyError:
            print("Cell not found: " + str(cell))
            assert False
# -----VELOCITY TESTS-----
# EXPLANATION: See README-test located in this folder.
@pytest.mark.parametrize("cfdprocs, mdprocs, err_msg", [
    ((3, 3, 3), (3, 3, 3), ""),
    ((1, 1, 1), (3, 3, 3), "")])
def test_velocitiesP2C(prepare_config_fix, cfdprocs, mdprocs, err_msg):
    """Run the coupled MD/CFD case and check particle->continuum velocity
    transfer for each processor decomposition."""
    # Both solvers share the same 300^3 box.
    box = {"lx": 300.0, "ly": 300.0, "lz": 300.0}

    md_params = dict(box)
    md_params["npx"], md_params["npy"], md_params["npz"] = mdprocs

    cfd_params = dict(box)
    cfd_params.update({"ncx": 15, "ncy": 15, "ncz": 15})
    cfd_params["npx"], cfd_params["npy"], cfd_params["npz"] = cfdprocs

    # Overlap/constraint region definition shared by the two codes.
    config_params = {"cfd_bcx": 1, "cfd_bcy": 1, "cfd_bcz": 1,
                     "olap_xlo": 1, "olap_xhi": 15,
                     "olap_ylo": 1, "olap_yhi": 5,
                     "olap_zlo": 1, "olap_zhi": 15,
                     "cnst_xlo": 1, "cnst_xhi": 15,
                     "cnst_ylo": 5, "cnst_yhi": 5,
                     "cnst_zlo": 1, "cnst_zhi": 15,
                     "tstep_ratio": 1, }

    correct = run_test(TEST_TEMPLATE_DIR, config_params, MD_EXEC, MD_FNAME,
                       MD_ARGS, CFD_EXEC, CFD_FNAME, CFD_ARGS, md_params,
                       cfd_params, err_msg, True)
    if correct:
        compare_vels(1e-6)
|
Crompulence/cpl-library
|
examples/sockets/LAMMPS/LAMMPS-dev/cpl-socket/test/velocityP-C/test_vels.py
|
Python
|
gpl-3.0
| 3,494
|
[
"LAMMPS"
] |
7adec9516a11cbcb51fe166f2941ac6fc7e9587d742e5b980b1701d9cbcc2a69
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Yang Gao <younggao1994@gmail.com>
#
'''
Non-relativistic analytical nuclear gradients for unrestricted Hartree Fock with kpoints sampling
'''
import numpy as np
from pyscf.lib import logger
from pyscf.pbc.grad import krhf as rhf_grad
from pyscf.pbc import gto
def grad_elec(mf_grad, mo_energy=None, mo_coeff=None, mo_occ=None, atmlst=None):
    '''Electronic part of the KUHF analytical nuclear gradients.

    Args:
        mf_grad : Gradients object whose .base is a converged KUHF mean field
        mo_energy, mo_coeff, mo_occ : spin-resolved MO data; default to the
            values stored on the underlying mean-field object
        atmlst : iterable of atom indices to differentiate with respect to
            (default: all atoms in the cell)

    Returns:
        (len(atmlst), 3) ndarray with the electronic gradient contribution,
        averaged over the sampled k-points.
    '''
    mf = mf_grad.base
    cell = mf_grad.cell
    kpts = mf.kpts
    nkpts = len(kpts)
    if mo_energy is None: mo_energy = mf.mo_energy
    if mo_occ is None: mo_occ = mf.mo_occ
    if mo_coeff is None: mo_coeff = mf.mo_coeff
    if atmlst is None: atmlst = range(cell.natm)
    log = logger.Logger(mf_grad.stdout, mf_grad.verbose)
    hcore_deriv = mf_grad.hcore_generator(cell, kpts)
    s1 = mf_grad.get_ovlp(cell, kpts)
    dm0 = mf.make_rdm1(mo_coeff, mo_occ)
    t0 = (logger.process_clock(), logger.perf_counter())
    log.debug('Computing Gradients of NR-UHF Coulomb repulsion')
    vhf = mf_grad.get_veff(dm0, kpts)
    log.timer('gradients of 2e part', *t0)
    dme0 = mf_grad.make_rdm1e(mo_energy, mo_coeff, mo_occ)
    # Spin-summed (density and energy-weighted density) matrices for the
    # spin-independent one-electron terms.
    dm0_sf = dm0[0] + dm0[1]
    dme0_sf = dme0[0] + dme0[1]
    # (The redundant second `if atmlst is None` check of the original was
    # removed: atmlst is already defaulted above.)
    aoslices = cell.aoslice_by_atom()
    de = np.zeros((len(atmlst), 3))
    # NOTE: locals() is handed to extra_force below, so the local variable
    # names in this function are part of an implicit interface -- do not
    # rename them.
    for k, ia in enumerate(atmlst):
        shl0, shl1, p0, p1 = aoslices[ia]
        h1ao = hcore_deriv(ia)
        de[k] += np.einsum('xkij,kji->x', h1ao, dm0_sf).real
        # NOTE(review): the factor 2 presumably accounts for the derivative
        # acting on both bra and ket -- matches the krhf convention; confirm.
        de[k] += np.einsum('xskij,skji->x', vhf[:,:,:,p0:p1], dm0[:,:,:,p0:p1]).real * 2
        de[k] -= np.einsum('kxij,kji->x', s1[:,:,p0:p1], dme0_sf[:,:,p0:p1]).real * 2
        de[k] /= nkpts
        de[k] += mf_grad.extra_force(ia, locals())
    if log.verbose > logger.DEBUG:
        log.debug('gradients of electronic part')
        mf_grad._write(log, cell, de, atmlst)
    return de
def get_veff(mf_grad, dm, kpts):
    '''Derivative of the NR Hartree-Fock Coulomb repulsion: spin-summed J
    minus per-spin K, broadcast back over the spin axis.'''
    vj, vk = mf_grad.get_jk(dm, kpts)
    # Sum the Coulomb term over the two spin channels...
    vj_total = vj[:, 0] + vj[:, 1]
    # ...and subtract the (spin-resolved) exchange term from it.
    return vj_total[:, None] - vk
def make_rdm1e(mo_energy, mo_coeff, mo_occ):
    '''Energy weighted density matrix, one block per spin channel,
    stacked along a new leading spin axis.'''
    per_spin = tuple(rhf_grad.make_rdm1e(mo_energy[s], mo_coeff[s], mo_occ[s])
                     for s in (0, 1))
    return np.stack(per_spin, axis=0)
class Gradients(rhf_grad.Gradients):
    # Docstring fixed: this module implements the *unrestricted* (KUHF)
    # case, per the module docstring and the KUHF usage in __main__.
    '''Non-relativistic unrestricted Hartree-Fock gradients with k-point sampling'''
    def get_veff(self, dm=None, kpts=None):
        # Defaults: mean-field k-points and the converged density matrix.
        if kpts is None: kpts = self.kpts
        if dm is None: dm = self.base.make_rdm1()
        return get_veff(self, dm, kpts)
    def make_rdm1e(self, mo_energy=None, mo_coeff=None, mo_occ=None):
        # Defaults pulled from the converged mean-field object.
        if mo_energy is None: mo_energy = self.base.mo_energy
        if mo_coeff is None: mo_coeff = self.base.mo_coeff
        if mo_occ is None: mo_occ = self.base.mo_occ
        return make_rdm1e(mo_energy, mo_coeff, mo_occ)
    # Replace the inherited (restricted) grad_elec with the spin-resolved
    # module-level implementation above.
    grad_elec = grad_elec
if __name__=='__main__':
    from pyscf.pbc import scf
    # Smoke test: analytical KUHF gradients for a two-atom He cell in a
    # 3x3x3 bohr box on a 1x1x3 k-point mesh.
    cell = gto.Cell()
    cell.atom = [['He', [0.0, 0.0, 0.0]], ['He', [1, 1.1, 1.2]]]
    cell.basis = 'gth-dzv'
    cell.a = np.eye(3) * 3
    cell.unit='bohr'
    cell.pseudo='gth-pade'
    cell.verbose=4
    cell.build()
    nmp = [1,1,3]
    kpts = cell.make_kpts(nmp)
    # exxdiv=None: no exchange divergence correction for this quick check.
    kmf = scf.KUHF(cell, kpts, exxdiv=None)
    kmf.kernel()
    mygrad = Gradients(kmf)
    mygrad.kernel()
|
sunqm/pyscf
|
pyscf/pbc/grad/kuhf.py
|
Python
|
apache-2.0
| 3,969
|
[
"PySCF"
] |
516b10364c48dff0be8b4867901cd238de4fc3cc28340f3392a95bb6cd1cade1
|
# $Id$
#
# Copyright (C) 2003-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" This functionality gets mixed into the BitEnsemble class
"""
from rdkit.DataStructs.BitEnsemble import BitEnsemble
def _InitScoreTable(self, dbConn, tableName, idInfo='', actInfo=''):
  """ Initializes a db table to store our scores.

  idInfo and actInfo should be strings with the definitions of the id and
  activity columns of the table (when desired).
  """
  columnDefs = [idInfo] if idInfo else []
  # One smallint column per bit in the ensemble.
  columnDefs.extend('Bit_%d smallint' % bit for bit in self.GetBits())
  if actInfo:
    columnDefs.append(actInfo)
  dbConn.AddTable(tableName, ','.join(columnDefs))
  # Remember the table name so ScoreToDb() can default to it later.
  self._dbTableName = tableName
def _ScoreToDb(self, sig, dbConn, tableName=None, id=None, act=None):
  """ Scores the "signature" that is passed in and puts the
  results in the db table.
  """
  if tableName is None:
    # Fall back to the table registered by InitScoreTable().
    try:
      tableName = self._dbTableName
    except AttributeError:
      raise ValueError('table name not set in BitEnsemble pre call to ScoreToDb()')
  row = [] if id is None else [id]
  score = 0
  # NOTE(review): score is accumulated but never returned or stored;
  # kept only for behavioral parity with the original implementation.
  for bit in self.GetBits():
    bitVal = sig[bit]
    row.append(bitVal)
    score += bitVal
  if act is not None:
    row.append(act)
  dbConn.InsertData(tableName, row)
# Graft the helpers onto BitEnsemble as regular methods -- this is the
# "gets mixed into the BitEnsemble class" advertised in the module docstring.
BitEnsemble.InitScoreTable = _InitScoreTable
BitEnsemble.ScoreToDb = _ScoreToDb
|
jandom/rdkit
|
rdkit/DataStructs/BitEnsembleDb.py
|
Python
|
bsd-3-clause
| 1,571
|
[
"RDKit"
] |
624d525b88e46dfb6e165688ff7ea5350fc659462f1733aadcca09bc3328ac3b
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import pickle
import os
import numpy as np
import warnings
import scipy.constants as const
from pathlib import Path
from monty.tempfile import ScratchDir
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.vasp.inputs import Incar, Poscar, Kpoints, Potcar, \
PotcarSingle, VaspInput, BadIncarWarning
from pymatgen import Composition, Structure
from pymatgen.electronic_structure.core import Magmom
from monty.io import zopen
class PoscarTest(PymatgenTest):
    """Unit tests for reading, writing and manipulating VASP POSCAR files."""
    def test_init(self):
        filepath = self.TEST_FILES_DIR / 'POSCAR'
        poscar = Poscar.from_file(filepath, check_for_POTCAR=False)
        comp = poscar.structure.composition
        self.assertEqual(comp, Composition("Fe4P4O16"))
        # Vasp 4 type with symbols at the end.
        poscar_string = """Test1
1.0
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
1 1
direct
0.000000 0.000000 0.000000 Si
0.750000 0.500000 0.750000 F
"""
        poscar = Poscar.from_string(poscar_string)
        self.assertEqual(poscar.structure.composition, Composition("SiF"))
        # An empty string is not a valid POSCAR.
        poscar_string = ""
        self.assertRaises(ValueError, Poscar.from_string, poscar_string)
        # Vasp 4 style file with default names, i.e. no element symbol found.
        poscar_string = """Test2
1.0
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
1 1
direct
0.000000 0.000000 0.000000
0.750000 0.500000 0.750000
"""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            poscar = Poscar.from_string(poscar_string)
            self.assertEqual(poscar.structure.composition, Composition("HHe"))
        # Vasp 4 style file with selective dynamics flags.
        poscar_string = """Test3
1.0
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
1 1
Selective dynamics
direct
0.000000 0.000000 0.000000 T T T Si
0.750000 0.500000 0.750000 F F F O
"""
        poscar = Poscar.from_string(poscar_string)
        self.assertEqual(poscar.selective_dynamics, [[True, True, True],
                                                     [False, False, False]])
        # NOTE(review): each test method runs on a fresh TestCase instance,
        # so this attribute is not visible to other tests.
        self.selective_poscar = poscar
    def test_from_file(self):
        filepath = self.TEST_FILES_DIR / 'POSCAR.symbols_natoms_multilines'
        poscar = Poscar.from_file(filepath, check_for_POTCAR=False,
                                  read_velocities=False)
        ordered_expected_elements = ['Fe', 'Cr', 'Fe', 'Fe', 'Cr', 'Cr', 'Cr',
                                     'Cr',
                                     'Fe', 'Fe', 'Cr', 'Fe', 'Cr', 'Fe', 'Fe',
                                     'Cr',
                                     'Fe', 'Cr', 'Fe', 'Fe', 'Fe', 'Fe', 'Cr',
                                     'Fe',
                                     'Ni', 'Fe', 'Fe', 'Fe', 'Fe', 'Fe', 'Cr',
                                     'Cr',
                                     'Cr', 'Fe', 'Fe', 'Fe', 'Fe', 'Fe', 'Fe',
                                     'Cr',
                                     'Fe', 'Fe', 'Ni', 'Fe', 'Fe', 'Fe', 'Cr',
                                     'Cr',
                                     'Fe', 'Fe', 'Fe', 'Fe', 'Fe']
        self.assertEqual([site.specie.symbol for site in poscar.structure],
                         ordered_expected_elements)
    def test_to_from_dict(self):
        poscar_string = """Test3
1.0
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
1 1
Selective dynamics
direct
0.000000 0.000000 0.000000 T T T Si
0.750000 0.500000 0.750000 F F F O
"""
        poscar = Poscar.from_string(poscar_string)
        d = poscar.as_dict()
        poscar2 = Poscar.from_dict(d)
        self.assertEqual(poscar2.comment, "Test3")
        self.assertTrue(all(poscar2.selective_dynamics[0]))
        self.assertFalse(all(poscar2.selective_dynamics[1]))
    def test_cart_scale(self):
        # Cartesian coordinates must be multiplied by the scale factor (1.1).
        poscar_string = """Test1
1.1
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
Si F
1 1
cart
0.000000 0.00000000 0.00000000
3.840198 1.50000000 2.35163175
"""
        p = Poscar.from_string(poscar_string)
        site = p.structure[1]
        self.assertArrayAlmostEqual(site.coords,
                                    np.array([3.840198, 1.5, 2.35163175]) * 1.1)
    def test_significant_figures(self):
        si = 14
        coords = list()
        coords.append([0, 0, 0])
        coords.append([0.75, 0.5, 0.75])
        # Silicon structure for testing.
        latt = [[3.8401979337, 0.00, 0.00],
                [1.9200989668, 3.3257101909, 0.00],
                [0.00, -2.2171384943, 3.1355090603]]
        struct = Structure(latt, [si, si], coords)
        poscar = Poscar(struct)
        expected_str = '''Si2
1.0
3.84 0.00 0.00
1.92 3.33 0.00
0.00 -2.22 3.14
Si
2
direct
0.00 0.00 0.00 Si
0.75 0.50 0.75 Si
'''
        actual_str = poscar.get_string(significant_figures=2)
        self.assertEqual(actual_str, expected_str, "Wrong POSCAR output!")
    def test_str(self):
        si = 14
        coords = list()
        coords.append([0, 0, 0])
        coords.append([0.75, 0.5, 0.75])
        # Silicon structure for testing.
        latt = [[3.8401979337, 0.00, 0.00],
                [1.9200989668, 3.3257101909, 0.00],
                [0.00, -2.2171384943, 3.1355090603]]
        struct = Structure(latt, [si, si], coords)
        poscar = Poscar(struct)
        expected_str = '''Si2
1.0
3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
Si
2
direct
0.000000 0.000000 0.000000 Si
0.750000 0.500000 0.750000 Si
'''
        self.assertEqual(str(poscar), expected_str, "Wrong POSCAR output!")
        # Vasp 4 type with symbols at the end.
        poscar_string = """Test1
1.0
-3.840198 0.000000 0.000000
1.920099 3.325710 0.000000
0.000000 -2.217138 3.135509
1 1
direct
0.000000 0.000000 0.000000 Si
0.750000 0.500000 0.750000 F
"""
        expected = """Test1
1.0
3.840198 -0.000000 -0.000000
-1.920099 -3.325710 -0.000000
-0.000000 2.217138 -3.135509
Si F
1 1
direct
0.000000 0.000000 0.000000 Si
0.750000 0.500000 0.750000 F
"""
        poscar = Poscar.from_string(poscar_string)
        self.assertEqual(str(poscar), expected)
    def test_from_md_run(self):
        # Parsing from an MD type run with velocities and predictor corrector data
        p = Poscar.from_file(self.TEST_FILES_DIR / "CONTCAR.MD", check_for_POTCAR=False)
        self.assertAlmostEqual(np.sum(np.array(p.velocities)), 0.0065417961324)
        self.assertEqual(p.predictor_corrector[0][0][0], 0.33387820E+00)
        self.assertEqual(p.predictor_corrector[0][1][1], -0.10583589E-02)
    def test_write_MD_poscar(self):
        # Parsing from an MD type run with velocities and predictor corrector data
        # And writing a new POSCAR from the new structure
        p = Poscar.from_file(self.TEST_FILES_DIR / "CONTCAR.MD", check_for_POTCAR=False)
        tempfname = Path("POSCAR.testing.md")
        p.write_file(tempfname)
        p3 = Poscar.from_file(tempfname)
        self.assertArrayAlmostEqual(p.structure.lattice.abc,
                                    p3.structure.lattice.abc, 5)
        self.assertArrayAlmostEqual(p.velocities,
                                    p3.velocities, 5)
        self.assertArrayAlmostEqual(p.predictor_corrector,
                                    p3.predictor_corrector, 5)
        self.assertEqual(p.predictor_corrector_preamble,
                         p3.predictor_corrector_preamble)
        tempfname.unlink()
    def test_setattr(self):
        filepath = self.TEST_FILES_DIR / 'POSCAR'
        poscar = Poscar.from_file(filepath, check_for_POTCAR=False)
        # velocities must match the number of sites; a single vector raises.
        self.assertRaises(ValueError, setattr, poscar, 'velocities',
                          [[0, 0, 0]])
        poscar.selective_dynamics = np.array([[True, False, False]] * 24)
        ans = """
LiFePO4
1.0
10.411767 0.000000 0.000000
0.000000 6.067172 0.000000
0.000000 0.000000 4.759490
Fe P O
4 4 16
Selective dynamics
direct
0.218728 0.750000 0.474867 T F F Fe
0.281272 0.250000 0.974867 T F F Fe
0.718728 0.750000 0.025133 T F F Fe
0.781272 0.250000 0.525133 T F F Fe
0.094613 0.250000 0.418243 T F F P
0.405387 0.750000 0.918243 T F F P
0.594613 0.250000 0.081757 T F F P
0.905387 0.750000 0.581757 T F F P
0.043372 0.750000 0.707138 T F F O
0.096642 0.250000 0.741320 T F F O
0.165710 0.046072 0.285384 T F F O
0.165710 0.453928 0.285384 T F F O
0.334290 0.546072 0.785384 T F F O
0.334290 0.953928 0.785384 T F F O
0.403358 0.750000 0.241320 T F F O
0.456628 0.250000 0.207138 T F F O
0.543372 0.750000 0.792862 T F F O
0.596642 0.250000 0.758680 T F F O
0.665710 0.046072 0.214616 T F F O
0.665710 0.453928 0.214616 T F F O
0.834290 0.546072 0.714616 T F F O
0.834290 0.953928 0.714616 T F F O
0.903358 0.750000 0.258680 T F F O
0.956628 0.250000 0.292862 T F F O"""
        self.assertEqual(str(poscar).strip(), ans.strip())
    def test_velocities(self):
        si = 14
        coords = list()
        coords.append([0, 0, 0])
        coords.append([0.75, 0.5, 0.75])
        # Silicon structure for testing.
        latt = [[3.8401979337, 0.00, 0.00],
                [1.9200989668, 3.3257101909, 0.00],
                [0.00, -2.2171384943, 3.1355090603]]
        struct = Structure(latt, [si, si], coords)
        poscar = Poscar(struct)
        poscar.set_temperature(900)
        v = np.array(poscar.velocities)
        # Net momentum should vanish.
        for x in np.sum(v, axis=0):
            self.assertAlmostEqual(x, 0, 7)
        # Recover the temperature from the equipartition theorem.
        temperature = struct[0].specie.atomic_mass.to("kg") * np.sum(v ** 2) / (3 * const.k) * 1e10
        self.assertAlmostEqual(temperature, 900, 4,
                               'Temperature instantiated incorrectly')
        poscar.set_temperature(700)
        v = np.array(poscar.velocities)
        for x in np.sum(v, axis=0):
            self.assertAlmostEqual(
                x, 0, 7, 'Velocities initialized with a net momentum')
        temperature = struct[0].specie.atomic_mass.to("kg") * np.sum(v ** 2) / (3 * const.k) * 1e10
        self.assertAlmostEqual(temperature, 700, 4,
                               'Temperature instantiated incorrectly')
    def test_write(self):
        filepath = self.TEST_FILES_DIR / 'POSCAR'
        poscar = Poscar.from_file(filepath)
        tempfname = Path("POSCAR.testing")
        poscar.write_file(tempfname)
        p = Poscar.from_file(tempfname)
        self.assertArrayAlmostEqual(poscar.structure.lattice.abc,
                                    p.structure.lattice.abc, 5)
        tempfname.unlink()
class IncarTest(PymatgenTest):
    """Unit tests for INCAR parsing, diffing, serialization and validation."""
    def setUp(self):
        file_name = self.TEST_FILES_DIR / 'INCAR'
        self.incar = Incar.from_file(file_name)
    def test_init(self):
        incar = self.incar
        incar["LDAU"] = "T"
        self.assertEqual(incar["ALGO"], "Damped", "Wrong Algo")
        self.assertEqual(float(incar["EDIFF"]), 1e-4, "Wrong EDIFF")
        self.assertEqual(type(incar["LORBIT"]), int)
    def test_diff(self):
        # NOTE(review): this local is never used below.
        incar = self.incar
        filepath1 = self.TEST_FILES_DIR / 'INCAR'
        incar1 = Incar.from_file(filepath1)
        filepath2 = self.TEST_FILES_DIR / 'INCAR.2'
        incar2 = Incar.from_file(filepath2)
        filepath3 = self.TEST_FILES_DIR / 'INCAR.3'
        # NOTE(review): incar3 is loaded from filepath2, not filepath3 --
        # looks like a copy-paste slip (the two diff assertions below are
        # consequently identical); confirm against INCAR.3 before changing.
        incar3 = Incar.from_file(filepath2)
        self.assertEqual(
            incar1.diff(incar2),
            {'Different': {
                'NELM': {'INCAR1': None, 'INCAR2': 100},
                'ISPIND': {'INCAR1': 2, 'INCAR2': None},
                'LWAVE': {'INCAR1': True, 'INCAR2': False},
                'LDAUPRINT': {'INCAR1': None, 'INCAR2': 1},
                'MAGMOM': {'INCAR1': [6, -6, -6, 6, 0.6, 0.6, 0.6,
                                      0.6, 0.6, 0.6, 0.6, 0.6,
                                      0.6, 0.6, 0.6, 0.6, 0.6,
                                      0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6],
                           'INCAR2': None},
                'NELMIN': {'INCAR1': None, 'INCAR2': 3},
                'ENCUTFOCK': {'INCAR1': 0.0, 'INCAR2': None},
                'HFSCREEN': {'INCAR1': 0.207, 'INCAR2': None},
                'LSCALU': {'INCAR1': False, 'INCAR2': None},
                'ENCUT': {'INCAR1': 500, 'INCAR2': None},
                'NSIM': {'INCAR1': 1, 'INCAR2': None},
                'ICHARG': {'INCAR1': None, 'INCAR2': 1},
                'NSW': {'INCAR1': 99, 'INCAR2': 51},
                'NKRED': {'INCAR1': 2, 'INCAR2': None},
                'NUPDOWN': {'INCAR1': 0, 'INCAR2': None},
                'LCHARG': {'INCAR1': True, 'INCAR2': None},
                'LPLANE': {'INCAR1': True, 'INCAR2': None},
                'ISMEAR': {'INCAR1': 0, 'INCAR2': -5},
                'NPAR': {'INCAR1': 8, 'INCAR2': 1},
                'SYSTEM': {
                    'INCAR1': 'Id=[0] dblock_code=[97763-icsd] formula=[li mn (p o4)] sg_name=[p n m a]',
                    'INCAR2': 'Id=[91090] dblock_code=[20070929235612linio-59.53134651-vasp] formula=[li3 ni3 o6] '
                              'sg_name=[r-3m]'},
                'ALGO': {'INCAR1': 'Damped', 'INCAR2': 'Fast'},
                'LHFCALC': {'INCAR1': True, 'INCAR2': None},
                'TIME': {'INCAR1': 0.4, 'INCAR2': None}},
             'Same': {'IBRION': 2, 'PREC': 'Accurate', 'ISIF': 3,
                      'LMAXMIX': 4,
                      'LREAL': 'Auto', 'ISPIN': 2, 'EDIFF': 0.0001,
                      'LORBIT': 11, 'SIGMA': 0.05}})
        self.assertEqual(
            incar1.diff(incar3),
            {'Different': {
                'NELM': {'INCAR1': None, 'INCAR2': 100},
                'ISPIND': {'INCAR1': 2, 'INCAR2': None},
                'LWAVE': {'INCAR1': True, 'INCAR2': False},
                'LDAUPRINT': {'INCAR1': None, 'INCAR2': 1},
                'MAGMOM': {'INCAR1': [6, -6, -6, 6, 0.6, 0.6, 0.6,
                                      0.6, 0.6, 0.6, 0.6, 0.6,
                                      0.6, 0.6, 0.6, 0.6, 0.6,
                                      0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6],
                           'INCAR2': None},
                'NELMIN': {'INCAR1': None, 'INCAR2': 3},
                'ENCUTFOCK': {'INCAR1': 0.0, 'INCAR2': None},
                'HFSCREEN': {'INCAR1': 0.207, 'INCAR2': None},
                'LSCALU': {'INCAR1': False, 'INCAR2': None},
                'ENCUT': {'INCAR1': 500, 'INCAR2': None},
                'NSIM': {'INCAR1': 1, 'INCAR2': None},
                'ICHARG': {'INCAR1': None, 'INCAR2': 1},
                'NSW': {'INCAR1': 99, 'INCAR2': 51},
                'NKRED': {'INCAR1': 2, 'INCAR2': None},
                'NUPDOWN': {'INCAR1': 0, 'INCAR2': None},
                'LCHARG': {'INCAR1': True, 'INCAR2': None},
                'LPLANE': {'INCAR1': True, 'INCAR2': None},
                'ISMEAR': {'INCAR1': 0, 'INCAR2': -5},
                'NPAR': {'INCAR1': 8, 'INCAR2': 1},
                'SYSTEM': {
                    'INCAR1': 'Id=[0] dblock_code=[97763-icsd] formula=[li mn (p o4)] sg_name=[p n m a]',
                    'INCAR2': 'Id=[91090] dblock_code=[20070929235612linio-59.53134651-vasp] formula=[li3 ni3 o6] '
                              'sg_name=[r-3m]'},
                'ALGO': {'INCAR1': 'Damped', 'INCAR2': 'Fast'},
                'LHFCALC': {'INCAR1': True, 'INCAR2': None},
                'TIME': {'INCAR1': 0.4, 'INCAR2': None}},
             'Same': {'IBRION': 2, 'PREC': 'Accurate', 'ISIF': 3,
                      'LMAXMIX': 4,
                      'LREAL': 'Auto', 'ISPIN': 2, 'EDIFF': 0.0001,
                      'LORBIT': 11, 'SIGMA': 0.05}})
    def test_as_dict_and_from_dict(self):
        d = self.incar.as_dict()
        incar2 = Incar.from_dict(d)
        self.assertEqual(self.incar, incar2)
        # Magmom objects must survive the dict round-trip too.
        d["MAGMOM"] = [Magmom([1, 2, 3]).as_dict()]
        incar3 = Incar.from_dict(d)
        self.assertEqual(incar3["MAGMOM"], [Magmom([1, 2, 3])])
    def test_write(self):
        tempfname = Path("INCAR.testing")
        self.incar.write_file(tempfname)
        i = Incar.from_file(tempfname)
        self.assertEqual(i, self.incar)
        tempfname.unlink()
    def test_get_string(self):
        s = self.incar.get_string(pretty=True, sort_keys=True)
        ans = """ALGO = Damped
EDIFF = 0.0001
ENCUT = 500
ENCUTFOCK = 0.0
HFSCREEN = 0.207
IBRION = 2
ISIF = 3
ISMEAR = 0
ISPIN = 2
ISPIND = 2
LCHARG = True
LHFCALC = True
LMAXMIX = 4
LORBIT = 11
LPLANE = True
LREAL = Auto
LSCALU = False
LWAVE = True
MAGMOM = 1*6.0 2*-6.0 1*6.0 20*0.6
NKRED = 2
NPAR = 8
NSIM = 1
NSW = 99
NUPDOWN = 0
PREC = Accurate
SIGMA = 0.05
SYSTEM = Id=[0] dblock_code=[97763-icsd] formula=[li mn (p o4)] sg_name=[p n m a]
TIME = 0.4"""
        self.assertEqual(s, ans)
    def test_lsorbit_magmom(self):
        # MAGMOM serialization depends on LSORBIT (vector vs collinear form).
        magmom1 = [[0.0, 0.0, 3.0], [0, 1, 0], [2, 1, 2]]
        magmom2 = [-1, -1, -1, 0, 0, 0, 0, 0]
        magmom4 = [Magmom([1.0, 2.0, 2.0])]
        ans_string1 = "LANGEVIN_GAMMA = 10 10 10\nLSORBIT = True\n" \
                      "MAGMOM = 0.0 0.0 3.0 0 1 0 2 1 2\n"
        ans_string2 = "LANGEVIN_GAMMA = 10\nLSORBIT = True\n" \
                      "MAGMOM = 3*3*-1 3*5*0\n"
        ans_string3 = "LSORBIT = False\nMAGMOM = 2*-1 2*9\n"
        ans_string4_nolsorbit = "LANGEVIN_GAMMA = 10\nLSORBIT = False\nMAGMOM = 1*3.0\n"
        ans_string4_lsorbit = "LANGEVIN_GAMMA = 10\nLSORBIT = True\nMAGMOM = 1.0 2.0 2.0\n"
        incar = Incar({})
        incar["MAGMOM"] = magmom1
        incar["LSORBIT"] = "T"
        incar["LANGEVIN_GAMMA"] = [10, 10, 10]
        self.assertEqual(ans_string1, str(incar))
        incar["MAGMOM"] = magmom2
        incar["LSORBIT"] = "T"
        incar["LANGEVIN_GAMMA"] = 10
        self.assertEqual(ans_string2, str(incar))
        incar["MAGMOM"] = magmom4
        incar["LSORBIT"] = "F"
        self.assertEqual(ans_string4_nolsorbit, str(incar))
        incar["LSORBIT"] = "T"
        self.assertEqual(ans_string4_lsorbit, str(incar))
        incar = Incar.from_string(ans_string1)
        self.assertEqual(incar["MAGMOM"],
                         [[0.0, 0.0, 3.0], [0, 1, 0], [2, 1, 2]])
        self.assertEqual(incar["LANGEVIN_GAMMA"], [10, 10, 10])
        incar = Incar.from_string(ans_string2)
        self.assertEqual(incar["MAGMOM"], [[-1, -1, -1], [-1, -1, -1],
                                           [-1, -1, -1], [0, 0, 0],
                                           [0, 0, 0], [0, 0, 0],
                                           [0, 0, 0], [0, 0, 0]])
        self.assertEqual(incar["LANGEVIN_GAMMA"], [10])
        incar = Incar.from_string(ans_string3)
        self.assertFalse(incar["LSORBIT"])
        self.assertEqual(incar["MAGMOM"], [-1, -1, 9, 9])
    def test_quad_efg(self):
        incar1 = Incar({})
        incar1["LEFG"] = True
        incar1["QUAD_EFG"] = [0.0, 146.6, -25.58]
        ans_string1 = "LEFG = True\nQUAD_EFG = 0.0 146.6 -25.58\n"
        self.assertEqual(ans_string1, str(incar1))
        incar2 = Incar.from_string(ans_string1)
        self.assertEqual(ans_string1, str(incar2))
    def test_types(self):
        incar_str = """ALGO = Fast
ECUT = 510
EDIFF = 1e-07
EINT = -0.85 0.85
IBRION = -1
ICHARG = 11
ISIF = 3
ISMEAR = 1
ISPIN = 1
LPARD = True
NBMOD = -3
PREC = Accurate
SIGMA = 0.1"""
        i = Incar.from_string(incar_str)
        self.assertIsInstance(i["EINT"], list)
        self.assertEqual(i["EINT"][0], -0.85)
        # Semicolon-separated settings on one line must also parse.
        incar_str += "\nLHFCALC = .TRUE. ; HFSCREEN = 0.2"
        incar_str += "\nALGO = All;"
        i = Incar.from_string(incar_str)
        self.assertTrue(i["LHFCALC"])
        self.assertEqual(i["HFSCREEN"], 0.2)
        self.assertEqual(i["ALGO"], "All")
    def test_proc_types(self):
        # Unknown keys fall back to plain string values.
        self.assertEqual(Incar.proc_val("HELLO", "-0.85 0.85"), "-0.85 0.85")
    def test_check_params(self):
        # Triggers warnings when running into nonsensical parameters
        with self.assertWarns(BadIncarWarning) as cm:
            incar = Incar({
                'ADDGRID': True,
                'ALGO': 'Normal',
                'AMIN': 0.01,
                'AMIX': 0.2,
                'BMIX': 0.001,
                'EDIFF': 5 + 1j,  # EDIFF needs to be real
                'EDIFFG': -0.01,
                'ENCUT': 520,
                'IBRION': 2,
                'ICHARG': 1,
                'ISIF': 3,
                'ISMEAR': 1,
                'ISPIN': 2,
                'LASPH': 5,  # Should be a bool
                'LORBIT': 11,
                'LREAL': 'Auto',
                'LWAVE': False,
                'MAGMOM': [1, 2, 4, 5],
                'METAGGA': 'SCAM',  # spelling mistake
                'NELM': 200,
                'NPAR': 4,
                'NSW': 99,
                'PREC': 'Accurate',
                'SIGMA': 0.2,
                'NBAND': 250,  # spelling mistake
                'PHON_TLIST': 'is_a_str',  # this parameter should be a list
                'LATTICE_CONSTRAINTS': [True, False, 'f'],  # Should be a list of bools
                'M_CONSTR': [True, 1, 'string']  # Should be a list of real numbers
            })
            incar.check_params()
class KpointsTest(PymatgenTest):
    """Unit tests for KPOINTS parsing, construction and serialization."""
    def test_init(self):
        filepath = self.TEST_FILES_DIR / 'KPOINTS.auto'
        kpoints = Kpoints.from_file(filepath)
        self.assertEqual(kpoints.kpts, [[10]], "Wrong kpoint lattice read")
        filepath = self.TEST_FILES_DIR / 'KPOINTS.cartesian'
        kpoints = Kpoints.from_file(filepath)
        self.assertEqual(kpoints.kpts,
                         [[0.25, 0, 0], [0, 0.25, 0], [0, 0, 0.25]],
                         "Wrong kpoint lattice read")
        self.assertEqual(kpoints.kpts_shift, [0.5, 0.5, 0.5],
                         "Wrong kpoint shift read")
        filepath = self.TEST_FILES_DIR / 'KPOINTS'
        kpoints = Kpoints.from_file(filepath)
        # NOTE(review): assigning to self here does not persist to other
        # tests (fresh TestCase instance per test method).
        self.kpoints = kpoints
        self.assertEqual(kpoints.kpts, [[2, 4, 6]])
        filepath = self.TEST_FILES_DIR / 'KPOINTS.band'
        kpoints = Kpoints.from_file(filepath)
        self.assertIsNotNone(kpoints.labels)
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Line_mode)
        kpoints_str = str(kpoints)
        self.assertEqual(kpoints_str.split("\n")[3], "Reciprocal")
        filepath = self.TEST_FILES_DIR / 'KPOINTS.explicit'
        kpoints = Kpoints.from_file(filepath)
        self.assertIsNotNone(kpoints.kpts_weights)
        self.assertEqual(str(kpoints).strip(), """Example file
4
Cartesian
0.0 0.0 0.0 1 None
0.0 0.0 0.5 1 None
0.0 0.5 0.5 2 None
0.5 0.5 0.5 4 None""")
        filepath = self.TEST_FILES_DIR / 'KPOINTS.explicit_tet'
        kpoints = Kpoints.from_file(filepath)
        self.assertEqual(kpoints.tet_connections, [(6, [1, 2, 3, 4])])
    def test_style_setter(self):
        filepath = self.TEST_FILES_DIR / 'KPOINTS'
        kpoints = Kpoints.from_file(filepath)
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Monkhorst)
        kpoints.style = "G"
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
    def test_static_constructors(self):
        kpoints = Kpoints.gamma_automatic([3, 3, 3], [0, 0, 0])
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
        self.assertEqual(kpoints.kpts, [[3, 3, 3]])
        kpoints = Kpoints.monkhorst_automatic([2, 2, 2], [0, 0, 0])
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Monkhorst)
        self.assertEqual(kpoints.kpts, [[2, 2, 2]])
        kpoints = Kpoints.automatic(100)
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Automatic)
        self.assertEqual(kpoints.kpts, [[100]])
        filepath = self.TEST_FILES_DIR / 'POSCAR'
        poscar = Poscar.from_file(filepath)
        kpoints = Kpoints.automatic_density(poscar.structure, 500)
        self.assertEqual(kpoints.kpts, [[1, 3, 3]])
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
        kpoints = Kpoints.automatic_density(poscar.structure, 500, True)
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
        kpoints = Kpoints.automatic_density_by_vol(poscar.structure, 1000)
        self.assertEqual(kpoints.kpts, [[6, 10, 13]])
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
        s = poscar.structure
        s.make_supercell(3)
        kpoints = Kpoints.automatic_density(s, 500)
        self.assertEqual(kpoints.kpts, [[1, 1, 1]])
        self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
        kpoints = Kpoints.from_string("""k-point mesh
0
G
10 10 10
0.5 0.5 0.5
""")
        self.assertArrayAlmostEqual(kpoints.kpts_shift, [0.5, 0.5, 0.5])
    def test_as_dict_from_dict(self):
        k = Kpoints.monkhorst_automatic([2, 2, 2], [0, 0, 0])
        d = k.as_dict()
        k2 = Kpoints.from_dict(d)
        self.assertEqual(k.kpts, k2.kpts)
        self.assertEqual(k.style, k2.style)
        self.assertEqual(k.kpts_shift, k2.kpts_shift)
    def test_kpt_bands_as_dict_from_dict(self):
        file_name = self.TEST_FILES_DIR / 'KPOINTS.band'
        k = Kpoints.from_file(file_name)
        d = k.as_dict()
        import json
        json.dumps(d)
        # This doesn't work
        k2 = Kpoints.from_dict(d)
        self.assertEqual(k.kpts, k2.kpts)
        self.assertEqual(k.style, k2.style)
        self.assertEqual(k.kpts_shift, k2.kpts_shift)
        self.assertEqual(k.num_kpts, k2.num_kpts)
    def test_pickle(self):
        k = Kpoints.gamma_automatic()
        pickle.dumps(k)
    def test_automatic_kpoint(self):
        # s = PymatgenTest.get_structure("Li2O")
        p = Poscar.from_string("""Al1
1.0
2.473329 0.000000 1.427977
0.824443 2.331877 1.427977
0.000000 0.000000 2.855955
Al
1
direct
0.000000 0.000000 0.000000 Al""")
        kpoints = Kpoints.automatic_density(p.structure, 1000)
        self.assertArrayAlmostEqual(kpoints.kpts[0], [10, 10, 10])
class PotcarSingleTest(PymatgenTest):
    """Unit tests for a single POTCAR entry (Mn_pv, PBE)."""
    # Allow pytest-xdist style runners to share the parsed POTCAR fixture.
    _multiprocess_shared_ = True
    def setUp(self):
        self.psingle = PotcarSingle.from_file(
            self.TEST_FILES_DIR / "POT_GGA_PAW_PBE" / "POTCAR.Mn_pv.gz")
    def test_keywords(self):
        data = {'VRHFIN': 'Mn: 3p4s3d', 'LPAW': True, 'DEXC': -.003,
                'STEP': [20.000, 1.050],
                'RPACOR': 2.080, 'LEXCH': 'PE',
                'ENMAX': 269.865, 'QCUT': -4.454,
                'TITEL': 'PAW_PBE Mn_pv 07Sep2000',
                'LCOR': True, 'EAUG': 569.085,
                'RMAX': 2.807,
                'ZVAL': 13.000,
                'EATOM': 2024.8347, 'NDATA': 100,
                'LULTRA': False,
                'QGAM': 8.907,
                'ENMIN': 202.399,
                'RCLOC': 1.725,
                'RCORE': 2.300,
                'RDEP': 2.338,
                'IUNSCR': 1,
                'RAUG': 1.300,
                'POMASS': 54.938,
                'RWIGS': 1.323}
        self.assertEqual(self.psingle.keywords, data)
    def test_nelectrons(self):
        self.assertEqual(self.psingle.nelectrons, 13)
    def test_electron_config(self):
        config = self.psingle.electron_configuration
        self.assertEqual(config[-1], (3, "p", 6))
    def test_attributes(self):
        for k in ['DEXC', 'RPACOR', 'ENMAX', 'QCUT', 'EAUG', 'RMAX',
                  'ZVAL', 'EATOM', 'NDATA', 'QGAM', 'ENMIN', 'RCLOC',
                  'RCORE', 'RDEP', 'RAUG', 'POMASS', 'RWIGS']:
            self.assertIsNotNone(getattr(self.psingle, k))
    def test_found_unknown_key(self):
        with self.assertRaises(KeyError):
            PotcarSingle.parse_functions['BAD_KEY']
    def test_bad_value(self):
        self.assertRaises(ValueError, PotcarSingle.parse_functions['ENMAX'],
                          "ThisShouldBeAFloat")
    def test_hash(self):
        self.assertEqual(self.psingle.get_potcar_hash(),
                         "fa52f891f234d49bb4cb5ea96aae8f98")
    def test_functional_types(self):
        self.assertEqual(self.psingle.functional, 'PBE')
        self.assertEqual(self.psingle.functional_class, 'GGA')
        self.assertEqual(self.psingle.potential_type, 'PAW')
        psingle = PotcarSingle.from_file(self.TEST_FILES_DIR / "POT_LDA_PAW" / "POTCAR.Fe.gz")
        self.assertEqual(psingle.functional, 'Perdew-Zunger81')
        self.assertEqual(psingle.functional_class, 'LDA')
        self.assertEqual(psingle.potential_type, 'PAW')
    # def test_default_functional(self):
    #     p = PotcarSingle.from_symbol_and_functional("Fe")
    #     self.assertEqual(p.functional_class, 'GGA')
    #     SETTINGS["PMG_DEFAULT_FUNCTIONAL"] = "LDA"
    #     p = PotcarSingle.from_symbol_and_functional("Fe")
    #     self.assertEqual(p.functional_class, 'LDA')
    #     SETTINGS["PMG_DEFAULT_FUNCTIONAL"] = "PBE"
class PotcarTest(PymatgenTest):
    """Unit tests for multi-element POTCAR handling."""
    def setUp(self):
        # Point the POTCAR lookup at the test files when not configured.
        if "PMG_VASP_PSP_DIR" not in os.environ:
            os.environ["PMG_VASP_PSP_DIR"] = str(self.TEST_FILES_DIR)
        filepath = self.TEST_FILES_DIR / 'POTCAR'
        self.potcar = Potcar.from_file(filepath)
    def test_init(self):
        self.assertEqual(self.potcar.symbols, ["Fe", "P", "O"],
                         "Wrong symbols read in for POTCAR")
        potcar = Potcar(["Fe_pv", "O"])
        self.assertEqual(potcar[0].enmax, 293.238)
    def test_potcar_map(self):
        fe_potcar = zopen(self.TEST_FILES_DIR / "POT_GGA_PAW_PBE" / "POTCAR.Fe_pv.gz").read().decode(
            "utf-8")
        # specify V instead of Fe - this makes sure the test won't pass if the
        # code just grabs the POTCAR from the config file (the config file would
        # grab the V POTCAR)
        potcar = Potcar(["V"], sym_potcar_map={"V": fe_potcar})
        self.assertEqual(potcar.symbols, ["Fe_pv"], "Wrong symbols read in "
                                                    "for POTCAR")
    def test_to_from_dict(self):
        d = self.potcar.as_dict()
        potcar = Potcar.from_dict(d)
        self.assertEqual(potcar.symbols, ["Fe", "P", "O"])
    def test_write(self):
        tempfname = Path("POTCAR.testing")
        self.potcar.write_file(tempfname)
        p = Potcar.from_file(tempfname)
        self.assertEqual(p.symbols, self.potcar.symbols)
        tempfname.unlink()
    def test_set_symbol(self):
        self.assertEqual(self.potcar.symbols, ["Fe", "P", "O"])
        self.assertEqual(self.potcar[0].nelectrons, 8)
        # Re-assigning symbols replaces the underlying PotcarSingle list.
        self.potcar.symbols = ["Fe_pv", "O"]
        self.assertEqual(self.potcar.symbols, ["Fe_pv", "O"])
        self.assertEqual(self.potcar[0].nelectrons, 14)
    # def test_default_functional(self):
    #     p = Potcar(["Fe", "P"])
    #     self.assertEqual(p[0].functional_class, 'GGA')
    #     self.assertEqual(p[1].functional_class, 'GGA')
    #     SETTINGS["PMG_DEFAULT_FUNCTIONAL"] = "LDA"
    #     p = Potcar(["Fe", "P"])
    #     self.assertEqual(p[0].functional_class, 'LDA')
    #     self.assertEqual(p[1].functional_class, 'LDA')
    def test_pickle(self):
        pickle.dumps(self.potcar)
    # def tearDown(self):
    #     SETTINGS["PMG_DEFAULT_FUNCTIONAL"] = "PBE"
class VaspInputTest(PymatgenTest):
    """Tests for the VaspInput aggregate (INCAR + KPOINTS + POSCAR + POTCAR)."""

    def setUp(self):
        filepath = self.TEST_FILES_DIR / 'INCAR'
        incar = Incar.from_file(filepath)
        filepath = self.TEST_FILES_DIR / 'POSCAR'
        poscar = Poscar.from_file(filepath, check_for_POTCAR=False)
        # Fall back to the bundled pseudopotential directory if none is set.
        if "PMG_VASP_PSP_DIR" not in os.environ:
            os.environ["PMG_VASP_PSP_DIR"] = str(self.TEST_FILES_DIR)
        filepath = self.TEST_FILES_DIR / 'POTCAR'
        potcar = Potcar.from_file(filepath)
        filepath = self.TEST_FILES_DIR / 'KPOINTS.auto'
        kpoints = Kpoints.from_file(filepath)
        self.vinput = VaspInput(incar, kpoints, poscar, potcar)

    def test_to_from_dict(self):
        """Round-trip through as_dict/from_dict preserves the structure."""
        d = self.vinput.as_dict()
        vinput = VaspInput.from_dict(d)
        comp = vinput["POSCAR"].structure.composition
        self.assertEqual(comp, Composition("Fe4P4O16"))

    def test_write(self):
        """write_input emits all four files; clean up afterwards."""
        tmp_dir = Path("VaspInput.testing")
        self.vinput.write_input(tmp_dir)

        filepath = tmp_dir / "INCAR"
        incar = Incar.from_file(filepath)
        self.assertEqual(incar["NSW"], 99)

        for name in ("INCAR", "POSCAR", "POTCAR", "KPOINTS"):
            (tmp_dir / name).unlink()
        tmp_dir.rmdir()

    def test_run_vasp(self):
        # To add some test.
        # Uses `cat INCAR` as a stand-in for the vasp executable, so the
        # captured vasp.out is simply the INCAR contents.
        with ScratchDir(".") as d:
            self.vinput.run_vasp(d, vasp_cmd=["cat", "INCAR"])
            with open(os.path.join(d, "vasp.out"), "r") as f:
                output = f.read()
                self.assertEqual(output.split("\n")[0], "ALGO = Damped")

    def test_from_directory(self):
        """from_directory picks up standard files plus requested optional ones."""
        vi = VaspInput.from_directory(self.TEST_FILES_DIR,
                                      optional_files={"CONTCAR.Li2O": Poscar})
        self.assertEqual(vi["INCAR"]["ALGO"], "Damped")
        self.assertIn("CONTCAR.Li2O", vi)
        d = vi.as_dict()
        vinput = VaspInput.from_dict(d)
        self.assertIn("CONTCAR.Li2O", vinput)
# Allow running this test module directly with `python test_inputs.py`.
if __name__ == "__main__":
    unittest.main()
|
fraricci/pymatgen
|
pymatgen/io/vasp/tests/test_inputs.py
|
Python
|
mit
| 33,414
|
[
"VASP",
"pymatgen"
] |
9e225fee51fc872239b87585f6f39d56f3e1ebd9ef5ba0bd1a3ff20f0f18d4c3
|
# -*- coding: utf-8 -*-
"""
@author: zhoujiagen
Created on 15/07/2019 10:16 AM
"""
def construct_test(rule_file_path):
  """Print a JUnit test-method stub for every grammar rule.

  Args:
      rule_file_path: path to a text file listing one ANTLR rule name per
          line.

  Each stub constructs a parser over an empty input, invokes the rule's
  parse method and visits the resulting context.
  """
  with open(rule_file_path) as f:
    for rule_raw in f:
      # rstrip the line terminator instead of slicing off the last char:
      # slicing corrupted the final rule when the file lacked a trailing
      # newline, and left a stray '\r' behind on CRLF files.
      rule = rule_raw.rstrip("\r\n")
      if not rule:
        # Skip blank lines; capitalising '' would raise IndexError.
        continue
      rule_capitalize = rule[0].upper() + rule[1:]
      print("""
  @Test public void test{}() {{
    final String input = "";
    MySqlParser parser = this.constructParser(input);
    MySqlParserVisitor<Object> visitor = new RelationalAlgebraMySqlParserVisitor();
    {}Context context = parser.{}();
    Object {} = visitor.visit{}(context);
    System.out.print({});
  }}
  """.format(rule_capitalize, rule_capitalize, rule, rule, rule_capitalize, rule))
def construct_factory_methods(rule_file_path):
  """Print a static factory method for every grammar rule.

  Args:
      rule_file_path: path to a text file listing one ANTLR rule name per
          line.
  """
  with open(rule_file_path) as f:
    for rule_raw in f:
      # rstrip the newline rather than slicing: slicing broke the last
      # rule in files without a trailing newline and kept '\r' on CRLF.
      rule = rule_raw.rstrip("\r\n")
      if not rule:
        # Blank line: nothing to generate, and rule[0] would raise.
        continue
      rule_capitalize = rule[0].upper() + rule[1:]
      print("""public static {} make{}() {{ return new {}(); }}""".format(rule_capitalize, rule_capitalize,
                                                                          rule_capitalize))
def construct_group_rule_branches(rule_file_path):
  """Print the if/else-if dispatch chain for a group-rule visitor.

  Args:
      rule_file_path: path to a text file listing one ANTLR rule name per
          line, each becoming one branch of the chain.

  The first branch drops the leading 'else ' so the chain opens with a
  plain 'if'; a final 'else' raises ParserError for unmatched contexts.
  """
  index = 0
  with open(rule_file_path) as f:
    for rule_raw in f:
      # rstrip the newline rather than slicing: slicing broke the last
      # rule in files without a trailing newline and kept '\r' on CRLF.
      rule = rule_raw.rstrip("\r\n")
      if not rule:
        # Blank line: skip; rule[0] on '' would raise IndexError.
        continue
      rule_capitalize = rule[0].upper() + rule[1:]
      branch = """else if (ctx.{}() != null) {{
      return this.visit{}(ctx.{}());
    }} """.format(rule, rule_capitalize, rule)
      if index == 0:
        # Drop the leading 'else ' (5 chars) to open the chain.
        branch = branch[5:]
      print(branch)
      index += 1
  print("""else {
      throw ParserError.make(ctx);
    }""")
if __name__ == '__main__':
  # Example rule names that may appear in the input file:
  # root
  # sqlStatements
  # sqlStatement
  # emptyStatement
  # ddlStatement
  # ...
  # NOTE(review): hard-coded, user-specific input path -- adjust before use.
  file_path = "/Users/zhoujiagen/Downloads/rule.txt"
  construct_group_rule_branches(file_path)
|
zhoujiagen/giant-data-analysis
|
data-models/datamodel-logic/src/test/resources/test_antlr_rule.py
|
Python
|
mit
| 2,331
|
[
"VisIt"
] |
ccdacb1f4cbf9704d92eeb07886d30face17a985f155d62efa3affaa7898917b
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import numbers
def is_integral(x):
    """Return True if ``x`` represents an integer value.

    Accepts anything convertible with ``int()`` (ints, numeric strings,
    etc.) but rejects booleans, which would otherwise pass the conversion.
    Used as the validator for the ``maxdims``/``maxvars`` variants, whose
    values arrive as strings from the spec.

    Args:
        x: candidate value.

    Returns:
        bool: True when ``int(x)`` succeeds and ``x`` is not a bool.
    """
    try:
        return isinstance(int(x), numbers.Integral) and not isinstance(x, bool)
    except (ValueError, TypeError):
        # int() raises TypeError (not ValueError) for None, lists, etc.;
        # the original only caught ValueError, so e.g. is_integral(None)
        # crashed the validator instead of returning False.
        return False
class Netcdf(AutotoolsPackage):
    """NetCDF is a set of software libraries and self-describing,
    machine-independent data formats that support the creation, access,
    and sharing of array-oriented scientific data."""

    homepage = "http://www.unidata.ucar.edu/software/netcdf"
    url = "http://www.gfd-dennou.org/arch/netcdf/unidata-mirror/netcdf-4.3.3.tar.gz"

    # Version 4.4.1.1 is having problems in tests
    # https://github.com/Unidata/netcdf-c/issues/343
    version('4.4.1.1', '503a2d6b6035d116ed53b1d80c811bda')
    # netcdf@4.4.1 can crash on you (in real life and in tests). See:
    # https://github.com/Unidata/netcdf-c/issues/282
    version('4.4.1', '7843e35b661c99e1d49e60791d5072d8')
    version('4.4.0', 'cffda0cbd97fdb3a06e9274f7aef438e')
    version('4.3.3.1', '5c9dad3705a3408d27f696e5b31fb88c')
    version('4.3.3', '5fbd0e108a54bd82cb5702a73f56d2ae')

    variant('mpi', default=True,
            description='Enable parallel I/O for netcdf-4')
    variant('parallel-netcdf', default=False,
            description='Enable parallel I/O for classic files')
    variant('hdf4', default=False, description='Enable HDF4 support')
    variant('shared', default=True, description='Enable shared library')
    variant('dap', default=False, description='Enable DAP support')
    # It's unclear if cdmremote can be enabled if '--enable-netcdf-4' is passed
    # to the configure script. Since netcdf-4 support is mandatory we comment
    # this variant out.
    # variant('cdmremote', default=False,
    #         description='Enable CDM Remote support')

    # These variants control the number of dimensions (i.e. coordinates and
    # attributes) and variables (e.g. time, entity ID, number of coordinates)
    # that can be used in any particular NetCDF file.
    # Values are validated by is_integral() and patched into netcdf.h below.
    variant(
        'maxdims',
        default=1024,
        description='Defines the maximum dimensions of NetCDF files.',
        values=is_integral
    )
    variant(
        'maxvars',
        default=8192,
        description='Defines the maximum variables of NetCDF files.',
        values=is_integral
    )

    depends_on("m4", type='build')
    depends_on("hdf", when='+hdf4')

    # curl 7.18.0 or later is required:
    # http://www.unidata.ucar.edu/software/netcdf/docs/getting_and_building_netcdf.html
    depends_on("curl@7.18.0:", when='+dap')
    # depends_on("curl@7.18.0:", when='+cdmremote')

    depends_on('parallel-netcdf', when='+parallel-netcdf')

    # We need to build with MPI wrappers if any of the two
    # parallel I/O features is enabled:
    # http://www.unidata.ucar.edu/software/netcdf/docs/getting_and_building_netcdf.html#build_parallel
    depends_on('mpi', when='+mpi')
    depends_on('mpi', when='+parallel-netcdf')

    # zlib 1.2.5 or later is required for netCDF-4 compression:
    # http://www.unidata.ucar.edu/software/netcdf/docs/getting_and_building_netcdf.html
    depends_on("zlib@1.2.5:")

    # High-level API of HDF5 1.8.9 or later is required for netCDF-4 support:
    # http://www.unidata.ucar.edu/software/netcdf/docs/getting_and_building_netcdf.html
    depends_on('hdf5@1.8.9:+hl')

    # Starting version 4.4.0, it became possible to disable parallel I/O even
    # if HDF5 supports it. For previous versions of the library we need
    # HDF5 without mpi support to disable parallel I/O.
    # The following doesn't work if hdf5+mpi by default and netcdf~mpi is
    # specified in packages.yaml
    # depends_on('hdf5~mpi', when='@:4.3~mpi')
    # Thus, we have to introduce a conflict
    conflicts('~mpi', when='@:4.3^hdf5+mpi',
              msg='netcdf@:4.3~mpi requires hdf5~mpi')

    # We need HDF5 with mpi support to enable parallel I/O.
    # The following doesn't work if hdf5~mpi by default and netcdf+mpi is
    # specified in packages.yaml
    # depends_on('hdf5+mpi', when='+mpi')
    # Thus, we have to introduce a conflict
    conflicts('+mpi', when='^hdf5~mpi',
              msg='netcdf+mpi requires hdf5+mpi')

    # NetCDF 4.4.0 and prior have compatibility issues with HDF5 1.10 and later
    # https://github.com/Unidata/netcdf-c/issues/250
    depends_on('hdf5@:1.8.999', when='@:4.4.0')

    # The feature was introduced in version 4.1.2
    # and was removed in version 4.4.0
    # conflicts('+cdmremote', when='@:4.1.1,4.4:')

    # The features were introduced in version 4.1.0
    conflicts('+parallel-netcdf', when='@:4.0')
    conflicts('+hdf4', when='@:4.0')

    def patch(self):
        """Rewrite NC_MAX_DIMS / NC_MAX_VARS in netcdf.h from the variants."""
        try:
            max_dims = int(self.spec.variants['maxdims'].value)
            max_vars = int(self.spec.variants['maxvars'].value)
        except (ValueError, TypeError):
            raise TypeError('NetCDF variant values max[dims|vars] must be '
                            'integer values.')

        ff = FileFilter(join_path('include', 'netcdf.h'))
        ff.filter(r'^(#define\s+NC_MAX_DIMS\s+)\d+(.*)$',
                  r'\1{0}\2'.format(max_dims))
        ff.filter(r'^(#define\s+NC_MAX_VARS\s+)\d+(.*)$',
                  r'\1{0}\2'.format(max_vars))

    def configure_args(self):
        """Assemble configure flags plus CFLAGS/CPPFLAGS/LDFLAGS/LIBS.

        Flags are accumulated in lists and appended as single
        'VAR=value' arguments at the end.
        """
        CFLAGS = []
        CPPFLAGS = []
        LDFLAGS = []
        LIBS = []

        config_args = ['--enable-v2',
                       '--enable-utilities',
                       '--enable-static',
                       '--enable-largefile',
                       '--enable-netcdf-4']

        # The flag was introduced in version 4.1.0
        if self.spec.satisfies('@4.1:'):
            config_args.append('--enable-fsync')

        # The flag was introduced in version 4.3.1
        if self.spec.satisfies('@4.3.1:'):
            config_args.append('--enable-dynamic-loading')

        config_args += self.enable_or_disable('shared')

        if '~shared' in self.spec:
            # We don't have shared libraries but we still want it to be
            # possible to use this library in shared builds
            CFLAGS.append(self.compiler.pic_flag)

        config_args += self.enable_or_disable('dap')
        # config_args += self.enable_or_disable('cdmremote')

        # if '+dap' in self.spec or '+cdmremote' in self.spec:
        if '+dap' in self.spec:
            # Make sure Netcdf links against Spack's curl, otherwise it may
            # pick up system's curl, which can give link errors, e.g.:
            # undefined reference to `SSL_CTX_use_certificate_chain_file
            curl = self.spec['curl']
            curl_libs = curl.libs
            LIBS.append(curl_libs.link_flags)
            LDFLAGS.append(curl_libs.search_flags)
            # TODO: figure out how to get correct flags via headers.cpp_flags
            CPPFLAGS.append('-I' + curl.prefix.include)

        if self.spec.satisfies('@4.4:'):
            if '+mpi' in self.spec:
                config_args.append('--enable-parallel4')
            else:
                config_args.append('--disable-parallel4')

        # Starting version 4.1.3, --with-hdf5= and other such configure options
        # are removed. Variables CPPFLAGS, LDFLAGS, and LD_LIBRARY_PATH must be
        # used instead.
        hdf5_hl = self.spec['hdf5:hl']
        CPPFLAGS.append(hdf5_hl.headers.cpp_flags)
        LDFLAGS.append(hdf5_hl.libs.search_flags)

        if '+parallel-netcdf' in self.spec:
            config_args.append('--enable-pnetcdf')
            pnetcdf = self.spec['parallel-netcdf']
            CPPFLAGS.append(pnetcdf.headers.cpp_flags)
            # TODO: change to pnetcdf.libs.search_flags once 'parallel-netcdf'
            # package gets custom implementation of 'libs'
            LDFLAGS.append('-L' + pnetcdf.prefix.lib)
        else:
            config_args.append('--disable-pnetcdf')

        if '+mpi' in self.spec or '+parallel-netcdf' in self.spec:
            config_args.append('CC=%s' % self.spec['mpi'].mpicc)

        config_args += self.enable_or_disable('hdf4')
        if '+hdf4' in self.spec:
            hdf4 = self.spec['hdf']
            CPPFLAGS.append(hdf4.headers.cpp_flags)
            # TODO: change to hdf4.libs.search_flags once 'hdf'
            # package gets custom implementation of 'libs' property.
            LDFLAGS.append('-L' + hdf4.prefix.lib)
            # TODO: change to self.spec['jpeg'].libs.link_flags once the
            # implementations of 'jpeg' virtual package get 'jpeg_libs'
            # property.
            LIBS.append('-ljpeg')
            if '+szip' in hdf4:
                # This should also come from hdf4.libs
                LIBS.append('-lsz')

        # Fortran support
        # In version 4.2+, NetCDF-C and NetCDF-Fortran have split.
        # Use the netcdf-fortran package to install Fortran support.

        config_args.append('CFLAGS=' + ' '.join(CFLAGS))
        config_args.append('CPPFLAGS=' + ' '.join(CPPFLAGS))
        config_args.append('LDFLAGS=' + ' '.join(LDFLAGS))
        config_args.append('LIBS=' + ' '.join(LIBS))

        return config_args

    def check(self):
        """Run the test suite; serial because h5_test fails in parallel."""
        # h5_test fails when run in parallel
        make('check', parallel=False)
|
lgarren/spack
|
var/spack/repos/builtin/packages/netcdf/package.py
|
Python
|
lgpl-2.1
| 10,483
|
[
"NetCDF"
] |
c50fcd9834a6c0704dc1ca7faeb6ade49b8a4b7354e5bc4df4304f9cbb2c43a0
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from scipy import exp
from scipy import __version__
from scipy.constants import pi, Avogadro
# SciPy < 0.10 misspelled the constant as "Bolzmann"; later releases
# (including all 1.x versions) export the correctly spelled name.  Compare
# (major, minor) as a tuple: the original compared only the minor component
# (int(__version__.split(".")[1]) < 10), so e.g. scipy "1.2" took the
# legacy branch and crashed with ImportError on the missing 'Bolzmann'.
if tuple(int(part) for part in __version__.split(".")[:2]) < (0, 10):
    from scipy.constants import Bolzmann as Boltzmann
else:
    from scipy.constants import Boltzmann
from lib.meos import MEoS
from lib import unidades
class Methanol(MEoS):
"""Multiparameter equation of state for methanol"""
name = "methanol"
CASNumber = "67-56-1"
formula = "CH3OH"
synonym = ""
rhoc = unidades.Density(275.5626)
Tc = unidades.Temperature(512.6)
Pc = unidades.Pressure(8103.5, "kPa")
M = 32.04216 # g/mol
Tt = unidades.Temperature(175.61)
Tb = unidades.Temperature(337.632)
f_acent = 0.5625
momentoDipolar = unidades.DipoleMoment(1.7, "Debye")
id = 117
CP1 = {"ao": 3.9007912,
"an": [], "pow": [],
"ao_exp": [0.10992677e2, 0.18336830e2, -0.16366004e2, -0.62332348e1,
0.28035363e1, 0.10778099e1, 0.96965697],
"exp": [2115.01542, 1676.18569, 1935.16717, 1504.97016, 4222.83691,
5296.17127, 273.36934],
"ao_hyp": [], "hyp": []}
CP2 = {"ao": 0.964220/8.3143*32.,
"an": [0.532325e-4/8.3143*32., 0.672819e-5/8.3143*32.,
-0.768411e-8/8.3143*32., 0.275220e-11/8.3143*32.],
"pow": [1, 2, 3, 4],
"ao_exp": [], "exp": [],
"ao_hyp": [], "hyp": []}
helmholtz1 = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for methanol of de Reuck and Craven (1993)",
"__doi__": {"autor": "de Reuck, K.M. and Craven, R.J.B.",
"title": "Methanol, International Thermodynamic Tables of the Fluid State - 12",
"ref": "IUPAC, Blackwell Scientific Publications, London, 1993.",
"doi": ""},
"R": 8.31448,
"cp": CP1,
"ref": "NBP",
"Tref": 513.38, "rhoref": 8.78517*M,
"Tmin": Tt, "Tmax": 620.0, "Pmax": 800000.0, "rhomax": 35.57,
"Pmin": 0.18629e-3, "rhomin": 28.23,
"nr1": [-0.280062505988e1, 0.125636372418e2, -0.130310563173e2,
0.326593134060e1, -0.411425343805e1, 0.346397741254e1,
-0.836443967590e-1, -0.369240098923, 0.313180842152e-2,
0.603201474111, -0.231158593638, 0.106114844945,
-0.792228164995e-1, -0.422419150975e-4, 0.758196739214e-2,
-0.244617434701e-4, 0.115080328802e-5],
"d1": [1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 6, 7],
"t1": [0, 1, 2, 3, 1, 2, 3, 4, 6, 0, 3, 4, 0, 7, 1, 6, 7],
"nr2": [-0.125099747447e2, 0.270392835391e2, -0.212070717086e2,
0.632799472270e1, 0.143687921636e2, -0.287450766617e2,
0.185397216068e2, -0.388720372879e1, -0.416602487963e1,
0.529665875982e1, 0.509360272812, -0.330257604839e1,
-0.311045210826, 0.273460830583, 0.518916583979,
-0.227570803104e-2, 0.211658196182e-1, -0.114335123221e-1,
0.249860798459e-2],
"d2": [1, 1, 1, 1, 2, 2, 2, 2, 3, 4, 5, 5, 5, 5, 6, 9, 6, 6, 4],
"t2": [1, 2, 3, 4, 1, 2, 3, 5, 1, 2, 1, 2, 4, 5, 2, 5, 9, 14, 19],
"c2": [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 6],
"gamma2": [1.01733510223052]*16+[1.03497071023039]*2+[1.05291203329783],
"nr3": [-0.819291988442e1, 0.478601004557, -0.444161392885,
0.179621810410, -0.687602278259, 0.240459848295e1,
-0.688463987466e1, 0.113992982501e1],
"d3": [1, 1, 1, 1, 1, 3, 3, 3],
"t3": [0]*8,
"alfa3": [4.06934040892209, 8.20892015621185, 9.15601592007471,
83.8326275286616, 16.2773616356884, 27.705105527215,
16.2773616356884, 264.95250181898],
"beta3": [-3.8940745646517, -3.8940745646517, -3.8940745646517,
-3.8940745646517, -3.8940745646517, -23.0649031906293,
-23.0649031906293, -23.0649031906293],
"gamma3": [1.54080254509371, 1.54080254509371, 1.54080254509371,
1.54080254509371, 1.54080254509371, 1.08389789427588,
1.08389789427588, 1.08389789427588],
"epsilon3": [0]*8,
"exp1": [2, 3, 2, 4, 2, 3, 2, 4],
"exp2": [1]*8}
helmholtz2 = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for methanol of Sun and Ely (2004)",
"__doi__": {"autor": "Sun, L. and Ely, J.F.",
"title": "Universal equation of state for engineering application: Algorithm and application to non-polar and polar fluids",
"ref": "Fluid Phase Equilib., 222-223:107-118, 2004.",
"doi": "10.1016/j.fluid.2004.06.028"},
"R": 8.3143,
"cp": CP1,
"ref": "NBP",
"Tmin": Tt, "Tmax": 620.0, "Pmax": 800000.0, "rhomax": 40.,
"Pmin": 0.1, "rhomin": 40.,
"nr1": [-2.4578394, 1.39060027, 8.56114069e-1, -4.20843418e-2,
3.63682442e-5, 7.05598662e-1],
"d1": [1, 1, 1, 3, 7, 2],
"t1": [1.5, 0.25, 1.25, 0.25, 0.875, 1.375],
"nr2": [3.70573369e-1, 2.46303468, 1.50253790, 7.47553687e-2,
-3.06417876e-1, -7.48402758e-1, -1.01432849e-1, 8.06830693e-2],
"d2": [1, 1, 2, 5, 1, 1, 4, 2],
"t2": [0, 2.375, 2., 2.125, 3.5, 6.5, 4.75, 12.5],
"c2": [1, 1, 1, 1, 2, 2, 2, 3],
"gamma2": [1]*8}
helmholtz3 = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for methanol of Polt et al. (1992)",
"__doi__": {"autor": "Polt, A., Platzer, B., and Maurer, G.",
"title": "Parameter der thermischen Zustandsgleichung von Bender fuer 14 mehratomige reine Stoffe",
"ref": "Chem. Technik 22(1992)6 , 216/224",
"doi": ""},
"R": 8.3143,
"cp": CP2,
"ref": "NBP",
"Tmin": 298., "Tmax": 703.0, "Pmax": 63000.0, "rhomax": 26.0625,
"Pmin": 16.803, "rhomin": 24.576,
"nr1": [-0.412043979985e1, 0.541210456547e1, -0.974639417666,
-0.909437999343, -0.143467597275, 0.557052459597e1,
-0.697445416557e1, 0.860535902136, 0.244117735035e1,
-0.449073510921e1, 0.223855290012e1, -0.71733653794, 0.876135006507,
0.151777405466, -0.233178058896, 0.140022534721e-1],
"d1": [0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5],
"t1": [3, 4, 5, 0, 1, 2, 3, 4, 0, 1, 2, 0, 1, 0, 1, 1],
"nr2": [0.412043979985e1, -0.541210456547e1, 0.974639417666,
-0.4642672133, 0.944015617353, -0.449348200461],
"d2": [0, 0, 0, 2, 2, 2],
"t2": [3, 4, 5, 3, 4, 5],
"c2": [2]*6,
"gamma2": [0.591872]*6}
eq = helmholtz1, helmholtz2, helmholtz3
_surface = {"sigma": [0.22421, -0.21408, 0.083233],
"exp": [1.3355, 1.677, 4.4402]}
_melting = {"eq": 1, "Tref": Tt, "Pref": 0.187e-3,
"Tmin": Tt, "Tmax": 620.,
"a1": [1], "exp1": [0],
"a2": [5.320770e9, 4.524780e9, 3.888861e10], "exp2": [1, 1.5, 4],
"a3": [], "exp3": []}
_vapor_Pressure = {
"eq": 5,
"ao": [-0.87414e1, 0.15035e1, -0.28720e1, -0.51345],
"exp": [1., 1.5, 2.5, 5.]}
_liquid_Density = {
"eq": 1,
"ao": [0.60230e-1, 0.18855e2, -0.27626e2, 0.11213e2, 0.69039],
"exp": [0.1, 0.65, 0.79, 0.95, 4.4]}
_vapor_Density = {
"eq": 3,
"ao": [-0.81104, -0.55661e1, -0.79326e3, 0.19234e4, -0.29219e4,
0.29660e4, -0.13210e4],
"exp": [0.25, 0.6, 3.5, 4.0, 5.0, 6.0, 7.0]}
visco0 = {"eq": 0,
"method": "_visco0",
"__name__": "Xiang (2006)",
"__doi__": {"autor": "Xiang, H.W., Huber, M.L. and Laesecke, A.",
"title": "A New Reference Correlation for the Viscosity of Methanol",
"ref": "J. Phys. Chem. Ref. Data 35, 1597 (2006)",
"doi": "10.1063/1.2360605"},
"__test__":
# Table 5, Pag 15
"""
>>> st=Methanol(T=175.63, x=0.5)
>>> print "%0.2f %0.3g %0.3g %0.4g %0.5g %0.4g" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Liquido.mu.muPas, st.Gas.rho, st.Gas.mu.muPas)
175.63 1.86e-7 4.09e-6 0.005822 904.56 12.80
>>> st=Methanol(T=200, x=0.5)
>>> print "%0.0f %0.4g %0.3g %0.4g %0.5g %0.4g" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Liquido.mu.muPas, st.Gas.rho, st.Gas.mu.muPas)
200 6.098e-6 1.1754e-4 0.006563 880.28 4.506
>>> st=Methanol(T=250, x=0.5)
>>> print "%0.0f %0.4g %0.3g %0.4g %0.5g %0.4g" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Liquido.mu.muPas, st.Gas.rho, st.Gas.mu.muPas)
250 0.0008103 0.012577 0.008112 831.52 1.236
>>> st=Methanol(T=300, x=0.5)
>>> print "%0.0f %0.5g %0.3g %0.4g %0.5g %0.4g" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Liquido.mu.muPas, st.Gas.rho, st.Gas.mu.muPas)
300 0.018682 0.24623 0.009678 784.51 0.5291
>>> st=Methanol(T=350, x=0.5)
>>> print "%0.0f %0.5g %0.3g %0.4g %0.5g %0.4g" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Liquido.mu.muPas, st.Gas.rho, st.Gas.mu.muPas)
350 0.16172 1.9053 0.01118 735.84 0.2838
>>> st=Methanol(T=400, x=0.5)
>>> print "%0.0f %0.5g %0.3g %0.4g %0.5g %0.4g" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Liquido.mu.muPas, st.Gas.rho, st.Gas.mu.muPas)
400 0.77374 8.7343 0.01251 678.59 0.1714
>>> st=Methanol(T=450, x=0.5)
>>> print "%0.0f %0.5g %0.3g %0.4g %0.5g %0.4g" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Liquido.mu.muPas, st.Gas.rho, st.Gas.mu.muPas)
450 2.5433 30.831 0.01388 600.49 0.1058
>>> st=Methanol(T=500, x=0.5)
>>> print "%0.0f %0.5g %0.3g %0.4g %0.5g %0.4g" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Liquido.mu.muPas, st.Gas.rho, st.Gas.mu.muPas)
500 6.5250 109.88 0.01891 451.53 0.05748
>>> st=Methanol(T=512, x=0.5)
>>> print "%0.0f %0.5g %0.3g %0.4g %0.5g %0.4g" % (\
st.T, st.P.MPa, st.Liquido.rho, st.Liquido.mu.muPas, st.Gas.rho, st.Gas.mu.muPas)
512 8.0195 202.99 0.02838 341.17 0.04174
"""
# Table 6, Pag 16
"""
>>> st=Methanol(T=180, P=1e4)
>>> print "%0.2f %0.5g %0.4g" % (st.P.MPa, st.rho, st.mu.muPas)
0.01 900.27 10.44
>>> st=Methanol(T=200, P=1e5)
>>> print "%0.2f %0.5g %0.4g" % (st.P.MPa, st.rho, st.mu.muPas)
0.10 880.34 4.510
>>> st=Methanol(T=220, P=4e5)
>>> print "%0.2f %0.5g %0.4g" % (st.P.MPa, st.rho, st.mu.muPas)
0.40 860.76 2.460
>>> st=Methanol(T=280, P=1e6)
>>> print "%0.2f %0.5g %0.4g" % (st.P.MPa, st.rho, st.mu.muPas)
1.00 804.12 0.7228
>>> st=Methanol(T=300, P=1e4)
>>> print "%0.2f %0.5g %0.4g" % (st.P.MPa, st.rho, st.mu.muPas)
0.01 0.12955 0.009696
>>> st=Methanol(T=400, P=1e8)
>>> print "%0.2f %0.5g %0.4g" % (st.P.MPa, st.rho, st.mu.muPas)
100 784.82 0.2846
>>> st=Methanol(T=500, P=8e8)
>>> print "%0.2f %0.5g %0.4g" % (st.P.MPa, st.rho, st.mu.muPas)
800 954.52 0.3908
>>> st=Methanol(T=600, P=3e6)
>>> print "%0.2f %0.5g %0.4g" % (st.P.MPa, st.rho, st.mu.muPas)
3.00 20.717 0.01971
"""}
    def _visco0(self, rho, T, fase):
        """Viscosity from the Xiang, Huber & Laesecke (2006) correlation.

        Combines a dilute-gas term, an initial-density correction and a
        hard-sphere-based dense-fluid term, blended by a sigmoid in
        reduced density.  Returns a unidades.Viscosity (Pa*s).
        """
        # FIXME: results do not reproduce the published reference tables yet
        # (original author note: "No sale").
        # NOTE(review): the method receives T and rho as arguments but uses
        # self.T throughout while indexing the density via the local rho --
        # confirm whether T was meant to be used instead of self.T.
        rhoc = 273.
        ek = 577.87
        sigma0 = 0.3408e-9
        delta = 0.4575
        sigmac = 0.7193422e-9
        # Coefficient tables of the correlation (dilute gas, second viscosity
        # virial, and hard-sphere diameter expansions).
        a = [1.16145, -0.14874, 0.52487, -0.77320, 2.16178, -2.43787,
             0.95976e-3, 0.10225, -0.97346, 0.10657, -0.34528, -0.44557, -2.58055]
        b = [-19.572881, 219.73999, -1015.3226, 2471.0125, -3375.1717,
             2491.6597, -787.26086, 14.085455, -0.34664158]
        d = [-1.181909, 0.5031030, -0.6268461, 0.5169312, -0.2351349,
             5.3980235e-2, -4.9069617e-3]
        e = [0, 4.018368, -4.239180, 2.245110, -0.5750698, 2.3021026e-2,
             2.5696775e-2, -6.8372749e-3, 7.2707189e-4, -2.9255711e-5]

        # Dilute-gas contribution via collision integrals.
        T_ = self.T/ek
        OmegaLJ = a[0]*T_**a[1]+a[2]*exp(a[3]*T_)+a[4]*exp(a[5]*T_)
        OmegaD = a[7]*T_**a[8]+a[9]*exp(a[10]*T_)+a[11]*exp(a[12]*T_)
        OmegaSM = OmegaLJ*(1+delta**2*OmegaD/(1+a[6]*delta**6))
        no = 5.*(self.M/Avogadro*Boltzmann*self.T/pi)**0.5/(16*sigma0**2*OmegaSM)

        # Initial density dependence (gas-phase branch).
        B = (sum([b[i]/T_**(0.25*i) for i in range(7)])+b[7]/T_**2.5+b[8]/T_**5.5)*Avogadro*sigma0**3
        C = 1.86222085e-3*T_**3*exp(9.990338/T_**0.5)*(Avogadro*sigma0**3)**2
        ng = 1+B*rho/self.M*1000+C*(rho/self.M*1000)**2

        # Dense-fluid branch via a temperature/density dependent hard-sphere
        # diameter.  Note: 'b' is rebound here from the coefficient list to
        # the covolume.
        Tr = self.T/self.Tc
        rhor = rho/rhoc
        sigmaHS = sigmac*(sum([d[i]/Tr**i for i in range(7)])+sum([e[i]*rhor**(i) for i in range(1, 10)]))
        b = 2*pi*Avogadro*sigmaHS**3/3
        Xi = b*rho/self.M*1000/4
        g = (1-0.5*Xi)/(1-Xi)**3
        ne = 1./g+0.8*b*rho/self.M*1000+0.761*g*sigmaHS*b**2*(rho/self.M*1000)**2

        # Sigmoid blend of the gas and dense branches around rhor = 1.
        f = 1/(1+exp(5*(rhor-1)))
        n = no * (f*ng + (1-f)*ne)
        return unidades.Viscosity(n)
_viscosity = visco0,
thermo0 = {"eq": 1,
"__name__": "Perkins (2002)",
"__doi__": {"autor": "",
"title": "",
"ref": "unpublished preliminary correlation, NIST, MLH, Aug. 2006",
"doi": ""},
"Tref": 1., "kref": 1,
"no": [5.7992e-7],
"co": [1.7862],
"Trefb": 513.38, "rhorefb": 8.78517, "krefb": 1.,
"nb": [0.405435, -0.293791, -0.289002, 0.226890, 0.579019e-1,
-0.399576e-1],
"tb": [0, 1]*3,
"db": [1, 1, 2, 2, 3, 3],
"cb": [0]*6,
"critical": 3,
"gnu": 0.63, "gamma": 1.239, "R0": 1.03,
"Xio": 0.194e-9, "gam0": 0.0496, "qd": 0.342e-9, "Tcref": 768.9}
_thermal = thermo0,
|
edusegzy/pychemqt
|
lib/mEoS/Methanol.py
|
Python
|
gpl-3.0
| 14,895
|
[
"Avogadro"
] |
5eb19844ec4ee06e4d757b04dee2515df86ab228172b3a89efcc24be0c240fbb
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# jobschedule - shared scheduling functions
# Copyright (C) 2003-2010 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Job scheduling helper functions"""
# TODO: move shared server.scheduler functions here
|
heromod/migrid
|
mig/shared/jobschedule.py
|
Python
|
gpl-2.0
| 1,013
|
[
"Brian"
] |
01adec3fe4dd4a35ebe2046d80a097be6c1def4f437c58a0e956806d5f3de041
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
import math
import re
import os
import textwrap
import warnings
from collections import OrderedDict, deque
import six
from six.moves import zip, cStringIO
import numpy as np
from functools import partial
from inspect import getargspec
from itertools import groupby
from pymatgen.core.periodic_table import Element, Specie, get_el_sp
from monty.io import zopen
from pymatgen.util.coord_utils import in_coord_list_pbc, pbc_diff
from monty.string import remove_non_ascii
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.composition import Composition
from pymatgen.core.operations import SymmOp
from pymatgen.symmetry.groups import SpaceGroup, SYMM_DATA
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
"""
Wrapper classes for Cif input and output from Structures.
"""
__author__ = "Shyue Ping Ong, Will Richards"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "3.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
# Normalizer that strips whitespace and underscores from space-group symbols.
sub_spgrp = partial(re.sub, r"[\s_]", "")

space_groups = {sub_spgrp(k): k
                for k in SYMM_DATA['space_group_encoding'].keys()}

# NOTE(review): this update re-adds exactly the same mapping built above and
# is therefore a no-op as written -- possibly intended to register alternative
# symbol spellings; confirm before removing.
space_groups.update({sub_spgrp(k): k
                     for k in SYMM_DATA['space_group_encoding'].keys()})

# Lazily populated cache of COD symmetry operations (see _get_cod_data).
_COD_DATA = None
def _get_cod_data():
    """Load and cache the COD symmetry-operation table.

    The JSON file is read at most once per process; subsequent calls
    return the module-level cache.
    """
    global _COD_DATA
    if _COD_DATA is None:
        import json
        import pymatgen
        json_path = os.path.join(pymatgen.symmetry.__path__[0],
                                 "symm_ops.json")
        with open(json_path) as f:
            _COD_DATA = json.load(f)
    return _COD_DATA
class CifBlock(object):
    """A single data_ block of a CIF file: a flat key/value dictionary plus
    the grouping information needed to re-emit loop_ constructs."""

    maxlen = 70  # not quite 80 so we can deal with semicolons and things

    def __init__(self, data, loops, header):
        """
        Object for storing cif data. All data is stored in a single dictionary.
        Data inside loops are stored in lists in the data dictionary, and
        information on which keys are grouped together are stored in the loops
        attribute.

        Args:
            data: dict or OrderedDict of data to go into the cif. Values should
                be convertible to string, or lists of these if the key is
                in a loop
            loops: list of lists of keys, grouped by which loop they should
                appear in
            header: name of the block (appears after the data_ on the first
                line)
        """
        self.loops = loops
        self.data = data
        # AJ says: CIF Block names cannot be more than 75 characters or you
        # get an Exception
        self.header = header[:74]

    def __eq__(self, other):
        return self.loops == other.loops \
            and self.data == other.data \
            and self.header == other.header

    def __getitem__(self, key):
        return self.data[key]

    def __str__(self):
        """
        Returns the cif string for the data block
        """
        s = ["data_{}".format(self.header)]
        keys = self.data.keys()
        written = []
        for k in keys:
            if k in written:
                continue
            for l in self.loops:
                # search for a corresponding loop
                if k in l:
                    s.append(self._loop_to_string(l))
                    written.extend(l)
                    break
            if k not in written:
                # k didn't belong to a loop
                v = self._format_field(self.data[k])
                if len(k) + len(v) + 3 < self.maxlen:
                    s.append("{} {}".format(k, v))
                else:
                    s.extend([k, v])
        return "\n".join(s)

    def _loop_to_string(self, loop):
        """Render one loop_ construct, wrapping rows at maxlen columns."""
        s = "loop_"
        for l in loop:
            s += '\n ' + l
        for fields in zip(*[self.data[k] for k in loop]):
            line = "\n"
            for val in map(self._format_field, fields):
                if val[0] == ";":
                    # semicolon-delimited (multiline) values must start on
                    # their own line
                    s += line + "\n" + val
                    line = "\n"
                elif len(line) + len(val) + 2 < self.maxlen:
                    line += "  " + val
                else:
                    s += line
                    line = '\n  ' + val
            s += line
        return s

    def _format_field(self, v):
        """Quote/wrap a single value per CIF rules (semicolon blocks for
        long values, quotes for values with spaces or a leading underscore)."""
        v = v.__str__().strip()
        if len(v) > self.maxlen:
            return ';\n' + textwrap.fill(v, self.maxlen) + '\n;'
        # add quotes if necessary
        if v == '':
            return '""'
        if (" " in v or v[0] == "_") \
                and not (v[0] == "'" and v[-1] == "'") \
                and not (v[0] == '"' and v[-1] == '"'):
            if "'" in v:
                q = '"'
            else:
                q = "'"
            v = q + v + q
        return v

    @classmethod
    def _process_string(cls, string):
        """Tokenize a cif string into a deque of regex-match tuples,
        rejoining semicolon-delimited multiline values."""
        # remove comments
        string = re.sub("(\s|^)#.*$", "", string, flags=re.MULTILINE)
        # remove empty lines
        string = re.sub("^\s*\n", "", string, flags=re.MULTILINE)
        # remove non_ascii
        string = remove_non_ascii(string)
        # since line breaks in .cif files are mostly meaningless,
        # break up into a stream of tokens to parse, rejoining multiline
        # strings (between semicolons)
        q = deque()
        multiline = False
        ml = []
        # this regex splits on spaces, except when in quotes.
        # starting quotes must not be preceded by non-whitespace
        # (these get eaten by the first expression)
        # ending quotes must not be followed by non-whitespace
        p = re.compile(r'''([^'"\s][\S]*)|'(.*?)'(?!\S)|"(.*?)"(?!\S)''')
        for l in string.splitlines():
            if multiline:
                if l.startswith(";"):
                    multiline = False
                    q.append(('', '', '', ' '.join(ml)))
                    ml = []
                    l = l[1:].strip()
                else:
                    ml.append(l)
                    continue
            if l.startswith(";"):
                multiline = True
                ml.append(l[1:].strip())
            else:
                for s in p.findall(l):
                    # s is tuple. location of the data in the tuple
                    # depends on whether it was quoted in the input
                    q.append(s)
        return q

    @classmethod
    def from_string(cls, string):
        """Parse a single data_ block (header, key/values, loops) from a
        cif string into a CifBlock."""
        q = cls._process_string(string)
        # first token is the data_<header> line; strip the 'data_' prefix
        header = q.popleft()[0][5:]
        data = OrderedDict()
        loops = []
        while q:
            s = q.popleft()
            # cif keys aren't in quotes, so show up in s[0]
            if s[0] == "_eof":
                break
            if s[0].startswith("_"):
                data[s[0]] = "".join(q.popleft())
            elif s[0].startswith("loop_"):
                columns = []
                items = []
                while q:
                    s = q[0]
                    if s[0].startswith("loop_") or not s[0].startswith("_"):
                        break
                    columns.append("".join(q.popleft()))
                    data[columns[-1]] = []
                while q:
                    s = q[0]
                    if s[0].startswith("loop_") or s[0].startswith("_"):
                        break
                    items.append("".join(q.popleft()))
                n = len(items) // len(columns)
                # NOTE(review): this asserts divisibility by n (rows), not by
                # len(columns); a ragged loop with e.g. 6 items over 4 columns
                # passes silently -- confirm whether '% len(columns)' was meant.
                assert len(items) % n == 0
                loops.append(columns)
                for k, v in zip(columns * n, items):
                    data[k].append(v.strip())
            elif "".join(s).strip() != "":
                warnings.warn("Possible error in cif format"
                              " error at {}".format("".join(s).strip()))
        return cls(data, loops, header)
class CifFile(object):
    """
    Reads and parses CifBlocks from a .cif file (a CIF file is a sequence
    of ``data_`` blocks, each stored here as a CifBlock).
    """
    def __init__(self, data, orig_string=None, comment=None):
        """
        Args:
            data (OrderedDict): Of CifBlock objects.
            orig_string (str): The original cif string.
            comment (str): Comment string; a default pymatgen stamp is
                used when none is supplied.
        """
        self.data = data
        self.orig_string = orig_string
        if not comment:
            comment = "# generated using pymatgen"
        self.comment = comment
    def __str__(self):
        # Comment line first, then each block on its own line(s), with a
        # trailing newline.
        rendered = "\n".join("%s" % block for block in self.data.values())
        return self.comment + "\n" + rendered + "\n"
    @classmethod
    def from_string(cls, string):
        """Parse a full cif string into a CifFile of CifBlocks."""
        d = OrderedDict()
        # Prepend "x\n" so the split never starts with a data_ fragment.
        for x in re.split("^\s*data_", "x\n" + string,
                          flags=re.MULTILINE | re.DOTALL)[1:]:
            # Skip over Cif block that contains powder diffraction data.
            # Some elements in this block were missing from CIF files in
            # Springer materials/Pauling file DBs. This block contains no
            # structure information and CifParser did not parse it anyway.
            first_line = re.split("\n", x, 1)[0]
            if 'powder_pattern' in first_line:
                continue
            block = CifBlock.from_string("data_" + x)
            d[block.header] = block
        return cls(d, string)
    @classmethod
    def from_file(cls, filename):
        """Read and parse a cif file (plain, bzipped or gzipped)."""
        with zopen(filename, "rt") as f:
            return cls.from_string(f.read())
class CifParser(object):
    """
    Parses a cif file.

    Args:
        filename (str): Cif filename. bzipped or gzipped cifs are fine too.
        occupancy_tolerance (float): If total occupancy of a site is between 1
            and occupancy_tolerance, the occupancies will be scaled down to 1.
        site_tolerance (float): This tolerance is used to determine if two
            sites are sitting in the same position, in which case they will be
            combined to a single disordered site. Defaults to 1e-4.
    """
    def __init__(self, filename, occupancy_tolerance=1., site_tolerance=1e-4):
        self._occupancy_tolerance = occupancy_tolerance
        self._site_tolerance = site_tolerance
        # Accept either a path string or an open file-like object.
        if isinstance(filename, six.string_types):
            self._cif = CifFile.from_file(filename)
        else:
            self._cif = CifFile.from_string(filename.read())
    @staticmethod
    def from_string(cif_string, occupancy_tolerance=1.):
        """
        Creates a CifParser from a string.

        Args:
            cif_string (str): String representation of a CIF.
            occupancy_tolerance (float): If total occupancy of a site is
                between 1 and occupancy_tolerance, the occupancies will be
                scaled down to 1.

        Returns:
            CifParser
        """
        stream = cStringIO(cif_string)
        return CifParser(stream, occupancy_tolerance)
    def _unique_coords(self, coords_in):
        """
        Generate unique coordinates using coord and symmetry positions.
        """
        coords = []
        for tmp_coord in coords_in:
            for op in self.symmetry_operations:
                coord = op.operate(tmp_coord)
                # Wrap each fractional coordinate into [0, 1).
                coord = np.array([i - math.floor(i) for i in coord])
                # Keep only coordinates not already present (within the
                # site tolerance, under periodic boundary conditions).
                if not in_coord_list_pbc(coords, coord,
                                         atol=self._site_tolerance):
                    coords.append(coord)
        return coords
    def get_lattice(self, data, length_strings=("a", "b", "c"),
                    angle_strings=("alpha", "beta", "gamma"),
                    lattice_type=None):
        """
        Generate the lattice from the provided lattice parameters. In
        the absence of all six lattice parameters, the crystal system
        and necessary parameters are parsed.
        """
        try:
            lengths = [str2float(data["_cell_length_" + i])
                       for i in length_strings]
            angles = [str2float(data["_cell_angle_" + i])
                      for i in angle_strings]
            if not lattice_type:
                return Lattice.from_lengths_and_angles(lengths, angles)
            else:
                # e.g. Lattice.cubic(a), Lattice.hexagonal(a, c), ...
                return getattr(Lattice, lattice_type)(*(lengths + angles))
        except KeyError:
            # Missing Key search for cell setting
            for lattice_lable in ["_symmetry_cell_setting",
                                  "_space_group_crystal_system"]:
                if data.data.get(lattice_lable):
                    lattice_type = data.data.get(lattice_lable).lower()
                    try:
                        # Only forward the parameters the specific lattice
                        # constructor accepts (cubic needs only "a", etc.).
                        required_args = getargspec(
                            getattr(Lattice, lattice_type)).args
                        lengths = (l for l in length_strings
                                   if l in required_args)
                        angles = (a for a in angle_strings
                                  if a in required_args)
                        return self.get_lattice(data, lengths, angles,
                                                lattice_type=lattice_type)
                    except AttributeError as exc:
                        warnings.warn(exc)
                # NOTE(review): this else is attached to the if, so a
                # missing first label returns None without ever trying
                # the second label — confirm this is intended.
                else:
                    return None
    def get_symops(self, data):
        """
        In order to generate symmetry equivalent positions, the symmetry
        operations are parsed. If the symops are not present, the space
        group symbol is parsed, and symops are generated.

        Fallback chain: explicit xyz operations -> space group symbol
        (via internal tables, then COD reference data) -> International
        Tables number -> P1.
        """
        symops = []
        # 1) Explicit symmetry operations given as xyz strings.
        for symmetry_label in ["_symmetry_equiv_pos_as_xyz",
                               "_symmetry_equiv_pos_as_xyz_",
                               "_space_group_symop_operation_xyz",
                               "_space_group_symop_operation_xyz_"]:
            if data.data.get(symmetry_label):
                xyz = data.data.get(symmetry_label)
                if isinstance(xyz, six.string_types):
                    warnings.warn("A 1-line symmetry op P1 CIF is detected!")
                    xyz = [xyz]
                try:
                    symops = [SymmOp.from_xyz_string(s)
                              for s in xyz]
                    break
                except ValueError:
                    continue
        if not symops:
            # 2) Try to parse symbol (Hermann-Mauguin or Hall variants).
            for symmetry_label in ["_symmetry_space_group_name_H-M",
                                   "_symmetry_space_group_name_H_M",
                                   "_symmetry_space_group_name_H-M_",
                                   "_symmetry_space_group_name_H_M_",
                                   "_space_group_name_Hall",
                                   "_space_group_name_Hall_",
                                   "_space_group_name_H-M_alt",
                                   "_space_group_name_H-M_alt_",
                                   "_symmetry_space_group_name_hall",
                                   "_symmetry_space_group_name_hall_",
                                   "_symmetry_space_group_name_h-m",
                                   "_symmetry_space_group_name_h-m_"]:
                sg = data.data.get(symmetry_label)
                if sg:
                    sg = sub_spgrp(sg)
                    try:
                        spg = space_groups.get(sg)
                        if spg:
                            symops = SpaceGroup(spg).symmetry_ops
                            warnings.warn(
                                "No _symmetry_equiv_pos_as_xyz type key found. "
                                "Spacegroup from %s used." % symmetry_label)
                            break
                    except ValueError:
                        # Ignore any errors
                        pass
                    # Fall back to the COD reference data for this symbol.
                    try:
                        for d in _get_cod_data():
                            if sg == re.sub("\s+", "",
                                            d["hermann_mauguin"]) :
                                xyz = d["symops"]
                                symops = [SymmOp.from_xyz_string(s)
                                          for s in xyz]
                                warnings.warn(
                                    "No _symmetry_equiv_pos_as_xyz type key found. "
                                    "Spacegroup from %s used." % symmetry_label)
                                break
                    except Exception as ex:
                        # Best-effort lookup; the exception is deliberately
                        # swallowed and the next label is tried.
                        continue
                    if symops:
                        break
        if not symops:
            # 3) Try to parse International number
            for symmetry_label in ["_space_group_IT_number",
                                   "_space_group_IT_number_",
                                   "_symmetry_Int_Tables_number",
                                   "_symmetry_Int_Tables_number_"]:
                if data.data.get(symmetry_label):
                    try:
                        i = int(str2float(data.data.get(symmetry_label)))
                        symops = SpaceGroup.from_int_number(i).symmetry_ops
                        break
                    except ValueError:
                        continue
        if not symops:
            # 4) Nothing usable found: assume P1 (identity only).
            warnings.warn("No _symmetry_equiv_pos_as_xyz type key found. "
                          "Defaulting to P1.")
            symops = [SymmOp.from_xyz_string(s) for s in ['x', 'y', 'z']]
        return symops
    def parse_oxi_states(self, data):
        """
        Parse oxidation states from data dictionary.

        Returns:
            dict mapping symbol to oxidation number, or None if the
            oxidation data is absent or malformed.
        """
        try:
            oxi_states = {
                data["_atom_type_symbol"][i]:
                str2float(data["_atom_type_oxidation_number"][i])
                for i in range(len(data["_atom_type_symbol"]))}
            # attempt to strip oxidation state from _atom_type_symbol
            # in case the label does not contain an oxidation state
            for i, symbol in enumerate(data["_atom_type_symbol"]):
                oxi_states[re.sub(r"\d?[\+,\-]?$", "", symbol)] = \
                    str2float(data["_atom_type_oxidation_number"][i])
        except (ValueError, KeyError):
            oxi_states = None
        return oxi_states
    def _get_structure(self, data, primitive, substitution_dictionary=None):
        """
        Generate structure from part of the cif.
        """
        # Symbols often representing
        # common representations for elements/water in cif files
        special_symbols = {"D": "D", "Hw": "H", "Ow": "O", "Wat": "O",
                           "wat": "O"}
        elements = [el.symbol for el in Element]
        lattice = self.get_lattice(data)
        self.symmetry_operations = self.get_symops(data)
        oxi_states = self.parse_oxi_states(data)
        # Maps a fractional-coordinate tuple to the Composition at that site.
        coord_to_species = OrderedDict()
        def parse_symbol(sym):
            # Extract an element symbol from a site label/symbol string.
            if substitution_dictionary:
                return substitution_dictionary.get(sym)
            elif sym in ['OH', 'OH2']:
                warnings.warn("Symbol '{}' not recognized".format(sym))
                return ""
            else:
                # NOTE(review): m is a list, so the comparison m != "?"
                # is always True; presumably a string check was intended.
                m = re.findall(r"w?[A-Z][a-z]*", sym)
                if m and m != "?":
                    return m[0]
                return ""
        def get_matching_coord(coord):
            # Return an already-seen site equivalent to coord under the
            # symmetry operations (within tolerance), or False.
            for op in self.symmetry_operations:
                c = op.operate(coord)
                for k in coord_to_species.keys():
                    if np.allclose(pbc_diff(c, k), (0, 0, 0),
                                   atol=self._site_tolerance):
                        return tuple(k)
            return False
        ############################################################
        """
        This part of the code deals with handling formats of data as found in
        CIF files extracted from the Springer Materials/Pauling File
        databases, and that are different from standard ICSD formats.
        """
        # Check to see if "_atom_site_type_symbol" exists, as some test CIFs do
        # not contain this key.
        if "_atom_site_type_symbol" in data.data.keys():
            # Keep a track of which data row needs to be removed.
            # Example of a row: Nb,Zr '0.8Nb + 0.2Zr' .2a .m-3m 0 0 0 1 14
            # 'rhombic dodecahedron, Nb<sub>14</sub>'
            # Without this code, the above row in a structure would be parsed
            # as an ordered site with only Nb (since
            # CifParser would try to parse the first two characters of the
            # label "Nb,Zr") and occupancy=1.
            # However, this site is meant to be a disordered site with 0.8 of
            # Nb and 0.2 of Zr.
            idxs_to_remove = []
            for idx, el_row in enumerate(data["_atom_site_label"]):
                # CIF files from the Springer Materials/Pauling File have
                # switched the label and symbol. Thus, in the
                # above shown example row, '0.8Nb + 0.2Zr' is the symbol.
                # Below, we split the strings on ' + ' to
                # check if the length (or number of elements) in the label and
                # symbol are equal.
                if len(data["_atom_site_type_symbol"][idx].split(' + ')) > \
                        len(data["_atom_site_label"][idx].split(' + ')):
                    # Dictionary to hold extracted elements and occupancies
                    els_occu = {}
                    # parse symbol to get element names and occupancy and store
                    # in "els_occu"
                    symbol_str = data["_atom_site_type_symbol"][idx]
                    symbol_str_lst = symbol_str.split(' + ')
                    for elocc_idx in range(len(symbol_str_lst)):
                        # Remove any bracketed items in the string
                        symbol_str_lst[elocc_idx] = re.sub(
                            '\([0-9]*\)', '', symbol_str_lst[elocc_idx].strip())
                        # Extract element name and its occupancy from the
                        # string, and store it as a
                        # key-value pair in "els_occ".
                        els_occu[str(re.findall('\D+', symbol_str_lst[
                            elocc_idx].strip())[1]).replace('<sup>', '')] = \
                            float('0' + re.findall('\.?\d+', symbol_str_lst[
                                elocc_idx].strip())[1])
                    x = str2float(data["_atom_site_fract_x"][idx])
                    y = str2float(data["_atom_site_fract_y"][idx])
                    z = str2float(data["_atom_site_fract_z"][idx])
                    coord = (x, y, z)
                    # Add each partially occupied element on the site coordinate
                    for et in els_occu:
                        match = get_matching_coord(coord)
                        if not match:
                            coord_to_species[coord] = Composition(
                                {parse_symbol(et): els_occu[parse_symbol(et)]})
                        else:
                            coord_to_species[match] += {
                                parse_symbol(et): els_occu[parse_symbol(et)]}
                    idxs_to_remove.append(idx)
            # Remove the original row by iterating over all keys in the CIF
            # data looking for lists, which indicates
            # multiple data items, one for each row, and remove items from the
            # list that corresponds to the removed row,
            # so that it's not processed by the rest of this function (which
            # would result in an error).
            for cif_key in data.data:
                if type(data.data[cif_key]) == list:
                    for id in sorted(idxs_to_remove, reverse=True):
                        del data.data[cif_key][id]
        ############################################################
        for i in range(len(data["_atom_site_label"])):
            symbol = parse_symbol(data["_atom_site_label"][i])
            if symbol:
                # Unrecognized symbols are truncated to two characters
                # and re-validated below.
                if symbol not in elements and symbol not in special_symbols:
                    symbol = symbol[:2]
            else:
                continue
            # make sure symbol was properly parsed from _atom_site_label
            # otherwise get it from _atom_site_type_symbol
            try:
                if symbol in special_symbols:
                    get_el_sp(special_symbols.get(symbol))
                else:
                    Element(symbol)
            except (KeyError, ValueError):
                # sometimes the site doesn't have the type_symbol.
                # we then hope the type_symbol can be parsed from the label
                if "_atom_site_type_symbol" in data.data.keys():
                    symbol = data["_atom_site_type_symbol"][i]
            if oxi_states is not None:
                if symbol in special_symbols:
                    el = get_el_sp(special_symbols.get(symbol) +
                                   str(oxi_states[symbol]))
                else:
                    el = Specie(symbol, oxi_states.get(symbol, 0))
            else:
                el = get_el_sp(special_symbols.get(symbol, symbol))
            x = str2float(data["_atom_site_fract_x"][i])
            y = str2float(data["_atom_site_fract_y"][i])
            z = str2float(data["_atom_site_fract_z"][i])
            try:
                occu = str2float(data["_atom_site_occupancy"][i])
            except (KeyError, ValueError):
                # A missing/unparsable occupancy means a fully occupied site.
                occu = 1
            if occu > 0:
                coord = (x, y, z)
                match = get_matching_coord(coord)
                if not match:
                    coord_to_species[coord] = Composition({el: occu})
                else:
                    # Symmetry-equivalent site already seen: accumulate
                    # the occupancy onto the existing entry.
                    coord_to_species[match] += {el: occu}
        if any([sum(c.values()) > 1 for c in coord_to_species.values()]):
            warnings.warn("Some occupancies sum to > 1! If they are within "
                          "the tolerance, they will be rescaled.")
        allspecies = []
        allcoords = []
        if coord_to_species.items():
            # Group sites with identical composition and expand each group
            # to the full set of symmetry-equivalent coordinates.
            for species, group in groupby(
                    sorted(list(coord_to_species.items()), key=lambda x: x[1]),
                    key=lambda x: x[1]):
                tmp_coords = [site[0] for site in group]
                coords = self._unique_coords(tmp_coords)
                allcoords.extend(coords)
                allspecies.extend(len(coords) * [species])
            # rescale occupancies if necessary
            for i, species in enumerate(allspecies):
                totaloccu = sum(species.values())
                if 1 < totaloccu <= self._occupancy_tolerance:
                    allspecies[i] = species / totaloccu
        # Implicitly returns None when no valid site data was found.
        if allspecies and len(allspecies) == len(allcoords):
            struct = Structure(lattice, allspecies, allcoords)
            struct = struct.get_sorted_structure()
            if primitive:
                struct = struct.get_primitive_structure()
                struct = struct.get_reduced_structure()
            return struct
    def get_structures(self, primitive=True):
        """
        Return list of structures in CIF file. primitive boolean sets whether a
        conventional cell structure or primitive cell structure is returned.

        Args:
            primitive (bool): Set to False to return conventional unit cells.
                Defaults to True.

        Returns:
            List of Structures.

        Raises:
            ValueError: if no block in the file yields a structure.
        """
        structures = []
        for d in self._cif.data.values():
            try:
                s = self._get_structure(d, primitive)
                if s:
                    structures.append(s)
            except (KeyError, ValueError) as exc:
                # Warn the user (Errors should never pass silently)
                # A user reported a problem with cif files produced by Avogadro
                # in which the atomic coordinates are in Cartesian coords.
                warnings.warn(str(exc))
        if len(structures) == 0:
            raise ValueError("Invalid cif file with no structures!")
        return structures
    def as_dict(self):
        """Return the parsed cif as a nested dict of
        {block_header: {key: value}}."""
        d = OrderedDict()
        for k, v in self._cif.data.items():
            d[k] = {}
            for k2, v2 in v.data.items():
                d[k][k2] = v2
        return d
class CifWriter(object):
    """
    A wrapper around CifFile to write CIF files from pymatgen structures.

    Args:
        struct (Structure): structure to write
        symprec (float): If not none, finds the symmetry of the structure
            and writes the cif with symmetry information. Passes symprec
            to the SpacegroupAnalyzer
    """
    def __init__(self, struct, symprec=None):
        format_str = "{:.8f}"
        block = OrderedDict()
        loops = []
        # Default to P1 unless a symmetry analysis is requested.
        spacegroup = ("P 1", 1)
        if symprec is not None:
            sf = SpacegroupAnalyzer(struct, symprec)
            spacegroup = (sf.get_space_group_symbol(),
                          sf.get_space_group_number())
            # Needs the refined structure when using symprec. This converts
            # primitive to conventional structures, the standard for CIF.
            struct = sf.get_refined_structure()
        latt = struct.lattice
        comp = struct.composition
        no_oxi_comp = comp.element_composition
        block["_symmetry_space_group_name_H-M"] = spacegroup[0]
        # Cell parameters, formatted to 8 decimal places.
        for cell_attr in ['a', 'b', 'c']:
            block["_cell_length_" + cell_attr] = format_str.format(
                getattr(latt, cell_attr))
        for cell_attr in ['alpha', 'beta', 'gamma']:
            block["_cell_angle_" + cell_attr] = format_str.format(
                getattr(latt, cell_attr))
        block["_symmetry_Int_Tables_number"] = spacegroup[1]
        block["_chemical_formula_structural"] = no_oxi_comp.reduced_formula
        block["_chemical_formula_sum"] = no_oxi_comp.formula
        block["_cell_volume"] = latt.volume.__str__()
        reduced_comp, fu = no_oxi_comp.get_reduced_composition_and_factor()
        block["_cell_formula_units_Z"] = str(int(fu))
        if symprec is None:
            # No symmetry analysis: identity operation only.
            block["_symmetry_equiv_pos_site_id"] = ["1"]
            block["_symmetry_equiv_pos_as_xyz"] = ["x, y, z"]
        else:
            sf = SpacegroupAnalyzer(struct, symprec)
            symmops = []
            for op in sf.get_symmetry_operations():
                v = op.translation_vector
                symmops.append(SymmOp.from_rotation_and_translation(
                    op.rotation_matrix, v))
            ops = [op.as_xyz_string() for op in symmops]
            block["_symmetry_equiv_pos_site_id"] = \
                ["%d" % i for i in range(1, len(ops) + 1)]
            block["_symmetry_equiv_pos_as_xyz"] = ops
            loops.append(["_symmetry_equiv_pos_site_id",
                          "_symmetry_equiv_pos_as_xyz"])
        # Record oxidation states only if every element carries one
        # (elements without .oxi_state raise AttributeError).
        contains_oxidation = True
        try:
            symbol_to_oxinum = OrderedDict([
                (el.__str__(),
                 float(el.oxi_state))
                for el in sorted(comp.elements)])
        except AttributeError:
            symbol_to_oxinum = OrderedDict([(el.symbol, 0) for el in
                                            sorted(comp.elements)])
            contains_oxidation = False
        if contains_oxidation:
            # NOTE(review): on Python 3 these are dict views, not lists —
            # presumably downstream formatting handles both; confirm.
            block["_atom_type_symbol"] = symbol_to_oxinum.keys()
            block["_atom_type_oxidation_number"] = symbol_to_oxinum.values()
            loops.append(["_atom_type_symbol", "_atom_type_oxidation_number"])
        atom_site_type_symbol = []
        atom_site_symmetry_multiplicity = []
        atom_site_fract_x = []
        atom_site_fract_y = []
        atom_site_fract_z = []
        atom_site_label = []
        atom_site_occupancy = []
        count = 1
        if symprec is None:
            # Write every site of the structure explicitly.
            for site in struct:
                for sp, occu in sorted(site.species_and_occu.items()):
                    atom_site_type_symbol.append(sp.__str__())
                    atom_site_symmetry_multiplicity.append("1")
                    atom_site_fract_x.append("{0:f}".format(site.a))
                    atom_site_fract_y.append("{0:f}".format(site.b))
                    atom_site_fract_z.append("{0:f}".format(site.c))
                    atom_site_label.append("{}{}".format(sp.symbol, count))
                    atom_site_occupancy.append(occu.__str__())
                    count += 1
        else:
            # Write one representative per orbit of equivalent sites.
            # The following just presents a deterministic ordering.
            unique_sites = [
                (sorted(sites, key=lambda s: tuple([abs(x) for x in
                                                    s.frac_coords]))[0],
                 len(sites))
                for sites in sf.get_symmetrized_structure().equivalent_sites
            ]
            for site, mult in sorted(
                    unique_sites,
                    key=lambda t: (t[0].species_and_occu.average_electroneg,
                                   -t[1], t[0].a, t[0].b, t[0].c)):
                for sp, occu in site.species_and_occu.items():
                    atom_site_type_symbol.append(sp.__str__())
                    atom_site_symmetry_multiplicity.append("%d" % mult)
                    atom_site_fract_x.append("{0:f}".format(site.a))
                    atom_site_fract_y.append("{0:f}".format(site.b))
                    atom_site_fract_z.append("{0:f}".format(site.c))
                    atom_site_label.append("{}{}".format(sp.symbol, count))
                    atom_site_occupancy.append(occu.__str__())
                    count += 1
        block["_atom_site_type_symbol"] = atom_site_type_symbol
        block["_atom_site_label"] = atom_site_label
        block["_atom_site_symmetry_multiplicity"] = \
            atom_site_symmetry_multiplicity
        block["_atom_site_fract_x"] = atom_site_fract_x
        block["_atom_site_fract_y"] = atom_site_fract_y
        block["_atom_site_fract_z"] = atom_site_fract_z
        block["_atom_site_occupancy"] = atom_site_occupancy
        loops.append(["_atom_site_type_symbol",
                      "_atom_site_label",
                      "_atom_site_symmetry_multiplicity",
                      "_atom_site_fract_x",
                      "_atom_site_fract_y",
                      "_atom_site_fract_z",
                      "_atom_site_occupancy"])
        d = OrderedDict()
        d[comp.reduced_formula] = CifBlock(block, loops, comp.reduced_formula)
        self._cf = CifFile(d)
    def __str__(self):
        """
        Returns the cif as a string.
        """
        return self._cf.__str__()
    def write_file(self, filename):
        """
        Write the cif file.
        """
        with zopen(filename, "wt") as f:
            f.write(self.__str__())
def str2float(text):
    """
    Remove uncertainty brackets from strings and return the float.

    CIF numeric values may carry a standard uncertainty in parentheses,
    e.g. "0.3550(2)"; the bracketed part is stripped before conversion.
    Single-element lists (as produced by the loop parser) are unwrapped
    first, so a list value like ["."] is handled the same as ".".

    Args:
        text (str or list): Value to convert. A 1-element list is
            unwrapped to its sole element.

    Returns:
        float: The parsed value. The CIF placeholder "." parses as 0.

    Raises:
        ValueError: If the string cannot be parsed as a number.
        TypeError: If text is neither a string nor a 1-element list
            (previously this case silently returned None).
    """
    # Unwrap a single-element list before parsing, so the "." placeholder
    # and genuine numbers are handled uniformly (the old code routed lists
    # through an except-TypeError branch that missed the "." case and
    # silently returned None for other bad types).
    if isinstance(text, list) and len(text) == 1:
        text = text[0]
    try:
        # Strip the "(n)" uncertainty suffix, then convert.
        return float(re.sub(r"\(.+\)", "", text))
    except ValueError as ex:
        # "." is the CIF placeholder for an inapplicable/zero value.
        if text.strip() == ".":
            return 0
        raise ex
|
aykol/pymatgen
|
pymatgen/io/cif.py
|
Python
|
mit
| 35,271
|
[
"Avogadro",
"CRYSTAL",
"pymatgen"
] |
e5b672bf6fdd021daf9d5e8610a3530148b21bd2431c38801febf43b7e88829a
|
from jinja2.ext import Extension
import os
from pyjade import Parser, Compiler as _Compiler
from pyjade.runtime import attrs
from jinja2.debug import fake_exc_info
from pyjade.utils import process
# Name under which the pyjade attrs() runtime helper is exposed to templates.
ATTRS_FUNC = '__pyjade_attrs'
class Compiler(_Compiler):
    """pyjade Compiler that emits Jinja2 template syntax for each node."""

    def visitCodeBlock(self, block):
        """Render a jade block as a Jinja2 {% block %}...{% endblock %}."""
        self.buffer('{%% block %s %%}' % block.name)
        # append/prepend modes splice in the parent block via super().
        if block.mode == 'append':
            self.buffer('{{super()}}')
        self.visitBlock(block)
        if block.mode == 'prepend':
            self.buffer('{{super()}}')
        self.buffer('{% endblock %}')

    def visitMixin(self, mixin):
        """A mixin with a body defines a macro; without one it calls it."""
        if not mixin.block:
            self.buffer('{{%s(%s)}}' % (mixin.name, mixin.args))
            return
        self.buffer('{%% macro %s(%s) %%}' % (mixin.name, mixin.args))
        self.visitBlock(mixin.block)
        self.buffer('{% endmacro %}')

    def visitAssignment(self, assignment):
        self.buffer('{%% set %s = %s %%}' % (assignment.name, assignment.val))

    def visitExtends(self, node):
        self.buffer('{%% extends "%s" %%}' % (node.path))

    def visitInclude(self, node):
        self.buffer('{%% include "%s" %%}' % (node.path))

    def visitCode(self, code):
        """Buffered code becomes a {{ }} expression (optionally escaped);
        unbuffered code becomes a {% %} statement, auto-closed if needed."""
        if code.buffer:
            val = code.val.lstrip()
            self.buf.append('{{%s%s}}' % (val, '|escape' if code.escape else ''))
        else:
            self.buf.append('{%% %s %%}' % code.val)
        if code.block:
            self.visit(code.block)
        if not code.buffer:
            # Close statements like "if"/"for" that Jinja2 requires an
            # explicit end tag for.
            tag = code.val.strip().split(' ', 1)[0]
            if tag in self.autocloseCode:
                self.buf.append('{%% end%s %%}' % tag)

    def visitEach(self, each):
        self.buf.append('{%% for %s in %s %%}' % (','.join(each.keys), each.obj))
        self.visit(each.block)
        self.buf.append('{% endfor %}')

    def attributes(self, attrs):
        """Render an attribute dict through the runtime attrs helper."""
        return "{{%s(%s)}}" % (ATTRS_FUNC, attrs)
class PyJadeExtension(Extension):
    """Jinja2 extension that preprocesses .jade templates into Jinja2
    template source using the Compiler above."""
    # def exception_handler(self,pt):
    #     # print '******************************'
    #     # print pt.exc_type
    #     # print pt.exc_value
    #     # print pt.frames[0].tb
    #     # line = pt.frames[0].tb.tb_lineno
    #     # pt.frames[0].tb.tb_lineno = line+10
    #     # print '******************************'
    #     _,_,tb = fake_exc_info((pt.exc_type,pt.exc_value, pt.frames[0].tb),'asfdasfdasdf',7)
    #     # pt.frames = [tb]
    #     raise pt.exc_type, pt.exc_value, tb
    def __init__(self, environment):
        super(PyJadeExtension, self).__init__(environment)
        # Register the recognized jade template extensions on the
        # environment so preprocess() can filter by filename.
        environment.extend(
            jade_file_extensions=('.jade',),
            # jade_env=JinjaEnvironment(),
        )
        # environment.exception_handler = self.exception_handler
        # Expose the attrs() runtime helper to all templates under the
        # name the Compiler emits (ATTRS_FUNC).
        environment.globals[ATTRS_FUNC] = attrs
    def preprocess(self, source, name, filename=None):
        # Only transform templates whose extension is registered as jade;
        # everything else passes through to Jinja2 untouched.
        if name and not os.path.splitext(name)[1] in self.environment.jade_file_extensions:
            return source
        return process(source,filename=name,compiler=Compiler)
|
xlk521/cloudguantou
|
pyjade/ext/jinja.py
|
Python
|
bsd-3-clause
| 3,086
|
[
"VisIt"
] |
974ebc7963b6dd2d081430ed719de0af85d3e9b246cfd9575a52b8fe775bcae5
|
from __future__ import division
import numpy as np
from auxiliary import rotation_matrix2 as rotation_matrix
import time
def compute_S_matrix_fast(zdir, xtal):
    '''
    Computes the compliance and stiffness matrices S and C a given z-direction.
    The x- and y-directions are determined automatically
    '''
    xtal = xtal.lower()
    #TODO: read the elastic constants from a file
    if xtal == 'ge':
        c1111, c1122, c2323 = 1.292, 0.479, 0.670
    elif xtal == 'si':
        c1111, c1122, c2323 = 1.657, 0.639, 0.796
    else:
        raise ValueError('Elastic parameters for the crystal not found!')
    #TODO: generalize to other systems alongside the cubic as well
    # Populate the cubic stiffness tensor from its three independent
    # constants: c1111 on the main diagonal, c1122 for distinct axial
    # pairs, c2323 for all shear index permutations.
    Cc = np.zeros((3, 3, 3, 3))
    for i in range(3):
        Cc[i, i, i, i] = c1111
        for j in range(3):
            if j != i:
                Cc[i, i, j, j] = c1122
                Cc[i, j, i, j] = c2323
                Cc[j, i, i, j] = c2323
                Cc[i, j, j, i] = c2323
                Cc[j, i, j, i] = c2323
    Q = rotation_matrix(zdir)
    # Rotate the tensor with a single tensordot contraction; see
    # http://stackoverflow.com/questions/4962606/fast-tensor-rotation-with-numpy
    QQ = np.outer(Q, Q)
    QQQQ = np.outer(QQ, QQ).reshape(4 * Q.shape)
    Crot = np.tensordot(QQQQ, Cc, ((0, 2, 4, 6), (0, 1, 2, 3)))
    # Assemble the 6x6 Voigt-notation stiffness matrix.
    C = np.array([
        [Crot[0,0,0,0], Crot[0,0,1,1], Crot[0,0,2,2], Crot[0,0,1,2], Crot[0,0,0,2], Crot[0,0,0,1]],
        [Crot[1,1,0,0], Crot[1,1,1,1], Crot[1,1,2,2], Crot[1,1,1,2], Crot[1,1,0,2], Crot[1,1,0,1]],
        [Crot[2,2,0,0], Crot[2,2,1,1], Crot[2,2,2,2], Crot[2,2,1,2], Crot[2,2,0,2], Crot[2,2,0,1]],
        [Crot[2,1,0,0], Crot[2,1,1,1], Crot[2,1,2,2], Crot[1,2,1,2], Crot[1,2,0,2], Crot[1,2,0,1]],
        [Crot[2,0,0,0], Crot[2,0,1,1], Crot[2,0,2,2], Crot[2,0,1,2], Crot[0,2,0,2], Crot[2,0,0,1]],
        [Crot[1,0,0,0], Crot[1,0,1,1], Crot[1,0,2,2], Crot[1,0,1,2], Crot[1,0,0,2], Crot[0,1,0,1]]
        ]).reshape((6,6))
    C = C * 1e11  # in pascal
    S = np.linalg.inv(C)
    return S, C
def mean_poisson(S):
    '''Computes the mean of the Poisson ratio over 2*pi (xy-plane) from given S matrix'''
    # Sample the in-plane rotation angle over a full circle.
    phi = np.linspace(0, 2*np.pi, 500)
    # Rotated compliance components (derived with Maxima).
    Sr11 = S[0,0]*np.cos(phi)**4 + S[1,1]*np.sin(phi)**4 \
           + 2*np.sin(phi)*np.cos(phi)*(S[0,5]*np.cos(phi)**2 + S[1,5]*np.sin(phi)**2) \
           + (2*S[0,1]+S[5,5])*np.cos(phi)**2*np.sin(phi)**2
    Sr31 = S[2,0]*np.cos(phi)**2 + S[2,1]*np.sin(phi)**2 + S[2,5]*np.cos(phi)*np.sin(phi)
    # Poisson ratio nu(phi) = -Sr31/Sr11, averaged over the circle.
    nu = -Sr31/Sr11
    return np.trapz(nu, phi)/(2*np.pi)
def compute_S_matrix(zdir, xtal):
    '''
    Computes the compliance and stiffness matrices S and C as a function
    of the in-plane rotation angle about the given z-direction.

    This is the loop-based reference counterpart of compute_S_matrix_fast.

    Args:
        zdir: z-direction, passed to rotation_matrix.
        xtal (str): crystal name, 'ge' or 'si' (case-insensitive).

    Returns:
        (angle, S, C): the sampled angles in degrees, and lists of the
        6x6 compliance and stiffness matrices (in Pa) at each angle.

    Raises:
        ValueError: if elastic constants for the crystal are not available.
    '''
    xtal = xtal.lower()
    if xtal == 'ge':
        c1111, c1122, c2323 = 1.292, 0.479, 0.670
    elif xtal == 'si':
        c1111, c1122, c2323 = 1.657, 0.639, 0.796
    else:
        # Previously an unknown crystal fell through silently and caused a
        # confusing NameError below; fail fast with the same error as
        # compute_S_matrix_fast.
        raise ValueError('Elastic parameters for the crystal not found!')
    # Cubic stiffness tensor from its three independent constants.
    Cc = np.zeros((3,3,3,3))
    Cc[0,0,0,0], Cc[1,1,1,1], Cc[2,2,2,2] = c1111, c1111, c1111
    Cc[0,0,1,1], Cc[0,0,2,2], Cc[1,1,0,0] = c1122, c1122, c1122
    Cc[1,1,2,2], Cc[2,2,0,0], Cc[2,2,1,1] = c1122, c1122, c1122
    Cc[0,2,0,2], Cc[2,0,0,2], Cc[0,2,2,0], Cc[2,0,2,0] = c2323, c2323, c2323, c2323
    Cc[1,2,1,2], Cc[2,1,1,2], Cc[1,2,2,1], Cc[2,1,2,1] = c2323, c2323, c2323, c2323
    Cc[0,1,0,1], Cc[1,0,0,1], Cc[0,1,1,0], Cc[1,0,1,0] = c2323, c2323, c2323, c2323
    N = 500
    theta = np.linspace(0, 2*np.pi, N)
    Q2 = rotation_matrix(zdir)
    S, C = [], []
    for th in theta:
        # In-plane rotation about z, composed with the zdir rotation.
        Q = np.array([[np.cos(th), np.sin(th), 0], [-np.sin(th), np.cos(th), 0], [0, 0, 1]])
        Q = np.dot(Q, Q2)
        # Explicit O(3^8) tensor rotation; see compute_S_matrix_fast for
        # the vectorized equivalent.
        Crot = np.zeros((3,3,3,3))
        for i in range(3):
            for j in range(3):
                for k in range(3):
                    for l in range(3):
                        for p in range(3):
                            for q in range(3):
                                for r in range(3):
                                    for s in range(3):
                                        Crot[i,j,k,l] = Crot[i,j,k,l] + Q[i,p]*Q[j,q]*Q[k,r]*Q[l,s]*Cc[p,q,r,s]
        # Assemble the 6x6 Voigt-notation stiffness matrix.
        Cr = np.array([
            [Crot[0,0,0,0], Crot[0,0,1,1], Crot[0,0,2,2], Crot[0,0,1,2], Crot[0,0,0,2], Crot[0,0,0,1]],
            [Crot[1,1,0,0], Crot[1,1,1,1], Crot[1,1,2,2], Crot[1,1,1,2], Crot[1,1,0,2], Crot[1,1,0,1]],
            [Crot[2,2,0,0], Crot[2,2,1,1], Crot[2,2,2,2], Crot[2,2,1,2], Crot[2,2,0,2], Crot[2,2,0,1]],
            [Crot[2,1,0,0], Crot[2,1,1,1], Crot[2,1,2,2], Crot[1,2,1,2], Crot[1,2,0,2], Crot[1,2,0,1]],
            [Crot[2,0,0,0], Crot[2,0,1,1], Crot[2,0,2,2], Crot[2,0,1,2], Crot[0,2,0,2], Crot[2,0,0,1]],
            [Crot[1,0,0,0], Crot[1,0,1,1], Crot[1,0,2,2], Crot[1,0,1,2], Crot[1,0,0,2], Crot[0,1,0,1]]
            ])
        Cr = Cr*1e11  # in pascal
        # By definition the compliance matrix is the inverse of C.
        S.append(np.linalg.inv(Cr))
        C.append(Cr)
    angle = 180*theta/np.pi
    return angle, S, C
|
aripekka/pytakagitaupin
|
smatrix.py
|
Python
|
mit
| 4,871
|
[
"CRYSTAL"
] |
ec90fa3242a998b4f19863e7948e40253f65f9ed93a94ffbcbccf82f5a604e66
|
# commands.py - command processing for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, nullrev, short
from lock import release
from i18n import _, gettext
import os, re, sys, difflib, time, tempfile
import hg, util, revlog, bundlerepo, extensions, copies, error
import patch, help, mdiff, url, encoding, templatekw
import archival, changegroup, cmdutil, sshserver, hbisect
from hgweb import server
import merge as merge_
import minirst
# Commands start here, listed alphabetically
def add(ui, repo, *pats, **opts):
    """add the specified files on the next commit
    Schedule files to be version controlled and added to the
    repository.
    The files will be added to the repository at the next commit. To
    undo an add before that, see hg forget.
    If no names are given, add all files to the repository.
    .. container:: verbose
       An example showing how new (unknown) files are added
       automatically by ``hg add``::
         $ ls
         foo.c
         $ hg status
         ? foo.c
         $ hg add
         adding foo.c
         $ hg status
         A foo.c
    """
    bad = []
    names = []
    m = cmdutil.match(repo, pats, opts)
    oldbad = m.bad

    # Wrap the matcher's bad-file callback so failing paths are also
    # recorded, while still delegating to the original handler.
    def badfn(f, msg):
        bad.append(f)
        return oldbad(f, msg)

    m.bad = badfn
    for f in repo.walk(m):
        exact = m.exact(f)
        if not exact and f in repo.dirstate:
            # already tracked and only matched by a pattern: skip
            continue
        names.append(f)
        if ui.verbose or not exact:
            ui.status(_('adding %s\n') % m.rel(f))
    if not opts.get('dry_run'):
        # Record add failures for explicitly requested files.
        for f in repo.add(names):
            if f in m.files():
                bad.append(f)
    if bad:
        return 1
    return 0
def addremove(ui, repo, *pats, **opts):
    """add all new files, delete all missing files
    Add all new files and remove all missing files from the
    repository.
    New files are ignored if they match any of the patterns in
    .hgignore. As with add, these changes take effect at the next
    commit.
    Use the -s/--similarity option to detect renamed files. With a
    parameter greater than 0, this compares every removed file with
    every added file and records those similar enough as renames. This
    option takes a percentage between 0 (disabled) and 100 (files must
    be identical) as its parameter. Detecting renamed files this way
    can be expensive.
    """
    # --similarity arrives as a string (or None); normalize to a float.
    raw_sim = opts.get('similarity') or 0
    try:
        sim = float(raw_sim)
    except ValueError:
        raise util.Abort(_('similarity must be a number'))
    if sim < 0 or sim > 100:
        raise util.Abort(_('similarity must be between 0 and 100'))
    # cmdutil expects the similarity as a ratio in [0, 1].
    return cmdutil.addremove(repo, pats, opts, similarity=sim / 100.0)
def annotate(ui, repo, *pats, **opts):
    """show changeset information by line for each file
    List changes in files, showing the revision id responsible for
    each line
    This command is useful for discovering when a change was made and
    by whom.
    Without the -a/--text option, annotate will avoid processing files
    it detects as binary. With -a, annotate will annotate the file
    anyway, although the results will probably be neither useful
    nor desirable.
    """
    # Short dates in quiet mode, full dates otherwise; cache per-ctx.
    datefunc = ui.quiet and util.shortdate or util.datestr
    getdate = util.cachefunc(lambda x: datefunc(x[0].date()))
    if not pats:
        raise util.Abort(_('at least one filename or pattern is required'))
    # Each entry maps an option name to a formatter producing one
    # annotation column; x is a (fctx, lineno) pair.
    opmap = [('user', lambda x: ui.shortuser(x[0].user())),
             ('number', lambda x: str(x[0].rev())),
             ('changeset', lambda x: short(x[0].node())),
             ('date', getdate),
             ('file', lambda x: x[0].path()),
            ]
    # With no column option given, default to showing revision numbers.
    if (not opts.get('user') and not opts.get('changeset')
        and not opts.get('date') and not opts.get('file')):
        opts['number'] = 1
    linenumber = opts.get('line_number') is not None
    if linenumber and (not opts.get('changeset')) and (not opts.get('number')):
        raise util.Abort(_('at least one of -n/-c is required for -l'))
    # Keep only the formatters for the requested columns, in opmap order.
    funcmap = [func for op, func in opmap if opts.get(op)]
    if linenumber:
        # Append ":<lineno>" to the last requested column.
        lastfunc = funcmap[-1]
        funcmap[-1] = lambda x: "%s:%s" % (lastfunc(x), x[1])
    ctx = repo[opts.get('rev')]
    m = cmdutil.match(repo, pats, opts)
    follow = not opts.get('no_follow')
    for abs in ctx.walk(m):
        fctx = ctx[abs]
        if not opts.get('text') and util.binary(fctx.data()):
            ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs))
            continue
        lines = fctx.annotate(follow=follow, linenumber=linenumber)
        pieces = []
        # Build each column, right-padded to its widest entry.
        for f in funcmap:
            l = [f(n) for n, dummy in lines]
            if l:
                ml = max(map(len, l))
                pieces.append(["%*s" % (ml, x) for x in l])
        # Emit "col1 col2 ...: <line content>" per annotated line.
        if pieces:
            for p, l in zip(zip(*pieces), lines):
                ui.write("%s: %s" % (" ".join(p), l[1]))
def archive(ui, repo, dest, **opts):
    '''create an unversioned archive of a repository revision
    By default, the revision used is the parent of the working
    directory; use -r/--rev to specify a different revision.
    To specify the type of archive to create, use -t/--type. Valid
    types are:
    :``files``: a directory full of files (default)
    :``tar``: tar archive, uncompressed
    :``tbz2``: tar archive, compressed using bzip2
    :``tgz``: tar archive, compressed using gzip
    :``uzip``: zip archive, uncompressed
    :``zip``: zip archive, compressed using deflate
    The exact name of the destination archive or directory is given
    using a format string; see 'hg help export' for details.
    Each member added to an archive file has a directory prefix
    prepended. Use -p/--prefix to specify a format string for the
    prefix. The default is the basename of the archive, with suffixes
    removed.
    '''
    ctx = repo[opts.get('rev')]
    if not ctx:
        raise util.Abort(_('no working directory: please specify a revision'))
    node = ctx.node()
    # Expand format-string placeholders (e.g. %h) in the destination name.
    dest = cmdutil.make_filename(repo, dest, node)
    if os.path.realpath(dest) == repo.root:
        raise util.Abort(_('repository root cannot be destination'))
    matchfn = cmdutil.match(repo, [], opts)
    kind = opts.get('type') or 'files'
    prefix = opts.get('prefix')
    if dest == '-':
        # Streaming to stdout only makes sense for single-file formats.
        if kind == 'files':
            raise util.Abort(_('cannot archive plain files to stdout'))
        dest = sys.stdout
        if not prefix:
            prefix = os.path.basename(repo.root) + '-%h'
    # The prefix may itself contain format-string placeholders.
    prefix = cmdutil.make_filename(repo, prefix, node)
    archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
                     matchfn, prefix)
def backout(ui, repo, node=None, rev=None, **opts):
    '''reverse effect of earlier changeset
    Commit the backed out changes as a new changeset. The new
    changeset is a child of the backed out changeset.
    If you backout a changeset other than the tip, a new head is
    created. This head will be the new tip and you should merge this
    backout changeset with another head.
    The --merge option remembers the parent of the working directory
    before starting the backout, then merges the new head with that
    changeset afterwards. This saves you from doing the merge by hand.
    The result of this merge is not committed, as with a normal merge.
    See 'hg help dates' for a list of formats valid for -d/--date.
    '''
    # the revision may be given positionally or via -r, but not both
    if rev and node:
        raise util.Abort(_("please specify just one revision"))
    if not rev:
        rev = node
    if not rev:
        raise util.Abort(_("please specify a revision to backout"))
    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)
    # refuse to run with uncommitted changes in the working directory
    cmdutil.bail_if_changed(repo)
    node = repo.lookup(rev)
    op1, op2 = repo.dirstate.parents()
    # the target must be an ancestor of the working directory parent
    a = repo.changelog.ancestor(op1, node)
    if a != node:
        raise util.Abort(_('cannot backout change on a different branch'))
    p1, p2 = repo.changelog.parents(node)
    if p1 == nullid:
        raise util.Abort(_('cannot backout a change with no parents'))
    if p2 != nullid:
        # backing out a merge: the user must name which parent to revert to
        if not opts.get('parent'):
            raise util.Abort(_('cannot backout a merge changeset without '
                               '--parent'))
        p = repo.lookup(opts['parent'])
        if p not in (p1, p2):
            raise util.Abort(_('%s is not a parent of %s') %
                             (short(p), short(node)))
        parent = p
    else:
        if opts.get('parent'):
            raise util.Abort(_('cannot use --parent on non-merge changeset'))
        parent = p1
    # the backout should appear on the same branch
    branch = repo.dirstate.branch()
    # check out the target revision, then revert everything back to the
    # chosen parent; committing that state produces the backout changeset
    hg.clean(repo, node, show_stats=False)
    repo.dirstate.setbranch(branch)
    revert_opts = opts.copy()
    revert_opts['date'] = None
    revert_opts['all'] = True
    revert_opts['rev'] = hex(parent)
    revert_opts['no_backup'] = None
    revert(ui, repo, **revert_opts)
    commit_opts = opts.copy()
    commit_opts['addremove'] = False
    if not commit_opts['message'] and not commit_opts['logfile']:
        # we don't translate commit messages
        commit_opts['message'] = "Backed out changeset %s" % short(node)
    commit_opts['force_editor'] = True
    commit(ui, repo, **commit_opts)
    def nice(node):
        # "rev:shorthash" form for user-facing messages
        return '%d:%s' % (repo.changelog.rev(node), short(node))
    ui.status(_('changeset %s backs out changeset %s\n') %
              (nice(repo.changelog.tip()), nice(node)))
    if op1 != node:
        # restore the working directory to its original parent, then
        # optionally merge the new backout head into it
        hg.clean(repo, op1, show_stats=False)
        if opts.get('merge'):
            ui.status(_('merging with changeset %s\n')
                      % nice(repo.changelog.tip()))
            hg.merge(repo, hex(repo.changelog.tip()))
        else:
            ui.status(_('the backout changeset is a new head - '
                        'do not forget to merge\n'))
            ui.status(_('(use "backout --merge" '
                        'if you want to auto-merge)\n'))
def bisect(ui, repo, rev=None, extra=None, command=None,
           reset=None, good=None, bad=None, skip=None, noupdate=None):
    """subdivision search of changesets
    This command helps to find changesets which introduce problems. To
    use, mark the earliest changeset you know exhibits the problem as
    bad, then mark the latest changeset which is free from the problem
    as good. Bisect will update your working directory to a revision
    for testing (unless the -U/--noupdate option is specified). Once
    you have performed tests, mark the working directory as good or
    bad, and bisect will either update to another candidate changeset
    or announce that it has found the bad revision.
    As a shortcut, you can also use the revision argument to mark a
    revision as good or bad without checking it out first.
    If you supply a command, it will be used for automatic bisection.
    Its exit status will be used to mark revisions as good or bad:
    status 0 means good, 125 means to skip the revision, 127
    (command not found) will abort the bisection, and any other
    non-zero exit status means the revision is bad.
    """
    def print_result(nodes, good):
        # report the revision(s) the bisection narrowed down to
        displayer = cmdutil.show_changeset(ui, repo, {})
        if len(nodes) == 1:
            # narrowed it down to a single revision
            if good:
                ui.write(_("The first good revision is:\n"))
            else:
                ui.write(_("The first bad revision is:\n"))
            displayer.show(repo[nodes[0]])
        else:
            # multiple possible revisions
            if good:
                ui.write(_("Due to skipped revisions, the first "
                           "good revision could be any of:\n"))
            else:
                ui.write(_("Due to skipped revisions, the first "
                           "bad revision could be any of:\n"))
            for n in nodes:
                displayer.show(repo[n])
        displayer.close()
    def check_state(state, interactive=True):
        # bisection needs at least one good and one bad revision recorded
        if not state['good'] or not state['bad']:
            if (good or bad or skip or reset) and interactive:
                return
            if not state['good']:
                raise util.Abort(_('cannot bisect (no known good revisions)'))
            else:
                raise util.Abort(_('cannot bisect (no known bad revisions)'))
        return True
    # backward compatibility
    if rev in "good bad reset init".split():
        ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n"))
        cmd, rev, extra = rev, extra, None
        if cmd == "good":
            good = True
        elif cmd == "bad":
            bad = True
        else:
            reset = True
    elif extra or good + bad + skip + reset + bool(command) > 1:
        raise util.Abort(_('incompatible arguments'))
    if reset:
        # forget all recorded good/bad/skip state
        p = repo.join("bisect.state")
        if os.path.exists(p):
            os.unlink(p)
        return
    state = hbisect.load_state(repo)
    if command:
        # automatic mode: run the command repeatedly, mapping its exit
        # status to good/bad/skip, until a single changeset remains
        changesets = 1
        try:
            while changesets:
                # update state
                status = util.system(command)
                if status == 125:
                    transition = "skip"
                elif status == 0:
                    transition = "good"
                # status < 0 means process was killed
                elif status == 127:
                    raise util.Abort(_("failed to execute %s") % command)
                elif status < 0:
                    raise util.Abort(_("%s killed") % command)
                else:
                    transition = "bad"
                ctx = repo[rev or '.']
                state[transition].append(ctx.node())
                # NOTE(review): formats ctx with both %d and %s — presumably
                # the context object defines __int__/__str__; confirm
                ui.status(_('Changeset %d:%s: %s\n') % (ctx, ctx, transition))
                check_state(state, interactive=False)
                # bisect
                nodes, changesets, good = hbisect.bisect(repo.changelog, state)
                # update to next check
                cmdutil.bail_if_changed(repo)
                hg.clean(repo, nodes[0], show_stats=False)
        finally:
            # persist progress even if a command run aborts mid-loop
            hbisect.save_state(repo, state)
        return print_result(nodes, good)
    # update state
    node = repo.lookup(rev or '.')
    if good or bad or skip:
        if good:
            state['good'].append(node)
        elif bad:
            state['bad'].append(node)
        elif skip:
            state['skip'].append(node)
        hbisect.save_state(repo, state)
    if not check_state(state):
        return
    # actually bisect
    nodes, changesets, good = hbisect.bisect(repo.changelog, state)
    if changesets == 0:
        print_result(nodes, good)
    else:
        assert len(nodes) == 1 # only a single node can be tested next
        node = nodes[0]
        # compute the approximate number of remaining tests
        tests, size = 0, 2
        while size <= changesets:
            tests, size = tests + 1, size * 2
        rev = repo.changelog.rev(node)
        ui.write(_("Testing changeset %d:%s "
                   "(%d changesets remaining, ~%d tests)\n")
                 % (rev, short(node), changesets, tests))
        if not noupdate:
            cmdutil.bail_if_changed(repo)
            return hg.clean(repo, node)
def branch(ui, repo, label=None, **opts):
    """set or show the current branch name
    With no argument, show the current branch name. With one argument,
    set the working directory branch name (the branch will not exist
    in the repository until the next commit). Standard practice
    recommends that primary development take place on the 'default'
    branch.
    Unless -f/--force is specified, branch will not let you set a
    branch name that already exists, even if it's inactive.
    Use -C/--clean to reset the working directory branch to that of
    the parent of the working directory, negating a previous branch
    change.
    Use the command 'hg update' to switch to an existing branch. Use
    'hg commit --close-branch' to mark this branch as closed.
    """
    if opts.get('clean'):
        # -C/--clean: go back to the branch of the working dir's parent
        parentbranch = repo[None].parents()[0].branch()
        repo.dirstate.setbranch(parentbranch)
        ui.status(_('reset working directory to branch %s\n') % parentbranch)
        return
    if not label:
        # no argument: just print the current branch name
        ui.write("%s\n" % encoding.tolocal(repo.dirstate.branch()))
        return
    utflabel = encoding.fromlocal(label)
    if not opts.get('force') and utflabel in repo.branchtags():
        # allow re-setting to a parent's branch, otherwise require --force
        if label not in [p.branch() for p in repo.parents()]:
            raise util.Abort(_('a branch of the same name already exists'
                               ' (use --force to override)'))
    repo.dirstate.setbranch(utflabel)
    ui.status(_('marked working directory as branch %s\n') % label)
def branches(ui, repo, active=False, closed=False):
    """list repository named branches
    List the repository's named branches, indicating which ones are
    inactive. If -c/--closed is specified, also list branches which have
    been marked closed (see hg commit --close-branch).
    If -a/--active is specified, only show active branches. A branch
    is considered active if it contains repository heads.
    Use the command 'hg update' to switch to an existing branch.
    """
    # full hashes with --debug, short ones otherwise
    hexfunc = ui.debugflag and hex or short
    activebranches = [repo[n].branch() for n in repo.heads()]
    def testactive(tag, node):
        # a branch is active if it owns a repository head that is still open
        realhead = tag in activebranches
        open = node in repo.branchheads(tag, closed=False)
        return realhead and open
    # sort: active branches first, then by descending revision number
    branches = sorted([(testactive(tag, node), repo.changelog.rev(node), tag)
                       for tag, node in repo.branchtags().items()],
                      reverse=True)
    for isactive, node, tag in branches:
        if (not active) or isactive:
            encodedtag = encoding.tolocal(tag)
            if ui.quiet:
                ui.write("%s\n" % encodedtag)
            else:
                hn = repo.lookup(node)
                if isactive:
                    notice = ''
                elif hn not in repo.branchheads(tag, closed=False):
                    # branch tip is closed; show it only with -c/--closed
                    if not closed:
                        continue
                    notice = _(' (closed)')
                else:
                    notice = _(' (inactive)')
                # pad the revision column to align the hash column
                rev = str(node).rjust(31 - encoding.colwidth(encodedtag))
                data = encodedtag, rev, hexfunc(hn), notice
                ui.write("%s %s:%s%s\n" % data)
def bundle(ui, repo, fname, dest=None, **opts):
    """create a changegroup file
    Generate a compressed changegroup file collecting changesets not
    known to be in another repository.
    If you omit the destination repository, then hg assumes the
    destination will have all the nodes you specify with --base
    parameters. To create a bundle containing all changesets, use
    -a/--all (or --base null).
    You can change compression method with the -t/--type option.
    The available compression methods are: none, bzip2, and
    gzip (by default, bundles are compressed using bzip2).
    The bundle file can then be transferred using conventional means
    and applied to another repository with the unbundle or pull
    command. This is useful when direct push and pull are not
    available or when exporting an entire repository is undesirable.
    Applying bundles preserves all changeset contents including
    permissions, copy/rename information, and revision history.
    """
    revs = opts.get('rev') or None
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # --all is equivalent to --base null (bundle everything)
    if opts.get('all'):
        base = ['null']
    else:
        base = opts.get('base')
    if base:
        if dest:
            raise util.Abort(_("--base is incompatible with specifying "
                               "a destination"))
        base = [repo.lookup(rev) for rev in base]
        # create the right base
        # XXX: nodesbetween / changegroup* should be "fixed" instead
        o = []
        has = set((nullid,))
        for n in base:
            has.update(repo.changelog.reachable(n))
        if revs:
            visit = list(revs)
        else:
            visit = repo.changelog.heads()
        seen = {}
        # walk ancestors of the requested heads, stopping at anything
        # reachable from a base; the roots found become the outgoing set
        while visit:
            n = visit.pop(0)
            parents = [p for p in repo.changelog.parents(n) if p not in has]
            if len(parents) == 0:
                o.insert(0, n)
            else:
                for p in parents:
                    if p not in seen:
                        seen[p] = 1
                        visit.append(p)
    else:
        # no --base: ask the destination repository what it is missing
        dest = ui.expandpath(dest or 'default-push', dest or 'default')
        dest, branches = hg.parseurl(dest, opts.get('branch'))
        other = hg.repository(cmdutil.remoteui(repo, opts), dest)
        revs, checkout = hg.addbranchrevs(repo, other, branches, revs)
        o = repo.findoutgoing(other, force=opts.get('force'))
    if revs:
        cg = repo.changegroupsubset(o, revs, 'bundle')
    else:
        cg = repo.changegroup(o, 'bundle')
    # map user-visible compression names to on-disk bundle identifiers
    bundletype = opts.get('type', 'bzip2').lower()
    btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
    bundletype = btypes.get(bundletype)
    if bundletype not in changegroup.bundletypes:
        raise util.Abort(_('unknown bundle type specified with --type'))
    changegroup.writebundle(cg, fname, bundletype)
def cat(ui, repo, file1, *pats, **opts):
    """output the current or given revision of files
    Print the specified files as they were at the given revision. If
    no revision is given, the parent of the working directory is used,
    or tip if no revision is checked out.
    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are the same as
    for the export command, with the following additions:
    :``%s``: basename of file being printed
    :``%d``: dirname of file being printed, or '.' if in repository root
    :``%p``: root-relative path name of file being printed
    """
    ctx = repo[opts.get('rev')]
    matcher = cmdutil.match(repo, (file1,) + pats, opts)
    exitcode = 1
    for fname in ctx.walk(matcher):
        out = cmdutil.make_file(repo, opts.get('output'), ctx.node(),
                                pathname=fname)
        data = ctx[fname].data()
        if opts.get('decode'):
            # run the data through the repo's write filters
            data = repo.wwritedata(fname, data)
        out.write(data)
        exitcode = 0  # at least one file was printed
    return exitcode
def clone(ui, source, dest=None, **opts):
    """make a copy of an existing repository
    Create a copy of an existing repository in a new directory.
    If no destination directory name is specified, it defaults to the
    basename of the source.
    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls.
    See 'hg help urls' for valid source format details.
    It is possible to specify an ``ssh://`` URL as the destination, but no
    .hg/hgrc and working directory will be created on the remote side.
    Please see 'hg help urls' for important details about ``ssh://`` URLs.
    If the -U/--noupdate option is specified, the new clone will contain
    only a repository (.hg) and no working copy (the working copy parent
    will be the null changeset). Otherwise, clone will initially check
    out (in order of precedence):
    a) the changeset, tag or branch specified with -u/--updaterev
    b) the changeset, tag or branch given with the first -r/--rev
    c) the branch given with the first -b/--branch
    d) the branch given with the url#branch source syntax
    e) the head of the default branch
    Use 'hg clone -u . src dst' to checkout the source repository's
    parent changeset (applicable for local source repositories only).
    A set of changesets (tags, or branch names) to pull may be specified
    by listing each changeset (tag, or branch name) with -r/--rev.
    If -r/--rev is used, the cloned repository will contain only a subset
    of the changesets of the source repository. Only the set of changesets
    defined by all -r/--rev options (including all their ancestors)
    will be pulled into the destination repository.
    No subsequent changesets (including subsequent tags) will be present
    in the destination.
    Using -r/--rev (or 'clone src#rev dest') implies --pull, even for
    local source repositories.
    For efficiency, hardlinks are used for cloning whenever the source
    and destination are on the same filesystem (note this applies only
    to the repository data, not to the checked out files). Some
    filesystems, such as AFS, implement hardlinking incorrectly, but
    do not report errors. In these cases, use the --pull option to
    avoid hardlinking.
    In some cases, you can clone repositories and checked out files
    using full hardlinks with ::
    $ cp -al REPO REPOCLONE
    This is the fastest way to clone, but it is not always safe. The
    operation is not atomic (making sure REPO is not modified during
    the operation is up to you) and you have to make sure your editor
    breaks hardlinks (Emacs and most Linux Kernel tools do so). Also,
    this is not compatible with certain extensions that place their
    metadata under the .hg directory, such as mq.
    """
    noupdate = opts.get('noupdate')
    updaterev = opts.get('updaterev')
    if noupdate and updaterev:
        raise util.Abort(_("cannot specify both --noupdate and --updaterev"))
    # with -U the clone gets no working copy; otherwise check out
    # --updaterev when given, or the default update target
    update = updaterev or not noupdate
    hg.clone(cmdutil.remoteui(ui, opts), source, dest,
             pull=opts.get('pull'),
             stream=opts.get('uncompressed'),
             rev=opts.get('rev'),
             update=update,
             branch=opts.get('branch'))
def commit(ui, repo, *pats, **opts):
    """commit the specified files or all outstanding changes
    Commit changes to the given files into the repository. Unlike a
    centralized RCS, this operation is a local operation. See hg push
    for a way to actively distribute your changes.
    If a list of files is omitted, all changes reported by "hg status"
    will be committed.
    If you are committing the result of a merge, do not provide any
    filenames or -I/-X filters.
    If no commit message is specified, the configured editor is
    started to prompt you for a message.
    See 'hg help dates' for a list of formats valid for -d/--date.
    """
    extra = {}
    if opts.get('close_branch'):
        extra['close'] = 1
    e = cmdutil.commiteditor
    if opts.get('force_editor'):
        e = cmdutil.commitforceeditor
    def commitfunc(ui, repo, message, match, opts):
        # actual commit callback used by cmdutil.commit
        return repo.commit(message, opts.get('user'), opts.get('date'), match,
                           editor=e, extra=extra)
    node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
    if not node:
        ui.status(_("nothing changed\n"))
        return
    cl = repo.changelog
    rev = cl.rev(node)
    parents = cl.parentrevs(rev)
    if rev - 1 in parents:
        # one of the parents was the old tip
        pass
    elif (parents == (nullrev, nullrev) or
          len(cl.heads(cl.node(parents[0]))) > 1 and
          (parents[1] == nullrev or len(cl.heads(cl.node(parents[1]))) > 1)):
        # the commit created a new head: either it has no parents at all,
        # or each (non-null) parent's branch already had another head
        ui.status(_('created new head\n'))
    if ui.debugflag:
        ui.write(_('committed changeset %d:%s\n') % (rev, hex(node)))
    elif ui.verbose:
        ui.write(_('committed changeset %d:%s\n') % (rev, short(node)))
def copy(ui, repo, *pats, **opts):
    """mark files as copied for the next commit
    Mark dest as having copies of source files. If dest is a
    directory, copies are put in that directory. If dest is a file,
    the source must be a single file.
    By default, this command copies the contents of files as they
    exist in the working directory. If invoked with -A/--after, the
    operation is recorded, but no copying is performed.
    This command takes effect with the next commit. To undo a copy
    before that, see hg revert.
    """
    # grab the working-directory lock for the duration of the copy
    # (False arg presumably means non-blocking acquire — confirm wlock API)
    lock = repo.wlock(False)
    try:
        return cmdutil.copy(ui, repo, pats, opts)
    finally:
        lock.release()
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    nargs = len(args)
    if nargs == 3:
        # an explicit index file was given: operate outside any repository
        index, rev1, rev2 = args
        log = revlog.revlog(util.opener(os.getcwd(), audit=False), index)
        lookup = log.lookup
    elif nargs == 2:
        # use the changelog of the current repository
        if not repo:
            raise util.Abort(_("There is no Mercurial repository here "
                               "(.hg not found)"))
        rev1, rev2 = args
        log = repo.changelog
        lookup = repo.lookup
    else:
        raise util.Abort(_('either two or three arguments required'))
    anc = log.ancestor(lookup(rev1), lookup(rev2))
    ui.write("%d:%s\n" % (log.rev(anc), hex(anc)))
def debugcommands(ui, cmd='', *args):
    """list all available commands and their options"""
    for name, entry in sorted(table.iteritems()):
        # canonical name: first alias, minus the '^' priority marker
        primary = name.split('|')[0].strip('^')
        optnames = [opt[1] for opt in entry[1]]
        ui.write('%s: %s\n' % (primary, ', '.join(optnames)))
def debugcomplete(ui, cmd='', **opts):
    """returns the completion list associated with the given command"""
    if opts.get('options'):
        # complete option names rather than command names
        opttables = [globalopts]
        if cmd:
            aliases, entry = cmdutil.findcmd(cmd, table, False)
            opttables.append(entry[1])
        flags = []
        for opttable in opttables:
            for opt in opttable:
                if opt[0]:
                    flags.append('-%s' % opt[0])  # short form, when present
                flags.append('--%s' % opt[1])     # long form, always
        ui.write("%s\n" % "\n".join(flags))
        return
    matches = cmdutil.findpossible(cmd, table)
    if ui.verbose:
        # verbose mode lists every alias of each matching command
        matches = [' '.join(names[0]) for names in matches.values()]
    ui.write("%s\n" % "\n".join(sorted(matches)))
def debugfsinfo(ui, path = "."):
open('.debugfsinfo', 'w').write('')
ui.write('exec: %s\n' % (util.checkexec(path) and 'yes' or 'no'))
ui.write('symlink: %s\n' % (util.checklink(path) and 'yes' or 'no'))
ui.write('case-sensitive: %s\n' % (util.checkcase('.debugfsinfo')
and 'yes' or 'no'))
os.unlink('.debugfsinfo')
def debugrebuildstate(ui, repo, rev="tip"):
    """rebuild the dirstate as it would look like for the given revision"""
    target = repo[rev]
    lock = repo.wlock()
    try:
        # rewrite the dirstate from the target revision's manifest
        repo.dirstate.rebuild(target.node(), target.manifest())
    finally:
        lock.release()
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    # cross-check every dirstate entry against the parent manifests;
    # state codes appear to be 'n' normal, 'a' added, 'r' removed,
    # 'm' merged — inferred from usage below, confirm against dirstate
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    # reverse direction: every file in the first manifest must be tracked
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        error = _(".hg/dirstate inconsistent with current parent's manifest")
        raise util.Abort(error)
def showconfig(ui, repo, *values, **opts):
    """show combined config settings from all hgrc files
    With no arguments, print names and values of all config items.
    With one argument of the form section.name, print just the value
    of that config item.
    With multiple arguments, print names and values of all config
    items with matching section names.
    With --debug, the source (filename and line number) is printed
    for each config item.
    """
    untrusted = bool(opts.get('untrusted'))
    if values:
        # at most one argument may name a specific section.name item
        if len([v for v in values if '.' in v]) > 1:
            raise util.Abort(_('only one config item permitted'))
    for section, name, value in ui.walkconfig(untrusted=untrusted):
        sectname = section + '.' + name
        if values:
            for v in values:
                if v == section:
                    # whole-section match: print "section.name=value"
                    ui.debug('%s: ' %
                             ui.configsource(section, name, untrusted))
                    ui.write('%s=%s\n' % (sectname, value))
                elif v == sectname:
                    # exact item match: print the bare value
                    ui.debug('%s: ' %
                             ui.configsource(section, name, untrusted))
                    ui.write(value, '\n')
        else:
            # no filter: dump everything
            ui.debug('%s: ' %
                     ui.configsource(section, name, untrusted))
            ui.write('%s=%s\n' % (sectname, value))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory
    This is useful for writing repository conversion tools, but should
    be used with care.
    """
    lock = repo.wlock()
    try:
        # an omitted second parent means the null revision
        p1 = repo.lookup(rev1)
        p2 = repo.lookup(rev2 or hex(nullid))
        repo.dirstate.setparents(p1, p2)
    finally:
        lock.release()
def debugstate(ui, repo, nodates=None):
    """show the contents of the current dirstate"""
    timestr = ""
    showdate = not nodates
    # each dirstate entry is indexed as (state, mode, size, mtime)
    for file_, ent in sorted(repo.dirstate._map.iteritems()):
        if showdate:
            if ent[3] == -1:
                # mtime of -1 means "unset"
                # Pad or slice to locale representation
                locale_len = len(time.strftime("%Y-%m-%d %H:%M:%S ",
                                               time.localtime(0)))
                timestr = 'unset'
                timestr = (timestr[:locale_len] +
                           ' ' * (locale_len - len(timestr)))
            else:
                timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
                                        time.localtime(ent[3]))
        if ent[1] & 020000:
            # symlink flag set in the mode field
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0777)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
def debugsub(ui, repo, rev=None):
    """dump subrepository state for the given (or working) revision"""
    if rev == '':
        # treat an explicit empty string like "no revision given"
        rev = None
    for path, substate in sorted(repo[rev].substate.items()):
        ui.write('path %s\n' % path)
        ui.write(' source %s\n' % substate[0])
        ui.write(' revision %s\n' % substate[1])
def debugdata(ui, file_, rev):
    """dump the contents of a data file revision"""
    # the companion index file is the data file's name with ".i" for ".d"
    indexname = file_[:-2] + ".i"
    log = revlog.revlog(util.opener(os.getcwd(), audit=False), indexname)
    try:
        ui.write(log.revision(log.lookup(rev)))
    except KeyError:
        raise util.Abort(_('invalid revision identifier %s') % rev)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    if opts["extended"]:
        parsed = util.parsedate(date, util.extendeddateformats)
    else:
        parsed = util.parsedate(date)
    # parsed is a pair formatted into the "internal: <ts> <tz>" line
    ui.write("internal: %s %s\n" % parsed)
    ui.write("standard: %s\n" % util.datestr(parsed))
    if range:
        # show whether the parsed timestamp falls within the given range
        matcher = util.matchdate(range)
        ui.write("match: %s\n" % matcher(parsed[0]))
def debugindex(ui, file_):
    """dump the contents of an index file

    Prints one row per revision: rev number, data offset/length, delta
    base, linkrev, and the short hashes of the node and its parents.
    """
    r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_)
    ui.write(" rev offset length base linkrev"
             " nodeid p1 p2\n")
    for i in r:
        node = r.node(i)
        try:
            pp = r.parents(node)
        except Exception:
            # was a bare except:, which also swallowed KeyboardInterrupt
            # and SystemExit; for a damaged entry fall back to null parents
            pp = [nullid, nullid]
        ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                i, r.start(i), r.length(i), r.base(i), r.linkrev(i),
                short(node), short(pp[0]), short(pp[1])))
def debugindexdot(ui, file_):
    """dump an index DAG as a graphviz dot file"""
    log = revlog.revlog(util.opener(os.getcwd(), audit=False), file_)
    ui.write("digraph G {\n")
    for rev in log:
        p1, p2 = log.parents(log.node(rev))
        # always emit the first-parent edge
        ui.write("\t%d -> %d\n" % (log.rev(p1), rev))
        if p2 != nullid:
            # a second edge only for merge revisions
            ui.write("\t%d -> %d\n" % (log.rev(p2), rev))
    ui.write("}\n")
def debuginstall(ui):
    '''test Mercurial installation'''
    def writetemp(contents):
        # write contents to a fresh temp file and return its path
        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
        f = os.fdopen(fd, "wb")
        f.write(contents)
        f.close()
        return name
    # count of problems found; also the return value
    problems = 0
    # encoding
    ui.status(_("Checking encoding (%s)...\n") % encoding.encoding)
    try:
        encoding.fromlocal("test")
    except util.Abort, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" (check that your locale is properly set)\n"))
        problems += 1
    # compiled modules
    ui.status(_("Checking extensions...\n"))
    try:
        import bdiff, mpatch, base85
    except Exception, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" One or more extensions could not be found"))
        ui.write(_(" (check that you compiled the extensions)\n"))
        problems += 1
    # templates
    ui.status(_("Checking templates...\n"))
    try:
        import templater
        templater.templater(templater.templatepath("map-cmdline.default"))
    except Exception, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" (templates seem to have been installed incorrectly)\n"))
        problems += 1
    # patch
    ui.status(_("Checking patch...\n"))
    patchproblems = 0
    # round-trip test: diff a against b, apply the patch to a, expect b
    a = "1\n2\n3\n4\n"
    b = "1\n2\n3\ninsert\n4\n"
    fa = writetemp(a)
    d = mdiff.unidiff(a, None, b, None, os.path.basename(fa),
                      os.path.basename(fa))
    fd = writetemp(d)
    files = {}
    try:
        patch.patch(fd, ui, cwd=os.path.dirname(fa), files=files)
    except util.Abort, e:
        ui.write(_(" patch call failed:\n"))
        ui.write(" " + str(e) + "\n")
        patchproblems += 1
    else:
        if list(files) != [os.path.basename(fa)]:
            ui.write(_(" unexpected patch output!\n"))
            patchproblems += 1
        a = open(fa).read()
        if a != b:
            ui.write(_(" patch test failed!\n"))
            patchproblems += 1
    if patchproblems:
        if ui.config('ui', 'patch'):
            ui.write(_(" (Current patch tool may be incompatible with patch,"
                       " or misconfigured. Please check your .hgrc file)\n"))
        else:
            ui.write(_(" Internal patcher failure, please report this error"
                       " to http://mercurial.selenic.com/bts/\n"))
    problems += patchproblems
    # clean up the temporary files used for the patch test
    os.unlink(fa)
    os.unlink(fd)
    # editor
    ui.status(_("Checking commit editor...\n"))
    editor = ui.geteditor()
    # the editor setting may include arguments; try the first word too
    cmdpath = util.find_exe(editor) or util.find_exe(editor.split()[0])
    if not cmdpath:
        if editor == 'vi':
            ui.write(_(" No commit editor set and can't find vi in PATH\n"))
            ui.write(_(" (specify a commit editor in your .hgrc file)\n"))
        else:
            ui.write(_(" Can't find editor '%s' in PATH\n") % editor)
            ui.write(_(" (specify a commit editor in your .hgrc file)\n"))
        problems += 1
    # check username
    ui.status(_("Checking username...\n"))
    try:
        user = ui.username()
    except util.Abort, e:
        ui.write(" %s\n" % e)
        ui.write(_(" (specify a username in your .hgrc file)\n"))
        problems += 1
    if not problems:
        ui.status(_("No problems detected\n"))
    else:
        ui.write(_("%s problems detected,"
                   " please check your install!\n") % problems)
    return problems
def debugrename(ui, repo, file1, *pats, **opts):
    """dump rename information"""
    ctx = repo[opts.get('rev')]
    matcher = cmdutil.match(repo, (file1,) + pats, opts)
    for fname in ctx.walk(matcher):
        fctx = ctx[fname]
        # renamed() reports the copy source recorded in the filelog, if any
        renamed = fctx.filelog().renamed(fctx.filenode())
        rel = matcher.rel(fname)
        if renamed:
            ui.write(_("%s renamed from %s:%s\n") % (rel, renamed[0],
                                                     hex(renamed[1])))
        else:
            ui.write(_("%s not renamed\n") % rel)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    matcher = cmdutil.match(repo, pats, opts)
    paths = list(repo.walk(matcher))
    if not paths:
        return
    # size the columns to the longest absolute and relative names
    fmt = 'f %%-%ds %%-%ds %%s' % (
        max([len(p) for p in paths]),
        max([len(matcher.rel(p)) for p in paths]))
    for p in paths:
        line = fmt % (p, matcher.rel(p), matcher.exact(p) and 'exact' or '')
        ui.write("%s\n" % line.rstrip())
def diff(ui, repo, *pats, **opts):
    """diff repository (or selected files)
    Show differences between revisions for the specified files.
    Differences between files are shown using the unified diff format.
    NOTE: diff may generate unexpected results for merges, as it will
    default to comparing against the working directory's first parent
    changeset if no revisions are specified.
    When two revision arguments are given, then changes are shown
    between those revisions. If only one revision is specified then
    that revision is compared to the working directory, and, when no
    revisions are specified, the working directory files are compared
    to its parent.
    Without the -a/--text option, diff will avoid generating diffs of
    files it detects as binary. With -a, diff will generate a diff
    anyway, probably with undesirable results.
    Use the -g/--git option to generate diffs in the git extended diff
    format. For more information, read 'hg help diffs'.
    """
    revs = opts.get('rev')
    change = opts.get('change')
    stat = opts.get('stat')
    reverse = opts.get('reverse')
    if revs and change:
        msg = _('cannot specify --rev and --change at the same time')
        raise util.Abort(msg)
    elif change:
        # --change REV: diff REV against its first parent
        node2 = repo.lookup(change)
        node1 = repo[node2].parents()[0].node()
    else:
        node1, node2 = cmdutil.revpair(repo, revs)
    if reverse:
        # swap operands so the diff reads in the opposite direction
        node1, node2 = node2, node1
    if stat:
        # diffstat does not need context lines
        opts['unified'] = '0'
    diffopts = patch.diffopts(ui, opts)
    m = cmdutil.match(repo, pats, opts)
    it = patch.diff(repo, node1, node2, match=m, opts=diffopts)
    if stat:
        # fall back to 80 columns when not attached to a terminal
        width = ui.interactive() and util.termwidth() or 80
        ui.write(patch.diffstat(util.iterlines(it), width=width,
                                git=diffopts.git))
    else:
        for chunk in it:
            ui.write(chunk)
def export(ui, repo, *changesets, **opts):
    """dump the header and diffs for one or more changesets
    Print the changeset header and diffs for one or more revisions.
    The information shown in the changeset header is: author, date,
    branch name (if non-default), changeset hash, parent(s) and commit
    comment.
    NOTE: export may generate unexpected diff output for merge
    changesets, as it will compare the merge changeset against its
    first parent only.
    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are as follows:
    :``%%``: literal "%" character
    :``%H``: changeset hash (40 bytes of hexadecimal)
    :``%N``: number of patches being generated
    :``%R``: changeset revision number
    :``%b``: basename of the exporting repository
    :``%h``: short-form changeset hash (12 bytes of hexadecimal)
    :``%n``: zero-padded sequence number, starting at 1
    :``%r``: zero-padded changeset revision number
    Without the -a/--text option, export will avoid generating diffs
    of files it detects as binary. With -a, export will generate a
    diff anyway, probably with undesirable results.
    Use the -g/--git option to generate diffs in the git extended diff
    format. See 'hg help diffs' for more information.
    With the --switch-parent option, the diff will be against the
    second parent. It can be useful to review a merge.
    """
    # revisions may be given positionally or via -r/--rev
    changesets += tuple(opts.get('rev', []))
    if not changesets:
        raise util.Abort(_("export requires at least one changeset"))
    revs = cmdutil.revrange(repo, changesets)
    if len(revs) > 1:
        ui.note(_('exporting patches:\n'))
    else:
        ui.note(_('exporting patch:\n'))
    patch.export(repo, revs, template=opts.get('output'),
                 switch_parent=opts.get('switch_parent'),
                 opts=patch.diffopts(ui, opts))
def forget(ui, repo, *pats, **opts):
    """forget the specified files on the next commit
    Mark the specified files so they will no longer be tracked
    after the next commit.
    This only removes files from the current branch, not from the
    entire project history, and it does not delete them from the
    working directory.
    To undo a forget before the next commit, see hg add.
    """
    if not pats:
        raise util.Abort(_('no files specified'))
    match = cmdutil.match(repo, pats, opts)
    st = repo.status(match=match, clean=True)
    # modified + added + deleted + clean: everything currently tracked
    targets = sorted(st[0] + st[1] + st[3] + st[6])
    # warn about explicitly named files that are not tracked at all
    for f in match.files():
        untracked = f not in repo.dirstate
        if untracked and not os.path.isdir(match.rel(f)):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % match.rel(f))
    for f in targets:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))
    # unlink=False: stop tracking but keep the working-directory copy
    repo.remove(targets, unlink=False)
def grep(ui, repo, pattern, *pats, **opts):
    """search for a pattern in specified files and revisions
    Search revisions of files for a regular expression.
    This command behaves differently than Unix grep. It only accepts
    Python/Perl regexps. It searches repository history, not the
    working directory. It always prints the revision number in which a
    match appears.
    By default, grep only prints output for the first revision of a
    file in which it finds a match. To get it to print every revision
    that contains a change in match status ("-" for a match that
    becomes a non-match, or "+" for a non-match that becomes a match),
    use the --all flag.
    """
    reflags = 0
    if opts.get('ignore_case'):
        reflags |= re.I
    try:
        regexp = re.compile(pattern, reflags)
    except Exception, inst:
        ui.warn(_("grep: invalid match pattern: %s\n") % inst)
        return None
    # output separators: NUL-delimited with --print0, colon/newline otherwise
    sep, eol = ':', '\n'
    if opts.get('print0'):
        sep = eol = '\0'
    # cache filelog lookups: the same file is visited in many revisions
    getfile = util.lrucachefunc(repo.file)
    def matchlines(body):
        # Yield (linenum, colstart, colend, line) for each regexp match
        # in body; linenum is 1-based within this file revision.
        begin = 0
        linenum = 0
        while True:
            match = regexp.search(body, begin)
            if not match:
                break
            mstart, mend = match.span()
            linenum += body.count('\n', begin, mstart) + 1
            lstart = body.rfind('\n', begin, mstart) + 1 or begin
            begin = body.find('\n', mend) + 1 or len(body)
            lend = begin - 1
            yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]
    class linestate(object):
        # One matched line. Equality/hash deliberately ignore the match
        # columns so difflib can compare lines by content across revisions.
        def __init__(self, line, linenum, colstart, colend):
            self.line = line
            self.linenum = linenum
            self.colstart = colstart
            self.colend = colend
        def __hash__(self):
            return hash((self.linenum, self.line))
        def __eq__(self, other):
            return self.line == other.line
    # matches: rev -> filename -> [linestate]; copies: rev -> fn -> source
    matches = {}
    copies = {}
    def grepbody(fn, rev, body):
        # record every matching line of fn at rev into matches
        matches[rev].setdefault(fn, [])
        m = matches[rev][fn]
        for lnum, cstart, cend, line in matchlines(body):
            s = linestate(line, lnum, cstart, cend)
            m.append(s)
    def difflinestates(a, b):
        # Diff two linestate lists; yield ('-', state) for lines that
        # stopped matching and ('+', state) for lines that started.
        sm = difflib.SequenceMatcher(None, a, b)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag == 'insert':
                for i in xrange(blo, bhi):
                    yield ('+', b[i])
            elif tag == 'delete':
                for i in xrange(alo, ahi):
                    yield ('-', a[i])
            elif tag == 'replace':
                for i in xrange(alo, ahi):
                    yield ('-', a[i])
                for i in xrange(blo, bhi):
                    yield ('+', b[i])
    def display(fn, ctx, pstates, states):
        # Print matches for one file at one revision; with --all, show
        # only the lines whose match status changed vs. the parent.
        # Returns True if anything was written.
        rev = ctx.rev()
        datefunc = ui.quiet and util.shortdate or util.datestr
        found = False
        filerevmatches = {}
        if opts.get('all'):
            iter = difflinestates(pstates, states)
        else:
            iter = [('', l) for l in states]
        for change, l in iter:
            cols = [fn, str(rev)]
            if opts.get('line_number'):
                cols.append(str(l.linenum))
            if opts.get('all'):
                cols.append(change)
            if opts.get('user'):
                cols.append(ui.shortuser(ctx.user()))
            if opts.get('date'):
                cols.append(datefunc(ctx.date()))
            if opts.get('files_with_matches'):
                # with -l, emit at most one line per (file, rev) pair
                c = (fn, rev)
                if c in filerevmatches:
                    continue
                filerevmatches[c] = 1
            else:
                cols.append(l.line)
            ui.write(sep.join(cols), eol)
            found = True
        return found
    skip = {}
    revfiles = {}
    matchfn = cmdutil.match(repo, pats, opts)
    found = False
    follow = opts.get('follow')
    def prep(ctx, fns):
        # walkchangerevs callback: collect match state for ctx and its
        # first parent so the main loop can compare them later.
        rev = ctx.rev()
        pctx = ctx.parents()[0]
        parent = pctx.rev()
        matches.setdefault(rev, {})
        matches.setdefault(parent, {})
        files = revfiles.setdefault(rev, [])
        for fn in fns:
            flog = getfile(fn)
            try:
                fnode = ctx.filenode(fn)
            except error.LookupError:
                continue
            # with --follow, track renames back to their source name
            copied = flog.renamed(fnode)
            copy = follow and copied and copied[0]
            if copy:
                copies.setdefault(rev, {})[fn] = copy
            if fn in skip:
                if copy:
                    skip[copy] = True
                continue
            files.append(fn)
            if fn not in matches[rev]:
                grepbody(fn, rev, flog.read(fnode))
            pfn = copy or fn
            if pfn not in matches[parent]:
                try:
                    fnode = pctx.filenode(pfn)
                    grepbody(pfn, parent, flog.read(fnode))
                except error.LookupError:
                    pass
    for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
        rev = ctx.rev()
        parent = ctx.parents()[0].rev()
        for fn in sorted(revfiles.get(rev, [])):
            states = matches[rev][fn]
            copy = copies.get(rev, {}).get(fn)
            if fn in skip:
                if copy:
                    skip[copy] = True
                continue
            pstates = matches.get(parent, {}).get(copy or fn, [])
            if pstates or states:
                r = display(fn, ctx, pstates, states)
                found = found or r
                # without --all, report only the first matching revision
                # of each file (and of its rename source)
                if r and not opts.get('all'):
                    skip[fn] = True
                    if copy:
                        skip[copy] = True
        # free per-revision state we no longer need
        del matches[rev]
        del revfiles[rev]
def heads(ui, repo, *branchrevs, **opts):
    """show current repository heads or show branch heads
    With no arguments, show all repository branch heads.
    Repository "heads" are changesets with no child changesets. They are
    where development generally takes place and are the usual targets
    for update and merge operations. Branch heads are changesets that have
    no child changeset on the same branch.
    If one or more REVs are given, only branch heads on the branches
    associated with the specified changesets are shown.
    If -c/--closed is specified, also show branch heads marked closed
    (see hg commit --close-branch).
    If STARTREV is specified, only those heads that are descendants of
    STARTREV will be displayed.
    If -t/--topo is specified, named branch mechanics will be ignored and only
    changesets without children will be shown.
    """
    # resolve the optional STARTREV (--rev) restriction
    if opts.get('rev'):
        start = repo.lookup(opts['rev'])
    else:
        start = None
    if opts.get('topo'):
        # topological heads: childless changesets, branches ignored
        heads = [repo[h] for h in repo.heads(start)]
    else:
        heads = []
        for b, ls in repo.branchmap().iteritems():
            if start is None:
                heads += [repo[h] for h in ls]
                continue
            # keep only branch heads that descend from STARTREV
            startrev = repo.changelog.rev(start)
            descendants = set(repo.changelog.descendants(startrev))
            descendants.add(startrev)
            rev = repo.changelog.rev
            heads += [repo[h] for h in ls if rev(h) in descendants]
    if branchrevs:
        # restrict to the branches of the given revisions
        decode, encode = encoding.fromlocal, encoding.tolocal
        branches = set(repo[decode(br)].branch() for br in branchrevs)
        heads = [h for h in heads if h.branch() in branches]
    if not opts.get('closed'):
        heads = [h for h in heads if not h.extra().get('close')]
    if opts.get('active') and branchrevs:
        dagheads = repo.heads(start)
        heads = [h for h in heads if h.node() in dagheads]
    if branchrevs:
        haveheads = set(h.branch() for h in heads)
        if branches - haveheads:
            headless = ', '.join(encode(b) for b in branches - haveheads)
            msg = _('no open branch heads found on branches %s')
            if opts.get('rev'):
                # Translate the template first, then interpolate the value;
                # formatting inside _() would defeat catalog lookup.
                msg += _(' (started at %s)') % opts['rev']
            ui.warn((msg + '\n') % headless)
    if not heads:
        return 1
    # newest first
    heads = sorted(heads, key=lambda x: -x.rev())
    displayer = cmdutil.show_changeset(ui, repo, opts)
    for ctx in heads:
        displayer.show(ctx)
    displayer.close()
def help_(ui, name=None, with_version=False, unknowncmd=False):
    """show help for a given topic or a help overview
    With no arguments, print a list of commands with short help messages.
    Given a topic, extension, or command name, print help for that
    topic."""
    # (title, options) pairs accumulated by the helpers below and
    # rendered at the end
    option_lists = []
    textwidth = util.termwidth() - 2
    def addglobalopts(aliases):
        # append the global options (verbose mode) or a hint on how to
        # see them to option_lists
        if ui.verbose:
            option_lists.append((_("global options:"), globalopts))
            if name == 'shortlist':
                option_lists.append((_('use "hg help" for the full list '
                                       'of commands'), ()))
        else:
            if name == 'shortlist':
                msg = _('use "hg help" for the full list of commands '
                        'or "hg -v" for details')
            elif aliases:
                msg = _('use "hg -v help%s" to show aliases and '
                        'global options') % (name and " " + name or "")
            else:
                msg = _('use "hg -v help %s" to show global options') % name
            option_lists.append((msg, ()))
    def helpcmd(name):
        # show detailed help for one command
        if with_version:
            version_(ui)
            ui.write('\n')
        try:
            aliases, entry = cmdutil.findcmd(name, table, strict=unknowncmd)
        except error.AmbiguousCommand, inst:
            # py3k fix: except vars can't be used outside the scope of the
            # except block, nor can be used inside a lambda. python issue4617
            prefix = inst.args[0]
            select = lambda c: c.lstrip('^').startswith(prefix)
            helplist(_('list of commands:\n\n'), select)
            return
        # check if it's an invalid alias and display its error if it is
        if getattr(entry[0], 'badalias', False):
            if not unknowncmd:
                entry[0](ui)
            return
        # synopsis
        if len(entry) > 2:
            if entry[2].startswith('hg'):
                ui.write("%s\n" % entry[2])
            else:
                ui.write('hg %s %s\n' % (aliases[0], entry[2]))
        else:
            ui.write('hg %s\n' % aliases[0])
        # aliases
        if not ui.quiet and len(aliases) > 1:
            ui.write(_("\naliases: %s\n") % ', '.join(aliases[1:]))
        # description
        doc = gettext(entry[0].__doc__)
        if not doc:
            doc = _("(no help text available)")
        if ui.quiet:
            doc = doc.splitlines()[0]
        keep = ui.verbose and ['verbose'] or []
        formatted, pruned = minirst.format(doc, textwidth, keep=keep)
        ui.write("\n%s\n" % formatted)
        if pruned:
            ui.write(_('\nuse "hg -v help %s" to show verbose help\n') % name)
        if not ui.quiet:
            # options
            if entry[1]:
                option_lists.append((_("options:\n"), entry[1]))
            addglobalopts(False)
    def helplist(header, select=None):
        # list commands, optionally filtered by the select() predicate;
        # h maps command name -> summary, cmds maps name -> full table key
        h = {}
        cmds = {}
        for c, e in table.iteritems():
            f = c.split("|", 1)[0]
            if select and not select(f):
                continue
            if (not select and name != 'shortlist' and
                e[0].__module__ != __name__):
                continue
            if name == "shortlist" and not f.startswith("^"):
                continue
            f = f.lstrip("^")
            if not ui.debugflag and f.startswith("debug"):
                continue
            doc = e[0].__doc__
            if doc and 'DEPRECATED' in doc and not ui.verbose:
                continue
            doc = gettext(doc)
            if not doc:
                doc = _("(no help text available)")
            # first docstring line is the command summary
            h[f] = doc.splitlines()[0].rstrip()
            cmds[f] = c.lstrip("^")
        if not h:
            ui.status(_('no commands defined\n'))
            return
        ui.status(header)
        fns = sorted(h)
        m = max(map(len, fns))
        for f in fns:
            if ui.verbose:
                commands = cmds[f].replace("|",", ")
                ui.write(" %s:\n %s\n"%(commands, h[f]))
            else:
                ui.write(' %-*s %s\n' % (m, f, util.wrap(h[f], m + 4)))
        if not ui.quiet:
            addglobalopts(True)
    def helptopic(name):
        # show help for a named help topic from help.helptable
        for names, header, doc in help.helptable:
            if name in names:
                break
        else:
            raise error.UnknownCommand(name)
        # description
        if not doc:
            doc = _("(no help text available)")
        if hasattr(doc, '__call__'):
            doc = doc()
        ui.write("%s\n\n" % header)
        ui.write("%s\n" % minirst.format(doc, textwidth, indent=4))
    def helpext(name):
        # show help for an extension, enabled or disabled
        try:
            mod = extensions.find(name)
            doc = gettext(mod.__doc__) or _('no help text available')
        except KeyError:
            mod = None
            doc = extensions.disabledext(name)
            if not doc:
                raise error.UnknownCommand(name)
        if '\n' not in doc:
            head, tail = doc, ""
        else:
            head, tail = doc.split('\n', 1)
        ui.write(_('%s extension - %s\n\n') % (name.split('.')[-1], head))
        if tail:
            ui.write(minirst.format(tail, textwidth))
            ui.status('\n\n')
        if mod:
            try:
                ct = mod.cmdtable
            except AttributeError:
                ct = {}
            modcmds = set([c.split('|', 1)[0] for c in ct])
            helplist(_('list of commands:\n\n'), modcmds.__contains__)
        else:
            ui.write(_('use "hg help extensions" for information on enabling '
                       'extensions\n'))
    def helpextcmd(name):
        # show which disabled extension provides the given command
        cmd, ext, mod = extensions.disabledcmd(name, ui.config('ui', 'strict'))
        doc = gettext(mod.__doc__).splitlines()[0]
        msg = help.listexts(_("'%s' is provided by the following "
                              "extension:") % cmd, {ext: doc}, len(ext),
                            indent=4)
        ui.write(minirst.format(msg, textwidth))
        ui.write('\n\n')
        ui.write(_('use "hg help extensions" for information on enabling '
                   'extensions\n'))
    if name and name != 'shortlist':
        # try each lookup strategy in turn until one succeeds;
        # remember the last UnknownCommand so we can re-raise it
        i = None
        if unknowncmd:
            queries = (helpextcmd,)
        else:
            queries = (helptopic, helpcmd, helpext, helpextcmd)
        for f in queries:
            try:
                f(name)
                i = None
                break
            except error.UnknownCommand, inst:
                i = inst
        if i:
            raise i
    else:
        # program name
        if ui.verbose or with_version:
            version_(ui)
        else:
            ui.status(_("Mercurial Distributed SCM\n"))
        ui.status('\n')
        # list of commands
        if name == "shortlist":
            header = _('basic commands:\n\n')
        else:
            header = _('list of commands:\n\n')
        helplist(header)
        if name != 'shortlist':
            exts, maxlength = extensions.enabled()
            text = help.listexts(_('enabled extensions:'), exts, maxlength)
            if text:
                ui.write("\n%s\n" % minirst.format(text, textwidth))
    # list all option lists
    opt_output = []
    for title, options in option_lists:
        opt_output.append(("\n%s" % title, None))
        for shortopt, longopt, default, desc in options:
            if _("DEPRECATED") in desc and not ui.verbose:
                continue
            opt_output.append(("%2s%s" % (shortopt and "-%s" % shortopt,
                                          longopt and " --%s" % longopt),
                               "%s%s" % (desc,
                                         default
                                         and _(" (default: %s)") % default
                                         or "")))
    if not name:
        ui.write(_("\nadditional help topics:\n\n"))
        topics = []
        for names, header, doc in help.helptable:
            topics.append((sorted(names, key=len, reverse=True)[0], header))
        topics_len = max([len(s[0]) for s in topics])
        for t, desc in topics:
            ui.write(" %-*s %s\n" % (topics_len, t, desc))
    if opt_output:
        # align descriptions on the widest option column
        opts_len = max([len(line[0]) for line in opt_output if line[1]] or [0])
        for first, second in opt_output:
            if second:
                second = util.wrap(second, opts_len + 3)
                ui.write(" %-*s %s\n" % (opts_len, first, second))
            else:
                ui.write("%s\n" % first)
def identify(ui, repo, source=None,
             rev=None, num=None, id=None, branch=None, tags=None):
    """identify the working copy or specified revision
    With no revision, print a summary of the current state of the
    repository.
    Specifying a path to a repository root or Mercurial bundle will
    cause lookup to operate on that repository/bundle.
    This summary identifies the repository state using one or two
    parent hash identifiers, followed by a "+" if there are
    uncommitted changes in the working directory, a list of tags for
    this revision and a branch name for non-default branches.
    """
    if not repo and not source:
        raise util.Abort(_("There is no Mercurial repository here "
                           "(.hg not found)"))
    hexfunc = ui.debugflag and hex or short
    # no display flags given: show the default summary
    default = not (num or id or branch or tags)
    output = []
    revs = []
    if source:
        # operate on another repository or bundle instead of the local one
        source, branches = hg.parseurl(ui.expandpath(source))
        repo = hg.repository(ui, source)
        revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
    if not repo.local():
        # remote repos can only answer hash queries
        if not rev and revs:
            rev = revs[0]
        if not rev:
            rev = "tip"
        if num or branch or tags:
            raise util.Abort(
                "can't query remote revision number, branch, or tags")
        output = [hexfunc(repo.lookup(rev))]
    elif not rev:
        # summarize the working directory state; a trailing "+" marks
        # uncommitted changes
        ctx = repo[None]
        parents = ctx.parents()
        changed = False
        if default or id or num:
            changed = ctx.files() + ctx.deleted()
        if default or id:
            output = ["%s%s" % ('+'.join([hexfunc(p.node()) for p in parents]),
                                (changed) and "+" or "")]
        if num:
            output.append("%s%s" % ('+'.join([str(p.rev()) for p in parents]),
                                    (changed) and "+" or ""))
    else:
        ctx = repo[rev]
        if default or id:
            output = [hexfunc(ctx.node())]
        if num:
            output.append(str(ctx.rev()))
    if repo.local() and default and not ui.quiet:
        b = encoding.tolocal(ctx.branch())
        if b != 'default':
            output.append("(%s)" % b)
        # multiple tags for a single parent separated by '/'
        t = "/".join(ctx.tags())
        if t:
            output.append(t)
    if branch:
        output.append(encoding.tolocal(ctx.branch()))
    if tags:
        output.extend(ctx.tags())
    ui.write("%s\n" % ' '.join(output))
def import_(ui, repo, patch1, *patches, **opts):
    """import an ordered set of patches
    Import a list of patches and commit them individually (unless
    --no-commit is specified).
    If there are outstanding changes in the working directory, import
    will abort unless given the -f/--force flag.
    You can import a patch straight from a mail message. Even patches
    as attachments work (to use the body part, it must have type
    text/plain or text/x-patch). From and Subject headers of email
    message are used as default committer and commit message. All
    text/plain body parts before first diff are added to commit
    message.
    If the imported patch was generated by hg export, user and
    description from patch override values from message headers and
    body. Values given on command line with -m/--message and -u/--user
    override these.
    If --exact is specified, import will set the working directory to
    the parent of each patch before applying it, and will abort if the
    resulting changeset has a different ID than the one recorded in
    the patch. This may happen due to character set problems or other
    deficiencies in the text patch format.
    With -s/--similarity, hg will attempt to discover renames and
    copies in the patch in the same way as 'addremove'.
    To read a patch from standard input, use "-" as the patch name. If
    a URL is specified, the patch will be downloaded from it.
    See 'hg help dates' for a list of formats valid for -d/--date.
    """
    patches = (patch1,) + patches
    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)
    # validate -s/--similarity before touching the repository
    try:
        sim = float(opts.get('similarity') or 0)
    except ValueError:
        raise util.Abort(_('similarity must be a number'))
    if sim < 0 or sim > 100:
        raise util.Abort(_('similarity must be between 0 and 100'))
    if opts.get('exact') or not opts.get('force'):
        cmdutil.bail_if_changed(repo)
    d = opts["base"]
    strip = opts["strip"]
    wlock = lock = None
    def tryone(ui, hunk):
        # Apply one patch hunk. Returns a short commit id, the
        # 'to working directory' marker (with --no-commit), or None
        # when the hunk contains no usable patch.
        tmpname, message, user, date, branch, nodeid, p1, p2 = \
            patch.extract(ui, hunk)
        if not tmpname:
            return None
        commitid = _('to working directory')
        try:
            # commit message priority: command line, then patch header,
            # otherwise None so commit launches the editor
            cmdline_message = cmdutil.logmessage(opts)
            if cmdline_message:
                # pickup the cmdline msg
                message = cmdline_message
            elif message:
                # pickup the patch msg
                message = message.strip()
            else:
                # launch the editor
                message = None
            ui.debug('message:\n%s\n' % message)
            wp = repo.parents()
            if opts.get('exact'):
                # --exact: update to the patch's recorded parent first
                if not nodeid or not p1:
                    raise util.Abort(_('not a Mercurial patch'))
                p1 = repo.lookup(p1)
                p2 = repo.lookup(p2 or hex(nullid))
                if p1 != wp[0].node():
                    hg.clean(repo, p1)
                repo.dirstate.setparents(p1, p2)
            elif p2:
                # merge patch: adopt its second parent when possible
                try:
                    p1 = repo.lookup(p1)
                    p2 = repo.lookup(p2)
                    if p1 == wp[0].node():
                        repo.dirstate.setparents(p1, p2)
                except error.RepoError:
                    pass
            if opts.get('exact') or opts.get('import_branch'):
                repo.dirstate.setbranch(branch or 'default')
            files = {}
            try:
                patch.patch(tmpname, ui, strip=strip, cwd=repo.root,
                            files=files, eolmode=None)
            finally:
                # record added/removed files even if patching failed midway
                files = patch.updatedir(ui, repo, files,
                                        similarity=sim / 100.0)
            if not opts.get('no_commit'):
                if opts.get('exact'):
                    m = None
                else:
                    m = cmdutil.matchfiles(repo, files or [])
                n = repo.commit(message, opts.get('user') or user,
                                opts.get('date') or date, match=m,
                                editor=cmdutil.commiteditor)
                if opts.get('exact'):
                    # the recreated changeset must reproduce the recorded id
                    if hex(n) != nodeid:
                        repo.rollback()
                        raise util.Abort(_('patch is damaged'
                                           ' or loses information'))
                # Force a dirstate write so that the next transaction
                # backups an up-to-date file.
                repo.dirstate.write()
                if n:
                    commitid = short(n)
            return commitid
        finally:
            os.unlink(tmpname)
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        lastcommit = None
        for p in patches:
            pf = os.path.join(d, p)
            if pf == '-':
                ui.status(_("applying patch from stdin\n"))
                pf = sys.stdin
            else:
                ui.status(_("applying %s\n") % p)
                pf = url.open(ui, pf)
            haspatch = False
            for hunk in patch.split(pf):
                commitid = tryone(ui, hunk)
                if commitid:
                    haspatch = True
                if lastcommit:
                    ui.status(_('applied %s\n') % lastcommit)
                lastcommit = commitid
            if not haspatch:
                raise util.Abort(_('no diffs found'))
    finally:
        release(lock, wlock)
def incoming(ui, repo, source="default", **opts):
    """show new changesets found in source
    Show new changesets found in the specified path/URL or the default
    pull location. These are the changesets that would have been pulled
    if a pull at the time you issued this command.
    For remote repository, using --bundle avoids downloading the
    changesets twice if the incoming is followed by a pull.
    See pull for valid source format details.
    """
    limit = cmdutil.loglimit(opts)
    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
    other = hg.repository(cmdutil.remoteui(repo, opts), source)
    ui.status(_('comparing with %s\n') % url.hidepassword(source))
    revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))
    if revs:
        revs = [other.lookup(rev) for rev in revs]
    common, incoming, rheads = repo.findcommonincoming(other, heads=revs,
                                                       force=opts["force"])
    if not incoming:
        # best-effort removal of a stale --bundle file; opts["bundle"]
        # may be empty, in which case unlink raises OSError
        try:
            os.unlink(opts["bundle"])
        except OSError:
            pass
        ui.status(_("no changes found\n"))
        return 1
    cleanup = None
    try:
        fname = opts["bundle"]
        if fname or not other.local():
            # create a bundle (uncompressed if other repo is not local)
            if revs is None and other.capable('changegroupsubset'):
                revs = rheads
            if revs is None:
                cg = other.changegroup(incoming, "incoming")
            else:
                cg = other.changegroupsubset(incoming, revs, 'incoming')
            bundletype = other.local() and "HG10BZ" or "HG10UN"
            fname = cleanup = changegroup.writebundle(cg, fname, bundletype)
            # keep written bundle?
            if opts["bundle"]:
                cleanup = None
            if not other.local():
                # use the created uncompressed bundlerepo
                other = bundlerepo.bundlerepository(ui, repo.root, fname)
        o = other.changelog.nodesbetween(incoming, revs)[0]
        if opts.get('newest_first'):
            o.reverse()
        displayer = cmdutil.show_changeset(ui, other, opts)
        count = 0
        for n in o:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
        displayer.close()
    finally:
        # close the (possibly bundle-backed) peer and drop any temporary
        # bundle file we created
        if hasattr(other, 'close'):
            other.close()
        if cleanup:
            os.unlink(cleanup)
def init(ui, dest=".", **opts):
    """create a new repository in the given directory
    Initialize a new repository in the given directory. If the given
    directory does not exist, it will be created.
    If no directory is given, the current directory is used.
    It is possible to specify an ``ssh://`` URL as the destination.
    See 'hg help urls' for more information.
    """
    # remoteui applies ssh/remote configuration so ssh:// URLs work
    remote = cmdutil.remoteui(ui, opts)
    hg.repository(remote, dest, create=1)
def locate(ui, repo, *pats, **opts):
    """locate files matching specific patterns
    Print files under Mercurial control in the working directory whose
    names match the given patterns.
    By default, this command searches all directories in the working
    directory. To search just the current directory and its
    subdirectories, use "--include .".
    If no patterns are given to match, this command prints the names
    of all files under Mercurial control in the working directory.
    If you want to feed the output of this command into the "xargs"
    command, use the -0 option to both this command and "xargs". This
    will avoid the problem of "xargs" treating single filenames that
    contain whitespace as multiple filenames.
    """
    # NUL-terminate entries with -0/--print0, newline otherwise
    if opts.get('print0'):
        end = '\0'
    else:
        end = '\n'
    rev = opts.get('rev') or None
    ret = 1
    m = cmdutil.match(repo, pats, opts, default='relglob')
    # silence complaints about unmatched patterns
    m.bad = lambda x, y: False
    for abs in repo[rev].walk(m):
        # without a revision, restrict to currently tracked files
        if not rev and abs not in repo.dirstate:
            continue
        if opts.get('fullpath'):
            name = repo.wjoin(abs)
        elif pats:
            name = m.rel(abs) or abs
        else:
            name = abs
        ui.write(name, end)
        # any output at all means success
        ret = 0
    return ret
def log(ui, repo, *pats, **opts):
    """show revision history of entire repository or files
    Print the revision history of the specified files or the entire
    project.
    File history is shown without following rename or copy history of
    files. Use -f/--follow with a filename to follow history across
    renames and copies. --follow without a filename will only show
    ancestors or descendants of the starting revision. --follow-first
    only follows the first parent of merge revisions.
    If no revision range is specified, the default is tip:0 unless
    --follow is set, in which case the working directory parent is
    used as the starting revision.
    See 'hg help dates' for a list of formats valid for -d/--date.
    By default this command prints revision number and changeset id,
    tags, non-trivial parents, user, date and time, and a summary for
    each commit. When the -v/--verbose switch is used, the list of
    changed files and full commit message are shown.
    NOTE: log -p/--patch may generate unexpected diff output for merge
    changesets, as it will only compare the merge changeset against
    its first parent. Also, only files different from BOTH parents
    will appear in files:.
    """
    matchfn = cmdutil.match(repo, pats, opts)
    limit = cmdutil.loglimit(opts)
    count = 0
    endrev = None
    if opts.get('copies') and opts.get('rev'):
        # upper bound for rename detection when both --copies and --rev given
        endrev = max(cmdutil.revrange(repo, opts.get('rev'))) + 1
    df = False
    if opts["date"]:
        df = util.matchdate(opts["date"])
    displayer = cmdutil.show_changeset(ui, repo, opts, True, matchfn)
    def prep(ctx, fns):
        # walkchangerevs callback: apply the --no-merges, --only-merges,
        # --only-branch, --date, --user and --keyword filters, then queue
        # the changeset on the displayer.
        rev = ctx.rev()
        parents = [p for p in repo.changelog.parentrevs(rev)
                   if p != nullrev]
        if opts.get('no_merges') and len(parents) == 2:
            return
        if opts.get('only_merges') and len(parents) != 2:
            return
        if opts.get('only_branch') and ctx.branch() not in opts['only_branch']:
            return
        if df and not df(ctx.date()[0]):
            return
        if opts['user'] and not [k for k in opts['user'] if k in ctx.user()]:
            return
        if opts.get('keyword'):
            # keep the changeset only if some keyword appears in the user,
            # description or file list (case-insensitive)
            for k in [kw.lower() for kw in opts['keyword']]:
                if (k in ctx.user().lower() or
                    k in ctx.description().lower() or
                    k in " ".join(ctx.files()).lower()):
                    break
            else:
                return
        copies = None
        if opts.get('copies') and rev:
            # collect (file, rename source) pairs for this revision
            copies = []
            getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
            for fn in ctx.files():
                rename = getrenamed(fn, rev)
                if rename:
                    copies.append((fn, rename[0]))
        displayer.show(ctx, copies=copies)
    for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
        if count == limit:
            break
        # flush() returns True if the revision was actually displayed
        if displayer.flush(ctx.rev()):
            count += 1
    displayer.close()
def manifest(ui, repo, node=None, rev=None):
    """output the current or given revision of the project manifest
    Print a list of version controlled files for the given revision.
    If no revision is given, the first parent of the working directory
    is used, or the null revision if no revision is checked out.
    With -v, print file permissions, symlink and executable bits.
    With --debug, print file revision hashes.
    """
    if rev and node:
        raise util.Abort(_("please specify just one revision"))
    node = node or rev
    # mode prefixes keyed by file flag: 'l' symlink, 'x' executable
    prefixes = {'l': '644 @ ', 'x': '755 * ', '': '644 '}
    ctx = repo[node]
    for path in ctx:
        if ui.debugflag:
            ui.write("%40s " % hex(ctx.manifest()[path]))
        if ui.verbose:
            ui.write(prefixes[ctx.flags(path)])
        ui.write("%s\n" % path)
def merge(ui, repo, node=None, **opts):
    """merge working directory with another revision
    The current working directory is updated with all changes made in
    the requested revision since the last common predecessor revision.
    Files that changed between either parent are marked as changed for
    the next commit and a commit must be performed before any further
    updates to the repository are allowed. The next commit will have
    two parents.
    If no revision is specified, the working directory's parent is a
    head revision, and the current branch contains exactly one other
    head, the other head is merged with by default. Otherwise, an
    explicit revision with which to merge with must be provided.
    """
    if opts.get('rev') and node:
        raise util.Abort(_("please specify just one revision"))
    if not node:
        node = opts.get('rev')
    if not node:
        # no explicit revision: pick the single other head of this branch
        branch = repo.changectx(None).branch()
        bheads = repo.branchheads(branch)
        if len(bheads) > 2:
            ui.warn(_("abort: branch '%s' has %d heads - "
                      "please merge with an explicit rev\n")
                    % (branch, len(bheads)))
            ui.status(_("(run 'hg heads .' to see heads)\n"))
            return False
        parent = repo.dirstate.parents()[0]
        if len(bheads) == 1:
            if len(repo.heads()) > 1:
                # Translate the template first, then interpolate; formatting
                # inside _() would defeat catalog lookup (matches the
                # multi-head warning above).
                ui.warn(_("abort: branch '%s' has one head - "
                          "please merge with an explicit rev\n") % branch)
                ui.status(_("(run 'hg heads' to see all heads)\n"))
                return False
            msg = _('there is nothing to merge')
            if parent != repo.lookup(repo[None].branch()):
                msg = _('%s - use "hg update" instead') % msg
            raise util.Abort(msg)
        if parent not in bheads:
            raise util.Abort(_('working dir not at a head rev - '
                               'use "hg update" or merge with an explicit rev'))
        # merge with whichever branch head is not the current parent
        node = parent == bheads[0] and bheads[-1] or bheads[0]
    if opts.get('preview'):
        # show the changesets that would be merged, without merging
        p1 = repo['.']
        p2 = repo[node]
        common = p1.ancestor(p2)
        roots, heads = [common.node()], [p2.node()]
        displayer = cmdutil.show_changeset(ui, repo, opts)
        for node in repo.changelog.nodesbetween(roots=roots, heads=heads)[0]:
            if node not in roots:
                displayer.show(repo[node])
        displayer.close()
        return 0
    return hg.merge(repo, node, force=opts.get('force'))
def outgoing(ui, repo, dest=None, **opts):
    """show changesets not found in the destination
    Show changesets not found in the specified destination repository
    or the default push location. These are the changesets that would
    be pushed if a push was requested.
    See pull for details of valid destination formats.
    """
    limit = cmdutil.loglimit(opts)
    # fall back from default-push to default, then parse the URL
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.repository(cmdutil.remoteui(repo, opts), dest)
    ui.status(_('comparing with %s\n') % url.hidepassword(dest))
    outnodes = repo.findoutgoing(other, force=opts.get('force'))
    if not outnodes:
        ui.status(_("no changes found\n"))
        return 1
    outnodes = repo.changelog.nodesbetween(outnodes, revs)[0]
    if opts.get('newest_first'):
        outnodes.reverse()
    displayer = cmdutil.show_changeset(ui, repo, opts)
    shown = 0
    skipmerges = opts.get('no_merges')
    for node in outnodes:
        if limit is not None and shown >= limit:
            break
        parents = [p for p in repo.changelog.parents(node) if p != nullid]
        if skipmerges and len(parents) == 2:
            continue
        shown += 1
        displayer.show(repo[node])
    displayer.close()
def parents(ui, repo, file_=None, **opts):
    """show the parents of the working directory or revision
    Print the working directory's parent revisions. If a revision is
    given via -r/--rev, the parent of that revision will be printed.
    If a file argument is given, the revision in which the file was
    last changed (before the working directory revision or the
    argument to --rev if given) is printed.
    """
    rev = opts.get('rev')
    # no -r/--rev means the working directory context
    ctx = repo[rev or None]
    if file_:
        matcher = cmdutil.match(repo, (file_,), opts)
        if matcher.anypats() or len(matcher.files()) != 1:
            raise util.Abort(_('can only specify an explicit filename'))
        file_ = matcher.files()[0]
        # find the file's node in whichever parent(s) carry it
        filenodes = []
        for pctx in ctx.parents():
            if not pctx:
                continue
            try:
                filenodes.append(pctx.filenode(file_))
            except error.LookupError:
                pass
        if not filenodes:
            raise util.Abort(_("'%s' not found in manifest!") % file_)
        flog = repo.file(file_)
        # map each file node back to the changeset that introduced it
        nodes = [repo.lookup(flog.linkrev(flog.rev(fn)))
                 for fn in filenodes]
    else:
        nodes = [pctx.node() for pctx in ctx.parents()]
    displayer = cmdutil.show_changeset(ui, repo, opts)
    for node in nodes:
        if node != nullid:
            displayer.show(repo[node])
    displayer.close()
def paths(ui, repo, search=None):
    """show aliases for remote repositories
    Show definition of symbolic path name NAME. If no name is given,
    show definition of all available names.
    Path names are defined in the [paths] section of /etc/mercurial/hgrc
    and $HOME/.hgrc. If run inside a repository, .hg/hgrc is used, too.
    See 'hg help urls' for more information.
    """
    pathitems = ui.configitems("paths")
    if not search:
        # list every configured path alias
        for name, path in pathitems:
            ui.write("%s = %s\n" % (name, url.hidepassword(path)))
        return
    for name, path in pathitems:
        if name == search:
            ui.write("%s\n" % url.hidepassword(path))
            return
    ui.warn(_("not found!\n"))
    return 1
def postincoming(ui, repo, modheads, optupdate, checkout):
    # Shared epilogue for pull-like commands: optionally update the
    # working directory after new changesets arrive, otherwise hint at
    # what to run next. modheads is the number of new heads created.
    if modheads == 0:
        return
    if optupdate:
        singlehead = modheads <= 1 or len(repo.branchheads()) == 1
        if singlehead or checkout:
            return hg.update(repo, checkout)
        ui.status(_("not updating, since new heads added\n"))
    if modheads > 1:
        ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n"))
    else:
        ui.status(_("(run 'hg update' to get a working copy)\n"))
def pull(ui, repo, source="default", **opts):
    """pull changes from the specified source
    Pull changes from a remote repository to a local one.
    This finds all changes from the repository at the specified path
    or URL and adds them to a local repository (the current one unless
    -R is specified). By default, this does not update the copy of the
    project in the working directory.
    Use hg incoming if you want to see what would have been added by a
    pull at the time you issued this command. If you then decide to
    added those changes to the repository, you should use pull -r X
    where X is the last changeset listed by hg incoming.
    If SOURCE is omitted, the 'default' path will be used.
    See 'hg help urls' for more information.
    """
    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
    other = hg.repository(cmdutil.remoteui(repo, opts), source)
    ui.status(_('pulling from %s\n') % url.hidepassword(source))
    revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))
    if revs:
        # resolve requested revisions on the remote side
        try:
            revs = [other.lookup(r) for r in revs]
        except error.CapabilityError:
            raise util.Abort(_("Other repository doesn't support revision "
                               "lookup, so a rev cannot be specified."))
    modheads = repo.pull(other, heads=revs, force=opts.get('force'))
    if checkout:
        checkout = str(repo.changelog.rev(other.lookup(checkout)))
    return postincoming(ui, repo, modheads, opts.get('update'), checkout)
def push(ui, repo, dest=None, **opts):
    """push changes to the specified destination
    Push changes from the local repository to the specified destination.
    This is the symmetrical operation for pull. It moves changes from
    the current repository to a different one. If the destination is
    local this is identical to a pull in that directory from the
    current one.
    By default, push will refuse to run if it detects the result would
    increase the number of remote heads. This generally indicates the
    user forgot to pull and merge before pushing.
    If -r/--rev is used, the named revision and all its ancestors will
    be pushed to the remote repository.
    Please see 'hg help urls' for important details about ``ssh://``
    URLs. If DESTINATION is omitted, a default path will be used.
    """
    # With no dest, try the 'default-push' alias first, then 'default'.
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    other = hg.repository(cmdutil.remoteui(repo, opts), dest)
    ui.status(_('pushing to %s\n') % url.hidepassword(dest))
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # push subrepos depth-first for coherent ordering
    c = repo['']
    subs = c.substate # only repos that are committed
    for s in sorted(subs):
        c.sub(s).push(opts.get('force'))
    r = repo.push(other, opts.get('force'), revs=revs)
    # NOTE(review): a truthy return here (r == 0) appears to signal
    # failure to the command dispatcher — confirm against dispatch code.
    return r == 0
def recover(ui, repo):
    """roll back an interrupted transaction
    Recover from an interrupted commit or pull.
    This command tries to fix the repository status after an
    interrupted operation. It should only be necessary when Mercurial
    suggests it.
    """
    # Only verify the repository when recovery actually did something;
    # otherwise report failure via a nonzero return.
    if not repo.recover():
        return 1
    return hg.verify(repo)
def remove(ui, repo, *pats, **opts):
    """remove the specified files on the next commit
    Schedule the indicated files for removal from the repository.
    This only removes files from the current branch, not from the
    entire project history. -A/--after can be used to remove only
    files that have already been deleted, -f/--force can be used to
    force deletion, and -Af can be used to remove files from the next
    revision without deleting them from the working directory.
    The following table details the behavior of remove for different
    file states (columns) and option combinations (rows). The file
    states are Added [A], Clean [C], Modified [M] and Missing [!] (as
    reported by hg status). The actions are Warn, Remove (from branch)
    and Delete (from disk)::
      A C M !
      none W RD W R
      -f R RD RD R
      -A W W W R
      -Af R R R R
    This command schedules the files to be removed at the next commit.
    To undo a remove before that, see hg revert.
    """
    after, force = opts.get('after'), opts.get('force')
    if not pats and not after:
        raise util.Abort(_('no files specified'))
    m = cmdutil.match(repo, pats, opts)
    s = repo.status(match=m, clean=True)
    # status tuple order: modified, added, removed, deleted, unknown,
    # ignored, clean — we only need four of those states here.
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]
    for f in m.files():
        if f not in repo.dirstate and not os.path.isdir(m.rel(f)):
            ui.warn(_('not removing %s: file is untracked\n') % m.rel(f))
    # Warn once per file that is skipped, with the reason why.
    def warn(files, reason):
        for f in files:
            ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
                    % (m.rel(f), reason))
    # Partition files into 'remove' (drop from branch, maybe delete on
    # disk) and 'forget' (unschedule the add), per the table above.
    if force:
        remove, forget = modified + deleted + clean, added
    elif after:
        remove, forget = deleted, []
        warn(modified + added + clean, _('still exists'))
    else:
        remove, forget = deleted + clean, []
        warn(modified, _('is modified'))
        warn(added, _('has been marked for add'))
    for f in sorted(remove + forget):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))
    repo.forget(forget)
    # With --after the files are already gone from disk, so don't unlink.
    repo.remove(remove, unlink=not after)
def rename(ui, repo, *pats, **opts):
    """rename files; equivalent of copy + remove
    Mark dest as copies of sources; mark sources for deletion. If dest
    is a directory, copies are put in that directory. If dest is a
    file, there can only be one source.
    By default, this command copies the contents of files as they
    exist in the working directory. If invoked with -A/--after, the
    operation is recorded, but no copying is performed.
    This command takes effect at the next commit. To undo a rename
    before that, see hg revert.
    """
    # rename is copy-with-deletion; hold the working-dir lock throughout.
    wlock = repo.wlock(False)
    try:
        return cmdutil.copy(ui, repo, pats, opts, rename=True)
    finally:
        wlock.release()
def resolve(ui, repo, *pats, **opts):
    """retry file merges from a merge or update
    This command can cleanly retry unresolved file merges using file
    revisions preserved from the last update or merge.
    If a conflict is resolved manually, please note that the changes
    will be overwritten if the merge is retried with resolve. The
    -m/--mark switch should be used to mark the file as resolved.
    You can specify a set of files to operate on, or use the -a/--all
    switch to select all unresolved files.
    This command also allows listing resolved files and manually
    indicating whether or not files are resolved. All files must be
    marked as resolved before a commit is permitted.
    The codes used to show the status of files are::
      U = unresolved
      R = resolved
    """
    # Pull the mutually-exclusive mode flags out of opts in one go.
    all, mark, unmark, show, nostatus = \
        [opts.get(o) for o in 'all mark unmark list no_status'.split()]
    if (show and (mark or unmark)) or (mark and unmark):
        raise util.Abort(_("too many options specified"))
    if pats and all:
        raise util.Abort(_("can't specify --all and patterns"))
    if not (all or pats or show or mark or unmark):
        raise util.Abort(_('no files or directories specified; '
                           'use --all to remerge all files'))
    ms = merge_.mergestate(repo)
    m = cmdutil.match(repo, pats, opts)
    # Iterate every file recorded in the merge state; act only on matches.
    for f in ms:
        if m(f):
            if show:
                if nostatus:
                    ui.write("%s\n" % f)
                else:
                    # ms[f] is the one-letter state ('u'/'r'), shown upper.
                    ui.write("%s %s\n" % (ms[f].upper(), f))
            elif mark:
                ms.mark(f, "r")
            elif unmark:
                ms.mark(f, "u")
            else:
                wctx = repo[None]
                mctx = wctx.parents()[-1]
                # backup pre-resolve (merge uses .orig for its own purposes)
                a = repo.wjoin(f)
                util.copyfile(a, a + ".resolve")
                # resolve file
                ms.resolve(f, wctx, mctx)
                # replace filemerge's .orig file with our resolve file
                util.rename(a + ".resolve", a + ".orig")
def revert(ui, repo, *pats, **opts):
    """restore individual files or directories to an earlier state
    (Use update -r to check out earlier revisions, revert does not
    change the working directory parents.)
    With no revision specified, revert the named files or directories
    to the contents they had in the parent of the working directory.
    This restores the contents of the affected files to an unmodified
    state and unschedules adds, removes, copies, and renames. If the
    working directory has two parents, you must explicitly specify a
    revision.
    Using the -r/--rev option, revert the given files or directories
    to their contents as of a specific revision. This can be helpful
    to "roll back" some or all of an earlier change. See 'hg help
    dates' for a list of formats valid for -d/--date.
    Revert modifies the working directory. It does not commit any
    changes, or change the parent of the working directory. If you
    revert to a revision other than the parent of the working
    directory, the reverted files will thus appear modified
    afterwards.
    If a file has been deleted, it is restored. If the executable mode
    of a file was changed, it is reset.
    If names are given, all files matching the names are reverted.
    If no arguments are given, no files are reverted.
    Modified files are saved with a .orig suffix before reverting.
    To disable these backups, use --no-backup.
    """
    if opts["date"]:
        if opts["rev"]:
            raise util.Abort(_("you can't specify a revision and a date"))
        opts["rev"] = cmdutil.finddate(ui, repo, opts["date"])
    if not pats and not opts.get('all'):
        raise util.Abort(_('no files or directories specified; '
                           'use --all to revert the whole repo'))
    parent, p2 = repo.dirstate.parents()
    if not opts.get('rev') and p2 != nullid:
        raise util.Abort(_('uncommitted merge - please provide a '
                           'specific revision'))
    # ctx is the target revision we revert to; mf is its manifest.
    ctx = repo[opts.get('rev')]
    node = ctx.node()
    mf = ctx.manifest()
    if node == parent:
        pmf = mf
    else:
        # Parent manifest is loaded lazily below, only when needed.
        pmf = None
    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other.
    names = {}
    wlock = repo.wlock()
    try:
        # walk dirstate.
        m = cmdutil.match(repo, pats, opts)
        m.bad = lambda x, y: False
        for abs in repo.walk(m):
            names[abs] = m.rel(abs), m.exact(abs)
        # walk target manifest.
        def badfn(path, msg):
            # Suppress the warning when the path (or a file under it)
            # was already seen in the dirstate walk.
            if path in names:
                return
            path_ = path + '/'
            for f in names:
                if f.startswith(path_):
                    return
            ui.warn("%s: %s\n" % (m.rel(path), msg))
        m = cmdutil.match(repo, pats, opts)
        m.bad = badfn
        for abs in repo[node].walk(m):
            if abs not in names:
                names[abs] = m.rel(abs), m.exact(abs)
        m = cmdutil.matchfiles(repo, names)
        changes = repo.status(match=m)[:4]
        modified, added, removed, deleted = map(set, changes)
        # if f is a rename, also revert the source
        cwd = repo.getcwd()
        for f in added:
            src = repo.dirstate.copied(f)
            if src and src not in names and repo.dirstate[src] == 'r':
                removed.add(src)
                names[src] = (repo.pathto(src, cwd), True)
        # Message for files leaving the branch: 'forgetting' for files
        # that were only scheduled for add, 'removing' otherwise.
        def removeforget(abs):
            if repo.dirstate[abs] == 'a':
                return _('forgetting %s\n')
            return _('removing %s\n')
        # Each action is a (file list, status message) pair; handle()
        # below appends to the list and prints the message.
        revert = ([], _('reverting %s\n'))
        add = ([], _('adding %s\n'))
        remove = ([], removeforget)
        undelete = ([], _('undeleting %s\n'))
        disptable = (
            # dispatch table:
            #   file state
            #   action if in target manifest
            #   action if not in target manifest
            #   make backup if in target manifest
            #   make backup if not in target manifest
            (modified, revert, remove, True, True),
            (added, revert, remove, True, False),
            (removed, undelete, None, False, False),
            (deleted, revert, remove, False, False),
            )
        for abs, (rel, exact) in sorted(names.items()):
            mfentry = mf.get(abs)
            target = repo.wjoin(abs)
            # Record abs under the chosen action, back up the working
            # copy if requested, and report what we're doing.
            def handle(xlist, dobackup):
                xlist[0].append(abs)
                if dobackup and not opts.get('no_backup') and util.lexists(target):
                    bakname = "%s.orig" % rel
                    ui.note(_('saving current version of %s as %s\n') %
                            (rel, bakname))
                    if not opts.get('dry_run'):
                        util.copyfile(target, bakname)
                if ui.verbose or not exact:
                    msg = xlist[1]
                    if not isinstance(msg, basestring):
                        msg = msg(abs)
                    ui.status(msg % rel)
            for table, hitlist, misslist, backuphit, backupmiss in disptable:
                if abs not in table:
                    continue
                # file has changed in dirstate
                if mfentry:
                    handle(hitlist, backuphit)
                elif misslist is not None:
                    handle(misslist, backupmiss)
                break
            else:
                # for/else: no dispatch entry matched, i.e. the file is
                # unchanged according to status.
                if abs not in repo.dirstate:
                    if mfentry:
                        handle(add, True)
                    elif exact:
                        ui.warn(_('file not managed: %s\n') % rel)
                    continue
                # file has not changed in dirstate
                if node == parent:
                    if exact:
                        ui.warn(_('no changes needed to %s\n') % rel)
                    continue
                if pmf is None:
                    # only need parent manifest in this unlikely case,
                    # so do not read by default
                    pmf = repo[parent].manifest()
                if abs in pmf:
                    if mfentry:
                        # if version of file is same in parent and target
                        # manifests, do nothing
                        if (pmf[abs] != mfentry or
                            pmf.flags(abs) != mf.flags(abs)):
                            handle(revert, False)
                    else:
                        handle(remove, False)
        if not opts.get('dry_run'):
            # Write the target revision's content into the working dir.
            def checkout(f):
                fc = ctx[f]
                repo.wwrite(f, fc.data(), fc.flags())
            audit_path = util.path_auditor(repo.root)
            for f in remove[0]:
                if repo.dirstate[f] == 'a':
                    repo.dirstate.forget(f)
                    continue
                audit_path(f)
                try:
                    util.unlink(repo.wjoin(f))
                except OSError:
                    pass
                repo.dirstate.remove(f)
            normal = None
            if node == parent:
                # We're reverting to our parent. If possible, we'd like status
                # to report the file as clean. We have to use normallookup for
                # merges to avoid losing information about merged/dirty files.
                if p2 != nullid:
                    normal = repo.dirstate.normallookup
                else:
                    normal = repo.dirstate.normal
            for f in revert[0]:
                checkout(f)
                if normal:
                    normal(f)
            for f in add[0]:
                checkout(f)
                repo.dirstate.add(f)
            normal = repo.dirstate.normallookup
            if node == parent and p2 == nullid:
                normal = repo.dirstate.normal
            for f in undelete[0]:
                checkout(f)
                normal(f)
    finally:
        wlock.release()
def rollback(ui, repo):
    """roll back the last transaction
    This command should be used with care. There is only one level of
    rollback, and there is no way to undo a rollback. It will also
    restore the dirstate at the time of the last transaction, losing
    any dirstate changes since that time. This command does not alter
    the working directory.
    Transactions are used to encapsulate the effects of all commands
    that create new changesets or propagate existing changesets into a
    repository. For example, the following commands are transactional,
    and their effects can be rolled back:
    - commit
    - import
    - pull
    - push (with this repository as the destination)
    - unbundle
    This command is not intended for use on public repositories. Once
    changes are visible for pull by other users, rolling a transaction
    back locally is ineffective (someone else may already have pulled
    the changes). Furthermore, a race is possible with readers of the
    repository; for example an in-progress pull from the repository
    may fail if a rollback is performed.
    """
    # All the work is delegated to the repository object.
    repo.rollback()
def root(ui, repo):
    """print the root (top) of the current working directory
    Print the root directory of the current repository.
    """
    ui.write("%s\n" % repo.root)
def serve(ui, repo, **opts):
    """export the repository via HTTP
    Start a local HTTP repository browser and pull server.
    By default, the server logs accesses to stdout and errors to
    stderr. Use the -A/--accesslog and -E/--errorlog options to log to
    files.
    """
    # --stdio mode: serve the ssh protocol over stdin/stdout instead of
    # starting an HTTP server.
    if opts["stdio"]:
        if repo is None:
            raise error.RepoError(_("There is no Mercurial repository here"
                                    " (.hg not found)"))
        s = sshserver.sshserver(ui, repo)
        s.serve_forever()
    baseui = repo and repo.baseui or ui
    # Copy the recognized command-line options into the [web] config
    # section so the server picks them up.
    optlist = ("name templates style address port prefix ipv6"
               " accesslog errorlog webdir_conf certificate encoding")
    for o in optlist.split():
        if opts.get(o, None):
            baseui.setconfig("web", o, str(opts[o]))
            if (repo is not None) and (repo.ui != baseui):
                repo.ui.setconfig("web", o, str(opts[o]))
    if repo is None and not ui.config("web", "webdir_conf"):
        raise error.RepoError(_("There is no Mercurial repository here"
                                " (.hg not found)"))
    # Small adapter giving cmdutil.service() its init/run entry points.
    class service(object):
        def init(self):
            util.set_signal_handler()
            self.httpd = server.create_server(baseui, repo)
            if not ui.verbose:
                return
            # Build a human-readable "listening at ..." line.
            if self.httpd.prefix:
                prefix = self.httpd.prefix.strip('/') + '/'
            else:
                prefix = ''
            port = ':%d' % self.httpd.port
            if port == ':80':
                port = ''
            bindaddr = self.httpd.addr
            if bindaddr == '0.0.0.0':
                bindaddr = '*'
            elif ':' in bindaddr: # IPv6
                bindaddr = '[%s]' % bindaddr
            fqaddr = self.httpd.fqaddr
            if ':' in fqaddr:
                fqaddr = '[%s]' % fqaddr
            ui.status(_('listening at http://%s%s/%s (bound to %s:%d)\n') %
                      (fqaddr, port, prefix, bindaddr, self.httpd.port))
        def run(self):
            self.httpd.serve_forever()
    service = service()
    cmdutil.service(opts, initfn=service.init, runfn=service.run)
def status(ui, repo, *pats, **opts):
    """show changed files in the working directory
    Show status of files in the repository. If names are given, only
    files that match are shown. Files that are clean or ignored or
    the source of a copy/move operation, are not listed unless
    -c/--clean, -i/--ignored, -C/--copies or -A/--all are given.
    Unless options described with "show only ..." are given, the
    options -mardu are used.
    Option -q/--quiet hides untracked (unknown and ignored) files
    unless explicitly requested with -u/--unknown or -i/--ignored.
    NOTE: status may appear to disagree with diff if permissions have
    changed or a merge has occurred. The standard diff format does not
    report permission changes and diff only reports changes relative
    to one merge parent.
    If one revision is given, it is used as the base revision.
    If two revisions are given, the differences between them are
    shown. The --change option can also be used as a shortcut to list
    the changed files of a revision from its first parent.
    The codes used to show the status of files are::
      M = modified
      A = added
      R = removed
      C = clean
      ! = missing (deleted by non-hg command, but still tracked)
      ? = not tracked
      I = ignored
        = origin of the previous file listed as A (added)
    """
    revs = opts.get('rev')
    change = opts.get('change')
    if revs and change:
        msg = _('cannot specify --rev and --change at the same time')
        raise util.Abort(msg)
    elif change:
        # --change REV compares REV against its first parent.
        node2 = repo.lookup(change)
        node1 = repo[node2].parents()[0].node()
    else:
        node1, node2 = cmdutil.revpair(repo, revs)
    cwd = (pats and repo.getcwd()) or ''
    end = opts.get('print0') and '\0' or '\n'
    copy = {}
    # The order here matches the tuple returned by repo.status() and
    # the status-letter string 'MAR!?IC' below.
    states = 'modified added removed deleted unknown ignored clean'.split()
    show = [k for k in states if opts.get(k)]
    if opts.get('all'):
        show += ui.quiet and (states[:4] + ['clean']) or states
    if not show:
        # Default: -mard (plus unknown unless quiet).
        show = ui.quiet and states[:4] or states[:5]
    stat = repo.status(node1, node2, cmdutil.match(repo, pats, opts),
                       'ignored' in show, 'clean' in show, 'unknown' in show)
    changestates = zip(states, 'MAR!?IC', stat)
    if (opts.get('all') or opts.get('copies')) and not opts.get('no_status'):
        # Collect copy/rename sources so they can be shown under 'A' files.
        ctxn = repo[nullid]
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        added = stat[1]
        if node2 is None:
            added = stat[0] + stat[1] # merged?
        for k, v in copies.copies(repo, ctx1, ctx2, ctxn)[0].iteritems():
            if k in added:
                copy[k] = v
            elif v in added:
                copy[v] = k
    for state, char, files in changestates:
        if state in show:
            format = "%s %%s%s" % (char, end)
            if opts.get('no_status'):
                format = "%%s%s" % end
            for f in files:
                ui.write(format % repo.pathto(f, cwd))
                if f in copy:
                    ui.write('  %s%s' % (repo.pathto(copy[f], cwd), end))
def summary(ui, repo, **opts):
    """summarize working directory state
    This generates a brief summary of the working directory state,
    including parents, branch, commit status, and available updates.
    With the --remote option, this will check the default paths for
    incoming and outgoing changes. This can be time-consuming.
    """
    ctx = repo[None]
    parents = ctx.parents()
    pnode = parents[0].node()
    tags = repo.tags()
    # One 'parent:' line per working-directory parent, annotated with
    # any tags pointing at it.
    for p in parents:
        t = ' '.join([t for t in tags if tags[t] == p.node()])
        if p.rev() == -1:
            if not len(repo):
                t += _(' (empty repository)')
            else:
                t += _(' (no revision checked out)')
        ui.write(_('parent: %d:%s %s\n') % (p.rev(), str(p), t))
        if p.description():
            ui.status(' ' + p.description().splitlines()[0].strip() + '\n')
    branch = ctx.branch()
    bheads = repo.branchheads(branch)
    m = _('branch: %s\n') % branch
    if branch != 'default':
        ui.write(m)
    else:
        # 'default' is only shown in non-quiet mode.
        ui.status(m)
    st = list(repo.status(unknown=True))[:6]
    ms = merge_.mergestate(repo)
    st.append([f for f in ms if ms[f] == 'u'])
    labels = [_('%d modified'), _('%d added'), _('%d removed'),
              _('%d deleted'), _('%d unknown'), _('%d ignored'),
              _('%d unresolved')]
    t = []
    for s, l in zip(st, labels):
        if s:
            t.append(l % len(s))
    t = ', '.join(t)
    cleanworkdir = False
    if len(parents) > 1:
        t += _(' (merge)')
    elif branch != parents[0].branch():
        t += _(' (new branch)')
    elif (not st[0] and not st[1] and not st[2]):
        t += _(' (clean)')
        cleanworkdir = True
    elif pnode not in bheads:
        t += _(' (new branch head)')
    if cleanworkdir:
        ui.status(_('commit: %s\n') % t.strip())
    else:
        ui.write(_('commit: %s\n') % t.strip())
    # all ancestors of branch heads - all ancestors of parent = new csets
    new = [0] * len(repo)
    cl = repo.changelog
    for a in [cl.rev(n) for n in bheads]:
        new[a] = 1
    for a in cl.ancestors(*[cl.rev(n) for n in bheads]):
        new[a] = 1
    for a in [p.rev() for p in parents]:
        if a >= 0:
            new[a] = 0
    for a in cl.ancestors(*[p.rev() for p in parents]):
        new[a] = 0
    new = sum(new)
    if new == 0:
        ui.status(_('update: (current)\n'))
    elif pnode not in bheads:
        ui.write(_('update: %d new changesets (update)\n') % new)
    else:
        ui.write(_('update: %d new changesets, %d branch heads (merge)\n') %
                 (new, len(bheads)))
    if opts.get('remote'):
        t = []
        # Incoming: compare against the 'default' path.
        source, branches = hg.parseurl(ui.expandpath('default'))
        other = hg.repository(cmdutil.remoteui(repo, {}), source)
        revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))
        ui.debug('comparing with %s\n' % url.hidepassword(source))
        # pushbuffer/popbuffer silence the discovery chatter.
        repo.ui.pushbuffer()
        common, incoming, rheads = repo.findcommonincoming(other)
        repo.ui.popbuffer()
        if incoming:
            t.append(_('1 or more incoming'))
        # Outgoing: compare against 'default-push' (falling back to 'default').
        dest, branches = hg.parseurl(ui.expandpath('default-push', 'default'))
        revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
        other = hg.repository(cmdutil.remoteui(repo, {}), dest)
        ui.debug('comparing with %s\n' % url.hidepassword(dest))
        repo.ui.pushbuffer()
        o = repo.findoutgoing(other)
        repo.ui.popbuffer()
        o = repo.changelog.nodesbetween(o, None)[0]
        if o:
            t.append(_('%d outgoing') % len(o))
        if t:
            ui.write(_('remote: %s\n') % (', '.join(t)))
        else:
            ui.status(_('remote: (synced)\n'))
def tag(ui, repo, name1, *names, **opts):
    """add one or more tags for the current or given revision
    Name a particular revision using <name>.
    Tags are used to name particular revisions of the repository and are
    very useful to compare different revisions, to go back to significant
    earlier versions or to mark branch points as releases, etc.
    If no revision is given, the parent of the working directory is
    used, or tip if no revision is checked out.
    To facilitate version control, distribution, and merging of tags,
    they are stored as a file named ".hgtags" which is managed
    similarly to other project files and can be hand-edited if
    necessary. The file '.hg/localtags' is used for local tags (not
    shared among repositories).
    See 'hg help dates' for a list of formats valid for -d/--date.
    """
    # Default to the working directory's first parent.
    rev_ = "."
    names = (name1,) + names
    if len(names) != len(set(names)):
        raise util.Abort(_('tag names must be unique'))
    for n in names:
        if n in ['tip', '.', 'null']:
            raise util.Abort(_('the name \'%s\' is reserved') % n)
    if opts.get('rev') and opts.get('remove'):
        raise util.Abort(_("--rev and --remove are incompatible"))
    if opts.get('rev'):
        rev_ = opts['rev']
    message = opts.get('message')
    if opts.get('remove'):
        # Removal: every named tag must exist with the expected scope.
        expectedtype = opts.get('local') and 'local' or 'global'
        for n in names:
            if not repo.tagtype(n):
                raise util.Abort(_('tag \'%s\' does not exist') % n)
            if repo.tagtype(n) != expectedtype:
                if expectedtype == 'global':
                    raise util.Abort(_('tag \'%s\' is not a global tag') % n)
                else:
                    raise util.Abort(_('tag \'%s\' is not a local tag') % n)
        # Tagging the null revision is how a tag is recorded as removed.
        rev_ = nullid
        if not message:
            # we don't translate commit messages
            message = 'Removed tag %s' % ', '.join(names)
    elif not opts.get('force'):
        for n in names:
            if n in repo.tags():
                raise util.Abort(_('tag \'%s\' already exists '
                                   '(use -f to force)') % n)
    if not rev_ and repo.dirstate.parents()[1] != nullid:
        raise util.Abort(_('uncommitted merge - please provide a '
                           'specific revision'))
    r = repo[rev_].node()
    if not message:
        # we don't translate commit messages
        message = ('Added tag %s for changeset %s' %
                   (', '.join(names), short(r)))
    date = opts.get('date')
    if date:
        date = util.parsedate(date)
    repo.tag(names, r, message, opts.get('local'), opts.get('user'), date)
def tags(ui, repo):
    """list repository tags
    This lists both regular and local tags. When the -v/--verbose
    switch is used, a third column "local" is printed for local tags.
    """
    # Long hashes when debugging, short ones otherwise.
    hexfunc = ui.debugflag and hex or short
    tagtype = ""
    for t, n in reversed(repo.tagslist()):
        if ui.quiet:
            ui.write("%s\n" % t)
            continue
        try:
            # hn is bound before rev() — the call that can raise — so the
            # except clause can safely reference it.
            hn = hexfunc(n)
            r = "%5d:%s" % (repo.changelog.rev(n), hn)
        except error.LookupError:
            r = "    ?:%s" % hn
        else:
            spaces = " " * (30 - encoding.colwidth(t))
            if ui.verbose:
                if repo.tagtype(t) == 'local':
                    tagtype = " local"
                else:
                    tagtype = ""
            ui.write("%s%s %s%s\n" % (t, spaces, r, tagtype))
def tip(ui, repo, **opts):
    """show the tip revision
    The tip revision (usually just called the tip) is the changeset
    most recently added to the repository (and therefore the most
    recently changed head).
    If you have just made a commit, that commit will be the tip. If
    you have just pulled changes from another repository, the tip of
    that repository becomes the current tip. The "tip" tag is special
    and cannot be renamed or assigned to a different changeset.
    """
    # The tip is the highest revision number, i.e. len(repo) - 1.
    displayer = cmdutil.show_changeset(ui, repo, opts)
    tiprev = len(repo) - 1
    displayer.show(repo[tiprev])
    displayer.close()
def unbundle(ui, repo, fname1, *fnames, **opts):
    """apply one or more changegroup files
    Apply one or more compressed changegroup files generated by the
    bundle command.
    """
    lock = repo.lock()
    try:
        # Apply each bundle in order; modheads keeps the head count from
        # the last one for the post-incoming report below.
        for fname in (fname1,) + fnames:
            f = url.open(ui, fname)
            gen = changegroup.readbundle(f, fname)
            modheads = repo.addchangegroup(gen, 'unbundle', 'bundle:' + fname)
    finally:
        lock.release()
    return postincoming(ui, repo, modheads, opts.get('update'), None)
def update(ui, repo, node=None, rev=None, clean=False, date=None, check=False):
    """update working directory
    Update the repository's working directory to the specified
    changeset.
    If no changeset is specified, attempt to update to the head of the
    current branch. If this head is a descendant of the working
    directory's parent, update to it, otherwise abort.
    The following rules apply when the working directory contains
    uncommitted changes:
    1. If neither -c/--check nor -C/--clean is specified, and if
       the requested changeset is an ancestor or descendant of
       the working directory's parent, the uncommitted changes
       are merged into the requested changeset and the merged
       result is left uncommitted. If the requested changeset is
       not an ancestor or descendant (that is, it is on another
       branch), the update is aborted and the uncommitted changes
       are preserved.
    2. With the -c/--check option, the update is aborted and the
       uncommitted changes are preserved.
    3. With the -C/--clean option, uncommitted changes are discarded and
       the working directory is updated to the requested changeset.
    Use null as the changeset to remove the working directory (like 'hg
    clone -U').
    If you want to update just one file to an older changeset, use 'hg revert'.
    See 'hg help dates' for a list of formats valid for -d/--date.
    """
    # A revision may arrive positionally (node) or via -r (rev), not both.
    if rev and node:
        raise util.Abort(_("please specify just one revision"))
    if not rev:
        rev = node
    if check and clean:
        raise util.Abort(_("cannot specify both -c/--check and -C/--clean"))
    if check:
        # we could use dirty() but we can ignore merge and branch trivia
        wctx = repo[None]
        if wctx.modified() or wctx.added() or wctx.removed():
            raise util.Abort(_("uncommitted local changes"))
    if date:
        if rev:
            raise util.Abort(_("you can't specify a revision and a date"))
        rev = cmdutil.finddate(ui, repo, date)
    if clean or check:
        return hg.clean(repo, rev)
    return hg.update(repo, rev)
def verify(ui, repo):
    """verify the integrity of the repository
    Verify the integrity of the current repository.
    This will perform an extensive check of the repository's
    integrity, validating the hashes and checksums of each entry in
    the changelog, manifest, and tracked files, as well as the
    integrity of their crosslinks and indices.
    """
    # Delegate entirely to the shared verification routine.
    return hg.verify(repo)
def version_(ui):
    """output version and copyright information"""
    ui.write(_("Mercurial Distributed SCM (version %s)\n") % util.version())
    # Copyright notice goes through ui.status so -q suppresses it.
    notice = _(
        "\nCopyright (C) 2005-2010 Matt Mackall <mpm@selenic.com> and others\n"
        "This is free software; see the source for copying conditions. "
        "There is NO\nwarranty; "
        "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
    )
    ui.status(notice)
# Command options and aliases are listed here, alphabetically
# Options accepted by every command, merged with each command's own
# option list by the dispatcher.
globalopts = [
    ('R', 'repository', '',
     _('repository root directory or name of overlay bundle file')),
    ('', 'cwd', '', _('change working directory')),
    ('y', 'noninteractive', None,
     _('do not prompt, assume \'yes\' for any required answers')),
    ('q', 'quiet', None, _('suppress output')),
    ('v', 'verbose', None, _('enable additional output')),
    ('', 'config', [], _('set/override config option')),
    ('', 'debug', None, _('enable debugging output')),
    ('', 'debugger', None, _('start debugger')),
    ('', 'encoding', encoding.encoding, _('set the charset encoding')),
    ('', 'encodingmode', encoding.encodingmode,
     _('set the charset encoding mode')),
    ('', 'traceback', None, _('always print a traceback on exception')),
    ('', 'time', None, _('time how long the command takes')),
    ('', 'profile', None, _('print command execution profile')),
    ('', 'version', None, _('output version information and exit')),
    ('h', 'help', None, _('display help and exit')),
]
# Shared option fragments, composed into per-command option lists in
# the command table below.
dryrunopts = [('n', 'dry-run', None,
               _('do not perform actions, just print output'))]
remoteopts = [
    ('e', 'ssh', '', _('specify ssh command to use')),
    ('', 'remotecmd', '', _('specify hg command to run on the remote side')),
]
walkopts = [
    ('I', 'include', [], _('include names matching the given patterns')),
    ('X', 'exclude', [], _('exclude names matching the given patterns')),
]
commitopts = [
    ('m', 'message', '', _('use <text> as commit message')),
    ('l', 'logfile', '', _('read commit message from <file>')),
]
commitopts2 = [
    ('d', 'date', '', _('record datecode as commit date')),
    ('u', 'user', '', _('record the specified user as committer')),
]
templateopts = [
    ('', 'style', '', _('display using template map file')),
    ('', 'template', '', _('display with template')),
]
logopts = [
    ('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ('l', 'limit', '', _('limit number of changes displayed')),
    ('M', 'no-merges', None, _('do not show merges')),
] + templateopts
diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'nodates', None, _('omit dates from diff headers'))
]
diffopts2 = [
    ('p', 'show-function', None, _('show which function each change is in')),
    ('', 'reverse', None, _('produce a diff that undoes the changes')),
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
    ('U', 'unified', '', _('number of lines of context to show')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
]
similarityopts = [
    ('s', 'similarity', '',
     _('guess renamed files by similarity (0<=s<=100)'))
]
# The command table.  Keys are command names with "|"-separated aliases
# (e.g. "copy|cp"); a leading "^" appears to mark commands listed in the
# short help output -- TODO confirm against dispatch/help code (outside
# this chunk).  Values are (function, option list, synopsis) tuples; a few
# entries are 2-tuples with no synopsis.
table = {
    "^add": (add, walkopts + dryrunopts, _('[OPTION]... [FILE]...')),
    "addremove":
        (addremove, similarityopts + walkopts + dryrunopts,
         _('[OPTION]... [FILE]...')),
    "^annotate|blame":
        (annotate,
         [('r', 'rev', '', _('annotate the specified revision')),
          ('', 'follow', None, _('follow copies and renames (DEPRECATED)')),
          ('', 'no-follow', None, _("don't follow copies and renames")),
          ('a', 'text', None, _('treat all files as text')),
          ('u', 'user', None, _('list the author (long with -v)')),
          ('f', 'file', None, _('list the filename')),
          ('d', 'date', None, _('list the date (short with -q)')),
          ('n', 'number', None, _('list the revision number (default)')),
          ('c', 'changeset', None, _('list the changeset')),
          ('l', 'line-number', None,
           _('show line number at the first appearance'))
         ] + walkopts,
         _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...')),
    "archive":
        (archive,
         [('', 'no-decode', None, _('do not pass files through decoders')),
          ('p', 'prefix', '', _('directory prefix for files in archive')),
          ('r', 'rev', '', _('revision to distribute')),
          ('t', 'type', '', _('type of distribution to create')),
         ] + walkopts,
         _('[OPTION]... DEST')),
    "backout":
        (backout,
         [('', 'merge', None,
           _('merge with old dirstate parent after backout')),
          ('', 'parent', '', _('parent to choose when backing out merge')),
          ('r', 'rev', '', _('revision to backout')),
         ] + walkopts + commitopts + commitopts2,
         _('[OPTION]... [-r] REV')),
    "bisect":
        (bisect,
         [('r', 'reset', False, _('reset bisect state')),
          ('g', 'good', False, _('mark changeset good')),
          ('b', 'bad', False, _('mark changeset bad')),
          ('s', 'skip', False, _('skip testing changeset')),
          ('c', 'command', '', _('use command to check changeset state')),
          ('U', 'noupdate', False, _('do not update to target'))],
         _("[-gbsr] [-U] [-c CMD] [REV]")),
    "branch":
        (branch,
         [('f', 'force', None,
           _('set branch name even if it shadows an existing branch')),
          ('C', 'clean', None, _('reset branch name to parent branch name'))],
         _('[-fC] [NAME]')),
    "branches":
        (branches,
         [('a', 'active', False,
           _('show only branches that have unmerged heads')),
          ('c', 'closed', False,
           _('show normal and closed branches'))],
         _('[-ac]')),
    "bundle":
        (bundle,
         [('f', 'force', None,
           _('run even when the destination is unrelated')),
          ('r', 'rev', [],
           _('a changeset intended to be added to the destination')),
          ('b', 'branch', [],
           _('a specific branch you would like to bundle')),
          ('', 'base', [],
           _('a base changeset assumed to be available at the destination')),
          ('a', 'all', None, _('bundle all changesets in the repository')),
          ('t', 'type', 'bzip2', _('bundle compression type to use')),
         ] + remoteopts,
         _('[-f] [-t TYPE] [-a] [-r REV]... [--base REV]... FILE [DEST]')),
    "cat":
        (cat,
         [('o', 'output', '', _('print output to file with formatted name')),
          ('r', 'rev', '', _('print the given revision')),
          ('', 'decode', None, _('apply any matching decode filter')),
         ] + walkopts,
         _('[OPTION]... FILE...')),
    "^clone":
        (clone,
         [('U', 'noupdate', None,
           _('the clone will include an empty working copy (only a repository)')),
          ('u', 'updaterev', '',
           _('revision, tag or branch to check out')),
          ('r', 'rev', [],
           _('include the specified changeset')),
          ('b', 'branch', [],
           _('clone only the specified branch')),
          ('', 'pull', None, _('use pull protocol to copy metadata')),
          ('', 'uncompressed', None,
           _('use uncompressed transfer (fast over LAN)')),
         ] + remoteopts,
         _('[OPTION]... SOURCE [DEST]')),
    "^commit|ci":
        (commit,
         [('A', 'addremove', None,
           _('mark new/missing files as added/removed before committing')),
          ('', 'close-branch', None,
           _('mark a branch as closed, hiding it from the branch list')),
         ] + walkopts + commitopts + commitopts2,
         _('[OPTION]... [FILE]...')),
    "copy|cp":
        (copy,
         [('A', 'after', None, _('record a copy that has already occurred')),
          ('f', 'force', None,
           _('forcibly copy over an existing managed file')),
         ] + walkopts + dryrunopts,
         _('[OPTION]... [SOURCE]... DEST')),
    "debugancestor": (debugancestor, [], _('[INDEX] REV1 REV2')),
    "debugcheckstate": (debugcheckstate, [], ''),
    "debugcommands": (debugcommands, [], _('[COMMAND]')),
    "debugcomplete":
        (debugcomplete,
         [('o', 'options', None, _('show the command options'))],
         _('[-o] CMD')),
    "debugdate":
        (debugdate,
         [('e', 'extended', None, _('try extended date formats'))],
         _('[-e] DATE [RANGE]')),
    "debugdata": (debugdata, [], _('FILE REV')),
    "debugfsinfo": (debugfsinfo, [], _('[PATH]')),
    "debugindex": (debugindex, [], _('FILE')),
    "debugindexdot": (debugindexdot, [], _('FILE')),
    "debuginstall": (debuginstall, [], ''),
    "debugrebuildstate":
        (debugrebuildstate,
         [('r', 'rev', '', _('revision to rebuild to'))],
         _('[-r REV] [REV]')),
    "debugrename":
        (debugrename,
         [('r', 'rev', '', _('revision to debug'))],
         _('[-r REV] FILE')),
    "debugsetparents":
        (debugsetparents, [], _('REV1 [REV2]')),
    "debugstate":
        (debugstate,
         [('', 'nodates', None, _('do not display the saved mtime'))],
         _('[OPTION]...')),
    "debugsub":
        (debugsub,
         [('r', 'rev', '', _('revision to check'))],
         _('[-r REV] [REV]')),
    "debugwalk": (debugwalk, walkopts, _('[OPTION]... [FILE]...')),
    "^diff":
        (diff,
         [('r', 'rev', [], _('revision')),
          ('c', 'change', '', _('change made by revision'))
         ] + diffopts + diffopts2 + walkopts,
         _('[OPTION]... [-r REV1 [-r REV2]] [FILE]...')),
    "^export":
        (export,
         [('o', 'output', '', _('print output to file with formatted name')),
          ('', 'switch-parent', None, _('diff against the second parent')),
          ('r', 'rev', [], _('revisions to export')),
         ] + diffopts,
         _('[OPTION]... [-o OUTFILESPEC] REV...')),
    "^forget":
        (forget,
         [] + walkopts,
         _('[OPTION]... FILE...')),
    "grep":
        (grep,
         [('0', 'print0', None, _('end fields with NUL')),
          ('', 'all', None, _('print all revisions that match')),
          ('f', 'follow', None,
           _('follow changeset history,'
             ' or file history across copies and renames')),
          ('i', 'ignore-case', None, _('ignore case when matching')),
          ('l', 'files-with-matches', None,
           _('print only filenames and revisions that match')),
          ('n', 'line-number', None, _('print matching line numbers')),
          ('r', 'rev', [], _('search in given revision range')),
          ('u', 'user', None, _('list the author (long with -v)')),
          ('d', 'date', None, _('list the date (short with -q)')),
         ] + walkopts,
         _('[OPTION]... PATTERN [FILE]...')),
    "heads":
        (heads,
         [('r', 'rev', '', _('show only heads which are descendants of REV')),
          ('t', 'topo', False, _('show topological heads only')),
          ('a', 'active', False,
           _('show active branchheads only [DEPRECATED]')),
          ('c', 'closed', False,
           _('show normal and closed branch heads')),
         ] + templateopts,
         _('[-ac] [-r STARTREV] [REV]...')),
    "help": (help_, [], _('[TOPIC]')),
    "identify|id":
        (identify,
         [('r', 'rev', '', _('identify the specified revision')),
          ('n', 'num', None, _('show local revision number')),
          ('i', 'id', None, _('show global revision id')),
          ('b', 'branch', None, _('show branch')),
          ('t', 'tags', None, _('show tags'))],
         _('[-nibt] [-r REV] [SOURCE]')),
    "import|patch":
        (import_,
         [('p', 'strip', 1,
           _('directory strip option for patch. This has the same '
             'meaning as the corresponding patch option')),
          ('b', 'base', '', _('base path')),
          ('f', 'force', None,
           _('skip check for outstanding uncommitted changes')),
          ('', 'no-commit', None,
           _("don't commit, just update the working directory")),
          ('', 'exact', None,
           _('apply patch to the nodes from which it was generated')),
          ('', 'import-branch', None,
           _('use any branch information in patch (implied by --exact)'))] +
         commitopts + commitopts2 + similarityopts,
         _('[OPTION]... PATCH...')),
    "incoming|in":
        (incoming,
         [('f', 'force', None,
           _('run even if remote repository is unrelated')),
          ('n', 'newest-first', None, _('show newest record first')),
          ('', 'bundle', '', _('file to store the bundles into')),
          ('r', 'rev', [],
           _('a remote changeset intended to be added')),
          ('b', 'branch', [],
           _('a specific branch you would like to pull')),
         ] + logopts + remoteopts,
         _('[-p] [-n] [-M] [-f] [-r REV]...'
           ' [--bundle FILENAME] [SOURCE]')),
    "^init":
        (init,
         remoteopts,
         _('[-e CMD] [--remotecmd CMD] [DEST]')),
    "locate":
        (locate,
         [('r', 'rev', '', _('search the repository as it is in REV')),
          ('0', 'print0', None,
           _('end filenames with NUL, for use with xargs')),
          ('f', 'fullpath', None,
           _('print complete paths from the filesystem root')),
         ] + walkopts,
         _('[OPTION]... [PATTERN]...')),
    "^log|history":
        (log,
         [('f', 'follow', None,
           _('follow changeset history,'
             ' or file history across copies and renames')),
          ('', 'follow-first', None,
           _('only follow the first parent of merge changesets')),
          ('d', 'date', '', _('show revisions matching date spec')),
          ('C', 'copies', None, _('show copied files')),
          ('k', 'keyword', [], _('do case-insensitive search for a keyword')),
          ('r', 'rev', [], _('show the specified revision or range')),
          ('', 'removed', None, _('include revisions where files were removed')),
          ('m', 'only-merges', None, _('show only merges')),
          ('u', 'user', [], _('revisions committed by user')),
          ('b', 'only-branch', [],
           _('show only changesets within the given named branch')),
          ('P', 'prune', [],
           _('do not display revision or any of its ancestors')),
         ] + logopts + walkopts,
         _('[OPTION]... [FILE]')),
    "manifest":
        (manifest,
         [('r', 'rev', '', _('revision to display'))],
         _('[-r REV]')),
    "^merge":
        (merge,
         [('f', 'force', None, _('force a merge with outstanding changes')),
          ('r', 'rev', '', _('revision to merge')),
          ('P', 'preview', None,
           _('review revisions to merge (no merge is performed)'))],
         _('[-P] [-f] [[-r] REV]')),
    "outgoing|out":
        (outgoing,
         [('f', 'force', None,
           _('run even when the destination is unrelated')),
          ('r', 'rev', [],
           _('a changeset intended to be included in the destination')),
          ('n', 'newest-first', None, _('show newest record first')),
          ('b', 'branch', [],
           _('a specific branch you would like to push')),
         ] + logopts + remoteopts,
         _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]')),
    "parents":
        (parents,
         [('r', 'rev', '', _('show parents of the specified revision')),
         ] + templateopts,
         _('[-r REV] [FILE]')),
    "paths": (paths, [], _('[NAME]')),
    "^pull":
        (pull,
         [('u', 'update', None,
           _('update to new branch head if changesets were pulled')),
          ('f', 'force', None,
           _('run even when remote repository is unrelated')),
          ('r', 'rev', [],
           _('a remote changeset intended to be added')),
          ('b', 'branch', [],
           _('a specific branch you would like to pull')),
         ] + remoteopts,
         _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]')),
    "^push":
        (push,
         [('f', 'force', None, _('force push')),
          ('r', 'rev', [],
           _('a changeset intended to be included in the destination')),
          ('b', 'branch', [],
           _('a specific branch you would like to push')),
         ] + remoteopts,
         _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]')),
    "recover": (recover, []),
    "^remove|rm":
        (remove,
         [('A', 'after', None, _('record delete for missing files')),
          ('f', 'force', None,
           _('remove (and delete) file even if added or modified')),
         ] + walkopts,
         _('[OPTION]... FILE...')),
    "rename|mv":
        (rename,
         [('A', 'after', None, _('record a rename that has already occurred')),
          ('f', 'force', None,
           _('forcibly copy over an existing managed file')),
         ] + walkopts + dryrunopts,
         _('[OPTION]... SOURCE... DEST')),
    "resolve":
        (resolve,
         [('a', 'all', None, _('select all unresolved files')),
          ('l', 'list', None, _('list state of files needing merge')),
          ('m', 'mark', None, _('mark files as resolved')),
          ('u', 'unmark', None, _('unmark files as resolved')),
          ('n', 'no-status', None, _('hide status prefix'))]
         + walkopts,
         _('[OPTION]... [FILE]...')),
    "revert":
        (revert,
         [('a', 'all', None, _('revert all changes when no arguments given')),
          ('d', 'date', '', _('tipmost revision matching date')),
          ('r', 'rev', '', _('revert to the specified revision')),
          ('', 'no-backup', None, _('do not save backup copies of files')),
         ] + walkopts + dryrunopts,
         _('[OPTION]... [-r REV] [NAME]...')),
    "rollback": (rollback, []),
    "root": (root, []),
    "^serve":
        (serve,
         [('A', 'accesslog', '', _('name of access log file to write to')),
          ('d', 'daemon', None, _('run server in background')),
          ('', 'daemon-pipefds', '', _('used internally by daemon mode')),
          ('E', 'errorlog', '', _('name of error log file to write to')),
          ('p', 'port', 0, _('port to listen on (default: 8000)')),
          ('a', 'address', '',
           _('address to listen on (default: all interfaces)')),
          ('', 'prefix', '',
           _('prefix path to serve from (default: server root)')),
          ('n', 'name', '',
           _('name to show in web pages (default: working directory)')),
          ('', 'webdir-conf', '', _('name of the webdir config file'
                                    ' (serve more than one repository)')),
          ('', 'pid-file', '', _('name of file to write process ID to')),
          ('', 'stdio', None, _('for remote clients')),
          ('t', 'templates', '', _('web templates to use')),
          ('', 'style', '', _('template style to use')),
          ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
          ('', 'certificate', '', _('SSL certificate file'))],
         _('[OPTION]...')),
    "showconfig|debugconfig":
        (showconfig,
         [('u', 'untrusted', None, _('show untrusted configuration options'))],
         _('[-u] [NAME]...')),
    "^summary|sum":
        (summary,
         [('', 'remote', None, _('check for push and pull'))], '[--remote]'),
    "^status|st":
        (status,
         [('A', 'all', None, _('show status of all files')),
          ('m', 'modified', None, _('show only modified files')),
          ('a', 'added', None, _('show only added files')),
          ('r', 'removed', None, _('show only removed files')),
          ('d', 'deleted', None, _('show only deleted (but tracked) files')),
          ('c', 'clean', None, _('show only files without changes')),
          ('u', 'unknown', None, _('show only unknown (not tracked) files')),
          ('i', 'ignored', None, _('show only ignored files')),
          ('n', 'no-status', None, _('hide status prefix')),
          ('C', 'copies', None, _('show source of copied files')),
          ('0', 'print0', None,
           _('end filenames with NUL, for use with xargs')),
          ('', 'rev', [], _('show difference from revision')),
          ('', 'change', '', _('list the changed files of a revision')),
         ] + walkopts,
         _('[OPTION]... [FILE]...')),
    "tag":
        (tag,
         [('f', 'force', None, _('replace existing tag')),
          ('l', 'local', None, _('make the tag local')),
          ('r', 'rev', '', _('revision to tag')),
          ('', 'remove', None, _('remove a tag')),
          # -l/--local is already there, commitopts cannot be used
          ('m', 'message', '', _('use <text> as commit message')),
         ] + commitopts2,
         _('[-f] [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...')),
    "tags": (tags, [], ''),
    "tip":
        (tip,
         [('p', 'patch', None, _('show patch')),
          ('g', 'git', None, _('use git extended diff format')),
         ] + templateopts,
         _('[-p] [-g]')),
    "unbundle":
        (unbundle,
         [('u', 'update', None,
           _('update to new branch head if changesets were unbundled'))],
         _('[-u] FILE...')),
    "^update|up|checkout|co":
        (update,
         [('C', 'clean', None, _('discard uncommitted changes (no backup)')),
          ('c', 'check', None, _('check for uncommitted changes')),
          ('d', 'date', '', _('tipmost revision matching date')),
          ('r', 'rev', '', _('revision'))],
         _('[-c] [-C] [-d DATE] [[-r] REV]')),
    "verify": (verify, []),
    "version": (version_, []),
}

# Space-separated names of commands that can run without a repository.
norepo = ("clone init version help debugcommands debugcomplete debugdata"
          " debugindex debugindexdot debugdate debuginstall debugfsinfo")
# Commands for which a repository is optional.
optionalrepo = ("identify paths serve showconfig debugancestor")
|
iluxa-com/mercurial-crew-tonfa
|
mercurial/commands.py
|
Python
|
gpl-2.0
| 141,490
|
[
"VisIt"
] |
613f2e4a3c100f75b2c92341db7aa74111eb615d072b69e471de770728237b8b
|
#! /usr/bin/env python
import sys, csv
from functools import partial
# Hits whose e value is not below this threshold are discarded.
e_cutoff = 1e-3

def printHelp():
    """Print the usage/man-style help text and exit via sys.exit.

    BUG FIX: the original text contained literal "/n/n/t" where the
    escape sequences "\\n\\n\\t" were intended, so the -1/-2 option
    descriptions ran together on one line.
    """
    sys.exit("NAME\n\treciprocalCompare: A tool for sorting through the results of a reciprocal blast.\nSYNOPSIS\n\treciprocalCompare --help\n\treciprocalCompare [-[|]e12] [-s] file1 file2 [outputfile]\nDESCRIPTION\n\treciprocalCompare finds the best matches (smallest e value) between the results of blasting two sequences against each other. Blast result format must be tabular (option 8 on blastall 2.2.24), and the parser expects the first, second, and eleventh columns in the results file to be the query sequence name, the database sequence name, and the e value respectively.\nOPTIONS\n\t-s Write output to standard out instead of to a file. No output file should be specified. The Blast format for the input files must be tabular without comment lines (option \"-m 8\" on blastall 2.2.24)\n\n\t-1 Sorts output alphabetically by the first file's sequence names.\n\n\t-2 Sorts output alphabetically by the second file's sequence names.\n\n\t-e Sorts output by e value (default).")
def readArgs():
    """Split sys.argv into [flags, file1, file2, output].

    Tokens beginning with '-' are collected as flags; the first three
    remaining tokens fill file1, file2 and the output filename in order
    (missing ones stay '').  A fourth positional token aborts the program.
    """
    flags = []
    positionals = []
    for token in sys.argv[1:]:
        if token[0] == '-':
            flags.append(token)
        elif len(positionals) < 3:
            positionals.append(token)
        else:
            sys.exit("Error: Too many arguments (2 files and 1 output max).")
    while len(positionals) < 3:
        positionals.append('')
    return [flags] + positionals
# Parse the command line once at import time: [flags, file1, file2, output].
args = readArgs()
def list_line_sort(data, line):
    """Sort *data* in place, alphabetically by the item at index *line*."""
    def column(row):
        return row[line]
    data.sort(key=column)
def list_line_num_sort(data, line):
    """Sort *data* in place, numerically by the item at index *line*."""
    def column(row):
        return float(row[line])
    data.sort(key=column)
# NOTE(review): `line` and `outputMode` below appear unused -- the live
# preferences are kept in `prefdict`.  Retained for backward compatibility.
line = 2
outputMode = 0

# User preferences: "sort" selects the output column to sort on
# (0 = file1 name, 1 = file2 name, 2 = e value); "outputMode" is
# 0 = write to a file, 1 = write to stdout.
prefdict = {"outputMode": 0, "sort": 2}

def assign(key, value, dictionary):
    """Set dictionary[key] = value (helper for partial application)."""
    dictionary[key] = value

def passign(key, value):
    """Assign *value* under *key* in the global preference dict."""
    assign(key, value, prefdict)

# Map each command-line flag to the zero-argument action it triggers.
# BUG FIX: the original bound "--help" to printHelp() -- i.e. it *called*
# printHelp while building this dict, so the script always printed the
# help text and exited immediately.  Bind the function object instead.
flagdict = {"e": partial(passign, "sort", 2),
            "--sort=e": partial(passign, "sort", 2),
            "1": partial(passign, "sort", 0),
            "--sort=file1": partial(passign, "sort", 0),
            "2": partial(passign, "sort", 1),
            "--sort=file2": partial(passign, "sort", 1),
            "--stdout": partial(passign, "outputMode", 1),
            "s": partial(passign, "outputMode", 1),
            "--help": printHelp}
def checkDefined(l, b, e):
    """Return True iff l[b] .. l[e] (inclusive) all exist and are non-empty.

    Used to validate that the positional command-line arguments required
    by the chosen output mode were supplied.

    Cleanup: removed the dead, never-used `returnlist` local from the
    original implementation; behavior is otherwise unchanged.
    """
    for x in range(b, e + 1):
        try:
            value = l[x]
        except:  # deliberately broad, matching the original: any lookup failure means "not defined"
            return False
        if value == '':
            return False
    return True
# Dispatch each flag: "--long" flags look up the whole token in flagdict,
# short-flag clusters (e.g. "-se") dispatch one letter at a time.
# NOTE(review): the bare `except:` also hides errors raised *inside* a
# handler, reporting them as "Unknown argument"; and a bare "-" token
# would raise IndexError on arg[1] -- confirm acceptable for this tool.
for arg in args[0]:
    if arg[0] == arg[1] == '-':
        try: flagdict[arg]()
        except: sys.exit("Error: Unknown argument '" + str(arg) + "' Use --help for help.")
    elif arg[0] == '-':
        for letter in arg[1:]:
            try: flagdict[letter]()
            except: sys.exit("Error: Unknown argument '" + str(letter) + "' Use --help for help.")
    else:
        # readArgs only puts '-'-prefixed tokens in args[0], so this is unreachable.
        raise Exception("something is very broken D: Check the function that sorts arguments.")

# Validate the positional arguments against the chosen output mode.
if prefdict["outputMode"] == 0: # Output in file mode
    if checkDefined(args, 1, 3) == False:
        sys.exit("Error: Not enough arguments. Requires two files to compare and a name for an output file.")
elif prefdict["outputMode"] == 1: # Output in stdout mode
    if checkDefined(args, 3, 3) == True:
        sys.exit("Error: Too many arguments. If using stdout as output, no output file name should be given")
    elif checkDefined(args, 1, 2) == False:
        sys.exit("Error: Too few arguments. Two files are required for comparison. No output file when using stdout as output")
def dataToDict(filename, cutoff=None):
    """Parse a tabular BLAST result file into {query: [best_subject, best_evalue]}.

    Expects comma-separated rows where column 0 is the query name, column 1
    the subject/database sequence name and column 10 the e value.  For each
    query only the hit with the smallest e value below *cutoff* is kept.

    *cutoff* defaults to the module-level e_cutoff, so existing callers are
    unaffected.

    BUG FIX: the original compared the e value as a *string* (lexicographic
    ordering, and a meaningless str-vs-float comparison against e_cutoff
    under Python 2), and its trailing `or e_current == None` stored the
    first hit per query even when it failed the cutoff.  E values are now
    converted with float() and the cutoff applies to every hit.
    """
    if cutoff is None:
        cutoff = e_cutoff
    best = {}
    with open(filename, 'r') as csvdata:
        for row in csv.reader(csvdata, quotechar="'"):
            query = row[0]
            evalue = float(row[10])
            if evalue < cutoff:
                if query not in best or evalue < best[query][1]:
                    best[query] = [row[1], evalue]
    return best
d1 = dataToDict(args[1])
d2 = dataToDict(args[2])

# Collect reciprocal best hits: seq's best hit in file2 is opp, and opp's
# best hit in file1 must be seq again.  The reported e value is the worse
# (larger) of the two directions.
# BUG FIX: the original looped over xrange(0, min(len(d1), len(d2)) - 1)
# and indexed d1.keys() positionally, which (a) skipped the last entry
# (off by one) and (b) arbitrarily truncated d1 to the size of d2.
# Iterating every d1 entry directly examines all candidate pairs.
output = []
for seq, (opp, evalue1) in d1.items():
    reciprocal = d2.get(opp, [None])[0]
    if seq == reciprocal:
        output.append([seq, opp, max(evalue1, d2[opp][1])])

# Sort numerically when sorting on the e-value column, alphabetically
# when sorting on either sequence-name column.
if prefdict["sort"] == 2:
    list_line_num_sort(output, prefdict["sort"])
else:
    list_line_sort(output, prefdict["sort"])
def writeOutput(place):
    """Write the collected reciprocal matches to *place* as excel-dialect CSV."""
    csvWriter = csv.writer(place, dialect='excel')
    for line in output:
        csvWriter.writerow(line)

# BUG FIX: the original unconditionally executed open(args[3], 'w+')
# before checking the output mode; in stdout mode args[3] is '' and
# open('') raises an error.  Open the output file only in file mode, and
# use a with-block so it is flushed and closed.
if prefdict["outputMode"] == 0:
    with open(args[3], 'w+') as outfile:
        writeOutput(outfile)
elif prefdict["outputMode"] == 1:
    writeOutput(sys.stdout)
|
mattsoulanille/Protein-reciprocal-compare
|
reciprocalCompare.py
|
Python
|
mit
| 4,727
|
[
"BLAST"
] |
a711428801f71bf03f9bdbaaadc96eeacc557f63d3235862699c33179b088c43
|
from gpaw import GPAW
from ase.structure import bulk
from ase.dft.kpoints import ibz_points, get_bandpath
import numpy as np
# NOTE: this is a Python 2 script (print statement below).
# Bulk silicon in the diamond structure, experimental-ish lattice constant.
si = bulk('Si', 'diamond', a=5.459)

# Ground-state calculation.  The `if 1:` toggle lets you flip to the else
# branch and reuse the saved .gpw file instead of recomputing.
if 1:
    k = 6
    calc = GPAW(kpts=(k, k, k),
                xc='PBE')
    si.set_calculator(calc)
    e = si.get_potential_energy()
    efermi = calc.get_fermi_level()
    calc.write('Si-gs.gpw')
else:
    efermi = GPAW('Si-gs.gpw', txt=None).get_fermi_level()

# High-symmetry points of the fcc Brillouin zone and the band path
# W-L-Gamma-X-W-K.  Note: X is reused below as the special-point positions
# returned by get_bandpath.
points = ibz_points['fcc']
G = points['Gamma']
X = points['X']
W = points['W']
K = points['K']
L = points['L']
kpts, x, X = get_bandpath([W, L, G, X, W, K], si.cell)
print len(kpts), len(x), len(X)
point_names = ['W', 'L', '\Gamma', 'X', 'W', 'K']

# Band-structure calculation: keep the ground-state density fixed and
# converge the 8 lowest bands along the path.
if 1:
    calc = GPAW('Si-gs.gpw',
                kpts=kpts,
                fixdensity=True,
                usesymm=None,#False,
                basis='dzp',
                convergence=dict(nbands=8))
    e = calc.get_atoms().get_potential_energy()
    calc.write('Si-bs.gpw')

calc = GPAW('Si-bs.gpw', txt=None)

import matplotlib.pyplot as plt

# Plot the 8 lowest bands relative to the Fermi level.
e = np.array([calc.get_eigenvalues(k) for k in range(len(kpts))])
e -= efermi
emin = e.min() - 1
emax = e[:, :8].max() + 1
for n in range(8):
    plt.plot(x, e[:, n])
# Vertical guide lines at the high-symmetry points.
for p in X:
    plt.plot([p, p], [emin, emax], 'k-')
plt.xticks(X, ['$%s$' % n for n in point_names])
plt.axis(xmin=0, xmax=X[-1], ymin=emin, ymax=emax)
plt.xlabel('k-vector')
plt.ylabel('Energy (eV)')
plt.title('PBE bandstructure of Silicon')
plt.savefig('Si-bs.png')
plt.show()
|
qsnake/gpaw
|
doc/exercises/band_structure/Si_guc.py
|
Python
|
gpl-3.0
| 1,475
|
[
"ASE",
"GPAW"
] |
c60def6a84ba09364713ca35100ba2a4a6e3078e7ffa7b7687f2a6f036f41861
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the Member.visits many-to-many link to whsites.Visit."""

    dependencies = [
        ('whsites', '0002_auto_20150102_1905'),
        ('members', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='member',
            name='visits',
            # NOTE(review): null=True has no effect on ManyToManyField
            # (Django warns with fields.W340).  Harmless, and historical
            # migrations should not be edited once applied -- confirm
            # before cleaning up.
            field=models.ManyToManyField(null=True, to='whsites.Visit'),
            preserve_default=True,
        ),
    ]
|
robjordan/unesco_project
|
unesco/members/migrations/0002_member_visits.py
|
Python
|
apache-2.0
| 494
|
[
"VisIt"
] |
6fe79cf866c7ae01cfd05c878c7632c98c55edd7d6a65601f923eb8295d796de
|
#!/usr/bin/env python
import sys
import os
import numpy as np
from datetime import datetime
"""
Obtain a tight binding Hamiltonian of Haldane model with Wannier90 format
How to run
python haldane_hr_gen.py
This will generate the tight binding hamiltonian Haldane_hr.dat
LATTICE
Angstrom
2.1377110 -1.2342080 0.0000000
0.0000000 2.4684160 0.0000000
0.0000000 0.0000000 10.000000
ATOM_POSITIONS
2 ! number of atoms for projectors
Direct ! Direct or Cartisen coordinate
C 0.333333 0.666667 0.500000
C 0.666667 0.333333 0.500000
"""
# Define tight-binding parameters
# You can find phase diagram in PRL 61,2015 (1988)
# Chern = 0
#m=0.2; phi= np.pi/2.0; t1=1.0; t2=0.0;
# Gapless phase
m=0.2; phi= np.pi/2.0; t1=1.0; t2=m/3.0/np.sqrt(3);
# Chern = 1
#m=0.2; phi= np.pi/2.0; t1=1.0; t2=m/3.0/np.sqrt(3)*2.0;
# maximum dimension for hr matrix
ndim = 2
# number of Wigner-Seitz lattice vectors (R points) filled in below
nrpts = 7
# atoms in the unit cell, one orbital each
num_patom=2
# hr matrix: H(R) blocks, indexed [orbital, orbital, R]
norbs = num_patom*1
hmnr= np.zeros((norbs,norbs,nrpts),dtype = np.complex128)
# WS points (integer lattice coordinates of each R)
irvec = np.zeros((3,nrpts),dtype = np.int32)
# degeneracy of each R point (all 1 here)
dege = np.zeros((nrpts),dtype = np.int32)+1
# complex unit
zi=1j

# R = (0, 0): on-site staggered mass +/-m and nearest-neighbour hopping t1.
ir= 0
irvec[0, ir]= 0
irvec[1, ir]= 0
hmnr[0, 0, ir]= m
hmnr[1, 1, ir]= -m
hmnr[0, 1, ir]= t1
hmnr[1, 0, ir]= t1
# 1 0  -- next-nearest-neighbour hoppings t2*exp(-/+ i*phi) plus t1 off-diagonal
ir= ir+1
irvec[0, ir]= 1
irvec[1, ir]= 0
hmnr[0, 0, ir]= (np.cos(phi)-zi*np.sin(phi)) *t2
hmnr[1, 1, ir]= (np.cos(phi)+zi*np.sin(phi)) *t2
hmnr[0, 1, ir]= t1
# 0 1
ir= ir+1
irvec[0, ir]= 0
irvec[1, ir]= 1
hmnr[0, 0, ir]= (np.cos(phi)-zi*np.sin(phi)) *t2
hmnr[1, 1, ir]= (np.cos(phi)+zi*np.sin(phi)) *t2
hmnr[1, 0, ir]= t1
# 1 1
ir= ir+1
irvec[0, ir]= 1
irvec[1, ir]= 1
hmnr[0, 0, ir]= (np.cos(phi)+zi*np.sin(phi)) *t2
hmnr[1, 1, ir]= (np.cos(phi)-zi*np.sin(phi)) *t2
#-1 0
ir= ir+1
irvec[0, ir]=-1
irvec[1, ir]= 0
hmnr[0, 0, ir]= (np.cos(phi)+zi*np.sin(phi)) *t2
hmnr[1, 1, ir]= (np.cos(phi)-zi*np.sin(phi)) *t2
hmnr[1, 0, ir]= t1
# 0-1
ir= ir+1
irvec[0, ir]= 0
irvec[1, ir]=-1
hmnr[0, 0, ir]= (np.cos(phi)+zi*np.sin(phi)) *t2
hmnr[1, 1, ir]= (np.cos(phi)-zi*np.sin(phi)) *t2
hmnr[0, 1, ir]= t1
#-1-1
ir= ir+1
irvec[0, ir]=-1
irvec[1, ir]=-1
hmnr[0, 0, ir]= (np.cos(phi)-zi*np.sin(phi)) *t2
hmnr[1, 1, ir]= (np.cos(phi)+zi*np.sin(phi)) *t2
#print "dump hr.dat..."
# Write the Hamiltonian in Wannier90 hr.dat format: comment line, number
# of orbitals, number of R points, degeneracies (15 per line), then one
# line per (R, orbital, orbital) matrix element.
with open('Haldane_hr.dat','w') as f:
    line="Haldane model with m="+str(m)+", phi="+str(phi/np.pi)+"pi, t1="+str(t1)+", t2="+str(t2)+"Ref:Physical Review Letters 61, 18(1988)"+'\n'
    f.write(line)
    nl = np.int32(np.ceil(nrpts/15.0))
    f.write(str(norbs)+'\n')
    f.write(str(nrpts)+'\n')
    for l in range(nl):
        line=" "+' '.join([str(np.int32(i)) for i in dege[l*15:(l+1)*15]])
        f.write(line)
    f.write('\n')
    for irpt in range(nrpts):
        rx = irvec[0,irpt];ry = irvec[1,irpt];rz = irvec[2,irpt]
        for jatomorb in range(norbs):
            for iatomorb in range(norbs):
                rp =hmnr[iatomorb,jatomorb,irpt].real
                ip =hmnr[iatomorb,jatomorb,irpt].imag
                line="{:8d}{:8d}{:8d}{:8d}{:8d}{:20.10f}{:20.10f}\n".format(rx,ry,rz,jatomorb+1,iatomorb+1,rp,ip)
                f.write(line)
|
quanshengwu/wannier_tools
|
examples/Haldane_model/haldane_hr_gen-gapless.py
|
Python
|
gpl-3.0
| 3,153
|
[
"Wannier90"
] |
faf21bc9b0bc4faa2ccedea1d668cb3a194ef83412355f65b4635b391f83521f
|
#
# Copyright (C) 2017-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import espressomd
import numpy as np
class RescaleTest(ut.TestCase):
    """Test the global box and particle rescaling.
    """
    # Shared simulation system: no Verlet skin, fixed integrator time step.
    system = espressomd.System(box_l=[1.0, 1.0, 1.0])
    system.cell_system.skin = 0.0
    system.time_step = 0.01

    def setUp(self):
        # Fresh 10x10x10 box with N randomly placed particles per test.
        N = 100
        self.system.box_l = 3 * [10]
        self.partcls = self.system.part.add(
            pos=self.system.box_l * np.random.random((N, 3)))

    def tearDown(self):
        self.system.part.clear()

    def test_iso(self):
        """Test 'isotropic' case (dir="xyz").
        """
        scale = 1.3
        new_box_l = scale * self.system.box_l[0]
        old_pos = self.partcls.pos
        self.system.change_volume_and_rescale_particles(new_box_l)
        new_pos = self.partcls.pos
        # Every coordinate must have been multiplied by the same factor.
        max_diff = np.max(np.abs(new_pos / old_pos - scale))
        self.assertAlmostEqual(0., max_diff, places=10)

    def dir_test(self, dir):
        """Test scaling of a single direction.
        """
        # NOTE(review): parameter name `dir` shadows the builtin; renaming
        # would touch the test_x/y/z callers, so only flagged here.
        scale = 0.7
        new_box_l = scale * self.system.box_l[dir]
        old_pos = self.partcls.pos
        self.system.change_volume_and_rescale_particles(new_box_l, dir=dir)
        new_pos = self.partcls.pos
        for i in range(3):
            if i == dir:
                # Rescaled axis: positions scale by exactly `scale`.
                max_diff = np.max(
                    np.abs(new_pos[:, i] / old_pos[:, i] - scale))
            else:
                # Other axes must be untouched.
                max_diff = np.max(np.abs(new_pos[:, i] - old_pos[:, i]))
            self.assertAlmostEqual(0., max_diff, places=10)

    def test_x(self):
        self.dir_test(0)

    def test_y(self):
        self.dir_test(1)

    def test_z(self):
        self.dir_test(2)
# Allow running this test module directly.
if __name__ == "__main__":
    ut.main()
|
espressomd/espresso
|
testsuite/python/rescale.py
|
Python
|
gpl-3.0
| 2,433
|
[
"ESPResSo"
] |
954330782f1c337e824dde20d31c245f8d378a9bccf65fcdff81119150aeef86
|
"""User-friendly public interface to polynomial functions. """
from sympy.core import (
S, Basic, Expr, I, Integer, Add, Mul, Dummy, Tuple, Rational
)
from sympy.core.mul import _keep_coeff
from sympy.core.sympify import (
sympify, SympifyError,
)
from sympy.core.decorators import (
_sympifyit,
)
from sympy.polys.polyclasses import DMP
from sympy.polys.polyutils import (
basic_from_dict,
_sort_gens,
_unify_gens,
_dict_reorder,
_dict_from_expr,
_parallel_dict_from_expr,
)
from sympy.polys.rationaltools import (
together,
)
from sympy.polys.rootisolation import (
dup_isolate_real_roots_list,
)
from sympy.polys.groebnertools import groebner as _groebner
from sympy.polys.fglmtools import matrix_fglm
from sympy.polys.monomialtools import (
Monomial, monomial_key,
)
from sympy.polys.polyerrors import (
OperationNotSupported, DomainError,
CoercionFailed, UnificationFailed,
GeneratorsNeeded, PolynomialError,
MultivariatePolynomialError,
ExactQuotientFailed,
PolificationFailed,
ComputationFailed,
GeneratorsError,
)
from sympy.utilities import group
import sympy.polys
import sympy.mpmath
from sympy.polys.domains import FF, QQ
from sympy.polys.constructor import construct_domain
from sympy.polys import polyoptions as options
from sympy.core.compatibility import iterable
class Poly(Expr):
    """Generic class for representing polynomial expressions. """

    # Only the raw representation and the generators are stored per instance.
    __slots__ = ['rep', 'gens']

    is_commutative = True  # polynomials commute under multiplication
    is_Poly = True         # fast type check used throughout sympy.polys
    def __new__(cls, rep, *gens, **args):
        """Create a new polynomial instance out of something useful. """
        opt = options.build_options(gens, args)

        if 'order' in opt:
            raise NotImplementedError("'order' keyword is not implemented yet")

        # Non-string iterables are raw representations: dicts map monomials
        # to coefficients, other iterables are dense coefficient lists.
        if iterable(rep, exclude=str):
            if isinstance(rep, dict):
                return cls._from_dict(rep, opt)
            else:
                return cls._from_list(list(rep), opt)
        else:
            # Anything else is sympified and treated as a Poly or an Expr.
            rep = sympify(rep)

            if rep.is_Poly:
                return cls._from_poly(rep, opt)
            else:
                return cls._from_expr(rep, opt)
    @classmethod
    def new(cls, rep, *gens):
        """Construct :class:`Poly` instance from raw representation. """
        if not isinstance(rep, DMP):
            raise PolynomialError(
                "invalid polynomial representation: %s" % rep)
        elif rep.lev != len(gens) - 1:
            # The DMP nesting level must match the number of generators.
            raise PolynomialError("invalid arguments: %s, %s" % (rep, gens))

        obj = Basic.__new__(cls)

        obj.rep = rep
        obj.gens = gens

        return obj
@classmethod
def from_dict(cls, rep, *gens, **args):
"""Construct a polynomial from a ``dict``. """
opt = options.build_options(gens, args)
return cls._from_dict(rep, opt)
@classmethod
def from_list(cls, rep, *gens, **args):
"""Construct a polynomial from a ``list``. """
opt = options.build_options(gens, args)
return cls._from_list(rep, opt)
@classmethod
def from_poly(cls, rep, *gens, **args):
"""Construct a polynomial from a polynomial. """
opt = options.build_options(gens, args)
return cls._from_poly(rep, opt)
@classmethod
def from_expr(cls, rep, *gens, **args):
"""Construct a polynomial from an expression. """
opt = options.build_options(gens, args)
return cls._from_expr(rep, opt)
@classmethod
def _from_dict(cls, rep, opt):
"""Construct a polynomial from a ``dict``. """
gens = opt.gens
if not gens:
raise GeneratorsNeeded(
"can't initialize from 'dict' without generators")
level = len(gens) - 1
domain = opt.domain
if domain is None:
domain, rep = construct_domain(rep, opt=opt)
else:
for monom, coeff in rep.iteritems():
rep[monom] = domain.convert(coeff)
return cls.new(DMP.from_dict(rep, level, domain), *gens)
@classmethod
def _from_list(cls, rep, opt):
"""Construct a polynomial from a ``list``. """
gens = opt.gens
if not gens:
raise GeneratorsNeeded(
"can't initialize from 'list' without generators")
elif len(gens) != 1:
raise MultivariatePolynomialError(
"'list' representation not supported")
level = len(gens) - 1
domain = opt.domain
if domain is None:
domain, rep = construct_domain(rep, opt=opt)
else:
rep = map(domain.convert, rep)
return cls.new(DMP.from_list(rep, level, domain), *gens)
    @classmethod
    def _from_poly(cls, rep, opt):
        """Construct a polynomial from a polynomial. """
        # Re-wrap if the incoming Poly is of a different (sub)class.
        if cls != rep.__class__:
            rep = cls.new(rep.rep, *rep.gens)

        gens = opt.gens
        field = opt.field
        domain = opt.domain

        if gens and rep.gens != gens:
            if set(rep.gens) != set(gens):
                # Different generator sets: rebuild from the expression form.
                return cls._from_expr(rep.as_expr(), opt)
            else:
                # Same generators, different order: just reorder.
                rep = rep.reorder(*gens)

        # An explicitly requested domain wins over the field flag.
        if 'domain' in opt and domain:
            rep = rep.set_domain(domain)
        elif field is True:
            rep = rep.to_field()

        return rep
@classmethod
def _from_expr(cls, rep, opt):
    """Construct a polynomial from an expression. """
    # Flatten the expression into {monomial: coefficient} form, then
    # delegate to the dict-based constructor.
    dict_rep, opt = _dict_from_expr(rep, opt)
    return cls._from_dict(dict_rep, opt)
def _hashable_content(self):
    """Allow SymPy to hash Poly instances. """
    # Identity is the raw representation plus the generator tuple.
    return (self.rep, self.gens)
def __hash__(self):
    # Delegate to Basic's hash, which uses _hashable_content().
    return super(Poly, self).__hash__()
@property
def free_symbols(self):
    """
    Free symbols of a polynomial expression.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(x**2 + 1).free_symbols
    set([x])
    >>> Poly(x**2 + y).free_symbols
    set([x, y])
    >>> Poly(x**2 + y, x).free_symbols
    set([x, y])
    """
    # Union of the generators' free symbols plus anything hiding in the
    # ground domain (e.g. symbolic coefficients).
    gen_symbols = set().union(*[gen.free_symbols for gen in self.gens])
    return gen_symbols | self.free_symbols_in_domain
@property
def free_symbols_in_domain(self):
    """
    Free symbols of the domain of ``self``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(x**2 + 1).free_symbols_in_domain
    set()
    >>> Poly(x**2 + y).free_symbols_in_domain
    set()
    >>> Poly(x**2 + y, x).free_symbols_in_domain
    set([y])
    """
    domain = self.rep.dom
    if domain.is_Composite:
        # Polynomial/fraction ground domains carry their own generators.
        sources = domain.gens
    elif domain.is_EX:
        # EX coefficients are arbitrary expressions; scan each coefficient.
        sources = self.coeffs()
    else:
        sources = ()
    symbols = set()
    for item in sources:
        symbols |= item.free_symbols
    return symbols
@property
def args(self):
    """
    Don't mess up with the core.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x).args
    (x**2 + 1,)
    """
    # Expose only the expression form so core traversal stays consistent.
    return (self.as_expr(),)
@property
def gen(self):
    """
    Return the principal generator.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x).gen
    x
    """
    # The principal generator is, by convention, the first one.
    return self.gens[0]
@property
def domain(self):
    """Get the ground domain of ``self`` (property form of ``get_domain``). """
    return self.get_domain()
@property
def zero(self):
    """Return zero polynomial with ``self``'s properties. """
    # Same level, domain and generators -- only the coefficients differ.
    return self.new(self.rep.zero(self.rep.lev, self.rep.dom), *self.gens)
@property
def one(self):
    """Return one polynomial with ``self``'s properties. """
    # Same level, domain and generators -- only the coefficients differ.
    return self.new(self.rep.one(self.rep.lev, self.rep.dom), *self.gens)
@property
def unit(self):
    """Return unit polynomial with ``self``'s properties. """
    # Same level, domain and generators -- only the coefficients differ.
    return self.new(self.rep.unit(self.rep.lev, self.rep.dom), *self.gens)
def unify(f, g):
    """
    Make ``f`` and ``g`` belong to the same domain.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> f, g = Poly(x/2 + 1), Poly(2*x + 1)
    >>> f
    Poly(1/2*x + 1, x, domain='QQ')
    >>> g
    Poly(2*x + 1, x, domain='ZZ')
    >>> F, G = f.unify(g)
    >>> F
    Poly(1/2*x + 1, x, domain='QQ')
    >>> G
    Poly(2*x + 1, x, domain='QQ')
    """
    # _unify returns raw DMP reps; wrap them back into Poly via ``per``.
    _, per, F, G = f._unify(g)
    return per(F), per(G)
def _unify(f, g):
    """Bring ``f`` and ``g`` to a common domain and generator set.

    Returns ``(dom, per, F, G)`` where ``F`` and ``G`` are raw ``DMP``
    representations over the unified domain ``dom``, and ``per`` wraps a
    raw representation back into a Poly over the unified generators.
    """
    g = sympify(g)
    if not g.is_Poly:
        # ``g`` is a plain expression: try to coerce it into f's domain.
        try:
            return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g))
        except CoercionFailed:
            raise UnificationFailed("can't unify %s with %s" % (f, g))
    if isinstance(f.rep, DMP) and isinstance(g.rep, DMP):
        gens = _unify_gens(f.gens, g.gens)
        dom, lev = f.rep.dom.unify(g.rep.dom, gens), len(gens) - 1
        if f.gens != gens:
            # Reorder/extend f's monomials to the unified generator tuple.
            f_monoms, f_coeffs = _dict_reorder(
                f.rep.to_dict(), f.gens, gens)
            if f.rep.dom != dom:
                f_coeffs = [ dom.convert(c, f.rep.dom) for c in f_coeffs ]
            F = DMP(dict(zip(f_monoms, f_coeffs)), dom, lev)
        else:
            F = f.rep.convert(dom)
        if g.gens != gens:
            # Same treatment for g.
            g_monoms, g_coeffs = _dict_reorder(
                g.rep.to_dict(), g.gens, gens)
            if g.rep.dom != dom:
                g_coeffs = [ dom.convert(c, g.rep.dom) for c in g_coeffs ]
            G = DMP(dict(zip(g_monoms, g_coeffs)), dom, lev)
        else:
            G = g.rep.convert(dom)
    else:
        raise UnificationFailed("can't unify %s with %s" % (f, g))
    cls = f.__class__
    def per(rep, dom=dom, gens=gens, remove=None):
        # Wrap a raw rep over the unified domain back into a Poly;
        # ``remove`` drops one generator (used by e.g. resultants).
        if remove is not None:
            gens = gens[:remove] + gens[remove + 1:]
            if not gens:
                # No generators left: collapse to a ground-domain value.
                return dom.to_sympy(rep)
        return cls.new(rep, *gens)
    return dom, per, F, G
def per(f, rep, gens=None, remove=None):
    """
    Create a Poly out of the given representation.
    Examples
    ========
    >>> from sympy import Poly, ZZ
    >>> from sympy.abc import x, y
    >>> from sympy.polys.polyclasses import DMP
    >>> a = Poly(x**2 + 1)
    >>> a.per(DMP([ZZ(1), ZZ(1)], ZZ), gens=[y])
    Poly(y + 1, y, domain='ZZ')
    """
    if gens is None:
        gens = f.gens
    if remove is not None:
        # Drop one generator (used when an operation eliminates a variable).
        gens = gens[:remove] + gens[remove + 1:]
        if not gens:
            # No generators left: the result is a ground-domain constant.
            return f.rep.dom.to_sympy(rep)
    return f.__class__.new(rep, *gens)
def set_domain(f, domain):
    """Set the ground domain of ``f``. """
    # Preprocess the domain spec exactly as the Poly constructor would.
    new_dom = options.build_options(f.gens, {'domain': domain}).domain
    return f.per(f.rep.convert(new_dom))
def get_domain(f):
    """Get the ground domain of ``f``. """
    return f.rep.dom
def set_modulus(f, modulus):
    """
    Set the modulus of ``f``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(5*x**2 + 2*x - 1, x).set_modulus(2)
    Poly(x**2 + 1, x, modulus=2)
    """
    # Validate the modulus, then switch the ground domain to GF(modulus).
    modulus = options.Modulus.preprocess(modulus)
    return f.set_domain(FF(modulus))
def get_modulus(f):
    """
    Get the modulus of ``f``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, modulus=2).get_modulus()
    2
    """
    domain = f.get_domain()
    if not domain.is_FiniteField:
        raise PolynomialError("not a polynomial over a Galois field")
    # For GF(p) the modulus is the characteristic p.
    return Integer(domain.characteristic())
def _eval_subs(f, old, new):
    """Internal implementation of :func:`subs`. """
    if old in f.gens:
        if new.is_number:
            # Substituting a number for a generator is evaluation.
            return f.eval(old, new)
        else:
            # Try a cheap generator replacement first.
            try:
                return f.replace(old, new)
            except PolynomialError:
                pass
    # Fall back to substitution on the expression form.
    return f.as_expr().subs(old, new)
def exclude(f):
    """
    Remove unnecessary generators from ``f``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import a, b, c, d, x
    >>> Poly(a + x, a, b, c, d, x).exclude()
    Poly(a + x, a, x, domain='ZZ')
    """
    # ``J`` holds the indices of generators that do not actually occur.
    J, new = f.rep.exclude()
    kept = [gen for j, gen in enumerate(f.gens) if j not in J]
    return f.per(new, gens=kept)
def replace(f, x, y=None):
    """
    Replace ``x`` with ``y`` in generators list.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(x**2 + 1, x).replace(x, y)
    Poly(y**2 + 1, y, domain='ZZ')
    """
    if y is None:
        # One-argument form: replace the sole generator with ``x``.
        if f.is_univariate:
            x, y = f.gen, x
        else:
            raise PolynomialError(
                "syntax supported only in univariate case")
    if x == y:
        return f
    if x in f.gens and y not in f.gens:
        dom = f.get_domain()
        # Refuse when ``y`` already appears in a composite ground domain,
        # since that would silently change the polynomial's meaning.
        if not dom.is_Composite or y not in dom.gens:
            gens = list(f.gens)
            gens[gens.index(x)] = y
            return f.per(f.rep, gens=gens)
    raise PolynomialError("can't replace %s with %s in %s" % (x, y, f))
def reorder(f, *gens, **args):
    """
    Efficiently apply new order of generators.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(x**2 + x*y**2, x, y).reorder(y, x)
    Poly(y**2*x + x**2, y, x, domain='ZZ')
    """
    opt = options.Options((), args)
    if not gens:
        # No explicit order: fall back to the canonical generator sort.
        gens = _sort_gens(f.gens, opt=opt)
    elif set(f.gens) != set(gens):
        raise PolynomialError(
            "generators list can differ only up to order of elements")
    # Permute the exponent tuples of every monomial to the new order.
    rep = dict(zip(*_dict_reorder(f.rep.to_dict(), f.gens, gens)))
    return f.per(DMP(rep, f.rep.dom, len(gens) - 1), gens=gens)
def ltrim(f, gen):
    """
    Remove dummy generators from the "left" of ``f``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x, y, z
    >>> Poly(y**2 + y*z**2, x, y, z).ltrim(y)
    Poly(y**2 + y*z**2, y, z, domain='ZZ')
    """
    rep = f.as_dict(native=True)
    j = f._gen_to_level(gen)
    terms = {}
    for monom, coeff in rep.iteritems():
        # Drop the exponents of the trimmed generators.
        monom = monom[j:]
        if monom not in terms:
            terms[monom] = coeff
        else:
            # A collision means a trimmed generator actually occurred in f.
            raise PolynomialError("can't left trim %s" % f)
    gens = f.gens[j:]
    return f.new(DMP.from_dict(terms, len(gens) - 1, f.rep.dom), *gens)
def has_only_gens(f, *gens):
    """
    Return ``True`` if ``Poly(f, *gens)`` retains ground domain.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x, y, z
    >>> Poly(x*y + 1, x, y, z).has_only_gens(x, y)
    True
    >>> Poly(x*y + z, x, y, z).has_only_gens(x, y)
    False
    """
    f_gens = list(f.gens)
    indices = set()
    for gen in gens:
        try:
            indices.add(f_gens.index(gen))
        except ValueError:
            raise GeneratorsError(
                "%s doesn't have %s as generator" % (f, gen))
    # Any non-zero exponent outside the selected generators disqualifies f.
    for monom in f.monoms():
        if any(elt and i not in indices
               for i, elt in enumerate(monom)):
            return False
    return True
def to_ring(f):
    """
    Make the ground domain a ring.
    Examples
    ========
    >>> from sympy import Poly, QQ
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, domain=QQ).to_ring()
    Poly(x**2 + 1, x, domain='ZZ')
    """
    rep = f.rep
    if not hasattr(rep, 'to_ring'):  # pragma: no cover
        raise OperationNotSupported(f, 'to_ring')
    return f.per(rep.to_ring())
def to_field(f):
    """
    Make the ground domain a field.
    Examples
    ========
    >>> from sympy import Poly, ZZ
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x, domain=ZZ).to_field()
    Poly(x**2 + 1, x, domain='QQ')
    """
    rep = f.rep
    if not hasattr(rep, 'to_field'):  # pragma: no cover
        raise OperationNotSupported(f, 'to_field')
    return f.per(rep.to_field())
def to_exact(f):
    """
    Make the ground domain exact.
    Examples
    ========
    >>> from sympy import Poly, RR
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1.0, x, domain=RR).to_exact()
    Poly(x**2 + 1, x, domain='QQ')
    """
    rep = f.rep
    if not hasattr(rep, 'to_exact'):  # pragma: no cover
        raise OperationNotSupported(f, 'to_exact')
    return f.per(rep.to_exact())
def retract(f, field=None):
    """
    Recalculate the ground domain of a polynomial.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> f = Poly(x**2 + 1, x, domain='QQ[y]')
    >>> f
    Poly(x**2 + 1, x, domain='QQ[y]')
    >>> f.retract()
    Poly(x**2 + 1, x, domain='ZZ')
    >>> f.retract(field=True)
    Poly(x**2 + 1, x, domain='QQ')
    """
    # Re-infer the minimal domain from the actual coefficients;
    # ``field=True`` forces a field (e.g. QQ instead of ZZ).
    dom, rep = construct_domain(f.as_dict(zero=True), field=field)
    return f.from_dict(rep, f.gens, domain=dom)
def slice(f, x, m, n=None):
    """Take a continuous subsequence of terms of ``f``.

    Two-argument form ``slice(m, n)`` slices in the main generator;
    three-argument form ``slice(x, m, n)`` slices in generator ``x``.
    """
    if n is None:
        # Two-argument call: shift arguments and use the main generator.
        j, m, n = 0, x, m
    else:
        j = f._gen_to_level(x)
    m, n = int(m), int(n)
    if hasattr(f.rep, 'slice'):
        result = f.rep.slice(m, n, j)
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'slice')
    return f.per(result)
def coeffs(f, order=None):
    """
    Returns all non-zero coefficients from ``f`` in lex order.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**3 + 2*x + 3, x).coeffs()
    [1, 2, 3]
    See Also
    ========
    all_coeffs
    coeff_monomial
    nth
    """
    # Convert each domain element back to a SymPy expression.
    return [ f.rep.dom.to_sympy(c) for c in f.rep.coeffs(order=order) ]
def monoms(f, order=None):
    """
    Returns all non-zero monomials from ``f`` in lex order.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(x**2 + 2*x*y**2 + x*y + 3*y, x, y).monoms()
    [(2, 0), (1, 2), (1, 1), (0, 1)]
    See Also
    ========
    all_monoms
    """
    # Monomials are plain exponent tuples; no domain conversion needed.
    return f.rep.monoms(order=order)
def terms(f, order=None):
    """
    Returns all non-zero terms from ``f`` in lex order.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(x**2 + 2*x*y**2 + x*y + 3*y, x, y).terms()
    [((2, 0), 1), ((1, 2), 2), ((1, 1), 1), ((0, 1), 3)]
    See Also
    ========
    all_terms
    """
    # Each term is (exponent tuple, SymPy coefficient).
    return [ (m, f.rep.dom.to_sympy(c)) for m, c in f.rep.terms(order=order) ]
def all_coeffs(f):
    """
    Returns all coefficients from a univariate polynomial ``f``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**3 + 2*x - 1, x).all_coeffs()
    [1, 0, 2, -1]
    """
    # Includes zero coefficients, highest degree first.
    return [ f.rep.dom.to_sympy(c) for c in f.rep.all_coeffs() ]
def all_monoms(f):
    """
    Returns all monomials from a univariate polynomial ``f``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**3 + 2*x - 1, x).all_monoms()
    [(3,), (2,), (1,), (0,)]
    See Also
    ========
    all_terms
    """
    # Includes monomials with zero coefficients, highest degree first.
    return f.rep.all_monoms()
def all_terms(f):
    """
    Returns all terms from a univariate polynomial ``f``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**3 + 2*x - 1, x).all_terms()
    [((3,), 1), ((2,), 0), ((1,), 2), ((0,), -1)]
    """
    # Includes zero terms, highest degree first.
    return [ (m, f.rep.dom.to_sympy(c)) for m, c in f.rep.all_terms() ]
def termwise(f, func, *gens, **args):
    """
    Apply a function to all terms of ``f``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> def func((k,), coeff):
    ...     return coeff//10**(2-k)
    >>> Poly(x**2 + 20*x + 400).termwise(func)
    Poly(x**2 + 2*x + 4, x, domain='ZZ')
    """
    terms = {}
    for monom, coeff in f.terms():
        result = func(monom, coeff)
        if isinstance(result, tuple):
            # func may return a (monomial, coefficient) pair...
            monom, coeff = result
        else:
            # ...or just a new coefficient for the same monomial.
            coeff = result
        if coeff:
            # Zero coefficients are dropped silently.
            if monom not in terms:
                terms[monom] = coeff
            else:
                raise PolynomialError(
                    "%s monomial was generated twice" % monom)
    return f.from_dict(terms, *(gens or f.gens), **args)
def length(f):
    """
    Returns the number of non-zero terms in ``f``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 2*x - 1).length()
    3
    """
    # The dict form holds only non-zero terms, so its size is the length.
    return len(f.as_dict())
def as_dict(f, native=False, zero=False):
    """
    Switch to a ``dict`` representation.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(x**2 + 2*x*y**2 - y, x, y).as_dict()
    {(0, 1): -1, (1, 2): 2, (2, 0): 1}
    """
    # ``native`` keeps coefficients as ground-domain elements instead of
    # converting them to SymPy expressions.
    if native:
        return f.rep.to_dict(zero=zero)
    return f.rep.to_sympy_dict(zero=zero)
def as_list(f, native=False):
    """Switch to a ``list`` representation. """
    # ``native`` keeps coefficients as ground-domain elements.
    return f.rep.to_list() if native else f.rep.to_sympy_list()
def as_expr(f, *gens):
    """
    Convert a Poly instance to an Expr instance.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> f = Poly(x**2 + 2*x*y**2 - y, x, y)
    >>> f.as_expr()
    x**2 + 2*x*y**2 - y
    >>> f.as_expr({x: 5})
    10*y**2 - y + 25
    >>> f.as_expr(5, 6)
    379
    """
    if not gens:
        gens = f.gens
    elif len(gens) == 1 and isinstance(gens[0], dict):
        # Mapping form: substitute values for selected generators only.
        mapping = gens[0]
        gens = list(f.gens)
        for gen, value in mapping.iteritems():
            try:
                index = gens.index(gen)
            except ValueError:
                raise GeneratorsError(
                    "%s doesn't have %s as generator" % (f, gen))
            else:
                gens[index] = value
    # Positional form: each gen slot may be a symbol or a value.
    return basic_from_dict(f.rep.to_sympy_dict(), *gens)
def lift(f):
    """
    Convert algebraic coefficients to rationals.
    Examples
    ========
    >>> from sympy import Poly, I
    >>> from sympy.abc import x
    >>> Poly(x**2 + I*x + 1, x, extension=I).lift()
    Poly(x**4 + 3*x**2 + 1, x, domain='QQ')
    """
    rep = f.rep
    if not hasattr(rep, 'lift'):  # pragma: no cover
        raise OperationNotSupported(f, 'lift')
    return f.per(rep.lift())
def deflate(f):
    """
    Reduce degree of ``f`` by mapping ``x_i**m`` to ``y_i``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(x**6*y**2 + x**3 + 1, x, y).deflate()
    ((3, 2), Poly(x**2*y + x + 1, x, y, domain='ZZ'))
    """
    rep = f.rep
    if not hasattr(rep, 'deflate'):  # pragma: no cover
        raise OperationNotSupported(f, 'deflate')
    # ``J`` is the tuple of deflation exponents, one per generator.
    J, result = rep.deflate()
    return J, f.per(result)
def inject(f, front=False):
    """
    Inject ground domain generators into ``f``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> f = Poly(x**2*y + x*y**3 + x*y + 1, x)
    >>> f.inject()
    Poly(x**2*y + x*y**3 + x*y + 1, x, y, domain='ZZ')
    >>> f.inject(front=True)
    Poly(y**3*x + y*x**2 + y*x + 1, y, x, domain='ZZ')
    """
    dom = f.rep.dom
    if dom.is_Numerical:
        # Nothing to inject: the domain has no generators.
        return f
    elif not dom.is_Poly:
        raise DomainError("can't inject generators over %s" % dom)
    if hasattr(f.rep, 'inject'):
        result = f.rep.inject(front=front)
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'inject')
    # ``front`` controls whether domain generators lead or trail.
    if front:
        gens = dom.gens + f.gens
    else:
        gens = f.gens + dom.gens
    return f.new(result, *gens)
def eject(f, *gens):
    """
    Eject selected generators into the ground domain.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> f = Poly(x**2*y + x*y**3 + x*y + 1, x, y)
    >>> f.eject(x)
    Poly(x*y**3 + (x**2 + x)*y + 1, y, domain='ZZ[x]')
    >>> f.eject(y)
    Poly(y*x**2 + (y**3 + y)*x + 1, x, domain='ZZ[y]')
    """
    dom = f.rep.dom
    if not dom.is_Numerical:
        raise DomainError("can't eject generators over %s" % dom)
    n, k = len(f.gens), len(gens)
    # Only a contiguous prefix or suffix of generators can be ejected.
    if f.gens[:k] == gens:
        _gens, front = f.gens[k:], True
    elif f.gens[-k:] == gens:
        _gens, front = f.gens[:-k], False
    else:
        raise NotImplementedError(
            "can only eject front or back generators")
    # Extend the ground domain with the ejected generators.
    dom = dom.inject(*gens)
    if hasattr(f.rep, 'eject'):
        result = f.rep.eject(dom, front=front)
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'eject')
    return f.new(result, *_gens)
def terms_gcd(f):
    """
    Remove GCD of terms from the polynomial ``f``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(x**6*y**2 + x**3*y, x, y).terms_gcd()
    ((3, 1), Poly(x**3*y + 1, x, y, domain='ZZ'))
    """
    rep = f.rep
    if not hasattr(rep, 'terms_gcd'):  # pragma: no cover
        raise OperationNotSupported(f, 'terms_gcd')
    # ``J`` is the monomial GCD as a tuple of exponents.
    J, result = rep.terms_gcd()
    return J, f.per(result)
def add_ground(f, coeff):
    """
    Add an element of the ground domain to ``f``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x + 1).add_ground(2)
    Poly(x + 3, x, domain='ZZ')
    """
    rep = f.rep
    if not hasattr(rep, 'add_ground'):  # pragma: no cover
        raise OperationNotSupported(f, 'add_ground')
    return f.per(rep.add_ground(coeff))
def sub_ground(f, coeff):
    """
    Subtract an element of the ground domain from ``f``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x + 1).sub_ground(2)
    Poly(x - 1, x, domain='ZZ')
    """
    rep = f.rep
    if not hasattr(rep, 'sub_ground'):  # pragma: no cover
        raise OperationNotSupported(f, 'sub_ground')
    return f.per(rep.sub_ground(coeff))
def mul_ground(f, coeff):
    """
    Multiply ``f`` by an element of the ground domain.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x + 1).mul_ground(2)
    Poly(2*x + 2, x, domain='ZZ')
    """
    if hasattr(f.rep, 'mul_ground'):
        result = f.rep.mul_ground(coeff)
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'mul_ground')
    return f.per(result)
def quo_ground(f, coeff):
    """
    Quotient of ``f`` by an element of the ground domain.

    Over a ring the division truncates (see the second example).
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(2*x + 4).quo_ground(2)
    Poly(x + 2, x, domain='ZZ')
    >>> Poly(2*x + 3).quo_ground(2)
    Poly(x + 1, x, domain='ZZ')
    """
    if hasattr(f.rep, 'quo_ground'):
        result = f.rep.quo_ground(coeff)
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'quo_ground')
    return f.per(result)
def exquo_ground(f, coeff):
    """
    Exact quotient of ``f`` by an element of the ground domain.

    Raises :class:`ExactQuotientFailed` if the division is not exact.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(2*x + 4).exquo_ground(2)
    Poly(x + 2, x, domain='ZZ')
    >>> Poly(2*x + 3).exquo_ground(2)
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: 2 does not divide 3 in ZZ
    """
    if hasattr(f.rep, 'exquo_ground'):
        result = f.rep.exquo_ground(coeff)
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'exquo_ground')
    return f.per(result)
def abs(f):
    """
    Make all coefficients in ``f`` positive.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 - 1, x).abs()
    Poly(x**2 + 1, x, domain='ZZ')
    """
    rep = f.rep
    if not hasattr(rep, 'abs'):  # pragma: no cover
        raise OperationNotSupported(f, 'abs')
    return f.per(rep.abs())
def neg(f):
    """
    Negate all coefficients in ``f``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 - 1, x).neg()
    Poly(-x**2 + 1, x, domain='ZZ')
    >>> -Poly(x**2 - 1, x)
    Poly(-x**2 + 1, x, domain='ZZ')
    """
    rep = f.rep
    if not hasattr(rep, 'neg'):  # pragma: no cover
        raise OperationNotSupported(f, 'neg')
    return f.per(rep.neg())
def add(f, g):
    """
    Add two polynomials ``f`` and ``g``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x).add(Poly(x - 2, x))
    Poly(x**2 + x - 1, x, domain='ZZ')
    >>> Poly(x**2 + 1, x) + Poly(x - 2, x)
    Poly(x**2 + x - 1, x, domain='ZZ')
    """
    g = sympify(g)
    if not g.is_Poly:
        # A plain expression is treated as a ground-domain element.
        return f.add_ground(g)
    # Bring both operands to a common domain/generator set first.
    _, per, F, G = f._unify(g)
    if hasattr(f.rep, 'add'):
        result = F.add(G)
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'add')
    return per(result)
def sub(f, g):
    """
    Subtract two polynomials ``f`` and ``g``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x).sub(Poly(x - 2, x))
    Poly(x**2 - x + 3, x, domain='ZZ')
    >>> Poly(x**2 + 1, x) - Poly(x - 2, x)
    Poly(x**2 - x + 3, x, domain='ZZ')
    """
    g = sympify(g)
    if not g.is_Poly:
        # A plain expression is treated as a ground-domain element.
        return f.sub_ground(g)
    # Bring both operands to a common domain/generator set first.
    _, per, F, G = f._unify(g)
    if hasattr(f.rep, 'sub'):
        result = F.sub(G)
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'sub')
    return per(result)
def mul(f, g):
    """
    Multiply two polynomials ``f`` and ``g``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x).mul(Poly(x - 2, x))
    Poly(x**3 - 2*x**2 + x - 2, x, domain='ZZ')
    >>> Poly(x**2 + 1, x)*Poly(x - 2, x)
    Poly(x**3 - 2*x**2 + x - 2, x, domain='ZZ')
    """
    g = sympify(g)
    if not g.is_Poly:
        # A plain expression is treated as a ground-domain element.
        return f.mul_ground(g)
    # Bring both operands to a common domain/generator set first.
    _, per, F, G = f._unify(g)
    if hasattr(f.rep, 'mul'):
        result = F.mul(G)
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'mul')
    return per(result)
def sqr(f):
    """
    Square a polynomial ``f``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x - 2, x).sqr()
    Poly(x**2 - 4*x + 4, x, domain='ZZ')
    >>> Poly(x - 2, x)**2
    Poly(x**2 - 4*x + 4, x, domain='ZZ')
    """
    rep = f.rep
    if not hasattr(rep, 'sqr'):  # pragma: no cover
        raise OperationNotSupported(f, 'sqr')
    return f.per(rep.sqr())
def pow(f, n):
    """
    Raise ``f`` to a non-negative power ``n``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x - 2, x).pow(3)
    Poly(x**3 - 6*x**2 + 12*x - 8, x, domain='ZZ')
    >>> Poly(x - 2, x)**3
    Poly(x**3 - 6*x**2 + 12*x - 8, x, domain='ZZ')
    """
    n = int(n)
    rep = f.rep
    if not hasattr(rep, 'pow'):  # pragma: no cover
        raise OperationNotSupported(f, 'pow')
    return f.per(rep.pow(n))
def pdiv(f, g):
    """
    Polynomial pseudo-division of ``f`` by ``g``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x).pdiv(Poly(2*x - 4, x))
    (Poly(2*x + 4, x, domain='ZZ'), Poly(20, x, domain='ZZ'))
    """
    _, per, F, G = f._unify(g)
    if not hasattr(f.rep, 'pdiv'):  # pragma: no cover
        raise OperationNotSupported(f, 'pdiv')
    q, r = F.pdiv(G)
    return per(q), per(r)
def prem(f, g):
    """
    Polynomial pseudo-remainder of ``f`` by ``g``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x).prem(Poly(2*x - 4, x))
    Poly(20, x, domain='ZZ')
    """
    _, per, F, G = f._unify(g)
    if not hasattr(f.rep, 'prem'):  # pragma: no cover
        raise OperationNotSupported(f, 'prem')
    return per(F.prem(G))
def pquo(f, g):
    """
    Polynomial pseudo-quotient of ``f`` by ``g``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x).pquo(Poly(2*x - 4, x))
    Poly(2*x + 4, x, domain='ZZ')
    >>> Poly(x**2 - 1, x).pquo(Poly(2*x - 2, x))
    Poly(2*x + 2, x, domain='ZZ')
    """
    _, per, F, G = f._unify(g)
    if not hasattr(f.rep, 'pquo'):  # pragma: no cover
        raise OperationNotSupported(f, 'pquo')
    return per(F.pquo(G))
def pexquo(f, g):
    """
    Polynomial exact pseudo-quotient of ``f`` by ``g``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 - 1, x).pexquo(Poly(2*x - 2, x))
    Poly(2*x + 2, x, domain='ZZ')
    >>> Poly(x**2 + 1, x).pexquo(Poly(2*x - 4, x))
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
    """
    _, per, F, G = f._unify(g)
    if hasattr(f.rep, 'pexquo'):
        try:
            result = F.pexquo(G)
        except ExactQuotientFailed, exc:
            # Re-raise with the user-facing expressions, not the raw reps.
            raise exc.new(f.as_expr(), g.as_expr())
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'pexquo')
    return per(result)
def div(f, g, auto=True):
    """
    Polynomial division with remainder of ``f`` by ``g``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x).div(Poly(2*x - 4, x))
    (Poly(1/2*x + 1, x, domain='QQ'), Poly(5, x, domain='QQ'))
    >>> Poly(x**2 + 1, x).div(Poly(2*x - 4, x), auto=False)
    (Poly(0, x, domain='ZZ'), Poly(x**2 + 1, x, domain='ZZ'))
    """
    dom, per, F, G = f._unify(g)
    retract = False
    if auto and dom.has_Ring and not dom.has_Field:
        # Division over a ring can fail, so lift to the fraction field
        # and try to come back afterwards.
        F, G = F.to_field(), G.to_field()
        retract = True
    if hasattr(f.rep, 'div'):
        q, r = F.div(G)
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'div')
    if retract:
        # Retract to the original ring only if both results fit in it.
        try:
            Q, R = q.to_ring(), r.to_ring()
        except CoercionFailed:
            pass
        else:
            q, r = Q, R
    return per(q), per(r)
def rem(f, g, auto=True):
    """
    Computes the polynomial remainder of ``f`` by ``g``.
    Examples
    ========
    >>> from sympy import Poly, ZZ, QQ
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x).rem(Poly(2*x - 4, x))
    Poly(5, x, domain='ZZ')
    >>> Poly(x**2 + 1, x).rem(Poly(2*x - 4, x), auto=False)
    Poly(x**2 + 1, x, domain='ZZ')
    """
    dom, per, F, G = f._unify(g)
    retract = False
    if auto and dom.has_Ring and not dom.has_Field:
        # Lift to the fraction field so division is always possible.
        F, G = F.to_field(), G.to_field()
        retract = True
    if hasattr(f.rep, 'rem'):
        r = F.rem(G)
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'rem')
    if retract:
        # Come back to the ring if the remainder happens to fit.
        try:
            r = r.to_ring()
        except CoercionFailed:
            pass
    return per(r)
def quo(f, g, auto=True):
    """
    Computes polynomial quotient of ``f`` by ``g``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x).quo(Poly(2*x - 4, x))
    Poly(1/2*x + 1, x, domain='QQ')
    >>> Poly(x**2 - 1, x).quo(Poly(x - 1, x))
    Poly(x + 1, x, domain='ZZ')
    """
    dom, per, F, G = f._unify(g)
    retract = False
    if auto and dom.has_Ring and not dom.has_Field:
        # Lift to the fraction field so division is always possible.
        F, G = F.to_field(), G.to_field()
        retract = True
    if hasattr(f.rep, 'quo'):
        q = F.quo(G)
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'quo')
    if retract:
        # Come back to the ring if the quotient happens to fit.
        try:
            q = q.to_ring()
        except CoercionFailed:
            pass
    return per(q)
def exquo(f, g, auto=True):
    """
    Computes polynomial exact quotient of ``f`` by ``g``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 - 1, x).exquo(Poly(x - 1, x))
    Poly(x + 1, x, domain='ZZ')
    >>> Poly(x**2 + 1, x).exquo(Poly(2*x - 4, x))
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
    """
    dom, per, F, G = f._unify(g)
    retract = False
    if auto and dom.has_Ring and not dom.has_Field:
        # Lift to the fraction field so division is always possible.
        F, G = F.to_field(), G.to_field()
        retract = True
    if hasattr(f.rep, 'exquo'):
        try:
            q = F.exquo(G)
        except ExactQuotientFailed, exc:
            # Re-raise with the user-facing expressions, not the raw reps.
            raise exc.new(f.as_expr(), g.as_expr())
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'exquo')
    if retract:
        # Come back to the ring if the quotient happens to fit.
        try:
            q = q.to_ring()
        except CoercionFailed:
            pass
    return per(q)
def _gen_to_level(f, gen):
    """Returns level associated with the given generator.

    ``gen`` may be an integer index (negative indices count from the
    end) or a generator expression.
    """
    if isinstance(gen, int):
        length = len(f.gens)
        if -length <= gen < length:
            if gen < 0:
                # Negative index: count from the end, Python style.
                return length + gen
            else:
                return gen
        else:
            raise PolynomialError("-%s <= gen < %s expected, got %s" %
                                  (length, length, gen))
    else:
        try:
            return list(f.gens).index(sympify(gen))
        except ValueError:
            raise PolynomialError(
                "a valid generator expected, got %s" % gen)
def degree(f, gen=0):
    """
    Returns degree of ``f`` in ``x_j``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(x**2 + y*x + 1, x, y).degree()
    2
    >>> Poly(x**2 + y*x + y, x, y).degree(y)
    1
    """
    # Resolve the generator (index or expression) to its level first.
    j = f._gen_to_level(gen)
    if not hasattr(f.rep, 'degree'):  # pragma: no cover
        raise OperationNotSupported(f, 'degree')
    return f.rep.degree(j)
def degree_list(f):
    """
    Returns a list of degrees of ``f``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(x**2 + y*x + 1, x, y).degree_list()
    (2, 1)
    """
    rep = f.rep
    if not hasattr(rep, 'degree_list'):  # pragma: no cover
        raise OperationNotSupported(f, 'degree_list')
    return rep.degree_list()
def total_degree(f):
    """
    Returns the total degree of ``f``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(x**2 + y*x + 1, x, y).total_degree()
    2
    >>> Poly(x + y**5, x, y).total_degree()
    5
    """
    rep = f.rep
    if not hasattr(rep, 'total_degree'):  # pragma: no cover
        raise OperationNotSupported(f, 'total_degree')
    return rep.total_degree()
def homogeneous_order(f):
    """
    Returns the homogeneous order of ``f``.
    A homogeneous polynomial is a polynomial whose all monomials with
    non-zero coefficients have the same total degree. This degree is
    the homogeneous order of ``f``. If you only want to check if a
    polynomial is homogeneous, then use :func:`Poly.is_homogeneous`.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> f = Poly(x**5 + 2*x**3*y**2 + 9*x*y**4)
    >>> f.homogeneous_order()
    5
    """
    rep = f.rep
    if not hasattr(rep, 'homogeneous_order'):  # pragma: no cover
        raise OperationNotSupported(f, 'homogeneous_order')
    return rep.homogeneous_order()
def LC(f, order=None):
    """
    Returns the leading coefficient of ``f``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(4*x**3 + 2*x**2 + 3*x, x).LC()
    4
    """
    if order is not None:
        # With an explicit term order, the leading coefficient is the
        # first one in that ordering.
        return f.coeffs(order)[0]
    if hasattr(f.rep, 'LC'):
        result = f.rep.LC()
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'LC')
    return f.rep.dom.to_sympy(result)
def TC(f):
    """
    Returns the trailing coefficient of ``f``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**3 + 2*x**2 + 3*x, x).TC()
    0
    """
    rep = f.rep
    if not hasattr(rep, 'TC'):  # pragma: no cover
        raise OperationNotSupported(f, 'TC')
    return rep.dom.to_sympy(rep.TC())
def EC(f, order=None):
    """
    Returns the last non-zero coefficient of ``f``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**3 + 2*x**2 + 3*x, x).EC()
    3
    """
    if hasattr(f.rep, 'coeffs'):
        # The ending coefficient is the last one in the given ordering.
        return f.coeffs(order)[-1]
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'EC')
def coeff_monomial(f, monom):
    """
    Returns the coefficient of ``monom`` in ``f`` if there, else None.
    Examples
    ========
    >>> from sympy import Poly, exp
    >>> from sympy.abc import x, y
    >>> p = Poly(24*x*y*exp(8) + 23*x, x, y)
    >>> p.coeff_monomial(x)
    23
    >>> p.coeff_monomial(y)
    0
    >>> p.coeff_monomial(x*y)
    24*exp(8)
    Note that ``Expr.coeff()`` behaves differently, collecting terms
    if possible; the Poly must be converted to an Expr to use that
    method, however:
    >>> p.as_expr().coeff(x)
    24*y*exp(8) + 23
    >>> p.as_expr().coeff(y)
    24*x*exp(8)
    >>> p.as_expr().coeff(x*y)
    24*exp(8)
    See Also
    ========
    nth: more efficient query using exponents of the monomial's generators
    """
    # Normalize the monomial against f's generators, then query by exponents.
    return f.nth(*Monomial(monom, f.gens).exponents)
def nth(f, *N):
    """
    Returns the ``n``-th coefficient of ``f`` where ``N`` are the
    exponents of the generators in the term of interest.
    Examples
    ========
    >>> from sympy import Poly, sqrt
    >>> from sympy.abc import x, y
    >>> Poly(x**3 + 2*x**2 + 3*x, x).nth(2)
    2
    >>> Poly(x**3 + 2*x*y**2 + y**2, x, y).nth(1, 2)
    2
    >>> Poly(4*sqrt(x)*y)
    Poly(4*y*sqrt(x), y, sqrt(x), domain='ZZ')
    >>> _.nth(1, 1)
    4
    See Also
    ========
    coeff_monomial
    """
    if hasattr(f.rep, 'nth'):
        # Exponents must be plain ints for the low-level representation.
        result = f.rep.nth(*map(int, N))
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'nth')
    return f.rep.dom.to_sympy(result)
def coeff(f, x, n=1, right=False):
    """Deliberately unsupported; see the note below for alternatives. """
    # the semantics of coeff_monomial and Expr.coeff are different;
    # if someone is working with a Poly, they should be aware of the
    # differences and chose the method best suited for the query.
    # Alternatively, a pure-polys method could be written here but
    # at this time the ``right`` keyword would be ignored because Poly
    # doesn't work with non-commutatives.
    raise NotImplementedError(
        'Either convert to Expr with `as_expr` method '
        'to use Expr\'s coeff method or else use the '
        '`coeff_monomial` method of Polys.')
def LM(f, order=None):
    """
    Returns the leading monomial of ``f``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).LM()
    x**2*y**0
    """
    # First monomial in the given (default lex) ordering.
    return Monomial(f.monoms(order)[0], f.gens)
def EM(f, order=None):
    """
    Returns the last non-zero monomial of ``f``.
    Examples
    ========
    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).EM()
    x**0*y**1
    """
    # Last monomial in the given (default lex) ordering.
    return Monomial(f.monoms(order)[-1], f.gens)
def LT(f, order=None):
"""
Returns the leading term of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).LT()
(x**2*y**0, 4)
"""
monom, coeff = f.terms(order)[0]
return Monomial(monom, f.gens), coeff
def ET(f, order=None):
"""
Returns the last non-zero term of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).ET()
(x**0*y**1, 3)
"""
monom, coeff = f.terms(order)[-1]
return Monomial(monom, f.gens), coeff
    def max_norm(f):
        """
        Returns maximum norm of ``f``, i.e. the largest absolute value
        among its coefficients.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(-x**2 + 2*x - 3, x).max_norm()
        3
        """
        if hasattr(f.rep, 'max_norm'):
            result = f.rep.max_norm()
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'max_norm')
        # Convert the ground-domain value back to a SymPy expression.
        return f.rep.dom.to_sympy(result)
    def l1_norm(f):
        """
        Returns l1 norm of ``f``, i.e. the sum of the absolute values
        of its coefficients.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(-x**2 + 2*x - 3, x).l1_norm()
        6
        """
        if hasattr(f.rep, 'l1_norm'):
            result = f.rep.l1_norm()
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'l1_norm')
        return f.rep.dom.to_sympy(result)
    def clear_denoms(f, convert=False):
        """
        Clear denominators, but keep the ground domain.
        Returns ``(coeff, g)`` where ``coeff*f == g`` up to denominators;
        with ``convert=True`` the result is moved to the associated ring.
        Examples
        ========
        >>> from sympy import Poly, S, QQ
        >>> from sympy.abc import x
        >>> f = Poly(x/2 + S(1)/3, x, domain=QQ)
        >>> f.clear_denoms()
        (6, Poly(3*x + 2, x, domain='QQ'))
        >>> f.clear_denoms(convert=True)
        (6, Poly(3*x + 2, x, domain='ZZ'))
        """
        # Over a non-field domain there are no denominators to clear.
        if not f.rep.dom.has_Field:
            return S.One, f
        dom = f.get_domain()
        if dom.has_assoc_Ring:
            # The common denominator lives in the ring associated with the
            # field (e.g. ZZ for QQ), so report it as a ring element.
            dom = f.rep.dom.get_ring()
        if hasattr(f.rep, 'clear_denoms'):
            coeff, result = f.rep.clear_denoms()
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'clear_denoms')
        coeff, f = dom.to_sympy(coeff), f.per(result)
        if not convert:
            return coeff, f
        else:
            return coeff, f.to_ring()
    def rat_clear_denoms(f, g):
        """
        Clear denominators in a rational function ``f/g``.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x, y
        >>> f = Poly(x**2/y + 1, x)
        >>> g = Poly(x**3 + y, x)
        >>> p, q = f.rat_clear_denoms(g)
        >>> p
        Poly(x**2 + y, x, domain='ZZ[y]')
        >>> q
        Poly(y*x**3 + y**2, x, domain='ZZ[y]')
        """
        # Put both polynomials over a common domain first.
        dom, per, f, g = f._unify(g)
        f = per(f)
        g = per(g)
        # Clearing denominators only makes sense for a field that has an
        # associated ring (e.g. QQ -> ZZ); otherwise return unchanged.
        if not (dom.has_Field and dom.has_assoc_Ring):
            return f, g
        a, f = f.clear_denoms(convert=True)
        b, g = g.clear_denoms(convert=True)
        # Cross-multiply so f/g is preserved: (f*b)/(g*a) == (f/a)/(g/b).
        f = f.mul_ground(b)
        g = g.mul_ground(a)
        return f, g
    def integrate(f, *specs, **args):
        """
        Computes indefinite integral of ``f``.
        Each spec is either a generator or a ``(gen, m)`` pair asking for
        the ``m``-th iterated integral in that generator.  With
        ``auto=True`` (default) a ring domain is first converted to its
        field of fractions so the result's coefficients are exact.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x, y
        >>> Poly(x**2 + 2*x + 1, x).integrate()
        Poly(1/3*x**3 + x**2 + x, x, domain='QQ')
        >>> Poly(x*y**2 + x, x, y).integrate((0, 1), (1, 0))
        Poly(1/2*x**2*y**2 + 1/2*x**2, x, y, domain='QQ')
        """
        if args.get('auto', True) and f.rep.dom.has_Ring:
            f = f.to_field()
        if hasattr(f.rep, 'integrate'):
            if not specs:
                # No specs: integrate once in the main generator.
                return f.per(f.rep.integrate(m=1))
            rep = f.rep
            for spec in specs:
                if type(spec) is tuple:
                    gen, m = spec
                else:
                    gen, m = spec, 1
                rep = rep.integrate(int(m), f._gen_to_level(gen))
            return f.per(rep)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'integrate')
    def diff(f, *specs):
        """
        Computes partial derivative of ``f``.
        Each spec is either a generator or a ``(gen, m)`` pair asking for
        the ``m``-th derivative with respect to that generator.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x, y
        >>> Poly(x**2 + 2*x + 1, x).diff()
        Poly(2*x + 2, x, domain='ZZ')
        >>> Poly(x*y**2 + x, x, y).diff((0, 0), (1, 1))
        Poly(2*x*y, x, y, domain='ZZ')
        """
        if hasattr(f.rep, 'diff'):
            if not specs:
                # No specs: differentiate once in the main generator.
                return f.per(f.rep.diff(m=1))
            rep = f.rep
            for spec in specs:
                if type(spec) is tuple:
                    gen, m = spec
                else:
                    gen, m = spec, 1
                rep = rep.diff(int(m), f._gen_to_level(gen))
            return f.per(rep)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'diff')
    def eval(f, x, a=None, auto=True):
        """
        Evaluate ``f`` at ``a`` in the given variable.
        ``x`` may also be a dict mapping generators to values, or a
        tuple/list of values matched against ``f.gens`` in order.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x, y, z
        >>> Poly(x**2 + 2*x + 3, x).eval(2)
        11
        >>> Poly(2*x*y + 3*x + y + 2, x, y).eval(x, 2)
        Poly(5*y + 8, y, domain='ZZ')
        >>> f = Poly(2*x*y + 3*x + y + 2*z, x, y, z)
        >>> f.eval({x: 2})
        Poly(5*y + 2*z + 6, y, z, domain='ZZ')
        >>> f.eval({x: 2, y: 5})
        Poly(2*z + 31, z, domain='ZZ')
        >>> f.eval({x: 2, y: 5, z: 7})
        45
        >>> f.eval((2, 5))
        Poly(2*z + 31, z, domain='ZZ')
        >>> f(2, 5)
        Poly(2*z + 31, z, domain='ZZ')
        """
        if a is None:
            if isinstance(x, dict):
                # Substitute one generator at a time, recursively.
                mapping = x
                # NOTE: .iteritems() is Python 2 only (would be .items() on
                # Python 3); consistent with the rest of this file.
                for gen, value in mapping.iteritems():
                    f = f.eval(gen, value)
                return f
            elif isinstance(x, (tuple, list)):
                # Positional values are matched against f.gens in order.
                values = x
                if len(values) > len(f.gens):
                    raise ValueError("too many values provided")
                for gen, value in zip(f.gens, values):
                    f = f.eval(gen, value)
                return f
            else:
                # Single argument: evaluate the main generator (level 0).
                j, a = 0, x
        else:
            j = f._gen_to_level(x)
        if not hasattr(f.rep, 'eval'):  # pragma: no cover
            raise OperationNotSupported(f, 'eval')
        try:
            result = f.rep.eval(a, j)
        except CoercionFailed:
            if not auto:
                raise DomainError("can't evaluate at %s in %s" % (a, f.rep.dom))
            else:
                # The value doesn't fit the current domain: build a domain
                # containing it, unify, and retry the evaluation.
                a_domain, [a] = construct_domain([a])
                new_domain = f.get_domain().unify(a_domain, gens=f.gens)
                f = f.set_domain(new_domain)
                a = new_domain.convert(a, a_domain)
                result = f.rep.eval(a, j)
        # remove=j drops the evaluated generator from the result.
        return f.per(result, remove=j)
    def __call__(f, *values):
        """
        Evaluate ``f`` at the given values.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x, y, z
        >>> f = Poly(2*x*y + 3*x + y + 2*z, x, y, z)
        >>> f(2)
        Poly(5*y + 2*z + 6, y, z, domain='ZZ')
        >>> f(2, 5)
        Poly(2*z + 31, z, domain='ZZ')
        >>> f(2, 5, 7)
        45
        """
        # Thin wrapper: positional call is sequential evaluation.
        return f.eval(values)
    def half_gcdex(f, g, auto=True):
        """
        Half extended Euclidean algorithm of ``f`` and ``g``.
        Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
        >>> g = x**3 + x**2 - 4*x - 4
        >>> Poly(f).half_gcdex(Poly(g))
        (Poly(-1/5*x + 3/5, x, domain='QQ'), Poly(x + 1, x, domain='QQ'))
        """
        dom, per, F, G = f._unify(g)
        # The algorithm needs division, so promote rings to fields by
        # default (auto=True).
        if auto and dom.has_Ring:
            F, G = F.to_field(), G.to_field()
        if hasattr(f.rep, 'half_gcdex'):
            s, h = F.half_gcdex(G)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'half_gcdex')
        return per(s), per(h)
    def gcdex(f, g, auto=True):
        """
        Extended Euclidean algorithm of ``f`` and ``g``.
        Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
        >>> g = x**3 + x**2 - 4*x - 4
        >>> Poly(f).gcdex(Poly(g))
        (Poly(-1/5*x + 3/5, x, domain='QQ'),
        Poly(1/5*x**2 - 6/5*x + 2, x, domain='QQ'),
        Poly(x + 1, x, domain='QQ'))
        """
        dom, per, F, G = f._unify(g)
        # See half_gcdex: exact division requires a field domain.
        if auto and dom.has_Ring:
            F, G = F.to_field(), G.to_field()
        if hasattr(f.rep, 'gcdex'):
            s, t, h = F.gcdex(G)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'gcdex')
        return per(s), per(t), per(h)
    def invert(f, g, auto=True):
        """
        Invert ``f`` modulo ``g`` when possible.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(x**2 - 1, x).invert(Poly(2*x - 1, x))
        Poly(-4/3, x, domain='QQ')
        >>> Poly(x**2 - 1, x).invert(Poly(x - 1, x))
        Traceback (most recent call last):
        ...
        NotInvertible: zero divisor
        """
        dom, per, F, G = f._unify(g)
        # Inversion needs exact division; promote rings to fields.
        if auto and dom.has_Ring:
            F, G = F.to_field(), G.to_field()
        if hasattr(f.rep, 'invert'):
            result = F.invert(G)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'invert')
        return per(result)
    def revert(f, n):
        """Compute ``f**(-1)`` mod ``x**n``, i.e. the power series
        inverse of ``f`` truncated at degree ``n``. """
        if hasattr(f.rep, 'revert'):
            result = f.rep.revert(int(n))
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'revert')
        return f.per(result)
    def subresultants(f, g):
        """
        Computes the subresultant PRS (polynomial remainder sequence)
        of ``f`` and ``g``.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(x**2 + 1, x).subresultants(Poly(x**2 - 1, x))
        [Poly(x**2 + 1, x, domain='ZZ'),
        Poly(x**2 - 1, x, domain='ZZ'),
        Poly(-2, x, domain='ZZ')]
        """
        _, per, F, G = f._unify(g)
        if hasattr(f.rep, 'subresultants'):
            result = F.subresultants(G)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'subresultants')
        # map() returns a list on Python 2, matching the doctest above.
        return map(per, result)
    def resultant(f, g, includePRS=False):
        """
        Computes the resultant of ``f`` and ``g`` via PRS.
        If includePRS=True, it includes the subresultant PRS in the result.
        Because the PRS is used to calculate the resultant, this is more
        efficient than calling :func:`subresultants` separately.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> f = Poly(x**2 + 1, x)
        >>> f.resultant(Poly(x**2 - 1, x))
        4
        >>> f.resultant(Poly(x**2 - 1, x), includePRS=True)
        (4, [Poly(x**2 + 1, x, domain='ZZ'), Poly(x**2 - 1, x, domain='ZZ'),
        Poly(-2, x, domain='ZZ')])
        """
        _, per, F, G = f._unify(g)
        if hasattr(f.rep, 'resultant'):
            if includePRS:
                result, R = F.resultant(G, includePRS=includePRS)
            else:
                result = F.resultant(G)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'resultant')
        if includePRS:
            return (per(result, remove=0), map(per, R))
        # remove=0 drops the main generator: the resultant does not
        # depend on it.
        return per(result, remove=0)
    def discriminant(f):
        """
        Computes the discriminant of ``f``.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(x**2 + 2*x + 3, x).discriminant()
        -8
        """
        if hasattr(f.rep, 'discriminant'):
            result = f.rep.discriminant()
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'discriminant')
        # remove=0: the discriminant does not involve the main generator.
        return f.per(result, remove=0)
    def cofactors(f, g):
        """
        Returns the GCD of ``f`` and ``g`` and their cofactors.
        Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, and
        ``cff = quo(f, h)`` and ``cfg = quo(g, h)`` are, so called, cofactors
        of ``f`` and ``g``.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(x**2 - 1, x).cofactors(Poly(x**2 - 3*x + 2, x))
        (Poly(x - 1, x, domain='ZZ'),
        Poly(x + 1, x, domain='ZZ'),
        Poly(x - 2, x, domain='ZZ'))
        """
        _, per, F, G = f._unify(g)
        if hasattr(f.rep, 'cofactors'):
            h, cff, cfg = F.cofactors(G)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'cofactors')
        return per(h), per(cff), per(cfg)
def gcd(f, g):
"""
Returns the polynomial GCD of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).gcd(Poly(x**2 - 3*x + 2, x))
Poly(x - 1, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'gcd'):
result = F.gcd(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'gcd')
return per(result)
    def lcm(f, g):
        """
        Returns polynomial LCM of ``f`` and ``g``.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(x**2 - 1, x).lcm(Poly(x**2 - 3*x + 2, x))
        Poly(x**3 - 2*x**2 - x + 2, x, domain='ZZ')
        """
        # Unify domains first, then delegate to the low-level rep.
        _, per, F, G = f._unify(g)
        if hasattr(f.rep, 'lcm'):
            result = F.lcm(G)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'lcm')
        return per(result)
    def trunc(f, p):
        """
        Reduce ``f`` modulo a constant ``p``, using symmetric
        representation of the coefficients (in ``(-p/2, p/2]``).
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(2*x**3 + 3*x**2 + 5*x + 7, x).trunc(3)
        Poly(-x**3 - x + 1, x, domain='ZZ')
        """
        # Coerce p into the ground domain before reducing.
        p = f.rep.dom.convert(p)
        if hasattr(f.rep, 'trunc'):
            result = f.rep.trunc(p)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'trunc')
        return f.per(result)
    def monic(f, auto=True):
        """
        Divides all coefficients by ``LC(f)``.
        With ``auto=True`` (default) a ring domain is first converted to
        a field so the division is exact.
        Examples
        ========
        >>> from sympy import Poly, ZZ
        >>> from sympy.abc import x
        >>> Poly(3*x**2 + 6*x + 9, x, domain=ZZ).monic()
        Poly(x**2 + 2*x + 3, x, domain='QQ')
        >>> Poly(3*x**2 + 4*x + 2, x, domain=ZZ).monic()
        Poly(x**2 + 4/3*x + 2/3, x, domain='QQ')
        """
        if auto and f.rep.dom.has_Ring:
            f = f.to_field()
        if hasattr(f.rep, 'monic'):
            result = f.rep.monic()
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'monic')
        return f.per(result)
    def content(f):
        """
        Returns the GCD of polynomial coefficients.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(6*x**2 + 8*x + 12, x).content()
        2
        """
        if hasattr(f.rep, 'content'):
            result = f.rep.content()
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'content')
        # The content is a ground-domain element; convert back to SymPy.
        return f.rep.dom.to_sympy(result)
    def primitive(f):
        """
        Returns the content and a primitive form of ``f``, i.e.
        ``(c, g)`` with ``f == c*g`` and ``g.content() == 1``.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(2*x**2 + 8*x + 12, x).primitive()
        (2, Poly(x**2 + 4*x + 6, x, domain='ZZ'))
        """
        if hasattr(f.rep, 'primitive'):
            cont, result = f.rep.primitive()
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'primitive')
        return f.rep.dom.to_sympy(cont), f.per(result)
    def compose(f, g):
        """
        Computes the functional composition ``f(g)``.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(x**2 + x, x).compose(Poly(x - 1, x))
        Poly(x**2 - x, x, domain='ZZ')
        """
        _, per, F, G = f._unify(g)
        if hasattr(f.rep, 'compose'):
            result = F.compose(G)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'compose')
        return per(result)
    def decompose(f):
        """
        Computes a functional decomposition of ``f``: a list of
        polynomials whose composition (left to right) gives back ``f``.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(x**4 + 2*x**3 - x - 1, x, domain='ZZ').decompose()
        [Poly(x**2 - x - 1, x, domain='ZZ'), Poly(x**2 + x, x, domain='ZZ')]
        """
        if hasattr(f.rep, 'decompose'):
            result = f.rep.decompose()
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'decompose')
        # map() returns a list on Python 2, matching the doctest above.
        return map(f.per, result)
    def shift(f, a):
        """
        Efficiently compute Taylor shift ``f(x + a)``.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(x**2 - 2*x + 1, x).shift(2)
        Poly(x**2 + 2*x + 1, x, domain='ZZ')
        """
        if hasattr(f.rep, 'shift'):
            result = f.rep.shift(a)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'shift')
        return f.per(result)
    def sturm(f, auto=True):
        """
        Computes the Sturm sequence of ``f``, used for counting real
        roots in an interval.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(x**3 - 2*x**2 + x - 3, x).sturm()
        [Poly(x**3 - 2*x**2 + x - 3, x, domain='QQ'),
        Poly(3*x**2 - 4*x + 1, x, domain='QQ'),
        Poly(2/9*x + 25/9, x, domain='QQ'),
        Poly(-2079/4, x, domain='QQ')]
        """
        # The sequence involves exact division, hence the field promotion.
        if auto and f.rep.dom.has_Ring:
            f = f.to_field()
        if hasattr(f.rep, 'sturm'):
            result = f.rep.sturm()
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'sturm')
        return map(f.per, result)
    def gff_list(f):
        """
        Computes greatest factorial factorization of ``f``.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> f = x**5 + 2*x**4 - x**3 - 2*x**2
        >>> Poly(f).gff_list()
        [(Poly(x, x, domain='ZZ'), 1), (Poly(x + 2, x, domain='ZZ'), 4)]
        """
        if hasattr(f.rep, 'gff_list'):
            result = f.rep.gff_list()
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'gff_list')
        # Wrap each factor back into a Poly, keeping its multiplicity.
        return [ (f.per(g), k) for g, k in result ]
    def sqf_norm(f):
        """
        Computes square-free norm of ``f``.
        Returns ``s``, ``f``, ``r``, such that ``g(x) = f(x-sa)`` and
        ``r(x) = Norm(g(x))`` is a square-free polynomial over ``K``,
        where ``a`` is the algebraic extension of the ground domain.
        Examples
        ========
        >>> from sympy import Poly, sqrt
        >>> from sympy.abc import x
        >>> s, f, r = Poly(x**2 + 1, x, extension=[sqrt(3)]).sqf_norm()
        >>> s
        1
        >>> f
        Poly(x**2 - 2*sqrt(3)*x + 4, x, domain='QQ<sqrt(3)>')
        >>> r
        Poly(x**4 - 4*x**2 + 16, x, domain='QQ')
        """
        if hasattr(f.rep, 'sqf_norm'):
            s, g, r = f.rep.sqf_norm()
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'sqf_norm')
        return s, f.per(g), f.per(r)
    def sqf_part(f):
        """
        Computes square-free part of ``f``: the product of its distinct
        irreducible factors, each taken once.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(x**3 - 3*x - 2, x).sqf_part()
        Poly(x**2 - x - 2, x, domain='ZZ')
        """
        if hasattr(f.rep, 'sqf_part'):
            result = f.rep.sqf_part()
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'sqf_part')
        return f.per(result)
    def sqf_list(f, all=False):
        """
        Returns a list of square-free factors of ``f`` as
        ``(coeff, [(factor, multiplicity), ...])``.  With ``all=True``
        trivial factors of multiplicity one are kept in the list.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> f = 2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16
        >>> Poly(f).sqf_list()
        (2, [(Poly(x + 1, x, domain='ZZ'), 2),
        (Poly(x + 2, x, domain='ZZ'), 3)])
        >>> Poly(f).sqf_list(all=True)
        (2, [(Poly(1, x, domain='ZZ'), 1),
        (Poly(x + 1, x, domain='ZZ'), 2),
        (Poly(x + 2, x, domain='ZZ'), 3)])
        """
        if hasattr(f.rep, 'sqf_list'):
            coeff, factors = f.rep.sqf_list(all)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'sqf_list')
        return f.rep.dom.to_sympy(coeff), [ (f.per(g), k) for g, k in factors ]
    def sqf_list_include(f, all=False):
        """
        Returns a list of square-free factors of ``f``, with the leading
        coefficient included as a factor instead of reported separately.
        Examples
        ========
        >>> from sympy import Poly, expand
        >>> from sympy.abc import x
        >>> f = expand(2*(x + 1)**3*x**4)
        >>> f
        2*x**7 + 6*x**6 + 6*x**5 + 2*x**4
        >>> Poly(f).sqf_list_include()
        [(Poly(2, x, domain='ZZ'), 1),
        (Poly(x + 1, x, domain='ZZ'), 3),
        (Poly(x, x, domain='ZZ'), 4)]
        >>> Poly(f).sqf_list_include(all=True)
        [(Poly(2, x, domain='ZZ'), 1),
        (Poly(1, x, domain='ZZ'), 2),
        (Poly(x + 1, x, domain='ZZ'), 3),
        (Poly(x, x, domain='ZZ'), 4)]
        """
        if hasattr(f.rep, 'sqf_list_include'):
            factors = f.rep.sqf_list_include(all)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'sqf_list_include')
        return [ (f.per(g), k) for g, k in factors ]
    def factor_list(f):
        """
        Returns a list of irreducible factors of ``f`` as
        ``(coeff, [(factor, multiplicity), ...])``.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x, y
        >>> f = 2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y
        >>> Poly(f).factor_list()
        (2, [(Poly(x + y, x, y, domain='ZZ'), 1),
        (Poly(x**2 + 1, x, y, domain='ZZ'), 2)])
        """
        if hasattr(f.rep, 'factor_list'):
            try:
                coeff, factors = f.rep.factor_list()
            except DomainError:
                # Factorization unsupported over this domain: report f
                # itself as the single (trivial) factor.
                return S.One, [(f, 1)]
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'factor_list')
        return f.rep.dom.to_sympy(coeff), [ (f.per(g), k) for g, k in factors ]
    def factor_list_include(f):
        """
        Returns a list of irreducible factors of ``f``, with the leading
        coefficient folded into the first factor.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x, y
        >>> f = 2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y
        >>> Poly(f).factor_list_include()
        [(Poly(2*x + 2*y, x, y, domain='ZZ'), 1),
        (Poly(x**2 + 1, x, y, domain='ZZ'), 2)]
        """
        if hasattr(f.rep, 'factor_list_include'):
            try:
                factors = f.rep.factor_list_include()
            except DomainError:
                # Same graceful fallback as factor_list above.
                return [(f, 1)]
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'factor_list_include')
        return [ (f.per(g), k) for g, k in factors ]
    def intervals(f, all=False, eps=None, inf=None, sup=None, fast=False, sqf=False):
        """
        Compute isolating intervals for roots of ``f``.
        ``eps`` bounds the interval width; ``inf``/``sup`` restrict the
        search range; ``all=True`` also isolates complex roots (returned
        as rectangles); ``sqf=True`` assumes ``f`` is square-free so
        multiplicities are omitted from the result.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(x**2 - 3, x).intervals()
        [((-2, -1), 1), ((1, 2), 1)]
        >>> Poly(x**2 - 3, x).intervals(eps=1e-2)
        [((-26/15, -19/11), 1), ((19/11, 26/15), 1)]
        """
        # Normalize all numeric inputs to exact rationals.
        if eps is not None:
            eps = QQ.convert(eps)
            if eps <= 0:
                raise ValueError("'eps' must be a positive rational")
        if inf is not None:
            inf = QQ.convert(inf)
        if sup is not None:
            sup = QQ.convert(sup)
        if hasattr(f.rep, 'intervals'):
            result = f.rep.intervals(
                all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'intervals')
        # The two branches below differ only in whether each entry
        # carries a multiplicity (sqf=False) or not (sqf=True).
        if sqf:
            def _real(interval):
                s, t = interval
                return (QQ.to_sympy(s), QQ.to_sympy(t))
            if not all:
                return map(_real, result)
            def _complex(rectangle):
                (u, v), (s, t) = rectangle
                return (QQ.to_sympy(u) + I*QQ.to_sympy(v),
                        QQ.to_sympy(s) + I*QQ.to_sympy(t))
            real_part, complex_part = result
            return map(_real, real_part), map(_complex, complex_part)
        else:
            def _real(interval):
                (s, t), k = interval
                return ((QQ.to_sympy(s), QQ.to_sympy(t)), k)
            if not all:
                return map(_real, result)
            def _complex(rectangle):
                ((u, v), (s, t)), k = rectangle
                return ((QQ.to_sympy(u) + I*QQ.to_sympy(v),
                         QQ.to_sympy(s) + I*QQ.to_sympy(t)), k)
            real_part, complex_part = result
            return map(_real, real_part), map(_complex, complex_part)
    def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False):
        """
        Refine an isolating interval ``(s, t)`` of a root to the given
        precision (``eps``) or number of bisection ``steps``.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(x**2 - 3, x).refine_root(1, 2, eps=1e-2)
        (19/11, 26/15)
        """
        if check_sqf and not f.is_sqf:
            raise PolynomialError("only square-free polynomials supported")
        s, t = QQ.convert(s), QQ.convert(t)
        if eps is not None:
            eps = QQ.convert(eps)
            if eps <= 0:
                raise ValueError("'eps' must be a positive rational")
        if steps is not None:
            steps = int(steps)
        elif eps is None:
            # Neither bound given: do a single refinement step.
            steps = 1
        if hasattr(f.rep, 'refine_root'):
            # NOTE(review): local S shadows the sympy S singleton here;
            # harmless since S is not otherwise used in this method.
            S, T = f.rep.refine_root(s, t, eps=eps, steps=steps, fast=fast)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'refine_root')
        return QQ.to_sympy(S), QQ.to_sympy(T)
    def count_roots(f, inf=None, sup=None):
        """
        Return the number of roots of ``f`` in ``[inf, sup]`` interval.
        If either bound has a nonzero imaginary part the count is over
        the complex rectangle spanned by the two bounds.
        Examples
        ========
        >>> from sympy import Poly, I
        >>> from sympy.abc import x
        >>> Poly(x**4 - 4, x).count_roots(-3, 3)
        2
        >>> Poly(x**4 - 4, x).count_roots(0, 1 + 3*I)
        1
        """
        inf_real, sup_real = True, True
        if inf is not None:
            inf = sympify(inf)
            if inf is S.NegativeInfinity:
                # -oo is the same as "no lower bound".
                inf = None
            else:
                re, im = inf.as_real_imag()
                if not im:
                    inf = QQ.convert(inf)
                else:
                    # Complex bound: keep (re, im) as a rational pair.
                    inf, inf_real = map(QQ.convert, (re, im)), False
        if sup is not None:
            sup = sympify(sup)
            if sup is S.Infinity:
                # +oo is the same as "no upper bound".
                sup = None
            else:
                re, im = sup.as_real_imag()
                if not im:
                    sup = QQ.convert(sup)
                else:
                    sup, sup_real = map(QQ.convert, (re, im)), False
        if inf_real and sup_real:
            if hasattr(f.rep, 'count_real_roots'):
                count = f.rep.count_real_roots(inf=inf, sup=sup)
            else:  # pragma: no cover
                raise OperationNotSupported(f, 'count_real_roots')
        else:
            # Mixed real/complex bounds: lift real bounds onto the real
            # axis of the complex rectangle.
            if inf_real and inf is not None:
                inf = (inf, QQ.zero)
            if sup_real and sup is not None:
                sup = (sup, QQ.zero)
            if hasattr(f.rep, 'count_complex_roots'):
                count = f.rep.count_complex_roots(inf=inf, sup=sup)
            else:  # pragma: no cover
                raise OperationNotSupported(f, 'count_complex_roots')
        return Integer(count)
    def root(f, index, radicals=True):
        """
        Get an indexed root of a polynomial.
        Real roots come first (sorted), then complex ones; with
        ``radicals=True`` roots expressible in radicals are returned in
        closed form instead of as ``RootOf`` instances.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> f = Poly(2*x**3 - 7*x**2 + 4*x + 4)
        >>> f.root(0)
        -1/2
        >>> f.root(1)
        2
        >>> f.root(2)
        2
        >>> f.root(3)
        Traceback (most recent call last):
        ...
        IndexError: root index out of [-3, 2] range, got 3
        >>> Poly(x**5 + x + 1).root(0)
        RootOf(x**3 - x**2 + 1, 0)
        """
        return sympy.polys.rootoftools.RootOf(f, index, radicals=radicals)
    def real_roots(f, multiple=True, radicals=True):
        """
        Return a list of real roots with multiplicities.
        With ``multiple=False`` roots are grouped into
        ``(root, multiplicity)`` pairs instead of being repeated.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(2*x**3 - 7*x**2 + 4*x + 4).real_roots()
        [-1/2, 2, 2]
        >>> Poly(x**3 + x + 1).real_roots()
        [RootOf(x**3 + x + 1, 0)]
        """
        reals = sympy.polys.rootoftools.RootOf.real_roots(f, radicals=radicals)
        if multiple:
            return reals
        else:
            return group(reals, multiple=False)
    def all_roots(f, multiple=True, radicals=True):
        """
        Return a list of real and complex roots with multiplicities.
        With ``multiple=False`` roots are grouped into
        ``(root, multiplicity)`` pairs instead of being repeated.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(2*x**3 - 7*x**2 + 4*x + 4).all_roots()
        [-1/2, 2, 2]
        >>> Poly(x**3 + x + 1).all_roots()
        [RootOf(x**3 + x + 1, 0),
        RootOf(x**3 + x + 1, 1),
        RootOf(x**3 + x + 1, 2)]
        """
        roots = sympy.polys.rootoftools.RootOf.all_roots(f, radicals=radicals)
        if multiple:
            return roots
        else:
            return group(roots, multiple=False)
    def nroots(f, n=15, maxsteps=50, cleanup=True, error=False):
        """
        Compute numerical approximations of roots of ``f`` to ``n``
        digits of precision, using mpmath's polynomial root finder.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(x**2 - 3).nroots(n=15)
        [-1.73205080756888, 1.73205080756888]
        >>> Poly(x**2 - 3).nroots(n=30)
        [-1.73205080756887729352744634151, 1.73205080756887729352744634151]
        """
        if f.is_multivariate:
            raise MultivariatePolynomialError(
                "can't compute numerical roots of %s" % f)
        if f.degree() <= 0:
            return []
        # Evaluate coefficients numerically as (re, im) pairs for mpmath.
        coeffs = [ coeff.evalf(n=n).as_real_imag()
                   for coeff in f.all_coeffs() ]
        # mpmath precision is a global setting: save it, set it, and
        # restore it in the finally block below.
        dps = sympy.mpmath.mp.dps
        sympy.mpmath.mp.dps = n
        try:
            try:
                coeffs = [ sympy.mpmath.mpc(*coeff) for coeff in coeffs ]
            except TypeError:
                raise DomainError(
                    "numerical domain expected, got %s" % f.rep.dom)
            result = sympy.mpmath.polyroots(
                coeffs, maxsteps=maxsteps, cleanup=cleanup, error=error)
            # polyroots returns (roots, error) only when error=True; the
            # local name 'error' is reused to hold the error estimate.
            if error:
                roots, error = result
            else:
                roots, error = result, None
            roots = map(sympify, sorted(roots, key=lambda r: (r.real, r.imag)))
        finally:
            sympy.mpmath.mp.dps = dps
        if error is not None:
            return roots, sympify(error)
        else:
            return roots
def ground_roots(f):
"""
Compute roots of ``f`` by factorization in the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**6 - 4*x**4 + 4*x**3 - x**2).ground_roots()
{0: 2, 1: 2}
"""
if f.is_multivariate:
raise MultivariatePolynomialError(
"can't compute ground roots of %s" % f)
roots = {}
for factor, k in f.factor_list()[1]:
if factor.is_linear:
a, b = factor.all_coeffs()
roots[-b/a] = k
return roots
def nth_power_roots_poly(f, n):
"""
Construct a polynomial with n-th powers of roots of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(x**4 - x**2 + 1)
>>> f.nth_power_roots_poly(2)
Poly(x**4 - 2*x**3 + 3*x**2 - 2*x + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(3)
Poly(x**4 + 2*x**2 + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(4)
Poly(x**4 + 2*x**3 + 3*x**2 + 2*x + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(12)
Poly(x**4 - 4*x**3 + 6*x**2 - 4*x + 1, x, domain='ZZ')
"""
if f.is_multivariate:
raise MultivariatePolynomialError(
"must be a univariate polynomial")
N = sympify(n)
if N.is_Integer and N >= 1:
n = int(N)
else:
raise ValueError("'n' must an integer and n >= 1, got %s" % n)
x = f.gen
t = Dummy('t')
r = f.resultant(f.__class__.from_expr(x**n - t, x, t))
return r.replace(t, x)
    def cancel(f, g, include=False):
        """
        Cancel common factors in a rational function ``f/g``.
        With ``include=False`` the content factors are returned as a
        separate SymPy ratio; with ``include=True`` they are folded into
        the returned polynomials.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(2*x**2 - 2, x).cancel(Poly(x**2 - 2*x + 1, x))
        (1, Poly(2*x + 2, x, domain='ZZ'), Poly(x - 1, x, domain='ZZ'))
        >>> Poly(2*x**2 - 2, x).cancel(Poly(x**2 - 2*x + 1, x), include=True)
        (Poly(2*x + 2, x, domain='ZZ'), Poly(x - 1, x, domain='ZZ'))
        """
        dom, per, F, G = f._unify(g)
        if hasattr(F, 'cancel'):
            result = F.cancel(G, include=include)
        else:  # pragma: no cover
            raise OperationNotSupported(f, 'cancel')
        if not include:
            if dom.has_assoc_Ring:
                dom = dom.get_ring()
            cp, cq, p, q = result
            cp = dom.to_sympy(cp)
            cq = dom.to_sympy(cq)
            return cp/cq, per(p), per(q)
        else:
            return tuple(map(per, result))
    # The boolean properties below all delegate to the corresponding flag
    # on the low-level representation (f.rep).
    @property
    def is_zero(f):
        """
        Returns ``True`` if ``f`` is a zero polynomial.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(0, x).is_zero
        True
        >>> Poly(1, x).is_zero
        False
        """
        return f.rep.is_zero
    @property
    def is_one(f):
        """
        Returns ``True`` if ``f`` is a unit polynomial.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(0, x).is_one
        False
        >>> Poly(1, x).is_one
        True
        """
        return f.rep.is_one
    @property
    def is_sqf(f):
        """
        Returns ``True`` if ``f`` is a square-free polynomial.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(x**2 - 2*x + 1, x).is_sqf
        False
        >>> Poly(x**2 - 1, x).is_sqf
        True
        """
        return f.rep.is_sqf
    @property
    def is_monic(f):
        """
        Returns ``True`` if the leading coefficient of ``f`` is one.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(x + 2, x).is_monic
        True
        >>> Poly(2*x + 2, x).is_monic
        False
        """
        return f.rep.is_monic
    @property
    def is_primitive(f):
        """
        Returns ``True`` if GCD of the coefficients of ``f`` is one.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(2*x**2 + 6*x + 12, x).is_primitive
        False
        >>> Poly(x**2 + 3*x + 6, x).is_primitive
        True
        """
        return f.rep.is_primitive
    @property
    def is_ground(f):
        """
        Returns ``True`` if ``f`` is an element of the ground domain.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x, y
        >>> Poly(x, x).is_ground
        False
        >>> Poly(2, x).is_ground
        True
        >>> Poly(y, x).is_ground
        True
        """
        return f.rep.is_ground
    @property
    def is_linear(f):
        """
        Returns ``True`` if ``f`` is linear in all its variables.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x, y
        >>> Poly(x + y + 2, x, y).is_linear
        True
        >>> Poly(x*y + 2, x, y).is_linear
        False
        """
        return f.rep.is_linear
    @property
    def is_quadratic(f):
        """
        Returns ``True`` if ``f`` is quadratic in all its variables.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x, y
        >>> Poly(x*y + 2, x, y).is_quadratic
        True
        >>> Poly(x*y**2 + 2, x, y).is_quadratic
        False
        """
        return f.rep.is_quadratic
    @property
    def is_monomial(f):
        """
        Returns ``True`` if ``f`` is zero or has only one term.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(3*x**2, x).is_monomial
        True
        >>> Poly(3*x**2 + 1, x).is_monomial
        False
        """
        return f.rep.is_monomial
    @property
    def is_homogeneous(f):
        """
        Returns ``True`` if ``f`` is a homogeneous polynomial.
        A homogeneous polynomial is a polynomial whose all monomials with
        non-zero coefficients have the same total degree. If you want not
        only to check if a polynomial is homogeneous but also compute its
        homogeneous order, then use :func:`Poly.homogeneous_order`.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x, y
        >>> Poly(x**2 + x*y, x, y).is_homogeneous
        True
        >>> Poly(x**3 + x*y, x, y).is_homogeneous
        False
        """
        return f.rep.is_homogeneous
    @property
    def is_irreducible(f):
        """
        Returns ``True`` if ``f`` has no factors over its domain.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> Poly(x**2 + x + 1, x, modulus=2).is_irreducible
        True
        >>> Poly(x**2 + 1, x, modulus=2).is_irreducible
        False
        """
        return f.rep.is_irreducible
    @property
    def is_univariate(f):
        """
        Returns ``True`` if ``f`` is a univariate polynomial.
        Note this depends on the declared generators, not on which
        symbols actually occur (see the last two examples).
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x, y
        >>> Poly(x**2 + x + 1, x).is_univariate
        True
        >>> Poly(x*y**2 + x*y + 1, x, y).is_univariate
        False
        >>> Poly(x*y**2 + x*y + 1, x).is_univariate
        True
        >>> Poly(x**2 + x + 1, x, y).is_univariate
        False
        """
        return len(f.gens) == 1
    @property
    def is_multivariate(f):
        """
        Returns ``True`` if ``f`` is a multivariate polynomial.
        This is the exact negation of :attr:`is_univariate`.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x, y
        >>> Poly(x**2 + x + 1, x).is_multivariate
        False
        >>> Poly(x*y**2 + x*y + 1, x, y).is_multivariate
        True
        >>> Poly(x*y**2 + x*y + 1, x).is_multivariate
        False
        >>> Poly(x**2 + x + 1, x, y).is_multivariate
        True
        """
        return len(f.gens) != 1
    @property
    def is_cyclotomic(f):
        """
        Returns ``True`` if ``f`` is a cyclotomic polynomial.
        Examples
        ========
        >>> from sympy import Poly
        >>> from sympy.abc import x
        >>> f = x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1
        >>> Poly(f).is_cyclotomic
        False
        >>> g = x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1
        >>> Poly(g).is_cyclotomic
        True
        """
        return f.rep.is_cyclotomic
    def __abs__(f):
        """Return ``abs(f)``, delegating to :meth:`abs`."""
        return f.abs()
    def __neg__(f):
        """Return ``-f``, delegating to :meth:`neg`."""
        return f.neg()
    # Arithmetic operators: each tries to coerce the other operand into a
    # Poly over the same generators; if that fails (PolynomialError), the
    # operation falls back to Expr arithmetic via as_expr().
    @_sympifyit('g', NotImplemented)
    def __add__(f, g):
        if not g.is_Poly:
            try:
                g = f.__class__(g, *f.gens)
            except PolynomialError:
                return f.as_expr() + g
        return f.add(g)
    @_sympifyit('g', NotImplemented)
    def __radd__(f, g):
        if not g.is_Poly:
            try:
                g = f.__class__(g, *f.gens)
            except PolynomialError:
                return g + f.as_expr()
        return g.add(f)
    @_sympifyit('g', NotImplemented)
    def __sub__(f, g):
        if not g.is_Poly:
            try:
                g = f.__class__(g, *f.gens)
            except PolynomialError:
                return f.as_expr() - g
        return f.sub(g)
    @_sympifyit('g', NotImplemented)
    def __rsub__(f, g):
        if not g.is_Poly:
            try:
                g = f.__class__(g, *f.gens)
            except PolynomialError:
                return g - f.as_expr()
        return g.sub(f)
    @_sympifyit('g', NotImplemented)
    def __mul__(f, g):
        if not g.is_Poly:
            try:
                g = f.__class__(g, *f.gens)
            except PolynomialError:
                return f.as_expr()*g
        return f.mul(g)
    @_sympifyit('g', NotImplemented)
    def __rmul__(f, g):
        if not g.is_Poly:
            try:
                g = f.__class__(g, *f.gens)
            except PolynomialError:
                return g*f.as_expr()
        return g.mul(f)
    @_sympifyit('n', NotImplemented)
    def __pow__(f, n):
        # Only non-negative integer powers stay in the Poly world.
        if n.is_Integer and n >= 0:
            return f.pow(n)
        else:
            return f.as_expr()**n
@_sympifyit('g', NotImplemented)
def __divmod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return f.div(g)
@_sympifyit('g', NotImplemented)
def __rdivmod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return g.div(f)
@_sympifyit('g', NotImplemented)
def __mod__(f, g):
    """Return ``f % g``, the polynomial remainder. """
    other = g if g.is_Poly else f.__class__(g, *f.gens)
    return f.rem(other)
@_sympifyit('g', NotImplemented)
def __rmod__(f, g):
    """Return ``g % f``, the polynomial remainder. """
    other = g if g.is_Poly else f.__class__(g, *f.gens)
    return other.rem(f)
@_sympifyit('g', NotImplemented)
def __floordiv__(f, g):
    """Return ``f // g``, the polynomial quotient. """
    other = g if g.is_Poly else f.__class__(g, *f.gens)
    return f.quo(other)
@_sympifyit('g', NotImplemented)
def __rfloordiv__(f, g):
    """Return ``g // f``, the polynomial quotient. """
    other = g if g.is_Poly else f.__class__(g, *f.gens)
    return other.quo(f)
@_sympifyit('g', NotImplemented)
def __div__(f, g):
    # True division falls back to Expr arithmetic: the quotient of two
    # polynomials is in general not a polynomial.
    return f.as_expr()/g.as_expr()
@_sympifyit('g', NotImplemented)
def __rdiv__(f, g):
    # Reflected true division; falls back to Expr arithmetic like ``__div__``.
    return g.as_expr()/f.as_expr()

# Python 3 operator names reuse the same implementations.
__truediv__ = __div__
__rtruediv__ = __rdiv__
@_sympifyit('g', NotImplemented)
def __eq__(f, g):
    if not g.is_Poly:
        try:
            # Try to interpret ``g`` as a Poly in the same generators/domain.
            g = f.__class__(g, f.gens, domain=f.get_domain())
        except (PolynomialError, DomainError, CoercionFailed):
            return False

    # Unlike ``PurePoly.__eq__``, the generators themselves must match.
    if f.gens != g.gens:
        return False

    if f.rep.dom != g.rep.dom:
        try:
            dom = f.rep.dom.unify(g.rep.dom, f.gens)
        except UnificationFailed:
            return False

        # Compare both representations over the unified domain.
        f = f.set_domain(dom)
        g = g.set_domain(dom)

    return f.rep == g.rep
@_sympifyit('g', NotImplemented)
def __ne__(f, g):
    # Explicit negation of ``__eq__`` (Python 2 does not derive ``!=``
    # from ``==`` automatically).
    return not f.__eq__(g)
def __nonzero__(f):
    # Python 2 truth value: a Poly is truthy iff it is not the zero polynomial.
    return not f.is_zero
def eq(f, g, strict=False):
    """Equality check; with ``strict=True`` require structural equality. """
    if strict:
        return f._strict_eq(sympify(g))
    return f.__eq__(g)
def ne(f, g, strict=False):
    """Inequality check; see :meth:`eq` for the meaning of ``strict``. """
    return not f.eq(g, strict=strict)
def _strict_eq(f, g):
    # Structural equality: same class, same generators, same representation.
    return isinstance(g, f.__class__) and f.gens == g.gens and f.rep.eq(g.rep, strict=True)
class PurePoly(Poly):
    """Class for representing pure polynomials. """

    def _hashable_content(self):
        """Allow SymPy to hash Poly instances. """
        # Unlike ``Poly``, the generators are *not* part of the identity:
        # only the representation matters for a pure polynomial.
        return (self.rep,)

    def __hash__(self):
        return super(PurePoly, self).__hash__()

    @property
    def free_symbols(self):
        """
        Free symbols of a polynomial.

        Examples
        ========

        >>> from sympy import PurePoly
        >>> from sympy.abc import x, y

        >>> PurePoly(x**2 + 1).free_symbols
        set()
        >>> PurePoly(x**2 + y).free_symbols
        set()
        >>> PurePoly(x**2 + y, x).free_symbols
        set([y])

        """
        # Generators are anonymous, so only symbols that live in the
        # coefficient domain are free.
        return self.free_symbols_in_domain

    @_sympifyit('g', NotImplemented)
    def __eq__(f, g):
        if not g.is_Poly:
            try:
                g = f.__class__(g, f.gens, domain=f.get_domain())
            except (PolynomialError, DomainError, CoercionFailed):
                return False

        # Only the *number* of generators must agree, not their names.
        if len(f.gens) != len(g.gens):
            return False

        if f.rep.dom != g.rep.dom:
            try:
                dom = f.rep.dom.unify(g.rep.dom, f.gens)
            except UnificationFailed:
                return False

            # Compare both representations over the unified domain.
            f = f.set_domain(dom)
            g = g.set_domain(dom)

        return f.rep == g.rep

    def _strict_eq(f, g):
        # Structural equality (generators intentionally ignored).
        return isinstance(g, f.__class__) and f.rep.eq(g.rep, strict=True)

    def _unify(f, g):
        g = sympify(g)

        if not g.is_Poly:
            try:
                # Coerce ``g`` into a ground-domain element of ``f``.
                return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g))
            except CoercionFailed:
                raise UnificationFailed("can't unify %s with %s" % (f, g))

        if len(f.gens) != len(g.gens):
            raise UnificationFailed("can't unify %s with %s" % (f, g))

        if not (isinstance(f.rep, DMP) and isinstance(g.rep, DMP)):
            raise UnificationFailed("can't unify %s with %s" % (f, g))

        cls = f.__class__
        # ``f``'s generators win; pure polynomials ignore generator names.
        gens = f.gens

        dom = f.rep.dom.unify(g.rep.dom, gens)

        F = f.rep.convert(dom)
        G = g.rep.convert(dom)

        def per(rep, dom=dom, gens=gens, remove=None):
            # Rebuild a polynomial (or ground element) from a raw rep,
            # optionally dropping one generator (e.g. after elimination).
            if remove is not None:
                gens = gens[:remove] + gens[remove + 1:]

                if not gens:
                    return dom.to_sympy(rep)

            return cls.new(rep, *gens)

        return dom, per, F, G
def poly_from_expr(expr, *gens, **args):
    """Construct a polynomial from an expression. """
    # Normalize generators/keyword flags into an Options object first.
    opt = options.build_options(gens, args)
    return _poly_from_expr(expr, opt)
def _poly_from_expr(expr, opt):
    """Construct a polynomial from an expression. """
    orig, expr = expr, sympify(expr)

    if not isinstance(expr, Basic):
        raise PolificationFailed(opt, orig, expr)
    elif expr.is_Poly:
        # Already a Poly: re-wrap under the requested options.
        poly = expr.__class__._from_poly(expr, opt)

        opt['gens'] = poly.gens
        opt['domain'] = poly.domain

        if opt.polys is None:
            opt['polys'] = True

        return poly, opt
    elif opt.expand:
        expr = expr.expand()

    try:
        # Extract a {monomial: coefficient} mapping and the generators.
        rep, opt = _dict_from_expr(expr, opt)
    except GeneratorsNeeded:
        raise PolificationFailed(opt, orig, expr)

    monoms, coeffs = zip(*rep.items())
    domain = opt.domain

    if domain is None:
        domain, coeffs = construct_domain(coeffs, opt=opt)
    else:
        # NOTE: relies on Python 2 ``map`` returning a list.
        coeffs = map(domain.from_sympy, coeffs)

    level = len(opt.gens) - 1

    poly = Poly.new(
        DMP.from_monoms_coeffs(monoms, coeffs, level, domain), *opt.gens)

    opt['domain'] = domain

    if opt.polys is None:
        opt['polys'] = False

    return poly, opt
def parallel_poly_from_expr(exprs, *gens, **args):
    """Construct polynomials from expressions. """
    # Normalize generators/keyword flags into an Options object first.
    opt = options.build_options(gens, args)
    return _parallel_poly_from_expr(exprs, opt)
def _parallel_poly_from_expr(exprs, opt):
    """Construct polynomials from expressions. """
    if len(exprs) == 2:
        f, g = exprs

        if isinstance(f, Poly) and isinstance(g, Poly):
            # Fast path: unify a pair of polynomials directly.
            f = f.__class__._from_poly(f, opt)
            g = g.__class__._from_poly(g, opt)

            f, g = f.unify(g)

            opt['gens'] = f.gens
            opt['domain'] = f.domain

            if opt.polys is None:
                opt['polys'] = True

            return [f, g], opt

    origs, exprs = list(exprs), []
    _exprs, _polys = [], []

    failed = False

    # Classify inputs: raw expressions vs. already-built polynomials.
    for i, expr in enumerate(origs):
        expr = sympify(expr)

        if isinstance(expr, Basic):
            if expr.is_Poly:
                _polys.append(i)
            else:
                _exprs.append(i)

                if opt.expand:
                    expr = expr.expand()
        else:
            failed = True

        exprs.append(expr)

    if failed:
        raise PolificationFailed(opt, origs, exprs, True)

    if _polys:
        # XXX: this is a temporary solution
        for i in _polys:
            exprs[i] = exprs[i].as_expr()

    try:
        reps, opt = _parallel_dict_from_expr(exprs, opt)
    except GeneratorsNeeded:
        raise PolificationFailed(opt, origs, exprs, True)

    coeffs_list, lengths = [], []

    all_monoms = []
    all_coeffs = []

    # Flatten all coefficients so a single common domain is constructed.
    for rep in reps:
        monoms, coeffs = zip(*rep.items())

        coeffs_list.extend(coeffs)
        all_monoms.append(monoms)

        lengths.append(len(coeffs))

    domain = opt.domain

    if domain is None:
        domain, coeffs_list = construct_domain(coeffs_list, opt=opt)
    else:
        # NOTE: relies on Python 2 ``map`` returning a list (sliced below).
        coeffs_list = map(domain.from_sympy, coeffs_list)

    # Split the flattened coefficient list back per-polynomial.
    for k in lengths:
        all_coeffs.append(coeffs_list[:k])
        coeffs_list = coeffs_list[k:]

    polys, level = [], len(opt.gens) - 1

    for monoms, coeffs in zip(all_monoms, all_coeffs):
        rep = DMP.from_monoms_coeffs(monoms, coeffs, level, domain)
        polys.append(Poly.new(rep, *opt.gens))

    opt['domain'] = domain

    if opt.polys is None:
        opt['polys'] = bool(_polys)

    return polys, opt
def _update_args(args, key, value):
"""Add a new ``(key, value)`` pair to arguments ``dict``. """
args = dict(args)
if key not in args:
args[key] = value
return args
def degree(f, *gens, **args):
    """
    Return the degree of ``f`` in the given variable.

    Examples
    ========

    >>> from sympy import degree
    >>> from sympy.abc import x, y

    >>> degree(x**2 + y*x + 1, gen=x)
    2
    >>> degree(x**2 + y*x + 1, gen=y)
    1

    """
    options.allowed_flags(args, ['gen', 'polys'])

    try:
        # Convert the input to ``Poly`` form first.
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('degree', 1, exc)

    return Integer(F.degree(opt.gen))
def degree_list(f, *gens, **args):
    """
    Return a list of degrees of ``f`` in all variables.

    Examples
    ========

    >>> from sympy import degree_list
    >>> from sympy.abc import x, y

    >>> degree_list(x**2 + y*x + 1)
    (2, 1)

    """
    options.allowed_flags(args, ['polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('degree_list', 1, exc)

    degrees = F.degree_list()

    # One SymPy Integer per generator, in generator order.
    return tuple(map(Integer, degrees))
def LC(f, *gens, **args):
    """
    Return the leading coefficient of ``f``.

    Examples
    ========

    >>> from sympy import LC
    >>> from sympy.abc import x, y

    >>> LC(4*x**2 + 2*x*y**2 + x*y + 3*y)
    4

    """
    options.allowed_flags(args, ['polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('LC', 1, exc)

    # The monomial order (``opt.order``) decides which term is leading.
    return F.LC(order=opt.order)
def LM(f, *gens, **args):
    """
    Return the leading monomial of ``f``.

    Examples
    ========

    >>> from sympy import LM
    >>> from sympy.abc import x, y

    >>> LM(4*x**2 + 2*x*y**2 + x*y + 3*y)
    x**2

    """
    options.allowed_flags(args, ['polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('LM', 1, exc)

    # The monomial order (``opt.order``) decides which term is leading.
    monom = F.LM(order=opt.order)

    return monom.as_expr()
def LT(f, *gens, **args):
    """
    Return the leading term of ``f``.

    Examples
    ========

    >>> from sympy import LT
    >>> from sympy.abc import x, y

    >>> LT(4*x**2 + 2*x*y**2 + x*y + 3*y)
    4*x**2

    """
    options.allowed_flags(args, ['polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('LT', 1, exc)

    # Leading term = leading coefficient times leading monomial.
    monom, coeff = F.LT(order=opt.order)

    return coeff*monom.as_expr()
def pdiv(f, g, *gens, **args):
    """
    Compute polynomial pseudo-division of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import pdiv
    >>> from sympy.abc import x

    >>> pdiv(x**2 + 1, 2*x - 4)
    (2*x + 4, 20)

    """
    options.allowed_flags(args, ['polys'])

    try:
        # Both inputs must be polified over a common set of generators.
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('pdiv', 2, exc)

    q, r = F.pdiv(G)

    if not opt.polys:
        return q.as_expr(), r.as_expr()
    else:
        return q, r
def prem(f, g, *gens, **args):
    """
    Compute polynomial pseudo-remainder of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import prem
    >>> from sympy.abc import x

    >>> prem(x**2 + 1, 2*x - 4)
    20

    """
    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('prem', 2, exc)

    r = F.prem(G)

    if not opt.polys:
        return r.as_expr()
    else:
        return r
def pquo(f, g, *gens, **args):
    """
    Compute polynomial pseudo-quotient of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import pquo
    >>> from sympy.abc import x

    >>> pquo(x**2 + 1, 2*x - 4)
    2*x + 4
    >>> pquo(x**2 - 1, 2*x - 1)
    2*x + 1

    """
    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('pquo', 2, exc)

    try:
        q = F.pquo(G)
    except ExactQuotientFailed:
        # Re-raise with the caller's original expressions, not the Polys.
        raise ExactQuotientFailed(f, g)

    if not opt.polys:
        return q.as_expr()
    else:
        return q
def pexquo(f, g, *gens, **args):
    """
    Compute polynomial exact pseudo-quotient of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import pexquo
    >>> from sympy.abc import x

    >>> pexquo(x**2 - 1, 2*x - 2)
    2*x + 2

    >>> pexquo(x**2 + 1, 2*x - 4)
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1

    """
    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('pexquo', 2, exc)

    # Unlike ``pquo``, a non-exact division propagates ExactQuotientFailed.
    q = F.pexquo(G)

    if not opt.polys:
        return q.as_expr()
    else:
        return q
def div(f, g, *gens, **args):
    """
    Compute polynomial division of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import div, ZZ, QQ
    >>> from sympy.abc import x

    >>> div(x**2 + 1, 2*x - 4, domain=ZZ)
    (0, x**2 + 1)
    >>> div(x**2 + 1, 2*x - 4, domain=QQ)
    (x/2 + 1, 5)

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('div', 2, exc)

    # ``auto`` controls automatic conversion to a field (e.g. ZZ -> QQ).
    q, r = F.div(G, auto=opt.auto)

    if not opt.polys:
        return q.as_expr(), r.as_expr()
    else:
        return q, r
def rem(f, g, *gens, **args):
    """
    Compute polynomial remainder of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import rem, ZZ, QQ
    >>> from sympy.abc import x

    >>> rem(x**2 + 1, 2*x - 4, domain=ZZ)
    x**2 + 1
    >>> rem(x**2 + 1, 2*x - 4, domain=QQ)
    5

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('rem', 2, exc)

    # ``auto`` controls automatic conversion to a field (e.g. ZZ -> QQ).
    r = F.rem(G, auto=opt.auto)

    if not opt.polys:
        return r.as_expr()
    else:
        return r
def quo(f, g, *gens, **args):
    """
    Compute polynomial quotient of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import quo
    >>> from sympy.abc import x

    >>> quo(x**2 + 1, 2*x - 4)
    x/2 + 1
    >>> quo(x**2 - 1, x - 1)
    x + 1

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('quo', 2, exc)

    q = F.quo(G, auto=opt.auto)

    if not opt.polys:
        return q.as_expr()
    else:
        return q
def exquo(f, g, *gens, **args):
    """
    Compute polynomial exact quotient of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import exquo
    >>> from sympy.abc import x

    >>> exquo(x**2 - 1, x - 1)
    x + 1

    >>> exquo(x**2 + 1, 2*x - 4)
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('exquo', 2, exc)

    # Raises ExactQuotientFailed if the division leaves a remainder.
    q = F.exquo(G, auto=opt.auto)

    if not opt.polys:
        return q.as_expr()
    else:
        return q
def half_gcdex(f, g, *gens, **args):
    """
    Half extended Euclidean algorithm of ``f`` and ``g``.

    Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.

    Examples
    ========

    >>> from sympy import half_gcdex
    >>> from sympy.abc import x

    >>> half_gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4)
    (-x/5 + 3/5, x + 1)

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed, exc:
        # Pure numbers: fall back to the ground domain's algorithm.
        domain, (a, b) = construct_domain(exc.exprs)

        try:
            s, h = domain.half_gcdex(a, b)
        except NotImplementedError:
            raise ComputationFailed('half_gcdex', 2, exc)
        else:
            return domain.to_sympy(s), domain.to_sympy(h)

    s, h = F.half_gcdex(G, auto=opt.auto)

    if not opt.polys:
        return s.as_expr(), h.as_expr()
    else:
        return s, h
def gcdex(f, g, *gens, **args):
    """
    Extended Euclidean algorithm of ``f`` and ``g``.

    Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.

    Examples
    ========

    >>> from sympy import gcdex
    >>> from sympy.abc import x

    >>> gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4)
    (-x/5 + 3/5, x**2/5 - 6*x/5 + 2, x + 1)

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed, exc:
        # Pure numbers: fall back to the ground domain's algorithm.
        domain, (a, b) = construct_domain(exc.exprs)

        try:
            s, t, h = domain.gcdex(a, b)
        except NotImplementedError:
            raise ComputationFailed('gcdex', 2, exc)
        else:
            return domain.to_sympy(s), domain.to_sympy(t), domain.to_sympy(h)

    s, t, h = F.gcdex(G, auto=opt.auto)

    if not opt.polys:
        return s.as_expr(), t.as_expr(), h.as_expr()
    else:
        return s, t, h
def invert(f, g, *gens, **args):
    """
    Invert ``f`` modulo ``g`` when possible.

    Examples
    ========

    >>> from sympy import invert
    >>> from sympy.abc import x

    >>> invert(x**2 - 1, 2*x - 1)
    -4/3

    >>> invert(x**2 - 1, x - 1)
    Traceback (most recent call last):
    ...
    NotInvertible: zero divisor

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed, exc:
        # Pure numbers: fall back to the ground domain's algorithm.
        domain, (a, b) = construct_domain(exc.exprs)

        try:
            return domain.to_sympy(domain.invert(a, b))
        except NotImplementedError:
            raise ComputationFailed('invert', 2, exc)

    h = F.invert(G, auto=opt.auto)

    if not opt.polys:
        return h.as_expr()
    else:
        return h
def subresultants(f, g, *gens, **args):
    """
    Compute subresultant PRS of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import subresultants
    >>> from sympy.abc import x

    >>> subresultants(x**2 + 1, x**2 - 1)
    [x**2 + 1, x**2 - 1, -2]

    """
    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('subresultants', 2, exc)

    result = F.subresultants(G)

    if not opt.polys:
        return [ r.as_expr() for r in result ]
    else:
        return result
def resultant(f, g, *gens, **args):
    """
    Compute resultant of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import resultant
    >>> from sympy.abc import x

    >>> resultant(x**2 + 1, x**2 - 1)
    4

    """
    # ``includePRS`` is popped before flag validation: it is handled here,
    # not by the generic options machinery.
    includePRS = args.pop('includePRS', False)
    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('resultant', 2, exc)

    if includePRS:
        # Also return the polynomial remainder sequence used internally.
        result, R = F.resultant(G, includePRS=includePRS)
    else:
        result = F.resultant(G)

    if not opt.polys:
        if includePRS:
            return result.as_expr(), [r.as_expr() for r in R]
        return result.as_expr()
    else:
        if includePRS:
            return result, R
        return result
def discriminant(f, *gens, **args):
    """
    Compute discriminant of ``f``.

    Examples
    ========

    >>> from sympy import discriminant
    >>> from sympy.abc import x

    >>> discriminant(x**2 + 2*x + 3)
    -8

    """
    options.allowed_flags(args, ['polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('discriminant', 1, exc)

    result = F.discriminant()

    if not opt.polys:
        return result.as_expr()
    else:
        return result
def cofactors(f, g, *gens, **args):
    """
    Compute GCD and cofactors of ``f`` and ``g``.

    Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, and
    ``cff = quo(f, h)`` and ``cfg = quo(g, h)`` are, so called, cofactors
    of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import cofactors
    >>> from sympy.abc import x

    >>> cofactors(x**2 - 1, x**2 - 3*x + 2)
    (x - 1, x + 1, x - 2)

    """
    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed, exc:
        # Pure numbers: fall back to the ground domain's algorithm.
        domain, (a, b) = construct_domain(exc.exprs)

        try:
            h, cff, cfg = domain.cofactors(a, b)
        except NotImplementedError:
            raise ComputationFailed('cofactors', 2, exc)
        else:
            return domain.to_sympy(h), domain.to_sympy(cff), domain.to_sympy(cfg)

    h, cff, cfg = F.cofactors(G)

    if not opt.polys:
        return h.as_expr(), cff.as_expr(), cfg.as_expr()
    else:
        return h, cff, cfg
def gcd_list(seq, *gens, **args):
    """
    Compute GCD of a list of polynomials.

    Examples
    ========

    >>> from sympy import gcd_list
    >>> from sympy.abc import x

    >>> gcd_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])
    x - 1

    """
    seq = sympify(seq)

    if not gens and not args:
        # No generators/options given: try the fast numeric path.
        domain, numbers = construct_domain(seq)

        if not numbers:
            return domain.zero
        elif domain.is_Numerical:
            result, numbers = numbers[0], numbers[1:]

            for number in numbers:
                result = domain.gcd(result, number)

                # gcd of 1 with anything stays 1 -- stop early.
                if domain.is_one(result):
                    break

            return domain.to_sympy(result)

    options.allowed_flags(args, ['polys'])

    try:
        polys, opt = parallel_poly_from_expr(seq, *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('gcd_list', len(seq), exc)

    # Empty sequence: the GCD is zero by convention.
    if not polys:
        if not opt.polys:
            return S.Zero
        else:
            return Poly(0, opt=opt)

    result, polys = polys[0], polys[1:]

    for poly in polys:
        result = result.gcd(poly)

        if result.is_one:
            break

    if not opt.polys:
        return result.as_expr()
    else:
        return result
def gcd(f, g=None, *gens, **args):
    """
    Compute GCD of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import gcd
    >>> from sympy.abc import x

    >>> gcd(x**2 - 1, x**2 - 3*x + 2)
    x - 1

    """
    if hasattr(f, '__iter__'):
        # Sequence form: ``gcd([f1, f2, ...], *gens)`` -- ``g`` is then
        # actually the first generator.
        if g is not None:
            gens = (g,) + gens

        return gcd_list(f, *gens, **args)
    elif g is None:
        raise TypeError("gcd() takes 2 arguments or a sequence of arguments")

    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed, exc:
        # Pure numbers: fall back to the ground domain's algorithm.
        domain, (a, b) = construct_domain(exc.exprs)

        try:
            return domain.to_sympy(domain.gcd(a, b))
        except NotImplementedError:
            raise ComputationFailed('gcd', 2, exc)

    result = F.gcd(G)

    if not opt.polys:
        return result.as_expr()
    else:
        return result
def lcm_list(seq, *gens, **args):
    """
    Compute LCM of a list of polynomials.

    Examples
    ========

    >>> from sympy import lcm_list
    >>> from sympy.abc import x

    >>> lcm_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])
    x**5 - x**4 - 2*x**3 - x**2 + x + 2

    """
    seq = sympify(seq)

    if not gens and not args:
        # No generators/options given: try the fast numeric path.
        domain, numbers = construct_domain(seq)

        if not numbers:
            return domain.one
        elif domain.is_Numerical:
            result, numbers = numbers[0], numbers[1:]

            for number in numbers:
                result = domain.lcm(result, number)

            return domain.to_sympy(result)

    options.allowed_flags(args, ['polys'])

    try:
        polys, opt = parallel_poly_from_expr(seq, *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('lcm_list', len(seq), exc)

    # Empty sequence: the LCM is one by convention.
    if not polys:
        if not opt.polys:
            return S.One
        else:
            return Poly(1, opt=opt)

    result, polys = polys[0], polys[1:]

    for poly in polys:
        result = result.lcm(poly)

    if not opt.polys:
        return result.as_expr()
    else:
        return result
def lcm(f, g=None, *gens, **args):
    """
    Compute LCM of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import lcm
    >>> from sympy.abc import x

    >>> lcm(x**2 - 1, x**2 - 3*x + 2)
    x**3 - 2*x**2 - x + 2

    """
    if hasattr(f, '__iter__'):
        # Sequence form: ``lcm([f1, f2, ...], *gens)`` -- ``g`` is then
        # actually the first generator.
        if g is not None:
            gens = (g,) + gens

        return lcm_list(f, *gens, **args)
    elif g is None:
        raise TypeError("lcm() takes 2 arguments or a sequence of arguments")

    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed, exc:
        # Pure numbers: fall back to the ground domain's algorithm.
        domain, (a, b) = construct_domain(exc.exprs)

        try:
            return domain.to_sympy(domain.lcm(a, b))
        except NotImplementedError:
            raise ComputationFailed('lcm', 2, exc)

    result = F.lcm(G)

    if not opt.polys:
        return result.as_expr()
    else:
        return result
def terms_gcd(f, *gens, **args):
    """
    Remove GCD of terms from ``f``.

    If the ``deep`` flag is True, then the arguments of ``f`` will have
    terms_gcd applied to them.

    If a fraction is factored out of ``f`` and ``f`` is an Add, then
    an unevaluated Mul will be returned so that automatic simplification
    does not redistribute it. The hint ``clear``, when set to False, can be
    used to prevent such factoring when all coefficients are not fractions.

    Examples
    ========

    >>> from sympy import terms_gcd, cos, pi
    >>> from sympy.abc import x, y

    >>> terms_gcd(x**6*y**2 + x**3*y, x, y)
    x**3*y*(x**3*y + 1)

    The default action of polys routines is to expand the expression
    given to them. terms_gcd follows this behavior:

    >>> terms_gcd((3+3*x)*(x+x*y))
    3*x*(x*y + x + y + 1)

    If this is not desired then the hint ``expand`` can be set to False.
    In this case the expression will be treated as though it were comprised
    of one or more terms:

    >>> terms_gcd((3+3*x)*(x+x*y), expand=False)
    (3*x + 3)*(x*y + x)

    In order to traverse factors of a Mul or the arguments of other
    functions, the ``deep`` hint can be used:

    >>> terms_gcd((3 + 3*x)*(x + x*y), expand=False, deep=True)
    3*x*(x + 1)*(y + 1)
    >>> terms_gcd(cos(x + x*y), deep=True)
    cos(x*(y + 1))

    Rationals are factored out by default:

    >>> terms_gcd(x + y/2)
    (2*x + y)/2

    Only the y-term had a coefficient that was a fraction; if one
    does not want to factor out the 1/2 in cases like this, the
    flag ``clear`` can be set to False:

    >>> terms_gcd(x + y/2, clear=False)
    x + y/2
    >>> terms_gcd(x*y/2 + y**2, clear=False)
    y*(x/2 + y)

    The ``clear`` flag is ignored if all coefficients are fractions:

    >>> terms_gcd(x/3 + y/2, clear=False)
    (2*x + 3*y)/6

    See Also
    ========
    sympy.core.exprtools.gcd_terms, sympy.core.exprtools.factor_terms

    """
    if not isinstance(f, Expr) or f.is_Atom:
        return sympify(f)

    if args.get('deep', False):
        # Recurse into subexpressions first, then retry on the rebuilt node.
        new = f.func(*[terms_gcd(a, *gens, **args) for a in f.args])
        args.pop('deep')
        args['expand'] = False
        return terms_gcd(new, *gens, **args)

    clear = args.pop('clear', True)
    options.allowed_flags(args, ['polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed, exc:
        # Nothing to polify -- return the input unchanged.
        return exc.expr

    J, f = F.terms_gcd()

    if opt.domain.has_Ring:
        if opt.domain.has_Field:
            # Clear denominators first so ``primitive`` works over a ring.
            denom, f = f.clear_denoms(convert=True)

        coeff, f = f.primitive()

        if opt.domain.has_Field:
            coeff /= denom
    else:
        coeff = S.One

    # Monomial GCD of the terms, as an expression.
    term = Mul(*[ x**j for x, j in zip(f.gens, J) ])

    if clear:
        return _keep_coeff(coeff, term*f.as_expr())
    # base the clearing on the form of the original expression, not
    # the (perhaps) Mul that we have now
    coeff, f = _keep_coeff(coeff, f.as_expr(), clear=False).as_coeff_Mul()
    return _keep_coeff(coeff, term*f, clear=False)
def trunc(f, p, *gens, **args):
    """
    Reduce ``f`` modulo a constant ``p``.

    Examples
    ========

    >>> from sympy import trunc
    >>> from sympy.abc import x

    >>> trunc(2*x**3 + 3*x**2 + 5*x + 7, 3)
    -x**3 - x + 1

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('trunc', 1, exc)

    result = F.trunc(sympify(p))

    if not opt.polys:
        return result.as_expr()
    else:
        return result
def monic(f, *gens, **args):
    """
    Divide all coefficients of ``f`` by ``LC(f)``.

    Examples
    ========

    >>> from sympy import monic
    >>> from sympy.abc import x

    >>> monic(3*x**2 + 4*x + 2)
    x**2 + 4*x/3 + 2/3

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('monic', 1, exc)

    result = F.monic(auto=opt.auto)

    if not opt.polys:
        return result.as_expr()
    else:
        return result
def content(f, *gens, **args):
    """
    Compute GCD of coefficients of ``f``.

    Examples
    ========

    >>> from sympy import content
    >>> from sympy.abc import x

    >>> content(6*x**2 + 8*x + 12)
    2

    """
    options.allowed_flags(args, ['polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('content', 1, exc)

    return F.content()
def primitive(f, *gens, **args):
    """
    Compute content and the primitive form of ``f``.

    Examples
    ========

    >>> from sympy.polys.polytools import primitive
    >>> from sympy.abc import x, y

    >>> primitive(6*x**2 + 8*x + 12)
    (2, 3*x**2 + 4*x + 6)

    >>> eq = (2 + 2*x)*x + 2

    Expansion is performed by default:

    >>> primitive(eq)
    (2, x**2 + x + 1)

    Set ``expand`` to False to shut this off. Note that the
    extraction will not be recursive; use the as_content_primitive method
    for recursive, non-destructive Rational extraction.

    >>> primitive(eq, expand=False)
    (1, x*(2*x + 2) + 2)

    >>> eq.as_content_primitive()
    (2, x*(x + 1) + 1)

    """
    options.allowed_flags(args, ['polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('primitive', 1, exc)

    cont, result = F.primitive()

    if not opt.polys:
        return cont, result.as_expr()
    else:
        return cont, result
def compose(f, g, *gens, **args):
    """
    Compute functional composition ``f(g)``.

    Examples
    ========

    >>> from sympy import compose
    >>> from sympy.abc import x

    >>> compose(x**2 + x, x - 1)
    x**2 - x

    """
    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('compose', 2, exc)

    result = F.compose(G)

    if not opt.polys:
        return result.as_expr()
    else:
        return result
def decompose(f, *gens, **args):
    """
    Compute functional decomposition of ``f``.

    Examples
    ========

    >>> from sympy import decompose
    >>> from sympy.abc import x

    >>> decompose(x**4 + 2*x**3 - x - 1)
    [x**2 - x - 1, x**2 + x]

    """
    options.allowed_flags(args, ['polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('decompose', 1, exc)

    result = F.decompose()

    if not opt.polys:
        return [ r.as_expr() for r in result ]
    else:
        return result
def sturm(f, *gens, **args):
    """
    Compute Sturm sequence of ``f``.

    Examples
    ========

    >>> from sympy import sturm
    >>> from sympy.abc import x

    >>> sturm(x**3 - 2*x**2 + x - 3)
    [x**3 - 2*x**2 + x - 3, 3*x**2 - 4*x + 1, 2*x/9 + 25/9, -2079/4]

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('sturm', 1, exc)

    result = F.sturm(auto=opt.auto)

    if not opt.polys:
        return [ r.as_expr() for r in result ]
    else:
        return result
def gff_list(f, *gens, **args):
    """
    Compute a list of greatest factorial factors of ``f``.

    Examples
    ========

    >>> from sympy import gff_list, ff
    >>> from sympy.abc import x

    >>> f = x**5 + 2*x**4 - x**3 - 2*x**2

    >>> gff_list(f)
    [(x, 1), (x + 2, 4)]

    >>> (ff(x, 1)*ff(x + 2, 4)).expand() == f
    True

    """
    options.allowed_flags(args, ['polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('gff_list', 1, exc)

    factors = F.gff_list()

    if not opt.polys:
        return [ (g.as_expr(), k) for g, k in factors ]
    else:
        return factors
def gff(f, *gens, **args):
    """Compute greatest factorial factorization of ``f``. """
    # Placeholder: only the list form (:func:`gff_list`) is implemented.
    raise NotImplementedError('symbolic falling factorial')
def sqf_norm(f, *gens, **args):
    """
    Compute square-free norm of ``f``.

    Returns ``s``, ``f``, ``r``, such that ``g(x) = f(x-sa)`` and
    ``r(x) = Norm(g(x))`` is a square-free polynomial over ``K``,
    where ``a`` is the algebraic extension of the ground domain.

    Examples
    ========

    >>> from sympy import sqf_norm, sqrt
    >>> from sympy.abc import x

    >>> sqf_norm(x**2 + 1, extension=[sqrt(3)])
    (1, x**2 - 2*sqrt(3)*x + 4, x**4 - 4*x**2 + 16)

    """
    options.allowed_flags(args, ['polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('sqf_norm', 1, exc)

    s, g, r = F.sqf_norm()

    if not opt.polys:
        return Integer(s), g.as_expr(), r.as_expr()
    else:
        return Integer(s), g, r
def sqf_part(f, *gens, **args):
    """
    Compute square-free part of ``f``.

    Examples
    ========

    >>> from sympy import sqf_part
    >>> from sympy.abc import x

    >>> sqf_part(x**3 - 3*x - 2)
    x**2 - x - 2

    """
    options.allowed_flags(args, ['polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed, exc:
        raise ComputationFailed('sqf_part', 1, exc)

    result = F.sqf_part()

    if not opt.polys:
        return result.as_expr()
    else:
        return result
def _sorted_factors(factors, method):
"""Sort a list of ``(expr, exp)`` pairs. """
if method == 'sqf':
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (exp, len(rep), rep)
else:
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (len(rep), exp, rep)
return sorted(factors, key=key)
def _factors_product(factors):
    """Multiply a list of ``(expr, exp)`` pairs. """
    powers = [ base.as_expr()**exp for base, exp in factors ]
    return Mul(*powers)
def _symbolic_factor_list(expr, opt, method):
    """Helper function for :func:`_symbolic_factor`. """
    coeff, factors = S.One, []

    for arg in Mul.make_args(expr):
        if arg.is_Number:
            coeff *= arg
            continue
        elif arg.is_Pow:
            base, exp = arg.args

            if base.is_Number:
                factors.append((base, exp))
                continue
        else:
            base, exp = arg, S.One

        try:
            poly, _ = _poly_from_expr(base, opt)
        except PolificationFailed, exc:
            # Not polifiable (e.g. a function): keep as an opaque factor.
            factors.append((exc.expr, exp))
        else:
            # Dispatch to Poly.factor_list / Poly.sqf_list by name.
            func = getattr(poly, method + '_list')
            _coeff, _factors = func()

            if _coeff is not S.One:
                if exp.is_Integer:
                    coeff *= _coeff**exp
                elif _coeff.is_positive:
                    factors.append((_coeff, exp))
                else:
                    # Sign/coefficient of unknown power: defer; ``None``
                    # marks it for special handling below.
                    _factors.append((_coeff, None))

            if exp is S.One:
                factors.extend(_factors)
            elif exp.is_integer or len(_factors) == 1:
                factors.extend([ (f, k*exp) for f, k in _factors ])
            else:
                # Non-integer exponent: only provably positive factors may
                # be distributed; the rest stay grouped under one power.
                other = []

                for f, k in _factors:
                    if f.as_expr().is_positive:
                        factors.append((f, k*exp))
                    elif k is not None:
                        other.append((f, k))
                    else:
                        other.append((f, S.One))

                if len(other) == 1:
                    f, k = other[0]
                    factors.append((f, k*exp))
                else:
                    factors.append((_factors_product(other), exp))

    return coeff, factors
def _symbolic_factor(expr, opt, method):
    """Helper function for :func:`_factor`. """
    if isinstance(expr, Expr) and not expr.is_Relational:
        coeff, factors = _symbolic_factor_list(together(expr), opt, method)
        return _keep_coeff(coeff, _factors_product(factors))
    if hasattr(expr, 'args'):
        # Non-Expr (or relational) node: recurse into its arguments.
        mapped = [ _symbolic_factor(arg, opt, method) for arg in expr.args ]
        return expr.func(*mapped)
    if hasattr(expr, '__iter__'):
        # Plain container: map over the elements, preserving the type.
        mapped = [ _symbolic_factor(arg, opt, method) for arg in expr ]
        return expr.__class__(mapped)
    return expr
def _generic_factor_list(expr, gens, args, method):
    """Helper function for :func:`sqf_list` and :func:`factor_list`. """
    options.allowed_flags(args, ['frac', 'polys'])
    opt = options.build_options(gens, args)

    expr = sympify(expr)

    if isinstance(expr, Expr) and not expr.is_Relational:
        # Factor numerator and denominator separately.
        numer, denom = together(expr).as_numer_denom()

        cp, fp = _symbolic_factor_list(numer, opt, method)
        cq, fq = _symbolic_factor_list(denom, opt, method)

        # A non-trivial denominator is only allowed with ``frac=True``.
        if fq and not opt.frac:
            raise PolynomialError("a polynomial expected, got %s" % expr)

        _opt = opt.clone(dict(expand=True))

        # Ensure every factor ends up as a Poly.
        for factors in (fp, fq):
            for i, (f, k) in enumerate(factors):
                if not f.is_Poly:
                    f, _ = _poly_from_expr(f, _opt)
                    factors[i] = (f, k)

        fp = _sorted_factors(fp, method)
        fq = _sorted_factors(fq, method)

        if not opt.polys:
            fp = [ (f.as_expr(), k) for f, k in fp ]
            fq = [ (f.as_expr(), k) for f, k in fq ]

        coeff = cp/cq

        if not opt.frac:
            return coeff, fp
        else:
            return coeff, fp, fq
    else:
        raise PolynomialError("a polynomial expected, got %s" % expr)
def _generic_factor(expr, gens, args, method):
    """Shared driver behind :func:`sqf` and :func:`factor`."""
    options.allowed_flags(args, [])
    opt = options.build_options(gens, args)
    expr = sympify(expr)
    return _symbolic_factor(expr, opt, method)
def sqf_list(f, *gens, **args):
    """
    Return the square-free decomposition of ``f`` as ``(coeff, factors)``,
    where ``factors`` is a list of ``(base, multiplicity)`` pairs.

    Examples
    ========

    >>> from sympy import sqf_list
    >>> from sympy.abc import x

    >>> sqf_list(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
    (2, [(x + 1, 2), (x + 2, 3)])

    """
    return _generic_factor_list(f, gens, args, method='sqf')
def sqf(f, *gens, **args):
    """
    Return the square-free factorization of ``f`` as an expression.

    Examples
    ========

    >>> from sympy import sqf
    >>> from sympy.abc import x

    >>> sqf(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
    2*(x + 1)**2*(x + 2)**3

    """
    return _generic_factor(f, gens, args, method='sqf')
def factor_list(f, *gens, **args):
    """
    Return the irreducible factorization of ``f`` as ``(coeff, factors)``,
    where ``factors`` is a list of ``(base, multiplicity)`` pairs.

    Examples
    ========

    >>> from sympy import factor_list
    >>> from sympy.abc import x, y

    >>> factor_list(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
    (2, [(x + y, 1), (x**2 + 1, 2)])

    """
    return _generic_factor_list(f, gens, args, method='factor')
def factor(f, *gens, **args):
    """
    Compute the factorization of ``f`` into irreducibles. (Use factorint to
    factor an integer.)

    There are two modes implemented: symbolic and formal. If ``f`` is not an
    instance of :class:`Poly` and generators are not specified, then the
    former mode is used. Otherwise, the formal mode is used.

    In symbolic mode, :func:`factor` will traverse the expression tree and
    factor its components without any prior expansion, unless an instance
    of :class:`Add` is encountered (in this case formal factorization is
    used). This way :func:`factor` can handle large or symbolic exponents.

    By default, the factorization is computed over the rationals. To factor
    over other domain, e.g. an algebraic or finite field, use appropriate
    options: ``extension``, ``modulus`` or ``domain``.

    Examples
    ========

    >>> from sympy import factor, sqrt
    >>> from sympy.abc import x, y

    >>> factor(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
    2*(x + y)*(x**2 + 1)**2

    >>> factor(x**2 + 1)
    x**2 + 1
    >>> factor(x**2 + 1, modulus=2)
    (x + 1)**2
    >>> factor(x**2 + 1, gaussian=True)
    (x - I)*(x + I)

    >>> factor(x**2 - 2, extension=sqrt(2))
    (x - sqrt(2))*(x + sqrt(2))

    >>> factor((x**2 - 1)/(x**2 + 4*x + 4))
    (x - 1)*(x + 1)/(x + 2)**2
    >>> factor((x**2 + 4*x + 4)**10000000*(x**2 + 1))
    (x + 2)**20000000*(x**2 + 1)

    """
    try:
        return _generic_factor(f, gens, args, method='factor')
    # 'except E as e' (not the Python-2-only 'except E, e') is valid on
    # Python 2.6+ and required on Python 3.
    except PolynomialError as msg:
        if not f.is_commutative:
            # Non-commutative expressions are handled by a dedicated routine.
            from sympy.core.exprtools import factor_nc
            return factor_nc(f)
        else:
            raise PolynomialError(msg)
def intervals(F, all=False, eps=None, inf=None, sup=None, strict=False, fast=False, sqf=False):
    """
    Compute isolating intervals for roots of ``f``.

    ``F`` may also be a list of univariate polynomials, in which case the
    real roots of the whole collection are isolated together.

    Examples
    ========

    >>> from sympy import intervals
    >>> from sympy.abc import x

    >>> intervals(x**2 - 3)
    [((-2, -1), 1), ((1, 2), 1)]
    >>> intervals(x**2 - 3, eps=1e-2)
    [((-26/15, -19/11), 1), ((19/11, 26/15), 1)]

    """
    if not hasattr(F, '__iter__'):
        try:
            F = Poly(F)
        except GeneratorsNeeded:
            # A constant has no roots to isolate.
            return []
        return F.intervals(all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)
    else:
        # List input: isolate real roots of all polynomials simultaneously.
        polys, opt = parallel_poly_from_expr(F, domain='QQ')
        if len(opt.gens) > 1:
            raise MultivariatePolynomialError
        # Work on the low-level dense representations.
        for i, poly in enumerate(polys):
            polys[i] = poly.rep.rep
        # Convert the numeric bounds into the working domain (QQ).
        if eps is not None:
            eps = opt.domain.convert(eps)
            if eps <= 0:
                raise ValueError("'eps' must be a positive rational")
        if inf is not None:
            inf = opt.domain.convert(inf)
        if sup is not None:
            sup = opt.domain.convert(sup)
        intervals = dup_isolate_real_roots_list(polys, opt.domain,
            eps=eps, inf=inf, sup=sup, strict=strict, fast=fast)
        result = []
        # Convert interval endpoints back to SymPy objects for the caller.
        for (s, t), indices in intervals:
            s, t = opt.domain.to_sympy(s), opt.domain.to_sympy(t)
            result.append(((s, t), indices))
        return result
def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False):
    """
    Refine the isolating interval ``(s, t)`` of a root of ``f`` to the
    requested precision.

    Examples
    ========

    >>> from sympy import refine_root
    >>> from sympy.abc import x

    >>> refine_root(x**2 - 3, 1, 2, eps=1e-2)
    (19/11, 26/15)

    """
    try:
        poly = Poly(f)
    except GeneratorsNeeded:
        raise PolynomialError(
            "can't refine a root of %s, not a polynomial" % f)
    return poly.refine_root(s, t, eps=eps, steps=steps, fast=fast, check_sqf=check_sqf)
def count_roots(f, inf=None, sup=None):
    """
    Return the number of roots of ``f`` in the ``[inf, sup]`` interval.

    If either bound is complex, the roots inside the complex rectangle
    with corners ``inf`` and ``sup`` are counted instead.

    Examples
    ========

    >>> from sympy import count_roots, I
    >>> from sympy.abc import x

    >>> count_roots(x**4 - 4, -3, 3)
    2
    >>> count_roots(x**4 - 4, 0, 1 + 3*I)
    1

    """
    try:
        poly = Poly(f, greedy=False)
    except GeneratorsNeeded:
        raise PolynomialError("can't count roots of %s, not a polynomial" % f)
    return poly.count_roots(inf=inf, sup=sup)
def real_roots(f, multiple=True):
    """
    Return the real roots of ``f``, repeated according to multiplicity.

    Examples
    ========

    >>> from sympy import real_roots
    >>> from sympy.abc import x

    >>> real_roots(2*x**3 - 7*x**2 + 4*x + 4)
    [-1/2, 2, 2]

    """
    try:
        poly = Poly(f, greedy=False)
    except GeneratorsNeeded:
        raise PolynomialError(
            "can't compute real roots of %s, not a polynomial" % f)
    return poly.real_roots(multiple=multiple)
def nroots(f, n=15, maxsteps=50, cleanup=True, error=False):
    """
    Compute numerical approximations of the roots of ``f`` to ``n`` digits.

    Examples
    ========

    >>> from sympy import nroots
    >>> from sympy.abc import x

    >>> nroots(x**2 - 3, n=15)
    [-1.73205080756888, 1.73205080756888]
    >>> nroots(x**2 - 3, n=30)
    [-1.73205080756887729352744634151, 1.73205080756887729352744634151]

    """
    try:
        poly = Poly(f, greedy=False)
    except GeneratorsNeeded:
        raise PolynomialError(
            "can't compute numerical roots of %s, not a polynomial" % f)
    return poly.nroots(n=n, maxsteps=maxsteps, cleanup=cleanup, error=error)
def ground_roots(f, *gens, **args):
    """
    Compute roots of ``f`` by factorization in the ground domain.

    Returns a dict mapping each root to its multiplicity.

    Examples
    ========

    >>> from sympy import ground_roots
    >>> from sympy.abc import x

    >>> ground_roots(x**6 - 4*x**4 + 4*x**3 - x**2)
    {0: 2, 1: 2}

    """
    options.allowed_flags(args, [])
    try:
        F, opt = poly_from_expr(f, *gens, **args)
    # 'except E as e' (not the Python-2-only comma form) works on 2.6+/3.x.
    except PolificationFailed as exc:
        raise ComputationFailed('ground_roots', 1, exc)
    return F.ground_roots()
def nth_power_roots_poly(f, n, *gens, **args):
    """
    Construct a polynomial whose roots are the n-th powers of the roots
    of ``f``.

    Examples
    ========

    >>> from sympy import nth_power_roots_poly, factor, roots
    >>> from sympy.abc import x

    >>> f = x**4 - x**2 + 1
    >>> g = factor(nth_power_roots_poly(f, 2))

    >>> g
    (x**2 - x + 1)**2

    >>> R_f = [ (r**2).expand() for r in roots(f) ]
    >>> R_g = roots(g).keys()

    >>> set(R_f) == set(R_g)
    True

    """
    options.allowed_flags(args, [])
    try:
        F, opt = poly_from_expr(f, *gens, **args)
    # 'except E as e' (not the Python-2-only comma form) works on 2.6+/3.x.
    except PolificationFailed as exc:
        raise ComputationFailed('nth_power_roots_poly', 1, exc)
    result = F.nth_power_roots_poly(n)
    if not opt.polys:
        return result.as_expr()
    else:
        return result
def cancel(f, *gens, **args):
    """
    Cancel common factors in a rational function ``f``.

    ``f`` may be a single expression or a ``(numer, denom)`` pair; in the
    latter case a triple ``(coeff, numer, denom)`` is returned.

    Examples
    ========

    >>> from sympy import cancel
    >>> from sympy.abc import x

    >>> cancel((2*x**2 - 2)/(x**2 - 2*x + 1))
    (2*x + 2)/(x - 1)

    """
    options.allowed_flags(args, ['polys'])
    f = sympify(f)
    if not isinstance(f, (tuple, Tuple)):
        if f.is_Number:
            # Numbers have nothing to cancel.
            return f
        else:
            p, q = f.as_numer_denom()
    else:
        # Explicit (numerator, denominator) pair was given.
        p, q = f
    try:
        (F, G), opt = parallel_poly_from_expr((p, q), *gens, **args)
    except PolificationFailed:
        # Not polynomial in any usable generators: return the input as-is
        # (or a trivial triple for pair input) rather than failing.
        if not isinstance(f, (tuple, Tuple)):
            return f
        else:
            return S.One, p, q
    c, P, Q = F.cancel(G)
    if not isinstance(f, (tuple, Tuple)):
        return c*(P.as_expr()/Q.as_expr())
    else:
        if not opt.polys:
            return c, P.as_expr(), Q.as_expr()
        else:
            return c, P, Q
def reduced(f, G, *gens, **args):
    """
    Reduces a polynomial ``f`` modulo a set of polynomials ``G``.

    Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``,
    computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r``
    such that ``f = q_1*f_1 + ... + q_n*f_n + r``, where ``r`` vanishes or ``r``
    is a completely reduced polynomial with respect to ``G``.

    Examples
    ========

    >>> from sympy import reduced
    >>> from sympy.abc import x, y

    >>> reduced(2*x**4 + y**2 - x**2 + y**3, [x**3 - x, y**3 - y])
    ([2*x, 1], x**2 + y**2 + y)

    """
    options.allowed_flags(args, ['polys', 'auto'])
    try:
        polys, opt = parallel_poly_from_expr([f] + list(G), *gens, **args)
    # 'except E as e' (not the Python-2-only comma form) works on 2.6+/3.x.
    except PolificationFailed as exc:
        raise ComputationFailed('reduced', 0, exc)
    domain = opt.domain
    retract = False
    # Division requires a field; lift a ring (e.g. ZZ) to its field of
    # fractions and try to retract the result afterwards.
    if opt.auto and domain.has_Ring and not domain.has_Field:
        opt = opt.clone(dict(domain=domain.get_field()))
        retract = True
    from sympy.polys.rings import xring
    _ring, _ = xring(opt.gens, opt.domain, opt.order)
    for i, poly in enumerate(polys):
        poly = poly.set_domain(opt.domain).rep.to_dict()
        polys[i] = _ring.from_dict(poly)
    Q, r = polys[0].div(polys[1:])
    Q = [ Poly._from_dict(dict(q), opt) for q in Q ]
    r = Poly._from_dict(dict(r), opt)
    if retract:
        try:
            _Q, _r = [ q.to_ring() for q in Q ], r.to_ring()
        except CoercionFailed:
            # Result has fractional coefficients; keep the field version.
            pass
        else:
            Q, r = _Q, _r
    if not opt.polys:
        return [ q.as_expr() for q in Q ], r.as_expr()
    else:
        return Q, r
def groebner(F, *gens, **args):
    """
    Compute the reduced Groebner basis of the polynomial system ``F``.

    The ``order`` argument selects the monomial ordering (``lex``, ``grlex``
    or ``grevlex``; default ``lex``).  See the references and the docstring
    of `solve_poly_system()` for background on Groebner bases.

    Examples
    ========

    Example taken from [1].

    >>> from sympy import groebner
    >>> from sympy.abc import x, y

    >>> F = [x*y - 2*y, 2*y**2 - x**2]

    >>> groebner(F, x, y, order='lex')
    GroebnerBasis([x**2 - 2*y**2, x*y - 2*y, y**3 - 2*y], x, y,
                  domain='ZZ', order='lex')
    >>> groebner(F, x, y, order='grlex')
    GroebnerBasis([y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y], x, y,
                  domain='ZZ', order='grlex')
    >>> groebner(F, x, y, order='grevlex')
    GroebnerBasis([y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y], x, y,
                  domain='ZZ', order='grevlex')

    By default, an improved implementation of the Buchberger algorithm is
    used. Optionally, an implementation of the F5B algorithm can be used.
    The algorithm can be set using ``method`` flag or with the :func:`setup`
    function from :mod:`sympy.polys.polyconfig`:

    >>> F = [x**2 - x - 1, (2*x - 1) * y - (x**10 - (1 - x)**10)]

    >>> groebner(F, x, y, method='buchberger')
    GroebnerBasis([x**2 - x - 1, y - 55], x, y, domain='ZZ', order='lex')
    >>> groebner(F, x, y, method='f5b')
    GroebnerBasis([x**2 - x - 1, y - 55], x, y, domain='ZZ', order='lex')

    References
    ==========

    1. [Buchberger01]_
    2. [Cox97]_

    """
    return GroebnerBasis(F, *gens, **args)
def is_zero_dimensional(F, *gens, **args):
    """
    Return ``True`` if the ideal generated by ``F`` is zero-dimensional.

    The check tests whether the set of monomials not divisible by any
    leading monomial of a Groebner basis of ``F`` is finite.

    References
    ==========

    David A. Cox, John B. Little, Donal O'Shea. Ideals, Varieties and
    Algorithms, 3rd edition, p. 230

    """
    basis = GroebnerBasis(F, *gens, **args)
    return basis.is_zero_dimensional
class GroebnerBasis(Basic):
    """Represents a reduced Groebner basis. """

    __slots__ = ['_basis', '_options']

    def __new__(cls, F, *gens, **args):
        """Compute a reduced Groebner basis for a system of polynomials. """
        options.allowed_flags(args, ['polys', 'method'])
        try:
            polys, opt = parallel_poly_from_expr(F, *gens, **args)
        # 'except E as e' (not the Python-2-only comma form) works on
        # Python 2.6+ and is required on Python 3.
        except PolificationFailed as exc:
            raise ComputationFailed('groebner', len(F), exc)
        domain = opt.domain
        # Groebner bases are computed over a field; lift a ring (e.g. ZZ)
        # to its field of fractions and clear denominators afterwards.
        if domain.has_assoc_Field:
            opt.domain = domain.get_field()
        else:
            raise DomainError("can't compute a Groebner basis over %s" % opt.domain)
        from sympy.polys.rings import xring
        _ring, _ = xring(opt.gens, opt.domain, opt.order)
        # Work on the low-level sparse ring representation for speed.
        for i, poly in enumerate(polys):
            poly = poly.set_domain(opt.domain).rep.to_dict()
            polys[i] = _ring.from_dict(poly)
        G = _groebner(polys, _ring, method=opt.method)
        G = [ Poly._from_dict(g, opt) for g in G ]
        if not domain.has_Field:
            # Retract to the original ring by clearing denominators.
            G = [ g.clear_denoms(convert=True)[1] for g in G ]
            opt.domain = domain
        return cls._new(G, opt)

    @classmethod
    def _new(cls, basis, options):
        """Low-level constructor: wrap an already-computed basis. """
        obj = Basic.__new__(cls)
        obj._basis = tuple(basis)
        obj._options = options
        return obj

    @property
    def args(self):
        return (Tuple(*self._basis), Tuple(*self._options.gens))

    @property
    def exprs(self):
        """Basis elements as :class:`Expr` instances. """
        return [ poly.as_expr() for poly in self._basis ]

    @property
    def polys(self):
        """Basis elements as :class:`Poly` instances. """
        return list(self._basis)

    @property
    def gens(self):
        return self._options.gens

    @property
    def domain(self):
        return self._options.domain

    @property
    def order(self):
        return self._options.order

    def __len__(self):
        return len(self._basis)

    def __iter__(self):
        # The 'polys' option controls whether Poly or Expr is yielded.
        if self._options.polys:
            return iter(self.polys)
        else:
            return iter(self.exprs)

    def __getitem__(self, item):
        if self._options.polys:
            basis = self.polys
        else:
            basis = self.exprs
        return basis[item]

    def __hash__(self):
        return hash((self._basis, tuple(self._options.items())))

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self._basis == other._basis and self._options == other._options
        elif iterable(other):
            # Compare against plain iterables in either representation.
            return self.polys == list(other) or self.exprs == list(other)
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    @property
    def is_zero_dimensional(self):
        """
        Checks if the ideal generated by a Groebner basis is zero-dimensional.

        The algorithm checks if the set of monomials not divisible by the
        leading monomial of any element of ``F`` is bounded.

        References
        ==========

        David A. Cox, John B. Little, Donal O'Shea. Ideals, Varieties and
        Algorithms, 3rd edition, p. 230

        """
        def single_var(monomial):
            # True when exactly one variable appears in the monomial.
            return sum(map(bool, monomial)) == 1
        exponents = Monomial([0]*len(self.gens))
        order = self._options.order
        for poly in self.polys:
            monomial = poly.LM(order=order)
            if single_var(monomial):
                exponents *= monomial
        # If any element of the exponents vector is zero, then there's
        # a variable for which there's no degree bound and the ideal
        # generated by this Groebner basis isn't zero-dimensional.
        return all(exponents)

    def fglm(self, order):
        """
        Convert a Groebner basis from one ordering to another.

        The FGLM algorithm converts reduced Groebner bases of zero-dimensional
        ideals from one ordering to another. This method is often used when it
        is infeasible to compute a Groebner basis with respect to a particular
        ordering directly.

        Examples
        ========

        >>> from sympy.abc import x, y
        >>> from sympy import groebner

        >>> F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1]
        >>> G = groebner(F, x, y, order='grlex')

        >>> list(G.fglm('lex'))
        [2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]
        >>> list(groebner(F, x, y, order='lex'))
        [2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]

        References
        ==========

        J.C. Faugere, P. Gianni, D. Lazard, T. Mora (1994). Efficient
        Computation of Zero-dimensional Groebner Bases by Change of
        Ordering

        """
        opt = self._options
        src_order = opt.order
        dst_order = monomial_key(order)
        if src_order == dst_order:
            return self
        if not self.is_zero_dimensional:
            raise NotImplementedError("can't convert Groebner bases of ideals with positive dimension")
        polys = list(self._basis)
        domain = opt.domain
        opt = opt.clone(dict(
            domain=domain.get_field(),
            order=dst_order,
        ))
        from sympy.polys.rings import xring
        # The source ring keeps the *old* ordering: FGLM reads the input in
        # src_order and produces the basis in dst_order.
        _ring, _ = xring(opt.gens, opt.domain, src_order)
        for i, poly in enumerate(polys):
            poly = poly.set_domain(opt.domain).rep.to_dict()
            polys[i] = _ring.from_dict(poly)
        G = matrix_fglm(polys, _ring, dst_order)
        G = [ Poly._from_dict(dict(g), opt) for g in G ]
        if not domain.has_Field:
            G = [ g.clear_denoms(convert=True)[1] for g in G ]
            opt.domain = domain
        return self._new(G, opt)

    def reduce(self, expr, auto=True):
        """
        Reduces a polynomial modulo a Groebner basis.

        Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``,
        computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r``
        such that ``f = q_1*f_1 + ... + q_n*f_n + r``, where ``r`` vanishes or ``r``
        is a completely reduced polynomial with respect to ``G``.

        Examples
        ========

        >>> from sympy import groebner, expand
        >>> from sympy.abc import x, y

        >>> f = 2*x**4 - x**2 + y**3 + y**2
        >>> G = groebner([x**3 - x, y**3 - y])

        >>> G.reduce(f)
        ([2*x, 1], x**2 + y**2 + y)
        >>> Q, r = _

        >>> expand(sum(q*g for q, g in zip(Q, G)) + r)
        2*x**4 - x**2 + y**3 + y**2
        >>> _ == f
        True

        """
        poly = Poly._from_expr(expr, self._options)
        polys = [poly] + list(self._basis)
        opt = self._options
        domain = opt.domain
        retract = False
        # Division requires a field; lift a ring domain and retract later.
        if auto and domain.has_Ring and not domain.has_Field:
            opt = opt.clone(dict(domain=domain.get_field()))
            retract = True
        from sympy.polys.rings import xring
        _ring, _ = xring(opt.gens, opt.domain, opt.order)
        for i, poly in enumerate(polys):
            poly = poly.set_domain(opt.domain).rep.to_dict()
            polys[i] = _ring.from_dict(poly)
        Q, r = polys[0].div(polys[1:])
        Q = [ Poly._from_dict(dict(q), opt) for q in Q ]
        r = Poly._from_dict(dict(r), opt)
        if retract:
            try:
                _Q, _r = [ q.to_ring() for q in Q ], r.to_ring()
            except CoercionFailed:
                # Fractional coefficients remain; keep the field version.
                pass
            else:
                Q, r = _Q, _r
        if not opt.polys:
            return [ q.as_expr() for q in Q ], r.as_expr()
        else:
            return Q, r

    def contains(self, poly):
        """
        Check if ``poly`` belongs the ideal generated by ``self``.

        Examples
        ========

        >>> from sympy import groebner
        >>> from sympy.abc import x, y

        >>> f = 2*x**3 + y**3 + 3*y
        >>> G = groebner([x**2 + y**2 - 1, x*y - 2])

        >>> G.contains(f)
        True
        >>> G.contains(f + 1)
        False

        """
        # poly is in the ideal iff its remainder modulo the basis is zero.
        return self.reduce(poly)[1] == 0
def poly(expr, *gens, **args):
    """
    Efficiently transform an expression into a polynomial.

    Unlike ``Poly(expr)``, nested powers and products of sums are converted
    bottom-up using Poly arithmetic, avoiding a full ``expand`` of the
    expression.

    Examples
    ========

    >>> from sympy import poly
    >>> from sympy.abc import x

    >>> poly(x*(x**2 + x - 1)**2)
    Poly(x**5 + 2*x**4 - x**3 - 2*x**2 + x, x, domain='ZZ')

    """
    options.allowed_flags(args, [])
    def _poly(expr, opt):
        # Recursive worker: build a Poly term-by-term.  Sums and integer
        # powers of sums become Polys combined with Poly.mul/Poly.pow/
        # Poly.add, which is cheaper than expanding first.
        terms, poly_terms = [], []
        for term in Add.make_args(expr):
            factors, poly_factors = [], []
            for factor in Mul.make_args(term):
                if factor.is_Add:
                    poly_factors.append(_poly(factor, opt))
                elif factor.is_Pow and factor.base.is_Add and factor.exp.is_Integer:
                    poly_factors.append(
                        _poly(factor.base, opt).pow(factor.exp))
                else:
                    # Atomic factor (symbol, number, non-Add power, ...).
                    factors.append(factor)
            if not poly_factors:
                # Term contained no sums: keep it symbolic for now.
                terms.append(term)
            else:
                # Multiply the Poly factors together, then fold in the
                # remaining symbolic factors.
                product = poly_factors[0]
                for factor in poly_factors[1:]:
                    product = product.mul(factor)
                if factors:
                    factor = Mul(*factors)
                    if factor.is_Number:
                        product = product.mul(factor)
                    else:
                        product = product.mul(Poly._from_expr(factor, opt))
                poly_terms.append(product)
        if not poly_terms:
            # No structured terms at all: convert directly.
            result = Poly._from_expr(expr, opt)
        else:
            result = poly_terms[0]
            for term in poly_terms[1:]:
                result = result.add(term)
            if terms:
                term = Add(*terms)
                if term.is_Number:
                    result = result.add(term)
                else:
                    result = result.add(Poly._from_expr(term, opt))
        # Put the generators in the user-requested order.
        return result.reorder(*opt.get('gens', ()), **args)
    expr = sympify(expr)
    if expr.is_Poly:
        return Poly(expr, *gens, **args)
    # Deliberately skip expansion; the whole point is to avoid it.
    if 'expand' not in args:
        args['expand'] = False
    opt = options.build_options(gens, args)
    return _poly(expr, opt)
|
amitjamadagni/sympy
|
sympy/polys/polytools.py
|
Python
|
bsd-3-clause
| 155,247
|
[
"Gaussian"
] |
ca816075a8fdd2e063d44e6a12dcd767c12c120d2e8f4bd91b0f6f796582f75b
|
from itertools import combinations_with_replacement
import numpy as np
from scipy import ndimage as ndi
from scipy import stats
from ..util import img_as_float
from ..feature import peak_local_max
from ..feature.util import _prepare_grayscale_input_2D
from ..feature.corner_cy import _corner_fast
from ._hessian_det_appx import _hessian_matrix_det
from ..transform import integral_image
from .._shared.utils import safe_as_int
def _compute_derivatives(image, mode='constant', cval=0):
"""Compute derivatives in x and y direction using the Sobel operator.
Parameters
----------
image : ndarray
Input image.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders.
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
imx : ndarray
Derivative in x-direction.
imy : ndarray
Derivative in y-direction.
"""
imy = ndi.sobel(image, axis=0, mode=mode, cval=cval)
imx = ndi.sobel(image, axis=1, mode=mode, cval=cval)
return imx, imy
def structure_tensor(image, sigma=1, mode='constant', cval=0):
    """Compute the structure tensor from sums of squared differences.

    The structure tensor A is defined as::

        A = [Axx Axy]
            [Axy Ayy]

    approximated by Gaussian-weighted sums of products of first
    derivatives in a local window around each pixel.

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float, optional
        Standard deviation of the Gaussian used to weight the local
        summation of squared differences.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Fill value used outside the image for mode 'constant'.

    Returns
    -------
    Axx, Axy, Ayy : ndarray
        Elements of the structure tensor for each pixel in the input image.

    Examples
    --------
    >>> from skimage.feature import structure_tensor
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 1
    >>> Axx, Axy, Ayy = structure_tensor(square, sigma=0.1)
    >>> Axx
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  1.,  0.,  1.,  0.],
           [ 0.,  4.,  0.,  4.,  0.],
           [ 0.,  1.,  0.,  1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    """
    image = _prepare_grayscale_input_2D(image)
    imx, imy = _compute_derivatives(image, mode=mode, cval=cval)

    def _weighted_sum(values):
        # Gaussian weighting of the local derivative products.
        return ndi.gaussian_filter(values, sigma, mode=mode, cval=cval)

    Axx = _weighted_sum(imx * imx)
    Axy = _weighted_sum(imx * imy)
    Ayy = _weighted_sum(imy * imy)
    return Axx, Axy, Ayy
def hessian_matrix(image, sigma=1, mode='constant', cval=0):
    """Compute the Hessian matrix of an image.

    For a 2D image the Hessian is::

        H = [Hxx Hxy]
            [Hxy Hyy]

    obtained by Gaussian smoothing followed by repeated numerical
    differentiation.

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float
        Standard deviation of the Gaussian kernel used for smoothing
        before differentiation.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Fill value used outside the image for mode 'constant'.

    Returns
    -------
    H_elems : list of ndarray
        Upper-triangular Hessian elements, one array per element.  For a
        2D image this is ``[Hxx, Hxy, Hyy]``.

    Examples
    --------
    >>> from skimage.feature import hessian_matrix
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 4
    >>> Hxx, Hxy, Hyy = hessian_matrix(square, sigma=0.1)
    >>> Hxy
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  1.,  0., -1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.],
           [ 0., -1.,  0.,  1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    """
    image = img_as_float(image)
    smoothed = ndi.gaussian_filter(image, sigma=sigma, mode=mode, cval=cval)
    first_derivs = np.gradient(smoothed)
    ndim = image.ndim
    elems = []
    # Second derivatives for every unordered pair of axes (upper triangle).
    for ax0, ax1 in combinations_with_replacement(range(ndim), 2):
        elems.append(np.gradient(first_derivs[ax0], axis=ax1))
    if ndim == 2:
        # The legacy 2D code followed (x, y) convention, so reverse the
        # element order to stay compatible with old callers.
        elems = elems[::-1]
    return elems
def hessian_matrix_det(image, sigma=1):
    """Approximate the determinant of the Hessian over an image.

    Uses box filters over an integral image, as described in [1]_, so the
    running time depends only on the image size, not on `sigma`.

    Parameters
    ----------
    image : array
        The image over which to compute the Hessian determinant.
    sigma : float, optional
        Standard deviation of the Gaussian kernel the box filters
        approximate.

    Returns
    -------
    out : array
        The array of approximate Hessian determinants.

    References
    ----------
    .. [1] Herbert Bay, Andreas Ess, Tinne Tuytelaars, Luc Van Gool,
           "SURF: Speeded Up Robust Features"
           ftp://ftp.vision.ee.ethz.ch/publications/articles/eth_biwi_00517.pdf

    Notes
    -----
    For `sigma` below 3 the box-filter approximation is inaccurate, i.e.
    not similar to the determinant of an exactly computed Hessian.
    """
    as_float = img_as_float(image)
    summed = integral_image(as_float)
    return np.array(_hessian_matrix_det(summed, sigma))
def _image_orthogonal_matrix22_eigvals(M00, M01, M11):
l1 = (M00 + M11) / 2 + np.sqrt(4 * M01 ** 2 + (M00 - M11) ** 2) / 2
l2 = (M00 + M11) / 2 - np.sqrt(4 * M01 ** 2 + (M00 - M11) ** 2) / 2
return l1, l2
def structure_tensor_eigvals(Axx, Axy, Ayy):
    """Compute the eigenvalues of the structure tensor, elementwise.

    Parameters
    ----------
    Axx, Axy, Ayy : ndarray
        Elements of the structure tensor for each pixel in the input image.

    Returns
    -------
    l1 : ndarray
        Larger eigenvalue for each input matrix.
    l2 : ndarray
        Smaller eigenvalue for each input matrix.

    Examples
    --------
    >>> from skimage.feature import structure_tensor, structure_tensor_eigvals
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 1
    >>> Axx, Axy, Ayy = structure_tensor(square, sigma=0.1)
    >>> structure_tensor_eigvals(Axx, Axy, Ayy)[0]
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 0.,  2.,  4.,  2.,  0.],
           [ 0.,  4.,  0.,  4.,  0.],
           [ 0.,  2.,  4.,  2.,  0.],
           [ 0.,  0.,  0.,  0.,  0.]])
    """
    # Closed-form eigenvalues of the symmetric 2x2 matrix [[Axx, Axy],
    # [Axy, Ayy]], computed elementwise (inlined helper formula).
    half_trace = (Axx + Ayy) / 2
    half_gap = np.sqrt(4 * Axy ** 2 + (Axx - Ayy) ** 2) / 2
    return half_trace + half_gap, half_trace - half_gap
def hessian_matrix_eigvals(Hxx, Hxy, Hyy):
    """Compute the eigenvalues of the Hessian matrix, elementwise.

    Parameters
    ----------
    Hxx, Hxy, Hyy : ndarray
        Elements of the Hessian matrix for each pixel in the input image.

    Returns
    -------
    l1 : ndarray
        Larger eigenvalue for each input matrix.
    l2 : ndarray
        Smaller eigenvalue for each input matrix.

    Examples
    --------
    >>> from skimage.feature import hessian_matrix, hessian_matrix_eigvals
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 4
    >>> Hxx, Hxy, Hyy = hessian_matrix(square, sigma=0.1)
    >>> hessian_matrix_eigvals(Hxx, Hxy, Hyy)[0]
    array([[ 0.,  0.,  2.,  0.,  0.],
           [ 0.,  1.,  0.,  1.,  0.],
           [ 2.,  0., -2.,  0.,  2.],
           [ 0.,  1.,  0.,  1.,  0.],
           [ 0.,  0.,  2.,  0.,  0.]])
    """
    # Closed-form eigenvalues of the symmetric 2x2 matrix [[Hxx, Hxy],
    # [Hxy, Hyy]], computed elementwise (inlined helper formula).
    half_trace = (Hxx + Hyy) / 2
    half_gap = np.sqrt(4 * Hxy ** 2 + (Hxx - Hyy) ** 2) / 2
    return half_trace + half_gap, half_trace - half_gap
def corner_kitchen_rosenfeld(image, mode='constant', cval=0):
    """Compute the Kitchen and Rosenfeld corner measure response image.

    The corner measure is::

        (imxx * imy**2 + imyy * imx**2 - 2 * imxy * imx * imy)
            / (imx**2 + imy**2)

    where imx, imy are first and imxx, imxy, imyy second derivatives.
    Pixels where the denominator vanishes are set to zero.

    Parameters
    ----------
    image : ndarray
        Input image.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Fill value used outside the image for mode 'constant'.

    Returns
    -------
    response : ndarray
        Kitchen and Rosenfeld response image.
    """
    dx, dy = _compute_derivatives(image, mode=mode, cval=cval)
    dxx, dxy = _compute_derivatives(dx, mode=mode, cval=cval)
    dyx, dyy = _compute_derivatives(dy, mode=mode, cval=cval)

    num = dxx * dy ** 2 + dyy * dx ** 2 - 2 * dxy * dx * dy
    den = dx ** 2 + dy ** 2

    response = np.zeros_like(image, dtype=np.double)
    nonzero = den != 0
    response[nonzero] = num[nonzero] / den[nonzero]
    return response
def corner_harris(image, method='k', k=0.05, eps=1e-6, sigma=1):
    """Compute the Harris corner measure response image.

    This corner detector uses information from the auto-correlation matrix A::

        A = [(imx**2)   (imx*imy)] = [Axx Axy]
            [(imx*imy)   (imy**2)]   [Axy Ayy]

    where imx and imy are first derivatives averaged with a Gaussian filter.
    The corner measure is then::

        det(A) - k * trace(A)**2          (method='k')
        2 * det(A) / (trace(A) + eps)     (method='eps')

    Parameters
    ----------
    image : ndarray
        Input image.
    method : {'k', 'eps'}, optional
        Method to compute the response image from the auto-correlation matrix.
    k : float, optional
        Sensitivity factor separating corners from edges, typically in
        ``[0, 0.2]``; small values favour sharp corners.
    eps : float, optional
        Normalisation factor (Noble's corner measure).
    sigma : float, optional
        Standard deviation of the Gaussian used to weight the
        auto-correlation matrix.

    Returns
    -------
    response : ndarray
        Harris response image.

    References
    ----------
    .. [1] http://kiwi.cs.dal.ca/~dparks/CornerDetection/harris.htm
    .. [2] http://en.wikipedia.org/wiki/Corner_detection

    Examples
    --------
    >>> from skimage.feature import corner_harris, corner_peaks
    >>> square = np.zeros([10, 10])
    >>> square[2:8, 2:8] = 1
    >>> corner_peaks(corner_harris(square), min_distance=1)
    array([[2, 2],
           [2, 7],
           [7, 2],
           [7, 7]])
    """
    Axx, Axy, Ayy = structure_tensor(image, sigma)

    det = Axx * Ayy - Axy ** 2
    trace = Axx + Ayy

    if method == 'k':
        return det - k * trace ** 2
    return 2 * det / (trace + eps)
def corner_shi_tomasi(image, sigma=1):
    """Compute the Shi-Tomasi (Kanade-Tomasi) corner measure response image.

    This corner detector uses information from the auto-correlation matrix A::

        A = [(imx**2)   (imx*imy)] = [Axx Axy]
            [(imx*imy)   (imy**2)]   [Axy Ayy]

    where imx and imy are first derivatives averaged with a Gaussian filter.
    The corner measure is the smaller eigenvalue of A::

        ((Axx + Ayy) - sqrt((Axx - Ayy)**2 + 4 * Axy**2)) / 2

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float, optional
        Standard deviation of the Gaussian used to weight the
        auto-correlation matrix.

    Returns
    -------
    response : ndarray
        Shi-Tomasi response image.

    References
    ----------
    .. [1] http://kiwi.cs.dal.ca/~dparks/CornerDetection/harris.htm
    .. [2] http://en.wikipedia.org/wiki/Corner_detection

    Examples
    --------
    >>> from skimage.feature import corner_shi_tomasi, corner_peaks
    >>> square = np.zeros([10, 10])
    >>> square[2:8, 2:8] = 1
    >>> corner_peaks(corner_shi_tomasi(square), min_distance=1)
    array([[2, 2],
           [2, 7],
           [7, 2],
           [7, 7]])
    """
    Axx, Axy, Ayy = structure_tensor(image, sigma)

    # Smaller eigenvalue of the 2x2 structure tensor, elementwise.
    half_trace = (Axx + Ayy) / 2
    half_gap = np.sqrt((Axx - Ayy) ** 2 + 4 * Axy ** 2) / 2
    return half_trace - half_gap
def corner_foerstner(image, sigma=1):
    """Compute Foerstner corner measure response image.

    Uses information from the auto-correlation matrix A::

        A = [(imx**2)   (imx*imy)] = [Axx Axy]
            [(imx*imy)   (imy**2)]   [Axy Ayy]

    where imx and imy are first derivatives, averaged with a gaussian
    filter.  The corner measures are::

        w = det(A) / trace(A)           (size of error ellipse)
        q = 4 * det(A) / trace(A)**2    (roundness of error ellipse)

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float, optional
        Standard deviation used for the Gaussian kernel, which is used as
        weighting function for the auto-correlation matrix.

    Returns
    -------
    w : ndarray
        Error ellipse sizes.
    q : ndarray
        Roundness of error ellipse.

    References
    ----------
    .. [1] http://www.ipb.uni-bonn.de/uploads/tx_ikgpublication/foerstner87.fast.pdf
    .. [2] http://en.wikipedia.org/wiki/Corner_detection

    Examples
    --------
    >>> from skimage.feature import corner_foerstner, corner_peaks
    >>> square = np.zeros([10, 10])
    >>> square[2:8, 2:8] = 1
    >>> w, q = corner_foerstner(square)
    >>> foerstner = (q > 0.3) * (w > 0.5) * w
    >>> corner_peaks(foerstner, min_distance=1)
    array([[2, 2],
           [2, 7],
           [7, 2],
           [7, 7]])
    """
    Axx, Axy, Ayy = structure_tensor(image, sigma)
    detA = Axx * Ayy - Axy ** 2
    traceA = Axx + Ayy
    # divide only where the trace is non-zero; elsewhere w and q stay 0
    w = np.zeros_like(image, dtype=np.double)
    q = np.zeros_like(image, dtype=np.double)
    valid = traceA != 0
    np.divide(detA, traceA, out=w, where=valid)
    np.divide(4 * detA, traceA ** 2, out=q, where=valid)
    return w, q
def corner_fast(image, n=12, threshold=0.15):
    """Extract FAST corners for a given image.

    Parameters
    ----------
    image : 2D ndarray
        Input image.
    n : int
        Minimum number of consecutive pixels out of 16 pixels on the circle
        that should all be either brighter or darker w.r.t the test pixel.
        A point c on the circle is darker w.r.t test pixel p if
        `Ic < Ip - threshold` and brighter if `Ic > Ip + threshold`. Also
        stands for the n in `FAST-n` corner detector.
    threshold : float
        Threshold used in deciding whether the pixels on the circle are
        brighter, darker or similar w.r.t. the test pixel. Decrease the
        threshold when more corners are desired and vice-versa.

    Returns
    -------
    response : ndarray
        FAST corner response image.

    References
    ----------
    .. [1] Edward Rosten and Tom Drummond
           "Machine Learning for high-speed corner detection",
           http://www.edwardrosten.com/work/rosten_2006_machine.pdf
    .. [2] Wikipedia, "Features from accelerated segment test",
           https://en.wikipedia.org/wiki/Features_from_accelerated_segment_test

    Examples
    --------
    >>> from skimage.feature import corner_fast, corner_peaks
    >>> square = np.zeros((12, 12))
    >>> square[3:9, 3:9] = 1
    >>> corner_peaks(corner_fast(square, 9), min_distance=1)
    array([[3, 3],
           [3, 8],
           [8, 3],
           [8, 8]])
    """
    # the Cython FAST kernel requires a C-contiguous grayscale 2D array
    prepared = np.ascontiguousarray(_prepare_grayscale_input_2D(image))
    return _corner_fast(prepared, n, threshold)
def corner_subpix(image, corners, window_size=11, alpha=0.99):
    """Determine subpixel position of corners.

    A statistical test decides whether the corner is defined as the
    intersection of two edges or a single peak. Depending on the classification
    result, the subpixel corner location is determined based on the local
    covariance of the grey-values. If the significance level for either
    statistical test is not sufficient, the corner cannot be classified, and
    the output subpixel position is set to NaN.

    Parameters
    ----------
    image : ndarray
        Input image.
    corners : (N, 2) ndarray
        Corner coordinates `(row, col)`.
    window_size : int, optional
        Search window size for subpixel estimation.
    alpha : float, optional
        Significance level for corner classification.

    Returns
    -------
    positions : (N, 2) ndarray
        Subpixel corner positions. NaN for "not classified" corners.

    References
    ----------
    .. [1] http://www.ipb.uni-bonn.de/uploads/tx_ikgpublication/\
           foerstner87.fast.pdf
    .. [2] http://en.wikipedia.org/wiki/Corner_detection

    Examples
    --------
    >>> from skimage.feature import corner_harris, corner_peaks, corner_subpix
    >>> img = np.zeros((10, 10))
    >>> img[:5, :5] = 1
    >>> img[5:, 5:] = 1
    >>> coords = corner_peaks(corner_harris(img), min_distance=2)
    >>> coords_subpix = corner_subpix(img, coords, window_size=7)
    >>> coords_subpix
    array([[ 4.5,  4.5]])
    """
    # window extent in one direction
    wext = (window_size - 1) // 2
    # pad so windows around border corners stay inside the image
    image = np.pad(image, pad_width=wext, mode='constant', constant_values=0)
    # add pad width, make sure to not modify the input values in-place
    corners = safe_as_int(corners + wext)
    # normal equation arrays (2x2 systems, reused for every corner)
    N_dot = np.zeros((2, 2), dtype=np.double)
    N_edge = np.zeros((2, 2), dtype=np.double)
    b_dot = np.zeros((2, ), dtype=np.double)
    b_edge = np.zeros((2, ), dtype=np.double)
    # critical statistical test values (F-distribution quantiles)
    redundancy = window_size ** 2 - 2
    t_crit_dot = stats.f.isf(1 - alpha, redundancy, redundancy)
    t_crit_edge = stats.f.isf(alpha, redundancy, redundancy)
    # coordinates of pixels within window, relative to the window centre
    y, x = np.mgrid[- wext:wext + 1, - wext:wext + 1]
    corners_subpix = np.zeros_like(corners, dtype=np.double)
    for i, (y0, x0) in enumerate(corners):
        # crop window around corner + border for sobel operator
        miny = y0 - wext - 1
        maxy = y0 + wext + 2
        minx = x0 - wext - 1
        maxx = x0 + wext + 2
        window = image[miny:maxy, minx:maxx]
        winx, winy = _compute_derivatives(window, mode='constant', cval=0)
        # compute gradient squares and remove the 1-pixel border added above
        winx_winx = (winx * winx)[1:-1, 1:-1]
        winx_winy = (winx * winy)[1:-1, 1:-1]
        winy_winy = (winy * winy)[1:-1, 1:-1]
        # sum of squared differences (mean instead of gaussian filter)
        Axx = np.sum(winx_winx)
        Axy = np.sum(winx_winy)
        Ayy = np.sum(winy_winy)
        # sum of squared differences weighted with coordinates
        # (mean instead of gaussian filter)
        bxx_x = np.sum(winx_winx * x)
        bxx_y = np.sum(winx_winx * y)
        bxy_x = np.sum(winx_winy * x)
        bxy_y = np.sum(winx_winy * y)
        byy_x = np.sum(winy_winy * x)
        byy_y = np.sum(winy_winy * y)
        # normal equations for subpixel position, one system per corner model
        N_dot[0, 0] = Axx
        N_dot[0, 1] = N_dot[1, 0] = - Axy
        N_dot[1, 1] = Ayy
        N_edge[0, 0] = Ayy
        N_edge[0, 1] = N_edge[1, 0] = Axy
        N_edge[1, 1] = Axx
        b_dot[:] = bxx_y - bxy_x, byy_x - bxy_y
        b_edge[:] = byy_y + bxy_x, bxx_x + bxy_y
        # estimated positions for the dot and edge models
        try:
            est_dot = np.linalg.solve(N_dot, b_dot)
            est_edge = np.linalg.solve(N_edge, b_edge)
        except np.linalg.LinAlgError:
            # if image is constant the system is singular
            corners_subpix[i, :] = np.nan, np.nan
            continue
        # residuals
        ry_dot = y - est_dot[0]
        rx_dot = x - est_dot[1]
        ry_edge = y - est_edge[0]
        rx_edge = x - est_edge[1]
        # squared residuals
        rxx_dot = rx_dot * rx_dot
        rxy_dot = rx_dot * ry_dot
        ryy_dot = ry_dot * ry_dot
        rxx_edge = rx_edge * rx_edge
        rxy_edge = rx_edge * ry_edge
        ryy_edge = ry_edge * ry_edge
        # determine corner class (dot or edge)
        # variance for different models
        var_dot = np.sum(winx_winx * ryy_dot - 2 * winx_winy * rxy_dot
                         + winy_winy * rxx_dot)
        var_edge = np.sum(winy_winy * ryy_edge + 2 * winx_winy * rxy_edge
                          + winx_winx * rxx_edge)
        # test value (F-distributed); both-tiny means the test is undecidable
        if var_dot < np.spacing(1) and var_edge < np.spacing(1):
            t = np.nan
        elif var_dot == 0:
            t = np.inf
        else:
            t = var_edge / var_dot
        # 1 for edge, -1 for dot, 0 for "not classified"
        corner_class = int(t < t_crit_edge) - int(t > t_crit_dot)
        if corner_class == -1:
            corners_subpix[i, :] = y0 + est_dot[0], x0 + est_dot[1]
        elif corner_class == 0:
            corners_subpix[i, :] = np.nan, np.nan
        elif corner_class == 1:
            corners_subpix[i, :] = y0 + est_edge[0], x0 + est_edge[1]
    # subtract pad width to return to original image coordinates
    corners_subpix -= wext
    return corners_subpix
def corner_peaks(image, min_distance=1, threshold_abs=None, threshold_rel=0.1,
                 exclude_border=True, indices=True, num_peaks=np.inf,
                 footprint=None, labels=None):
    """Find corners in corner measure response image.

    This differs from `skimage.feature.peak_local_max` in that it suppresses
    multiple connected peaks with the same accumulator value.

    Parameters
    ----------
    * : *
        See :py:meth:`skimage.feature.peak_local_max`.

    Examples
    --------
    >>> from skimage.feature import peak_local_max
    >>> response = np.zeros((5, 5))
    >>> response[2:4, 2:4] = 1
    >>> peak_local_max(response)
    array([[2, 2],
           [2, 3],
           [3, 2],
           [3, 3]])
    >>> corner_peaks(response)
    array([[2, 2]])
    """
    peaks = peak_local_max(image, min_distance=min_distance,
                           threshold_abs=threshold_abs,
                           threshold_rel=threshold_rel,
                           exclude_border=exclude_border,
                           indices=False, num_peaks=num_peaks,
                           footprint=footprint, labels=labels)
    if min_distance > 0:
        # walk candidate maxima in scan order: the first surviving peak of a
        # connected plateau clears its whole neighbourhood, then re-marks
        # itself, so later plateau members are skipped
        for r, c in zip(*np.nonzero(peaks)):
            if not peaks[r, c]:
                continue
            peaks[r - min_distance:r + min_distance + 1,
                  c - min_distance:c + min_distance + 1] = False
            peaks[r, c] = True
    if indices is True:
        return np.transpose(np.nonzero(peaks))
    else:
        return peaks
|
rjeli/scikit-image
|
skimage/feature/corner.py
|
Python
|
bsd-3-clause
| 26,730
|
[
"Gaussian"
] |
2445f085ffe7626cff856681c66e3df05e1ecfe40a35b49711d2a04e18eacff2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# lm.py
"""
A python implementation of Levenberg–Marquardt.
exposes a drop in replacement for scipy.curve_fit and
allows the user to fit their function by maximizing the
maximum likelihood for poisson deviates rather than for
gaussian deviates, requires the jacobian to be defined.
### References
1. Methods for Non-Linear Least Squares Problems (2nd ed.) http://www2.imm.dtu.dk/pubdb/views/publication_details.php?id=3215 (accessed Aug 18, 2017).
1. [Laurence, T. A.; Chromy, B. A. Efficient Maximum Likelihood Estimator Fitting of Histograms. Nat Meth 2010, 7 (5), 338–339.](http://www.nature.com/nmeth/journal/v7/n5/full/nmeth0510-338.html)
1. Numerical Recipes in C: The Art of Scientific Computing, 2nd ed.; Press, W. H., Ed.; Cambridge University Press: Cambridge ; New York, 1992.
1. https://www.osti.gov/scitech/servlets/purl/7256021/
Copyright (c) 2017, David Hoffman
"""
import logging
import warnings

import numpy as np
from numpy import linalg as la
import scipy.optimize
from scipy.linalg import solve_triangular
logger = logging.getLogger(__name__)
def _chi2_ls(f):
"""Sum of the squares of the residuals
Assumes that f returns residuals.
Minimizing this will maximize the likelihood for a
data model with gaussian deviates."""
return 0.5 * (f ** 2).sum(0)
def _update_ls(x0, f, Dfun):
"""Hessian and gradient calculations for gaussian deviates"""
# calculate the jacobian
# j shape (ndata, nparams)
j = Dfun(x0)
# calculate the linear term of Hessian
# a shape (nparams, nparams)
a = j.T @ j
# calculate the gradient
# g shape (nparams,)
g = j.T @ f
return j, a, g
def _chi2_mle(f):
"""The equivalent "chi2" for poisson deviates
Minimizing this will maximize the likelihood for a data
model with gaussian deviates."""
f, y = f
if f.min() < 0:
logger.debug("function has dropped below zero {}, this shouldn't happen".format(f.min()))
return np.inf
# don't include points where the data is less
# than zero as this isn't allowed.
# calculate the parts of chi2
part1 = (f - y).sum(0)
# make sure to change nans and infs to nums
with np.errstate(invalid="ignore", divide="ignore"):
part2 = -(y * np.log(f / y))
part2[~np.isfinite(part2)] = 0.0
part2 = part2.sum(0)
return part1 + part2
def _update_mle(x0, f, Dfun):
"""Hessian and gradient calculations for poisson deviates"""
# calculate the jacobian
# j shape (ndata, nparams)
f, y = f
with np.errstate(invalid="ignore"):
y_f = y / f
y_f2 = y_f / f
# make sure we have finite results.
# any errors here are divide by zero problems
valid_points = np.isfinite(y_f2) & np.isfinite(y_f)
y_f[~valid_points] = 0
y_f2[~valid_points] = 0
j = Dfun(x0)
# calculate the linear term of Hessian
# a shape (nparams, nparams)
a = (j.T * y_f2) @ j
# calculate the gradient
# g shape (nparams,)
g = j.T @ (1 - y_f)
return j, a, g
def _ensure_positive(data):
"""Make sure data is positive and has no zeros
For numerical stability
If we realize that mutating data is not a problem
and that changing in place could lead to signifcant
speed ups we can lose the data.copy() line"""
# make a copy of the data
data = data.copy()
data[data <= 0] = 0
return data
def _wrap_func_mle(func, xdata, ydata, transform):
    """Return a cost function for MLE fitting that yields ``(model, data)``.

    This is the cost function as defined by Transtrum and Sethna.  Both
    the model values and the data are clamped to be non-negative.
    """
    # non-negativity constraint on the data, applied once up front
    ydata_nn = _ensure_positive(ydata)
    if transform is None:
        def func_wrapped(params):
            # return the clamped model together with the clamped data
            return _ensure_positive(func(xdata, *params)), ydata_nn
        return func_wrapped
    if transform.ndim == 1:
        # 1-d sigma weighting not implemented for the MLE estimator
        raise NotImplementedError
    # correlated weighting not implemented either:
    # Chisq = (y - yd)^T C^{-1} (y-yd) with transform = L such that
    # C = L L^T; one would solve L (y-yd)' = (y-yd) and minimize
    # (y-yd)'^T (y-yd)'
    raise NotImplementedError
def _wrap_jac_mle(jac, xdata, transform):
if transform is None:
def jac_wrapped(params):
return jac(xdata, *params)
elif transform.ndim == 1:
raise NotImplementedError
else:
raise NotImplementedError
return jac_wrapped
def _wrap_func_ls(func, xdata, ydata, transform):
"""This is the cost function as defined by Transtrum and Sethna"""
if transform is None:
def func_wrapped(params):
return func(xdata, *params) - ydata
elif transform.ndim == 1:
def func_wrapped(params):
return transform * (func(xdata, *params) - ydata)
else:
# Chisq = (y - yd)^T C^{-1} (y-yd)
# transform = L such that C = L L^T
# C^{-1} = L^{-T} L^{-1}
# Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd)
# Define (y-yd)' = L^{-1} (y-yd)
# by solving
# L (y-yd)' = (y-yd)
# and minimize (y-yd)'^T (y-yd)'
def func_wrapped(params):
return solve_triangular(transform, func(xdata, *params) - ydata, lower=True)
return func_wrapped
def _wrap_jac_ls(jac, xdata, transform):
if transform is None:
def jac_wrapped(params):
return jac(xdata, *params)
elif transform.ndim == 1:
def jac_wrapped(params):
return transform[:, np.newaxis] * np.asarray(jac(xdata, *params))
else:
def jac_wrapped(params):
return solve_triangular(transform, np.asarray(jac(xdata, *params)), lower=True)
return jac_wrapped
def make_lambda(j, d0):
    """Make the diagonal matrix which takes care of scaling
    according to J. J. Moré's paper.

    Parameters
    ----------
    j : ndarray
        Jacobian, shape (ndata, nparams).
    d0 : float
        Value forced into the first column-norm entry.

    NOTE(review): this helper is only referenced from commented-out code
    in `lm`.  The comprehension below ranges over ``1..len(ds)-1``, so
    the returned diagonal has one entry fewer than the number of Jacobian
    columns — looks like an off-by-one; confirm intent before using.
    """
    # Calculate the norm of the jacobian columns
    ds = la.norm(j, axis=0)
    ds[0] = d0
    # return an increasing diagnonal matrix
    return np.diag([max(ds[i], ds[i - 1]) for i in range(1, len(ds))])
def lm(
    func,
    x0,
    args=(),
    Dfun=None,
    full_output=False,
    col_deriv=True,
    ftol=1.49012e-8,
    xtol=1.49012e-8,
    gtol=0.0,
    maxfev=None,
    epsfcn=None,
    factor=100,
    diag=None,
    method="ls",
):
    """A more thorough implementation of levenburg-marquet
    for gaussian Noise
    ::
        x = arg min(sum(func(y)**2,axis=0))
                 y
    Parameters
    ----------
    func : callable
        should take at least one (possibly length N vector) argument and
        returns M floating point numbers. It must not return NaNs or
        fitting might fail.
    x0 : ndarray
        The starting estimate for the minimization.
    args : tuple, optional
        Any extra arguments to func are placed in this tuple.
    Dfun : callable, optional
        A function or method to compute the Jacobian of func with derivatives
        across the rows. If this is None, the Jacobian will be estimated.
    full_output : bool, optional
        non-zero to return all optional outputs.
    col_deriv : bool, optional
        non-zero to specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float, optional
        Relative error desired in the sum of squares.
    xtol : float, optional
        Relative error desired in the approximate solution.
    gtol : float, optional
        Orthogonality desired between the function vector and the columns of
        the Jacobian.
    maxfev : int, optional
        The maximum number of calls to the function. If `Dfun` is provided
        then the default `maxfev` is 100*(N+1) where N is the number of elements
        in x0, otherwise the default `maxfev` is 200*(N+1).
    epsfcn : float, optional
        A variable used in determining a suitable step length for the forward-
        difference approximation of the Jacobian (for Dfun=None).
        Normally the actual step length will be sqrt(epsfcn)*x
        If epsfcn is less than the machine precision, it is assumed that the
        relative errors are of the order of the machine precision.
    factor : float, optional
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
    diag : sequence, optional
        N positive entries that serve as a scale factors for the variables.
    method : "ls" or "mle"
        What type of estimator to use. Maximum likelihood ("mle") assumes that the noise
        in the measurement is poisson distributed while least squares ("ls") assumes
        normally distributed noise.

    Returns
    -------
    popt : ndarray
        Best-fit parameters.
    cov_x : None
        Placeholder for the covariance estimate (not computed here).
    infodict, errmsg, info
        Additionally returned when ``full_output`` is truthy.
    """
    info = 0
    x0 = np.asarray(x0).flatten()
    n = len(x0)
    if not isinstance(args, tuple):
        args = (args,)
    # NOTE(review): `args` is normalized above but never forwarded to
    # `func` or `Dfun` below — confirm whether extra-args support was
    # intended to be wired through.
    # shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
    # m = shape[0]
    # if n > m:
    #     raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
    if Dfun is None:
        # numerical differentiation is not implemented; a Jacobian is required
        raise NotImplementedError
        # NOTE(review): dead code — unreachable after the raise, and `dtype`
        # is undefined (it came from the commented-out _check_func above)
        if epsfcn is None:
            epsfcn = np.finfo(dtype).eps
    else:
        if col_deriv:
            pass
            # _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
        else:
            raise NotImplementedError("Column derivatives required")
    if maxfev is None:
        maxfev = 100 * (n + 1)
    # this is stolen from scipy.leastsq so it isn't fully implemented
    # maps the final `info` code to a message and (optionally) an exception
    errors = {
        0: ["Improper input parameters.", TypeError],
        1: [
            "Both actual and predicted relative reductions "
            "in the sum of squares are at most {}".format(ftol),
            None,
        ],
        2: [
            "The relative error between two consecutive " "iterates is at most {}".format(xtol),
            None,
        ],
        3: [
            "Both actual and predicted relative reductions in "
            "the sum of squares\n are at most %f and the "
            "relative error between two consecutive "
            "iterates is at \n most %f" % (ftol, xtol),
            None,
        ],
        4: [
            "The cosine of the angle between func(x) and any "
            "column of the\n Jacobian is at most %f in "
            "absolute value" % gtol,
            None,
        ],
        5: ["Number of calls to function has reached " "maxfev = %d." % maxfev, ValueError],
        6: [
            "ftol=%f is too small, no further reduction "
            "in the sum of squares\n is possible."
            "" % ftol,
            ValueError,
        ],
        7: [
            "xtol=%f is too small, no further improvement in "
            "the approximate\n solution is possible." % xtol,
            ValueError,
        ],
        8: [
            "gtol=%f is too small, func(x) is orthogonal to the "
            "columns of\n the Jacobian to machine "
            "precision." % gtol,
            ValueError,
        ],
        "unknown": ["Unknown error.", TypeError],
    }
    # NOTE(review): redundant — maxfev was already defaulted above
    if maxfev is None:
        maxfev = 100 * (len(x0) + 1)

    def gtest(g):
        """test if the gradient has converged"""
        if gtol:
            return np.abs(g).max() <= gtol
        else:
            return False

    def xtest(dx, x):
        """see if the parameters have converged"""
        return la.norm(dx) <= xtol * (la.norm(x) + xtol)

    # set up update and chi2 for use
    if method == "ls":

        def update(x0, f):
            return _update_ls(x0, f, Dfun)

        def chi2(f):
            return _chi2_ls(f)

    elif method == "mle":

        def update(x0, f):
            return _update_mle(x0, f, Dfun)

        def chi2(f):
            return _chi2_mle(f)

    else:
        raise TypeError("Method {} not recognized".format(method))
    # get initial function, jacobian, hessian and gradient
    f = func(x0)
    # j is Jacobian, a is J.T @ J, g is J.T @ f
    j, a, g = update(x0, f)
    # initialize D.T @ D array (diagonal scaling matrix)
    dtd = np.diag(np.diag(a))
    # initialize chi2
    chisq_old = chi2(f)
    # lambda: initial damping parameter from the scaled parameter norm
    lambda_ = np.sqrt(x0.T @ dtd @ x0)
    if lambda_ <= 0 or ~np.isfinite(lambda_):
        # NOTE(review): `~` is bitwise not; on a numpy bool scalar it acts
        # like logical not, which appears to be the intent here
        lambda_ = np.array(100.0)
    # make our scaling factor
    # mu = factor * np.diagonal(a).max()
    x = x0
    for ev in range(maxfev):
        logger.debug("Iteration #{}".format(ev))
        if gtest(g):
            # gradient converged
            info = 4
            break
        # calculate proposed step
        # equivalent to $\lambda D^TD$ except no scaling on D
        # which is why it's a diagnonal matrix ...
        # lambda_ = make_lambda(j, d0)
        logger.debug("lambda_ = {}".format(lambda_))
        logger.debug("x = {}".format(x))
        logger.debug("delta = {}".format(np.sqrt(x.T @ dtd @ x)))
        # lambda_ = np.ones_like(g)
        # augmented (damped) normal equations
        aug_a = a + lambda_ * dtd
        try:
            # https://software.intel.com/en-us/mkl-developer-reference-fortran-matrix-inversion-lapack-computational-routines
            # dx = -la.inv(aug_a) @ g
            # dx is called p_k in Jorge J Moré's paper
            dx = la.solve(aug_a, -g)
        except la.LinAlgError:
            # singular system: increase damping and retry
            lambda_ *= factor
            continue
        if xtest(dx, x0):
            # step is negligibly small: parameters converged
            info = 2
            break
        # make test move, I think I should be saving previous
        # position so that I can "undo" if this is bad
        x = x0 + dx
        f = func(x)
        # jtj = a
        # v = -g
        # temp1 = 0.5 * (g.T @ a @ g) / chisq_old
        # temp2 = 0.5 * lambda_ * (g.T @ dtd @ g) / chisq_old
        # pred_red = temp1 + 2.0 * temp2
        # dirder = -1.0 * (temp1 + temp2)
        chisq_new = chi2(f)
        # predicted chi2 from the linearized model at the proposed step
        if method == "mle":
            chisq_predicted = chi2((f[0] + j @ dx, f[1]))
        else:
            chisq_predicted = chi2(f + j @ dx)
        actual_reduction = chisq_old - chisq_new
        predicted_reduction = chisq_old - chisq_predicted
        # see if we reduced chisq relative to what we predicted the reduction should be
        rho = actual_reduction / predicted_reduction
        if actual_reduction < 0:
            logger.debug("Reduction negative setting to rho to 0")
            rho = 0
        elif predicted_reduction < 0:
            logger.debug("Predicted negative setting to rho to 1")
            rho = 0
        elif not np.isfinite(rho):
            logger.debug("rho = {} setting to 0".format(rho))
            rho = 0
        else:
            logger.debug("rho = {}".format(rho))
        if rho > 1e-2:
            # step accepted
            # ftest
            if actual_reduction <= ftol * chisq_old:
                info = 1
                break
            # update params, chisq and a and g
            x0, chisq_old = x, chisq_new
            j, a, g = update(x0, f)
            # keep the scaling matrix monotonically non-decreasing
            dtd = np.fmax(dtd, np.diag(np.diag(a)))
            lambda_ = max(lambda_ / 5, 1e-7)
        else:
            # step rejected: increase damping
            lambda_ = min(lambda_ * 1.5, 1e7)
    else:
        # loop exited normally
        info = 5
    if method == "mle":
        # remember we return the data with f?
        f = f[0]
    logger.debug("Ended with {} function evaluations".format(ev + 1))
    infodict = dict(fvec=f, fjac=j, nfev=ev)
    if info not in [1, 2, 3, 4] and not full_output:
        if info in [5, 6, 7, 8]:
            warnings.warn(errors[info][0], RuntimeWarning)
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors["unknown"][1](errors["unknown"][0])
    errmsg = errors[info][0]
    logger.debug(errmsg)
    # NOTE(review): the covariance is not computed here; callers get None
    popt, cov_x = x, None
    if full_output:
        return popt, cov_x, infodict, errmsg, info
    else:
        return popt, cov_x
def curve_fit(
    f,
    xdata,
    ydata,
    p0=None,
    sigma=None,
    absolute_sigma=False,
    check_finite=True,
    bounds=(-np.inf, np.inf),
    method=None,
    jac=None,
    **kwargs
):
    """
    Use non-linear least squares to fit a function, f, to data.

    Drop-in replacement for `scipy.optimize.curve_fit` that additionally
    supports ``method="ls"`` / ``method="mle"`` handled by the local `lm`
    implementation.  For ``method="mle"``, assumes
    ``ydata = poisson(f(xdata, *params))``.

    Parameters
    ----------
    f : callable
        The model function, f(x, ...).  It must take the independent
        variable as the first argument and the parameters to fit as
        separate remaining arguments.
    xdata : An M-length sequence or an (k,M)-shaped array for functions with k predictors
        The independent variable where the data is measured.
    ydata : M-length sequence
        The dependent data --- nominally f(xdata, ...)
    p0 : None, scalar, or N-length sequence, optional
        Initial guess for the parameters.  If None, then the initial
        values will all be 1 (if the number of parameters for the function
        can be determined using introspection, otherwise a ValueError
        is raised).
    sigma : None or M-length sequence or MxM array, optional
        Determines the uncertainty in `ydata` (see scipy docs); not yet
        implemented for the "ls"/"mle" methods.
    absolute_sigma : bool, optional
        See `scipy.optimize.curve_fit`.
    check_finite : bool, optional
        If True, check that the input arrays do not contain nans or infs,
        and raise a ValueError if they do.
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on parameters; only supported when
        dispatching to scipy ("lm"/"trf"/"dogbox").
    method : {'lm', 'trf', 'dogbox', 'ls', 'mle'}, optional
        'lm'/'trf'/'dogbox' dispatch to `scipy.optimize.curve_fit`.
        "ls"/"mle" use the local `lm`: maximum likelihood ("mle") assumes
        poisson-distributed noise, least squares ("ls") assumes normally
        distributed noise.
    jac : callable, string or None, optional
        Function with signature ``jac(x, ...)`` computing the Jacobian of
        the model function; required for "ls"/"mle".
    kwargs
        Keyword arguments passed to `scipy.optimize.curve_fit` or `lm`.
    """
    # fix kwargs: pull full_output out so scipy doesn't choke on it
    return_full = kwargs.pop("full_output", False)
    can_full_output = method not in {"trf", "dogbox"} and np.array_equal(bounds, (-np.inf, np.inf))
    if method in {"lm", "trf", "dogbox", None}:
        # plain scipy path
        if can_full_output:
            kwargs["full_output"] = return_full
        res = scipy.optimize.curve_fit(
            f, xdata, ydata, p0, sigma, absolute_sigma, check_finite, bounds, method, jac, **kwargs
        )
        # user has requested that full_output be returned, but the method
        # isn't capable, fill in the blanks.
        if return_full and not can_full_output:
            return res[0], res[1], None, "No error", 1
        else:
            return res
    elif method == "ls":
        _wrap_func = _wrap_func_ls
        _wrap_jac = _wrap_jac_ls
    elif method == "mle":
        _wrap_func = _wrap_func_mle
        _wrap_jac = _wrap_jac_mle
    else:
        raise TypeError("Method {} not recognized".format(method))
    # NOTE(review): `!=` on a tuple works for the default, but will raise
    # or misbehave if `bounds` is passed as ndarrays — confirm callers
    if bounds != (-np.inf, np.inf):
        raise NotImplementedError("Bounds has not been implemented")
    if sigma is not None:
        raise NotImplementedError("Weighting has not been implemented")
    else:
        transform = None
    if jac is None:
        raise NotImplementedError("You need a Jacobian")
    # initialize p0 with standard LM (scipy) before refining with local lm
    res = scipy.optimize.curve_fit(
        f, xdata, ydata, p0, sigma, absolute_sigma, check_finite, bounds, None, jac, **kwargs
    )
    # grab p0
    logger.debug("Initialized p0")
    p0 = res[0]
    # NaNs can not be handled
    if check_finite:
        ydata = np.asarray_chkfinite(ydata)
    else:
        ydata = np.asarray(ydata)
    if isinstance(xdata, (list, tuple, np.ndarray)):
        # `xdata` is passed straight to the user-defined `f`, so allow
        # non-array_like `xdata`.
        if check_finite:
            xdata = np.asarray_chkfinite(xdata)
        else:
            xdata = np.asarray(xdata)
    func = _wrap_func(f, xdata, ydata, transform)
    if callable(jac):
        jac = _wrap_jac(jac, xdata, transform)
    res = lm(func, p0, Dfun=jac, full_output=1, method=method, **kwargs)
    popt, pcov, infodict, errmsg, info = res
    # NOTE(review): `cost` is computed but never used below
    cost = np.sum(infodict["fvec"] ** 2)
    # Do Moore-Penrose inverse discarding zero singular values.
    _, s, VT = la.svd(infodict["fjac"], full_matrices=False)
    threshold = np.finfo(float).eps * max(infodict["fjac"].shape) * s[0]
    s = s[s > threshold]
    VT = VT[: s.size]
    pcov = np.dot(VT.T / s ** 2, VT)
    if info not in [1, 2, 3, 4]:
        raise RuntimeError("Optimal parameters not found: " + errmsg)
    if return_full:
        return popt, pcov, infodict, errmsg, info
    else:
        return popt, pcov
|
david-hoffman/dphutils
|
lm.py
|
Python
|
apache-2.0
| 23,083
|
[
"Gaussian"
] |
14fd014102f56719ca5686adf83028a35542c1690d97d5edc961cfa75c30307a
|
#!/usr/bin/env python
# NetView P v.0.7.1 - Windows
# Dependencies: PLINK
# Eike Steinig
# Zenger Lab, JCU
# https://github.com/esteinig/netview
import os
import time
import json
import shutil
import argparse
import subprocess
import numpy as np
import multiprocessing as mp
import scipy.sparse.csgraph as csg
import scipy.spatial.distance as sd
from sklearn.neighbors import NearestNeighbors
def main():
    """Command-line entry point for NetView P: parse options, load data and
    attributes, optionally run QC / distance-matrix export, and build the
    mutual k-NN networks."""
    commands = CommandLine()
    dat = Data()
    # transfer the parsed user options onto the data object
    dat.project = commands.arg_dict['project']
    dat.prefix = commands.arg_dict['prefix']
    dat.ploidy = commands.arg_dict['ploidy']
    dat.missing = commands.arg_dict['missing']
    if commands.arg_dict['visual']:
        # attribute-only mode: write node attribute files and stop
        print('\nGenerated node attribute files only.\n')
        dat.readData(commands.arg_dict['attribute_file'], f='attributes', sep=',')
        dat.writeData(f='attributes')
        makeProject(commands.arg_dict['project'] + '_attributes', commands.arg_dict['prefix'])
        # NOTE(review): exit(1) is used here for a normal early exit — confirm
        # a non-zero exit status is intended
        exit(1)
    print()
    print(get_time() + "\t" + "---------------------------------")
    print(get_time() + "\t" + " NETVIEW P v.0.7.1 ")
    print(get_time() + "\t" + "---------------------------------")
    print(get_time() + "\t" + "File =", commands.arg_dict['data_file'].upper())
    # select the input parser from the mutually exclusive format flags;
    # a distance matrix is the fallback format
    if commands.arg_dict['plink']:
        dat.filetype = 'plink'
        dat.readData(commands.arg_dict['data_file'], f='plink', sep=commands.arg_dict['sep'])
    elif commands.arg_dict['snps']:
        dat.filetype = 'snps'
        dat.readData(commands.arg_dict['data_file'], f='snp_mat', sep=commands.arg_dict['sep'])
    elif commands.arg_dict['nexus']:
        dat.filetype = 'nexus'
        dat.readData(commands.arg_dict['data_file'], f='nexus', sep=commands.arg_dict['sep'])
    elif commands.arg_dict['raxml']:
        dat.filetype = 'raxml'
        dat.readData(commands.arg_dict['data_file'], f='raxml', sep=commands.arg_dict['sep'])
    else:
        dat.filetype = 'dist'
        dat.readData(commands.arg_dict['data_file'], f='matrix', sep=commands.arg_dict['sep'])
    dat.readData(commands.arg_dict['attribute_file'], f='attributes', sep=',')
    # every attribute column must have one entry per sample
    for stored in dat.meta_data.values():
        if len(stored) != dat.n:
            print('\nError. N in Data != N in Attribute File.')
            exit(1)
    # for diploids the stored SNP count covers both alleles
    if dat.ploidy == 'diploid':
        nsnp = dat.nSNP//2
    else:
        nsnp = dat.nSNP
    print(get_time() + "\t" + "N =", str(dat.n).upper())
    print(get_time() + "\t" + "SNPs =", str(nsnp).upper())
    print(get_time() + "\t" + "Ploidy =", dat.ploidy.upper())
    print(get_time() + "\t" + "---------------------------------")
    print(get_time() + "\t" + "Quality Control =", str(commands.arg_dict['qc']).upper())
    pipeline = Analysis(dat)
    # optional PLINK quality control (not applicable to distance input)
    if commands.arg_dict['qc'] and pipeline.data.filetype != 'dist':
        qc_params = {'--mind': commands.arg_dict['mind'],
                     '--geno': commands.arg_dict['geno'],
                     '--maf': commands.arg_dict['maf'],
                     '--hwe': commands.arg_dict['hwe']}
        pipeline.runPLINK(qc_parameters=qc_params, quality=True)
        pipeline.updateNodeAttributes(commands.arg_dict['attribute_file'])
    # matrix-only mode: export the distance matrix and stop
    if commands.arg_dict['mat'] and pipeline.data.filetype != 'dist':
        pipeline.getDistance(distance=commands.arg_dict['distance'])
        pipeline.data.writeData(file=commands.arg_dict['prefix'] + '_mat.dist', f='matrix')
        makeProject(commands.arg_dict['project'] + '_dist', commands.arg_dict['prefix'])
        print(get_time() + "\t" + "---------------------------------\n")
        exit(1)
    elif commands.arg_dict['mat'] and pipeline.data.filetype == 'dist':
        print('\nError. Input is already a Distance Matrix.\n')
        exit(1)
    # full run: compute distances if needed and construct the networks
    if not commands.arg_dict['off']:
        if pipeline.data.filetype != 'dist':
            pipeline.getDistance(distance=commands.arg_dict['distance'])
        pipeline.runNetView(tree=commands.arg_dict['tree'], start=commands.arg_dict['start'],
                            stop=commands.arg_dict['stop'], step=commands.arg_dict['step'],
                            algorithm=commands.arg_dict['algorithm'], edges=commands.arg_dict['edges'],
                            html=commands.arg_dict['web'])
        pipeline.data.writeData(f='attributes')
    # NOTE(review): indentation reconstructed from a whitespace-mangled dump;
    # the final project assembly is assumed to run regardless of '--off'
    makeProject(commands.arg_dict['project'], commands.arg_dict['prefix'])
    print(get_time() + "\t" + "---------------------------------\n")
def makeProject(project, prefix):
    """Create the output directory tree for *project* and sort result files
    from the current working directory into it.

    Files in the cwd are dispatched by name:
      <prefix>_plink_in*   -> deleted (PLINK input intermediates)
      *.edges              -> <project>/networks
      *.dist, *_qc.csv     -> <project>/other
      *.nat                -> <project>/nodes
      <prefix>_plink*      -> <project>/plink
      *.json, *.html       -> <project>/d3

    Any existing project directory of the same name is removed first.
    """
    cwd = os.getcwd()
    project_path = os.path.realpath(os.path.join(os.getcwd(), project))
    plink_path = os.path.realpath(os.path.join(project_path, 'plink'))
    network_path = os.path.realpath(os.path.join(project_path, 'networks'))
    other_path = os.path.realpath(os.path.join(project_path, 'other'))
    node_path = os.path.realpath(os.path.join(project_path, 'nodes'))
    d3_path = os.path.realpath(os.path.join(project_path, 'd3'))
    if os.path.exists(project_path):
        shutil.rmtree(project_path)
    architecture = [project_path, plink_path, network_path, other_path, node_path, d3_path]
    for directory in architecture:
        try:
            os.makedirs(directory)
        except OSError:
            # directory already exists: fine; anything else: re-raise
            if not os.path.isdir(directory):
                raise
    for name in os.listdir(cwd):
        pathname = os.path.join(cwd, name)
        if os.path.isfile(pathname):
            # BUGFIX: the '_plink_in' intermediates must be tested BEFORE the
            # broader startswith(prefix + '_plink') branch -- in the original
            # elif order that branch shadowed this one, so the intermediates
            # were moved into <project>/plink instead of being deleted.
            if name.startswith(prefix + '_plink_in'):
                os.remove(pathname)
            elif name.endswith('.edges'):
                shutil.move(pathname, network_path)
            elif name.endswith('.dist'):
                shutil.move(pathname, other_path)
            elif name.endswith('.nat'):
                shutil.move(pathname, node_path)
            elif name.startswith(prefix + '_plink'):
                shutil.move(pathname, plink_path)
            elif name.endswith('_qc.csv'):
                shutil.move(pathname, other_path)
            elif name.endswith('.json') or name.endswith('.html'):
                shutil.move(pathname, d3_path)
#### Functions for Multiprocessing ####
def netview(matrix, k, mst, algorithm, tree):
    """Build a mutual k-nearest-neighbour graph from a distance matrix.

    matrix    : square pairwise distance matrix
    k         : number of neighbours (the model is fitted with k+1 to
                account for each point being its own nearest neighbour)
    mst       : symmetrised minimum-spanning-tree adjacency (used when
                tree is True, otherwise ignored / may be None)
    algorithm : neighbour-search algorithm passed to NearestNeighbors
    tree      : whether to overlay the MST on the mutual-kNN graph

    Returns [k, edge indices, edge weights, lower-triangular 0/1
    adjacency, mst edge index array].
    """
    knn_model = NearestNeighbors(n_neighbors=k + 1, algorithm=algorithm).fit(matrix)
    knn_adj = knn_model.kneighbors_graph(matrix).toarray()
    np.fill_diagonal(knn_adj, 0)
    # keep only the mutual links: i is a neighbour of j AND j of i
    mutual_adj = knn_adj * (knn_adj == knn_adj.T)
    combined = mst + mutual_adj if tree else mutual_adj
    lower = np.tril(combined)
    # NOTE(review): argwhere(lower < 1) also matches all-zero (non-edge)
    # entries; downstream code only tests actual edges for membership,
    # so this appears tolerated -- confirm intended.
    mst_edges = np.argwhere(lower < 1) if tree else np.array([])
    lower[lower > 0] = 1.
    edge_idx = np.argwhere(lower != 0)
    edge_weights = matrix[edge_idx[:, 0], edge_idx[:, 1]]
    return [k, edge_idx, edge_weights, lower, mst_edges]
def netview_callback(k):
    """Pool callback: report completion of one netview() run (k[0] is k)."""
    print("{}\t k={}".format(get_time(), k[0]))
def get_time():
    """Return the current local wall-clock time formatted as "[HH:MM:SS]"."""
    return "[" + time.strftime("%H:%M:%S") + "]"
#### Command Line Module ####
class CommandLine:
    """Parse and hold the NetView P command-line arguments.

    Parsed values are available both as the argparse Namespace
    (``self.args``) and as a plain dict (``self.arg_dict``).
    """
    def __init__(self):
        self.parser = argparse.ArgumentParser(description='NetView P v0.7.1', add_help=True)
        self.setParser()
        # parse_args() reads sys.argv and exits with usage on error
        self.args = self.parser.parse_args()
        self.arg_dict = vars(self.args)
    def setParser(self):
        """Register all command-line options on self.parser."""
        # exactly one of the input-format flags (-p/-s/-m/-n/-r) must be given
        data_type = self.parser.add_mutually_exclusive_group(required=True)
        # Required Options
        self.parser.add_argument('-f', dest='data_file', required=True, type=str,
                                 help="Name of Data File")
        data_type.add_argument('-p', dest='plink', action='store_true',
                               help="PLINK format (.ped/.map)")
        data_type.add_argument('-s', dest='snps', action='store_true',
                               help="SNP matrix (N x SNPs)")
        data_type.add_argument('-m', dest='dist', action='store_true',
                               help="Distance matrix (N x N)")
        data_type.add_argument('-n', dest='nexus', action='store_true',
                               help="Nexus format from SPANDx")
        data_type.add_argument('-r', dest='raxml', action='store_true',
                               help="RAxML format from SPANDx")
        self.parser.add_argument('-a', dest='attribute_file', default='', type=str, required=True,
                                 help="Node attribute file (.csv)")
        # MAIN Options
        self.parser.add_argument('--quality', dest='qc', action='store_true', default=False,
                                 help="Quality control in PLINK (OFF)")
        self.parser.add_argument('--distance', dest='distance', default='asd', type=str,
                                 help="Distance measure for SNPs: hamming, asd, correlation... (asd)")
        self.parser.add_argument('--algorithm', dest='algorithm', default='brute', type=str,
                                 help="Algorithm for NN: auto, ball_tree, kd_tree, brute (brute)")
        self.parser.add_argument('--mst-off', dest='tree', action='store_false', default=True,
                                 help="Disable minimum spanning tree (OFF)")
        # NOTE(review): help text "(diploid." is missing its closing parenthesis.
        self.parser.add_argument('--ploidy', dest='ploidy', default='diploid', type=str,
                                 help="Set ploidy: haploid, diploid (diploid.")
        self.parser.add_argument('--missing', dest='missing', default='0', type=str,
                                 help="Set missing character (0)")
        self.parser.add_argument('--prefix', dest='prefix', default='project', type=str,
                                 help="Set prefix (project)")
        self.parser.add_argument('--project', dest='project', default=time.strftime("%d-%m-%Y_%H-%M-%S"), type=str,
                                 help="Output project name (timestamp)")
        self.parser.add_argument('--sep', dest='sep', default='\t', type=str,
                                 help="Delimiter for data file (\\t).")
        # NOTE(review): action='store_true' combined with default=True makes
        # this flag a no-op -- HTML output is always on; confirm intended.
        self.parser.add_argument('--html', dest='web', action='store_true', default=True,
                                 help="Generate D3/JSON graphs (ON)")
        # NOTE(review): passing --edges turns edge-file output OFF
        # (store_false with default=True) despite the "(ON)" help text.
        self.parser.add_argument('--edges', dest='edges', action='store_false', default=True,
                                 help="Generate graphs as edge files (ON)")
        # PARAMETER Options
        self.parser.add_argument('--mind', dest='mind', default=0.1, type=float,
                                 help="Filter samples > missing rate (0.1)")
        self.parser.add_argument('--geno', dest='geno', default=0.1, type=float,
                                 help="Filter SNPs > missing rate (0.1)")
        self.parser.add_argument('--maf', dest='maf', default=0.01, type=float,
                                 help="Filter SNPs < minor allele frequency (0.01)")
        self.parser.add_argument('--hwe', dest='hwe', default=0.001, type=float,
                                 help="Filter SNPs failing HWE test at P < (0.001)")
        self.parser.add_argument('--start', dest='start', default=10, type=int,
                                 help="Start at k = (10)")
        self.parser.add_argument('--stop', dest='stop', default=40, type=int,
                                 help="Stop at k = (40)")
        self.parser.add_argument('--step', dest='step', default=10, type=int,
                                 help="Step by k = (10)")
        # PIPELINE Options
        self.parser.add_argument('--visual', dest='visual', action='store_true', default=False,
                                 help="Node attributes ONLY (OFF)")
        self.parser.add_argument('--off', dest='off', action='store_true', default=False,
                                 help="Switch off NetView and run only QC (OFF).")
        self.parser.add_argument('--matrix', dest='mat', action='store_true', default=False,
                                 help="Generate distance matrix ONLY (OFF).")
#### Data Module ####
class Data:
    """In-memory container for NetView P data and results.

    Holds genotypes (N x SNPs), sample metadata, distance matrices and
    computed networks, and provides readers/writers for the supported
    formats (nexus, raxml, PLINK ped/map, plain matrices, attribute CSVs,
    plus JSON / edge-list graph output).
    """
    ### DATA ATTRIBUTES ###
    def __init__(self):
        self.project = "project"        # output project name
        self.prefix = "prefix"          # file-name prefix for outputs
        self.ploidy = 'diploid'         # 'haploid' or 'diploid'
        self.missing = "0"              # character marking a missing allele
        self.n = 0                      # number of individuals
        self.nSNP = 0                   # number of SNP columns (2 per locus when diploid)
        self.ids = []  # IDs
        self.alleles = []               # allele symbols seen (missing excluded)
        self.snps = np.array([])  # Array (N x SNPs)
        self.biodata = []  # List/Alignment of BioPython SeqRecords
        self.meta_data = {}             # sample attributes, keyed by column name
        self.snp_data = {}              # per-SNP annotations (id, position, ...)
        self.matrices = {}              # distance matrices, keyed by measure
        self.networks = {}              # netview() results, keyed by run name
        self.matrix = np.array([])  # Current Matrix
        self.netview_runs = 0           # counter of runNetView() invocations
        self.filetype = ''              # format the data was read from
    ### DATA READER ###
    def readData(self, file, f, sep="\t", header=False, add_col=0):
        """Read *file* in format *f* into this container.

        file    : path to the input file
        f       : format keyword (case-insensitive): 'nexus', 'raxml',
                  'plink', 'matrix', 'snp_mat' or 'attributes'
        sep     : field separator
        header  : for 'matrix' input only, skip a header row
        add_col : for 'matrix' input only, skip this many leading columns

        Returns the parsed matrix when f == 'matrix', otherwise None.
        Raises IOError if the format keyword is not recognised.
        """
        def _read_nexus(file, sep=sep):
            """Parse a nexus file (SPANDx layout): dimensions, taxlabels,
            then a per-SNP matrix section terminated by ';'."""
            snp_position = []
            snps = []
            matrix = False
            for line in file:
                content = line.strip().split(sep)
                if matrix:
                    if ";" in line:
                        break
                    snp_position.append(content[0])
                    snps.append(content[1:])
                else:
                    if "dimensions" in line:
                        self.n = int(content[1].split("=")[1])
                        # [:-1] drops the trailing ';' of the nchar field
                        self.nSNP = int(content[2].split("=")[1][:-1])
                    elif "taxlabels" in line:
                        self.ids = content[1:]
                    elif "matrix" in line:
                        matrix = True
            self.snps = np.array([list(i) for i in zip(*snps)]) # ordered by N
            self.snp_data['snp_id'] = [''.join(p.split("_")[:-1]) for p in snp_position]
            self.snp_data['snp_position'] = [p.split("_")[-1] for p in snp_position]
            self.filetype = 'nexus'
        def _read_raxml(file, sep=sep):
            """Parse a RAxML/SPANDx file: header '<n> <nSNP>', then
            '<id> <snp-string>' rows."""
            header = []
            ids = []
            snps = []
            for line in file:
                content = line.strip().split(sep)
                if header:
                    ids.append(content[0])
                    snps.append(content[1])
                else:
                    header = content
            self.n = int(header[0])
            self.nSNP = int(header[1])
            snps = [[letter for letter in snp]for snp in snps]
            self.ids = ids
            self.snps = np.array(snps)
            self.filetype = 'raxml'
        def _read_plink(file, filename, sep=sep):
            """Parse a PLINK .ped file and its companion .map file."""
            # NOTE(review): deriving the .map name via split(".")[0] breaks
            # for paths that contain additional dots -- confirm inputs.
            map_name = filename.split(".")[0] + ".map"
            map_file = open(map_name)
            ids = []
            meta = []
            snps = []
            for line in file:
                content = line.strip().split(sep)
                ids.append(content[1])
                snps.append(content[6:])
                meta.append(content[:6])
            self.ids = ids
            self.snps = np.array(snps)
            self.nSNP = len(self.snps[0])
            self.n = len(self.ids)
            # .ped leading columns: family/pop, id, dam, sire, sex, phenotype
            self.meta_data["pop"] = [i[0] for i in meta]
            self.meta_data["dam"] = [i[2] for i in meta]
            self.meta_data["sire"] = [i[3] for i in meta]
            self.meta_data["sex"] = [i[4] for i in meta]
            self.meta_data["phenotype"] = [i[5] for i in meta]
            map_content = [line.strip().split() for line in map_file]
            map_content = list(zip(*map_content))
            self.snp_data['snp_chromosome'] = list(map_content[0])
            self.snp_data['snp_id'] = list(map_content[1])
            self.snp_data['snp_genetic_distance'] = list(map_content[2])
            self.snp_data['snp_position'] = list(map_content[3])
            map_file.close()
            self.filetype = 'plink'
        def _read_matrix(file, header=header, add_col=add_col, sep=sep):
            """Parse a plain (distance) matrix; returns the numpy array."""
            content = [line.strip().split(sep)[add_col:] for line in file]
            if header:
                content = content[1:]
            matrix = np.array([list(map(float, ind)) for ind in content])
            self.matrix = matrix
            # n taken from the number of columns of the (square) matrix
            self.n = len(matrix[0])
            self.matrices['input'] = matrix
            return matrix
        def _read_snp_mat(file, sep):
            """Parse a raw N x SNPs genotype matrix."""
            matrix = np.array([line.strip().split(sep) for line in file])
            self.snps = matrix
            self.n = len(matrix[:, 1])
            self.nSNP = len(matrix[1, :])
            if self.ploidy == 'diploid':
                # two columns per locus -> one synthetic id per locus
                self.snp_data['snp_id'] = [str(i) for i in range(self.nSNP//2)]
            else:
                self.snp_data['snp_id'] = [str(i) for i in range(self.nSNP)]
        def _read_attributes(file):
            """Parse a comma-separated attribute table; the first column
            supplies the sample IDs."""
            content = [line.strip().split(',') for line in file]
            head = content[0]
            content = list(zip(*content[1:]))
            for i in range(len(head)):
                self.meta_data[head[i]] = content[i]
            self.ids = list(content[0])
        ## Main Read ##
        infile = open(file)
        f = f.lower()
        if f == "nexus":
            _read_nexus(infile, sep)
        elif f =="raxml":
            _read_raxml(infile, sep)
        elif f == "plink":
            _read_plink(infile, file, sep)
        elif f == "matrix":
            matrix = _read_matrix(infile, header, add_col, sep)
        elif f == 'snp_mat':
            _read_snp_mat(infile, sep)
        elif f == 'attributes':
            _read_attributes(infile)
        else:
            print("File format not supported.")
            raise IOError
        infile.close()
        if f != 'attributes':
            # record the allele symbols present, excluding the missing character
            alleles = np.unique(self.snps).tolist()
            if self.missing in alleles:
                alleles.remove(self.missing)
            self.alleles = alleles
        if f == 'matrix':
            return matrix
    ### DATA WRITER ###
    def writeData(self, f, file='data.out', sep="\t"):
        """Write held data to disk in format *f*.

        f: 'nexus', 'raxml', 'plink', 'matrix', 'meta', 'snp', 'json',
           'edges' or 'attributes'.  'attributes' writes one .nat file per
           metadata key and ignores *file*; 'json' and 'edges' write one
           file per stored network.  Raises IOError for unknown formats.
        """
        def _write_raxml(outfile, sep):
            """Write ids plus concatenated SNP strings, RAxML style."""
            outfile.write(str(self.n) + sep + str(self.nSNP) + "\n")
            for i in range(self.n):
                outfile.write(self.ids[i] + sep + ''.join(self.snps[i]) + "\n")
        def _write_nexus(outfile, sep):
            """Write a nexus data block (SNPs transposed to per-locus rows)."""
            taxlabels = " ".join(self.ids)
            header = '#nexus\nbegin data;\ndimensions ntax=' + str(self.n) + ' nchar=' + str(self.nSNP) + \
                     ';\nformat symbols="AGCT" gap=. datatype=nucleotide;\ntaxlabels ' + taxlabels + ';\nmatrix\n'
            tail = ";\nend;"
            snps = list(zip(*self.snps))
            outfile.write(header)
            for i in range(self.nSNP):
                if 'snp_chromosome' in self.snp_data.keys():
                    outfile.write(self.snp_data['snp_chromosome'][i] + "_")
                else:
                    outfile.write(sep)
                if 'snp_id' in self.snp_data.keys():
                    outfile.write(self.snp_data['snp_id'][i] + sep)
                else:
                    outfile.write("SNP" + str(i) + sep)
                outfile.write(sep.join(snps[i]) + "\n")
            outfile.write(tail)
        def _write_plink(outfile, filename, sep):
            """Write a .ped file (metadata falling back to defaults) and its
            companion .map file."""
            mapname = filename.split('.')[0] + ".map"
            for i in range(self.n):
                if 'pop' in self.meta_data.keys():
                    outfile.write(self.meta_data['pop'][i] + sep)
                else:
                    outfile.write("NA" + sep)
                if self.ids:
                    outfile.write(self.ids[i] + sep)
                else:
                    outfile.write("N" + str(i+1) + sep)
                if 'dam' in self.meta_data.keys():
                    outfile.write(self.meta_data['dam'][i] + sep)
                else:
                    outfile.write("0" + sep)
                if 'sire' in self.meta_data.keys():
                    outfile.write(self.meta_data['sire'][i] + sep)
                else:
                    outfile.write("0" + sep)
                if 'sex' in self.meta_data.keys():
                    outfile.write(self.meta_data['sex'][i] + sep)
                else:
                    outfile.write("0" + sep)
                if 'phenotype' in self.meta_data.keys():
                    outfile.write(self.meta_data['phenotype'][i] + sep)
                else:
                    outfile.write("0" + sep)
                outfile.write(sep.join(self.snps[i]) + "\n")
            map_file = open(mapname, "w")
            if 'snp_id' in self.snp_data:
                for i in range(len(self.snp_data['snp_id'])):
                    if 'snp_chromosome' in self.snp_data.keys():
                        map_file.write(self.snp_data['snp_chromosome'][i] + sep)
                    else:
                        map_file.write("0" + sep)
                    if 'snp_id' in self.snp_data.keys():
                        map_file.write(self.snp_data['snp_id'][i] + sep)
                    else:
                        map_file.write("SNP" + str(i+1) + sep)
                    if 'snp_genetic_distance' in self.snp_data.keys():
                        map_file.write(self.snp_data['snp_genetic_distance'][i] + sep)
                    else:
                        map_file.write("0" + sep)
                    if 'snp_position' in self.snp_data.keys():
                        map_file.write(self.snp_data['snp_position'][i] + sep + "\n")
                    else:
                        map_file.write("0" + sep + "\n")
            map_file.close()
        def _write_metadata(outfile, sep):
            """Write all sample metadata as a delimited table."""
            outfile.write("#" + sep + "n=" + str(self.n) + sep + "nSNP=" +
                          str(self.nSNP) + sep + "(" + self.ploidy + ")\n")
            ordered_keys = sorted([key for key in self.meta_data.keys()])
            outfile.write("Isolate")
            for key in ordered_keys:
                outfile.write(sep + key)
            outfile.write("\n")
            for i in range(self.n):
                if self.ids:
                    outfile.write(self.ids[i])
                else:
                    # NOTE(review): writes the literal "N1" for every row;
                    # presumably str(i+1) was intended.
                    outfile.write("N" + str(1))
                for key in ordered_keys:
                    outfile.write(sep + self.meta_data[key][i])
                outfile.write("\n")
        def _write_snpdata(outfile, sep):
            """Write all per-SNP annotations as a delimited table."""
            outfile.write("#" + sep + "n=" + str(self.n) + sep + "nSNP=" +
                          str(self.nSNP) + sep + "(" + self.ploidy + ")\n")
            snp_data = dict(self.snp_data)
            ordered_keys = sorted([key for key in snp_data.keys()])
            outfile.write("SNP" + sep)
            for key in ordered_keys:
                outfile.write(sep + key)
            outfile.write("\n")
            for i in range(self.nSNP):
                outfile.write("SNP_" + str(i))
                for key in ordered_keys:
                    outfile.write(sep + snp_data[key][i])
                outfile.write("\n")
        def _write_attributes():
            """Write one <prefix>_<key>.nat node-attribute file per
            metadata key."""
            for key, value in self.meta_data.items():
                outname = self.prefix + '_' + key + '.nat'
                out = open(outname, 'w')
                out.write('ID\t' + self.prefix + '_' + key + '\n')
                for i in range(len(value)):
                    out.write(self.ids[i] + '\t' + value[i] + '\n')
                out.close()
        def _write_graph_json():
            """Write one JSON file plus one D3 HTML page per stored network,
            filling the bundled fd_network.html template."""
            # CSS colour name -> hex, used to translate the 'col' metadata
            col_dict = {'dimgray': '#696969', 'olive': '#808000', 'burlywood': '#deb887', 'darkgreen': '#006400',
                        'navy': '#000080', 'white': '#ffffff', 'violet': '#ee82ee', 'darkblue': '#00008b',
                        'steelblue': '#4682b4', 'deepskyblue': '#00bfff', 'tan': '#d2b48c', 'rebeccapurple': '#663399',
                        'honeydew': '#f0fff0', 'slategray': '#708090', 'powderblue': '#b0e0e6', 'palevioletred': '#db7093',
                        'chocolate': '#d2691e', 'coral': '#ff7f50', 'azure': '#f0ffff', 'peru': '#cd853f',
                        'springgreen': '#00ff7f', 'darkorange': '#ff8c00', 'mediumvioletred': '#c71585',
                        'mediumaquamarine': '#66cdaa', 'darkmagenta': '#8b008b', 'mediumslateblue': '#7b68ee',
                        'mediumseagreen': '#3cb371', 'crimson': '#dc143c', 'gainsboro': '#dcdcdc', 'darkgray': '#a9a9a9',
                        'plum': '#dda0dd', 'forestgreen': '#228b22', 'seagreen': '#2e8b57', 'teal': '#008080',
                        'gold': '#ffd700', 'dodgerblue': '#1e90ff', 'lightpink': '#ffb6c1', 'papayawhip': '#ffefd5',
                        'orchid': '#da70d6', 'black': '#000000', 'cornflowerblue': '#6495ed', 'lightyellow': '#ffffe0',
                        'goldenrod': '#daa520', 'purple': '#800080', 'khaki': '#f0e68c', 'aquamarine': '#7fffd4',
                        'lightskyblue': '#87cefa', 'fuchsia': '#ff00ff', 'mediumblue': '#0000cd', 'sandybrown': '#f4a460',
                        'moccasin': '#ffe4b5', 'darkslategray': '#2f4f4f', 'cornsilk': '#fff8dc', 'lightcyan': '#e0ffff',
                        'darkolivegreen': '#556b2f', 'silver': '#c0c0c0', 'lightgoldenrodyellow': '#fafad2',
                        'navajowhite': '#ffdead', 'turquoise': '#40e0d0', 'rosybrown': '#bc8f8f', 'antiquewhite': '#faebd7',
                        'thistle': '#d8bfd8', 'lightcoral': '#f08080', 'floralwhite': '#fffaf0', 'indianred': '#cd5c5c',
                        'ghostwhite': '#f8f8ff', 'blue': '#0000ff', 'snow': '#fffafa', 'orangered': '#ff4500',
                        'darkred': '#8b0000', 'greenyellow': '#adff2f', 'ivory': '#fffff0', 'mediumorchid': '#ba55d3',
                        'lawngreen': '#7cfc00', 'lightsalmon': '#ffa07a', 'lightgray': '#d3d3d3',
                        'lightslategray': '#778899', 'mediumpurple': '#9370db', 'darkcyan': '#008b8b', 'tomato': '#ff6347',
                        'lightsteelblue': '#b0c4de', 'darkseagreen': '#8fbc8f', 'aqua': '#00ffff', 'olivedrab': '#6b8e23',
                        'darkgoldenrod': '#b8860b', 'darkorchid': '#9932cc', 'seashell': '#fff5ee', 'skyblue': '#87ceeb',
                        'blanchedalmond': '#ffebcd', 'beige': '#f5f5dc', 'darkturquoise': '#00ced1', 'slateblue': '#6a5acd',
                        'red': '#ff0000', 'lavender': '#e6e6fa', 'hotpink': '#ff69b4', 'yellowgreen': '#9acd32',
                        'cyan': '#00ffff', 'firebrick': '#b22222', 'lemonchiffon': '#fffacd', 'darksalmon': '#e9967a',
                        'sienna': '#a0522d', 'mediumturquoise': '#48d1cc', 'salmon': '#fa8072', 'green': '#008000',
                        'lightgreen': '#90ee90', 'deeppink': '#ff1493', 'palegoldenrod': '#eee8aa', 'orange': '#ffa500',
                        'wheat': '#f5deb3', 'lime': '#00ff00', 'lavenderblush': '#fff0f5', 'brown': '#a52a2a',
                        'blueviolet': '#8a2be2', 'magenta': '#ff00ff', 'lightseagreen': '#20b2aa', 'mistyrose': '#ffe4e1',
                        'saddlebrown': '#8b4513', 'midnightblue': '#191970', 'mediumspringgreen': '#00fa9a',
                        'cadetblue': '#5f9ea0', 'paleturquoise': '#afeeee', 'palegreen': '#98fb98', 'pink': '#ffc0cb',
                        'darkkhaki': '#bdb76b', 'oldlace': '#fdf5e6', 'whitesmoke': '#f5f5f5', 'royalblue': '#4169e1',
                        'gray': '#808080', 'lightblue': '#add8e6', 'maroon': '#800000', 'peachpuff': '#ffdab9',
                        'darkslateblue': '#483d8b', 'linen': '#faf0e6', 'limegreen': '#32cd32',
                        'mintcream': '#f5fffa', 'chartreuse': '#7fff00', 'yellow': '#ffff00', 'indigo': '#4b0082',
                        'bisque': '#ffe4c4', 'aliceblue': '#f0f8ff', 'darkviolet': '#9400d3'}
            # NOTE(review): dict_keys never compares equal to '' -- this guard
            # can never fire (and would not return even if it did).
            if self.networks.keys() == '':
                print('No networks to write to JSON.')
            templ_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
            templ_file = open(os.path.join(templ_path, 'fd_network.html'))
            templ_str = templ_file.read()
            templ_file.close()
            for network, properties in self.networks.items():
                json_name = self.prefix + '_' + network + '.json'
                html_name = self.prefix + '_' + network + '.html'
                # properties layout (from netview()): [k, edges, weights, adjacency, mst_edges]
                edges = properties[1]
                weights = properties[2]
                node_array = []
                # assumes meta_data contains 'pop' and 'col' columns -- TODO confirm
                for i in range(len(self.ids)):
                    if 'lat' in self.meta_data.keys() and 'lon' in self.meta_data.keys():
                        node_array.append({'name': self.ids[i], 'group': self.meta_data['pop'][i], 'color':
                                           col_dict[self.meta_data['col'][i]], 'lon': self.meta_data['lon'][i],
                                           'lat': self.meta_data['lat'][i]})
                    else:
                        node_array.append({'name': self.ids[i], 'group': self.meta_data['pop'][i], 'color':
                                           col_dict[self.meta_data['col'][i]]})
                edge_array = []
                for i in range(len(edges)):
                    if 'lat' in self.meta_data.keys() and 'lon' in self.meta_data.keys():
                        edge_array.append({'source': int(edges[i, 0]), 'target': int(edges[i, 1]), 'value':
                                           float(weights[i]), 'slon': self.meta_data['lon'][edges[i, 0]], 'slat':
                                           self.meta_data['lat'][edges[i, 0]], 'tlon': self.meta_data['lon'][edges[i, 1]],
                                           'tlat': self.meta_data['lat'][edges[i, 1]]})
                    else:
                        edge_array.append({'source': int(edges[i, 0]), 'target': int(edges[i, 1]), 'value':
                                           float(weights[i])})
                json_file = open(json_name, 'w')
                json_file.write(json.dumps({'nodes': node_array, 'links': edge_array, }, sort_keys=True, indent=2))
                json_file.close()
                if self.ploidy == 'diploid':
                    # report loci, not SNP columns
                    nsnps = self.nSNP//2
                else:
                    nsnps = self.nSNP
                html_file = open(html_name, 'w')
                # fill the template placeholders
                html = templ_str.replace('template.json', '"' + json_name + '"')
                html = html.replace('temp_n', str(self.n))
                html = html.replace('temp_snp', str(nsnps))
                html = html.replace('temp_k', str(properties[0]))
                html = html.replace('temp_project', str(self.project))
                html_file.write(html)
                html_file.close()
        def _write_graph_edges():
            """Write one .edges file per network
            (columns: Source, Target, Distance, MST)."""
            # NOTE(review): dict_keys never compares equal to '' -- guard never fires.
            if self.networks.keys() == '':
                print(get_time() + '\t' + 'Warning: No networks to write to JSON.')
            for network, properties in self.networks.items():
                filename = network + '.edges'
                edges = properties[1].tolist()
                weights = properties[2].tolist()
                mst_edges = properties[4].tolist()
                out = open(filename, 'w')
                out.write('Source\tTarget\tDistance\tMST\n')
                for i in range(len(edges)):
                    out.write(str(self.ids[edges[i][0]]) + "\t" + str(self.ids[edges[i][1]]) +
                              "\t" + str(weights[i]))
                    if len(mst_edges) > 0:
                        # colour edges that belong to the minimum spanning tree
                        if edges[i] in mst_edges:
                            out.write('\t' + 'red\n')
                        else:
                            out.write('\t' + 'grey\n')
                    else:
                        out.write("\n")
                if len(mst_edges) == 0:
                    # append nodes that take part in no edge at all
                    singletons = np.setdiff1d(np.arange(self.n), properties[1].flatten()).tolist()
                    if singletons:
                        for node in singletons:
                            out.write(str(self.ids[node]) + '\n')
                out.close()
        ## Main Write ##
        if f == 'attributes':
            _write_attributes()
        else:
            filename = file
            outfile = open(filename, "w")
            f = f.lower()
            if f == "nexus":
                _write_nexus(outfile, sep)
            elif f =="raxml":
                _write_raxml(outfile, sep)
            elif f == "plink":
                _write_plink(outfile, file, sep)
            elif f == "matrix":
                np.savetxt(filename, self.matrix, fmt='%.9f', delimiter=sep)
            elif f == "meta":
                _write_metadata(outfile, sep)
            elif f == "snp":
                _write_snpdata(outfile, sep)
            elif f == "json":
                _write_graph_json()
            elif f == 'edges':
                _write_graph_edges()
            else:
                raise IOError("File format not supported.")
            outfile.close()
    def __str__(self):
        """Short human-readable summary of the container."""
        return ('-----------\nNumber of Individuals: %i\nNumber of SNPs: %i\nPloidy: %s\n-----------\n') % \
               (self.n, self.nSNP, self.ploidy)
#### Analysis Module ####
class Analysis:
    """Pipeline operations on a :class:`Data` object: distance computation,
    PLINK invocations, and NetView network construction."""
    def __init__(self, data):
        self.data = data
    def getDistance(self, target='snps', distance='hamming'):
        """Compute a pairwise distance matrix and store it on the data object.

        target   : 'snps' (default) or 'matrix'; forced to 'matrix' when the
                   input filetype is 'dist'
        distance : 'asd' (delegated to PLINK) or any measure accepted by
                   scipy.spatial.distance.pdist
        Returns whatever ends up assigned to self.data.matrix.
        """
        print(get_time() + "\t" + 'Distance = ' + distance.upper())
        if self.data.filetype == 'dist':
            target = 'matrix'
        if target == 'matrix':
            matrix = np.array(self.data.matrix)
        else:
            # Convert alleles to numbers (e.g. A -> 1, B -> 2) for use in scipy.spatial.distance.pdist()
            allele_codes = {}
            for i in range(len(self.data.alleles)):
                allele_codes[self.data.alleles[i]] = int(i+1)
            allele_codes[self.data.missing] = 0  # missing can not be 1 to i
            # NOTE(review): this recodes self.data.snps in place.
            snps = self.data.snps
            for a, code in allele_codes.items():
                snps[snps == a] = code
            matrix = snps
        if distance == 'asd':
            self.runPLINK(asd=True)
            self.data.readData(file=self.data.prefix + '_plink.mdist', f='matrix', sep=' ')
        else:
            matrix = sd.squareform(sd.pdist(matrix, distance))
        # NOTE(review): in the 'asd' branch, readData() above has already
        # loaded the PLINK distance matrix into self.data.matrix, yet the
        # assignment below then replaces it with the allele-coded SNP array
        # ('matrix' is unchanged in that branch) -- verify this is intended.
        self.data.matrix = matrix
        self.data.matrices[distance] = self.data.matrix
        return matrix
    def runPLINK(self, qc_parameters={}, commandstring='', asd=False, quality=False):
        """Run PLINK for quality control and/or the ASD distance matrix.

        qc_parameters : dict of PLINK flag -> value (e.g. '--maf': 0.01)
        commandstring : if given, executed verbatim instead of building one
        asd           : also run --cluster --distance-matrix
        quality       : run the QC pass and reload the filtered genotypes
        Raises AttributeError for haploid data (unsupported by PLINK here).
        NOTE(review): mutable default for qc_parameters -- never mutated
        here, but worth cleaning up.
        """
        if self.data.ploidy == 'haploid':
            raise AttributeError('Haploid genotypes not supported for PLINK.')
        if commandstring:
            subprocess.call(commandstring)
        else:
            # write current genotypes as PLINK .ped/.map input
            self.data.writeData(file=self.data.prefix + '_plink_in.ped', f='plink')
            if quality and qc_parameters:
                command = ['plink', '--noweb', '--file', self.data.prefix + '_plink_in']
                for key, value in qc_parameters.items():
                    command.append(key)
                    command.append(str(value))
                command.append('--recode')
                command.append('--out')
                command.append(self.data.prefix + '_plink_qc')
                subprocess.call(command, stdout=subprocess.DEVNULL)
                # reload the filtered genotypes if PLINK produced them
                if os.path.exists(self.data.prefix + '_plink_qc.ped'):
                    self.data.readData(file=self.data.prefix + '_plink_qc.ped', f='plink', sep=' ')
            if asd:
                subprocess.call(['plink', '--noweb', '--file', self.data.prefix + '_plink_in', '--cluster', '--distance-matrix',
                                 '--out', self.data.prefix + '_plink'], stdout=subprocess.DEVNULL)
    def updateNodeAttributes(self, attribute_file):
        """Drop samples removed by PLINK QC (listed in the .irem file) from
        the attribute CSV and reload the filtered attributes.
        No-op when no .irem file exists."""
        if os.path.isfile(self.data.prefix + '_plink_qc.irem'):
            infile = open(self.data.prefix + '_plink_qc.irem')
            to_remove = [line.strip().split()[1] for line in infile]
            infile.close()
            infile = open(attribute_file)
            outname = attribute_file.split('.')[0] + '_qc.csv'
            outfile = open(outname, 'w')
            for line in infile:
                content = line.strip().split(',')
                if content[0] not in to_remove:
                    outfile.write(line)
            infile.close()
            outfile.close()
            self.data.readData(file=outname, f='attributes', sep=',')
    def runNetView(self, tree=True, start=10, stop=40, step=10, algorithm='auto', edges=False, html=True):
        """Construct mutual-kNN networks for k = start..stop (inclusive, by
        *step*) in parallel and write the requested outputs."""
        print(get_time() + "\t" + "Minimum Spanning Tree = " + str(tree).upper())
        print(get_time() + "\t" + "Nearest Neighbour = " + algorithm.upper())
        print(get_time() + "\t" + "k = " + str(start) + " - " + str(stop) + ' (by ' + str(step) + ')')
        print(get_time() + "\t" + "---------------------------------")
        self.data.netview_runs += 1
        matrix = self.data.matrix
        if tree:
            mst = csg.minimum_spanning_tree(matrix)
            mst = mst.toarray()
            #self.data.networks[self.data.prefix + 'mst_' + str(self.data.netview_runs)] = mst
            # symmetrise the (one-sided) MST adjacency
            mst = mst + mst.T
        else:
            mst = None
        # one worker per k; netview() is a module-level function so it can
        # be pickled for multiprocessing
        pool = mp.Pool()
        networks = [pool.apply_async(netview, args=(matrix, k, mst, algorithm, tree,), callback=netview_callback)
                    for k in range(start, stop+1, step)]
        pool.close()
        pool.join()
        for item in networks:
            result = item.get()
            self.data.networks['netview_k' + str(result[0]) + '_' + str(self.data.netview_runs)] = result
        print(get_time() + "\t" + "---------------------------------")
        if html:
            print(get_time() + "\t" + "Out = JSON")
            self.data.writeData(f='json')
        if edges:
            self.data.writeData(f='edges')
            print(get_time() + "\t" + "Out = Edges")
# Script entry point: run the NetView P pipeline defined in main().
if __name__ == '__main__':
    main()
|
esteinig/netviewP
|
program/win/0.7.1/netview.py
|
Python
|
gpl-2.0
| 37,628
|
[
"Biopython"
] |
20078e31b111eb7a69feeeabb90e73ca157addd89cce1bc7cc6e26b70b5cb9a2
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Audun Gravdal Johansen
"""
"""
import numpy as np
from struct import pack
from base64 import b64encode
from ..exceptions import VtkError
# index arrays to reorder element connectivity - keys are SESAM element id's
# (SESAM and VTK number the nodes of higher-order elements differently)
sesam2vtk_connectivity = {23 : np.array([0,2,1]),
                          28 : np.array([0,2,4,6,1,3,5,7])}
# index arrays to reorder element node results - keys are SESAM element id's
sesam2vtk_elemresults = {26 : np.array([0,2,4,1,3,5])}
# get vtk element type id (SESAM element type id -> VTK cell type id)
sesam2vtk_elementtypeid = {15 : 3,
                           23 : 21,
                           24 : 9,
                           25 : 5,
                           26 : 22,
                           28 : 23}
# get vtk ids from sesam ids (vectorised elementwise lookup for arrays)
typeidmapper = np.vectorize(lambda x: sesam2vtk_elementtypeid[x])
# get vtk datatype name from numpy datatype name (for DataArray 'type' attr)
numpy2vtk_datatype = {'int8' : 'Int8',
                      'uint8' : 'UInt8',
                      'int16' : 'Int16',
                      'uint16' : 'UInt16',
                      'int32' : 'Int32',
                      'uint32' : 'UInt32',
                      'int64' : 'Int64',
                      'uint64' : 'UInt64',
                      'float32' : 'Float32',
                      'float64' : 'Float64'}
def _reorder_data(data, offsets, types, indexarr_map):
    """Reorder *data* in place, one element segment at a time.

    offsets gives the end index of each element's segment, types the
    element type ids; segments whose type has an index array in
    *indexarr_map* are permuted by it, all others are left untouched.
    """
    segment_start = 0
    for segment_stop, elem_type in zip(offsets, types):
        index_arr = indexarr_map.get(elem_type)
        if index_arr is not None:
            segment = data[segment_start:segment_stop]
            data[segment_start:segment_stop] = segment[index_arr]
        segment_start = segment_stop
def _reorder_connectivity_vtk(cells):
    """Reorder element connectivity in place from SESAM to VTK node order.

    *cells* is the (connectivity, offsets, types) triple.
    """
    connectivity, offsets, types = cells
    _reorder_data(connectivity, offsets, types, sesam2vtk_connectivity)
def _reorder_cellpointdata_vtk(resultarr, offsets, types):
    """Reorder element node results in place from SESAM to VTK node order."""
    _reorder_data(resultarr, offsets, types, sesam2vtk_elemresults)
class VtuWriter(object):
    """write data to vtk xml format (UnstructuredGrid)

    Examples
    --------
    ::

        import freesif as fs
        f = fs.open_hdf5('R1.h5')
        t1 = f['T1']
        nodes = t1.get_nodes()
        elems = t1.get_elements()
        disp = t1.get_noderesults('displacement')
        vtu = fs.utils.VtuWriter('out.vtu')
        vtu.new_piece(nodes, elems)
        vtu.start_pointdata()
        vtu.add_data(disp, 'displacement')
        vtu.close()
        f.close()
    """
    def __init__(self, filename):
        """Open *filename* for binary writing and emit the XML root elements."""
        self._file = open(filename, 'wb')
        self._piece_open = False
        self._pointdata_open = False
        self._celldata_open = False
        self._elements = []           # stack of currently open XML tags
        self._current_offsets = None  # cell offsets of the current piece
        self._current_types = None    # SESAM element types of the current piece
        self._open_element('VTKFile', type='UnstructuredGrid', version='1.0',
                           byte_order='LittleEndian', header_type='UInt64')
        self._open_element('UnstructuredGrid')
    def new_piece(self, points, cells):
        """Start a new <Piece> and write its Points and Cells sections.

        points : (npoints, 3) coordinate array
        cells  : (connectivity, offsets, types) triple; connectivity is
                 reordered in place from SESAM to VTK node order
        """
        # close whatever data section / piece a previous call left open
        self._close_data()
        self._close_piece()
        self._piece_open = True
        _reorder_connectivity_vtk(cells)
        connectivity, offsets, types = cells
        # keep reference to offsets and types
        self._current_offsets = offsets
        self._current_types = types
        types = typeidmapper(types)  # SESAM ids -> VTK cell type ids
        self._open_element('Piece', NumberOfPoints=len(points),
                           NumberOfCells=len(offsets))
        # write points
        self._open_element('Points')
        self._write_array(points, 'points')
        self._close_element('Points')
        # write cells
        self._open_element('Cells')
        self._write_array(connectivity, 'connectivity')
        self._write_array(offsets, 'offsets')
        self._write_array(types, 'types')
        self._close_element('Cells')
    def start_pointdata(self):
        """Open a <PointData> section for subsequent add_data() calls."""
        self._open_data('PointData')
    def start_celldata(self):
        """Open a <CellData> section for subsequent add_data() calls."""
        self._open_data('CellData')
    def add_data(self, arr, name, compnames=None):
        """Write *arr* as a DataArray named *name* in the open data section.

        compnames: optional sequence of per-component names.
        Raises VtkError if neither PointData nor CellData is open.
        """
        if not self._pointdata_open and not self._celldata_open:
            raise VtkError('Call start_pointdata() or start_celldata() first')
        self._write_array(arr, name, compnames)
    def add_data_cellpoints(self, arr, name, compnames=None):
        """Like add_data(), but first reorders element node results in
        place (SESAM -> VTK node order) using the current piece's cells."""
        _reorder_cellpointdata_vtk(
            arr, self._current_offsets, self._current_types)
        self.add_data(arr, name, compnames)
    def close(self):
        """Close any open sections, the root elements and the file."""
        self._close_data()
        self._close_piece()
        self._close_element('UnstructuredGrid')
        self._close_element('VTKFile')
        self._file.close()
    def _write_array(self, arr, name, compnames=None):
        """Write a 1d/2d numpy array as a base64-encoded <DataArray>."""
        # accept 1d or 2d array
        if arr.ndim == 2:
            ncomps = arr.shape[1]
        elif arr.ndim == 1:
            ncomps = 1
        else:
            raise VtkError('array must be 1d or 2d')
        # create comp name kwargs
        if compnames: # sequence of str
            compname_dict = {'ComponentName{}'.format(i): cn \
                             for i, cn in enumerate(compnames)}
        else:
            compname_dict = {}
        vtktype = numpy2vtk_datatype[arr.dtype.name]
        self._open_element('DataArray', type=vtktype, Name=name,
                           NumberOfComponents=ncomps, format='binary',
                           **compname_dict)
        # write indent
        self._file.write(b' '*len(self._elements))
        # VTK 'binary' format: the UInt64 byte-count header and the raw
        # array bytes are base64-encoded together as a single block.
        nbytes = arr.size * arr.dtype.itemsize
        header = pack('<Q', nbytes)
        # NOTE(review): ndarray.tostring() is deprecated in newer numpy;
        # tobytes() is the drop-in replacement.
        arrstr = arr.ravel().tostring()
        self._file.write(b64encode(header + arrstr))
        # write newline
        self._file.write(b'\n')
        self._close_element('DataArray')
    def _close_piece(self):
        """Close the current <Piece> if one is open."""
        if self._piece_open:
            self._close_element('Piece')
            self._piece_open = False
    def _open_data(self, datatype):
        """Open a <PointData> or <CellData> section in the current piece."""
        if not self._piece_open:
            raise VtkError('Call new_piece() first')
        self._close_data()
        self._open_element(datatype)
        if datatype == 'PointData':
            self._pointdata_open = True
        elif datatype == 'CellData':
            self._celldata_open = True
    def _close_data(self):
        """Close whichever data section is currently open, if any."""
        if self._pointdata_open:
            self._close_element('PointData')
            self._pointdata_open = False
        elif self._celldata_open:
            self._close_element('CellData')
            self._celldata_open = False
    def _open_element(self, tag, selfclosing=False, **kwargs):
        """Write an opening XML tag (attributes from kwargs) and push it on
        the element stack.  Uses bytes %-formatting (Python >= 3.5).

        NOTE(review): the tag is pushed onto the stack even when
        selfclosing=True, although no matching _close_element() call will
        follow -- no caller in this file passes selfclosing, so the path
        looks untested.
        """
        s = b' '*len(self._elements) # indentation
        s += b'<%s' % tag.encode()
        if kwargs:
            s += b' '
            s += b' '.join([b'%s="%s"' % (str(k).encode(), str(v).encode())
                            for k, v in list(kwargs.items())])
        if selfclosing:
            s += b' />\n'
        else:
            s += b'>\n'
        self._elements.append(tag)
        self._file.write(s)
    def _close_element(self, tag):
        """Write the closing tag for *tag*, which must be topmost on the
        element stack; raises VtkError on mismatch."""
        if not tag == self._elements.pop():
            raise VtkError('{} has no corresponding opening tag'.format(tag))
        s = b' '*len(self._elements)
        s += b'</%s>\n' % tag.encode()
        self._file.write(s)
|
agrav/freesif
|
freesif/utils/writers.py
|
Python
|
mit
| 7,490
|
[
"VTK"
] |
b40463db336a93c2db32f9208d0ff8768cb84ac50092bc9190a20aceca3faf4a
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup
import numpy
# Version number
version = '1.0'

def read(fname):
    """Return the text content of *fname*, resolved relative to this file.

    Used below to pull author info and the long description into setup().
    """
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Package metadata and build configuration for the DGP (Deep Gaussian
# Process) package.
# NOTE(review): 'author' is filled with the full text of AUTHORS.txt and
# 'url' is empty -- confirm both are intended.
setup(name = 'DGP',
      version = version,
      author = read('AUTHORS.txt'),
      author_email = "damianou@amazon.com",
      description = ("Deep Gaussian Process"),
      license = "BSD 3-clause",
      keywords = "deep learning",
      url = "",
      packages = ["deepgp",
                  "deepgp.inference",
                  "deepgp.layers",
                  "deepgp.util",
                  "deepgp.models"],
      package_dir={'deepgp': 'deepgp'},
      py_modules = ['deepgp.__init__'],
      long_description=read('README.md'),
      install_requires=['numpy>=1.7', 'scipy>=0.12','GPy>=1.0'],
      # numpy headers for any C extensions built against this package
      include_dirs=[numpy.get_include()],
      classifiers=['License :: OSI Approved :: BSD License',
                   'Natural Language :: English',
                   'Operating System :: MacOS :: MacOS X',
                   'Operating System :: Microsoft :: Windows',
                   'Operating System :: POSIX :: Linux',
                   'Programming Language :: Python :: 2.7',
                   'Programming Language :: Python :: 3.3',
                   'Programming Language :: Python :: 3.4',
                   'Programming Language :: Python :: 3.5'
                   ]
      )
|
zhenwendai/DeepGP
|
setup.py
|
Python
|
bsd-3-clause
| 1,434
|
[
"Gaussian"
] |
f0562e6d9652ba362948d83753f29945093281238f66dacc9c3686078bd16c1c
|
"""
Extensions to SQLAlchemy for altering existing tables.
At the moment, this isn't so much based off of ANSI as much as
things that just happen to work with multiple databases.
"""
import StringIO
import sqlalchemy as sa
from sqlalchemy.schema import SchemaVisitor
from sqlalchemy.engine.default import DefaultDialect
from sqlalchemy.sql import ClauseElement
from sqlalchemy.schema import (ForeignKeyConstraint,
PrimaryKeyConstraint,
CheckConstraint,
UniqueConstraint,
Index)
from migrate import exceptions
from migrate.changeset import constraint
from sqlalchemy.schema import AddConstraint, DropConstraint
from sqlalchemy.sql.compiler import DDLCompiler
# Both schema generation and dropping reuse SQLAlchemy's DDLCompiler as base.
SchemaGenerator = SchemaDropper = DDLCompiler
class AlterTableVisitor(SchemaVisitor):
    """Common operations for ``ALTER TABLE`` statements."""
    # engine.Compiler looks for .statement
    # when it spawns off a new compiler
    # NOTE(review): class-level attribute -- the same ClauseElement instance
    # is shared by every visitor instance.
    statement = ClauseElement()
    def append(self, s):
        """Append content to the SchemaIterator's query buffer."""
        self.buffer.write(s)
    def execute(self):
        """Execute the contents of the SchemaIterator's buffer."""
        # NOTE(review): relies on Python 2 StringIO semantics (this module
        # imports the Python 2 StringIO module); with Python 3 io.StringIO,
        # truncate(0) would not rewind the stream position.
        try:
            return self.connection.execute(self.buffer.getvalue())
        finally:
            self.buffer.truncate(0)
    def __init__(self, dialect, connection, **kw):
        """Bind the visitor to a dialect and a live connection."""
        self.connection = connection
        self.buffer = StringIO.StringIO()  # accumulates SQL text until execute()
        self.preparer = dialect.identifier_preparer
        self.dialect = dialect
    def traverse_single(self, elem):
        """Traverse *elem*; append any string the visitor returns."""
        ret = super(AlterTableVisitor, self).traverse_single(elem)
        if ret:
            # adapt to 0.6 which uses a string-returning
            # object
            self.append(" %s" % ret)
    def _to_table(self, param):
        """Returns the table object for the given param object."""
        if isinstance(param, (sa.Column, sa.Index, sa.schema.Constraint)):
            ret = param.table
        else:
            ret = param
        return ret
    def start_alter_table(self, param):
        """Returns the start of an ``ALTER TABLE`` SQL-Statement.
        Use the param object to determine the table name and use it
        for building the SQL statement.
        :param param: object to determine the table from
        :type param: :class:`sqlalchemy.Column`, :class:`sqlalchemy.Index`,
        :class:`sqlalchemy.schema.Constraint`, :class:`sqlalchemy.Table`,
        or string (table name)
        """
        table = self._to_table(param)
        self.append('\nALTER TABLE %s ' % self.preparer.format_table(table))
        return table
class ANSIColumnGenerator(AlterTableVisitor, SchemaGenerator):
    """Extends ansisql generator for column creation (alter table add col)"""

    def visit_column(self, column):
        """Create a column (table already exists).

        Emits ``ALTER TABLE ... ADD <colspec>`` followed by any index,
        unique, foreign-key or primary-key constraints the column carries.

        :param column: column object
        :type column: :class:`sqlalchemy.Column` instance
        """
        if column.default is not None:
            # Visit the default first so any supporting DDL (e.g. sequences)
            # is emitted before the column is added.
            self.traverse_single(column.default)
        table = self.start_alter_table(column)
        self.append("ADD ")
        self.append(self.get_column_specification(column))
        for cons in column.constraints:
            self.traverse_single(cons)
        self.execute()
        # ALTER TABLE STATEMENTS
        # add indexes and unique constraints
        if column.index_name:
            Index(column.index_name,column).create()
        elif column.unique_name:
            constraint.UniqueConstraint(column,
                                        name=column.unique_name).create()
        # SA bounds FK constraints to table, add manually
        for fk in column.foreign_keys:
            self.add_foreignkey(fk.constraint)
        # add primary key constraint if needed
        if column.primary_key_name:
            cons = constraint.PrimaryKeyConstraint(column,
                                                   name=column.primary_key_name)
            cons.create()

    def add_foreignkey(self, fk):
        """Emit an ``ADD CONSTRAINT`` for the given foreign-key constraint."""
        self.connection.execute(AddConstraint(fk))
class ANSIColumnDropper(AlterTableVisitor, SchemaDropper):
    """Extends ANSI SQL dropper for column dropping (``ALTER TABLE
    DROP COLUMN``).
    """

    def visit_column(self, column):
        """Drop a column from its table.

        Emits ``ALTER TABLE <table> DROP COLUMN <column>``.

        :param column: the column object
        :type column: :class:`sqlalchemy.Column`
        """
        table = self.start_alter_table(column)
        self.append('DROP COLUMN %s' % self.preparer.format_column(column))
        self.execute()
class ANSISchemaChanger(AlterTableVisitor, SchemaGenerator):
    """Manages changes to existing schema elements.

    Note that columns are schema elements; ``ALTER TABLE ADD COLUMN``
    is in SchemaGenerator.

    All items may be renamed. Columns can also have many of their properties -
    type, for example - changed.

    Each function is passed a tuple, containing (object, name); where
    object is a type of object you'd expect for that function
    (ie. table for visit_table) and name is the object's new
    name. NONE means the name is unchanged.
    """

    def visit_table(self, table):
        """Rename a table. Other ops aren't supported."""
        self.start_alter_table(table)
        self.append("RENAME TO %s" % self.preparer.quote(table.new_name,
                                                         table.quote))
        self.execute()

    def visit_index(self, index):
        """Rename an index"""
        # The identifier-truncation helper changed name between SQLAlchemy
        # releases; probe for the old one first.
        if hasattr(self, '_validate_identifier'):
            # SA <= 0.6.3
            self.append("ALTER INDEX %s RENAME TO %s" % (
                self.preparer.quote(
                    self._validate_identifier(
                        index.name, True), index.quote),
                self.preparer.quote(
                    self._validate_identifier(
                        index.new_name, True), index.quote)))
        else:
            # SA >= 0.6.5
            self.append("ALTER INDEX %s RENAME TO %s" % (
                self.preparer.quote(
                    self._index_identifier(
                        index.name), index.quote),
                self.preparer.quote(
                    self._index_identifier(
                        index.new_name), index.quote)))
        self.execute()

    def visit_column(self, delta):
        """Rename/change a column.

        :param delta: dict-like description of the pending changes; the
            keys present determine which ALTER statements are emitted.
        """
        # ALTER COLUMN is implemented as several ALTER statements
        keys = delta.keys()
        if 'type' in keys:
            self._run_subvisit(delta, self._visit_column_type)
        if 'nullable' in keys:
            self._run_subvisit(delta, self._visit_column_nullable)
        if 'server_default' in keys:
            # Skip 'default': only handle server-side defaults, others
            # are managed by the app, not the db.
            self._run_subvisit(delta, self._visit_column_default)
        if 'name' in keys:
            # RENAME COLUMN starts its own ALTER TABLE, not ALTER COLUMN.
            self._run_subvisit(delta, self._visit_column_name, start_alter=False)

    def _run_subvisit(self, delta, func, start_alter=True):
        """Runs visit method based on what needs to be changed on column"""
        table = self._to_table(delta.table)
        col_name = delta.current_name
        if start_alter:
            self.start_alter_column(table, col_name)
        # NOTE(review): the return value of `func` is never used.
        ret = func(table, delta.result_column, delta)
        self.execute()

    def start_alter_column(self, table, col_name):
        """Starts ALTER COLUMN"""
        self.start_alter_table(table)
        self.append("ALTER COLUMN %s " % self.preparer.quote(col_name, table.quote))

    def _visit_column_nullable(self, table, column, delta):
        # Toggle the NOT NULL constraint according to the requested state.
        nullable = delta['nullable']
        if nullable:
            self.append("DROP NOT NULL")
        else:
            self.append("SET NOT NULL")

    def _visit_column_default(self, table, column, delta):
        # Only server-side defaults reach here (see visit_column).
        default_text = self.get_column_default_string(column)
        if default_text is not None:
            self.append("SET DEFAULT %s" % default_text)
        else:
            self.append("DROP DEFAULT")

    def _visit_column_type(self, table, column, delta):
        # Compile the new type through the active dialect.
        type_ = delta['type']
        type_text = str(type_.compile(dialect=self.dialect))
        self.append("TYPE %s" % type_text)

    def _visit_column_name(self, table, column, delta):
        self.start_alter_table(table)
        col_name = self.preparer.quote(delta.current_name, table.quote)
        new_name = self.preparer.format_column(delta.result_column)
        self.append('RENAME COLUMN %s TO %s' % (col_name, new_name))
class ANSIConstraintCommon(AlterTableVisitor):
    """
    Migrate's constraints require a separate creation function from
    SA's: Migrate's constraints are created independently of a table;
    SA's are created at the same time as the table.
    """

    def get_constraint_name(self, cons):
        """Gets a name for the given constraint.

        If the name is already set it will be used otherwise the
        constraint's :meth:`autoname <migrate.changeset.constraint.ConstraintChangeset.autoname>`
        method is used.

        :param cons: constraint object
        """
        if cons.name is not None:
            ret = cons.name
        else:
            # Generate and remember a name so the constraint can later be
            # referenced (e.g. dropped) by it.
            ret = cons.name = cons.autoname()
        return self.preparer.quote(ret, cons.quote)

    # All migrate constraint flavours funnel into the single
    # _visit_constraint defined by the concrete generator/dropper subclass.
    def visit_migrate_primary_key_constraint(self, *p, **k):
        self._visit_constraint(*p, **k)

    def visit_migrate_foreign_key_constraint(self, *p, **k):
        self._visit_constraint(*p, **k)

    def visit_migrate_check_constraint(self, *p, **k):
        self._visit_constraint(*p, **k)

    def visit_migrate_unique_constraint(self, *p, **k):
        self._visit_constraint(*p, **k)
class ANSIConstraintGenerator(ANSIConstraintCommon, SchemaGenerator):
    """Emits ``ALTER TABLE ... ADD CONSTRAINT`` for migrate constraints."""

    def _visit_constraint(self, constraint):
        # Make sure the constraint has a (possibly auto-generated) name
        # before compiling the ADD CONSTRAINT DDL.
        constraint.name = self.get_constraint_name(constraint)
        self.append(self.process(AddConstraint(constraint)))
        self.execute()
class ANSIConstraintDropper(ANSIConstraintCommon, SchemaDropper):
    """Emits ``ALTER TABLE ... DROP CONSTRAINT`` for migrate constraints."""

    def _visit_constraint(self, constraint):
        # Resolve the name first; DROP CONSTRAINT requires one.
        constraint.name = self.get_constraint_name(constraint)
        self.append(self.process(DropConstraint(constraint, cascade=constraint.cascade)))
        self.execute()
class ANSIDialect(DefaultDialect):
    """Default dialect: maps changeset operations to the ANSI visitors above."""
    columngenerator = ANSIColumnGenerator
    columndropper = ANSIColumnDropper
    schemachanger = ANSISchemaChanger
    constraintgenerator = ANSIConstraintGenerator
    constraintdropper = ANSIConstraintDropper
|
SohKai/ChronoLogger
|
web/flask/lib/python2.7/site-packages/migrate/changeset/ansisql.py
|
Python
|
mit
| 10,694
|
[
"VisIt"
] |
9973ec7115304d222dfc7595345d035871e63505a6a02384c63316f2231a172c
|
import sys
import os.path
import scipy
import numpy
import partial_terms as pt
import kernels
from numpy.linalg.linalg import LinAlgError
from scg_adapted import SCG_adapted
D = 3   # number of output dimensions of the data Y (passed to partial_terms)
Q = 2   # dimensionality of the latent space (columns of X_mu / Z)
M = 10  # number of inducing points (rows of Z)
# Module-level state shared with likelihood_and_gradient() via `global`.
# NOTE(review): these are initialised to empty dicts as placeholders and are
# overwritten with their real values (arrays / ints / bools) in main().
N = {}
Y = {}
X_mu = {}
X_S = {}
Kmm = {}
Kmm_inv = {}
accumulated_statistics = {}
flat_global_statistics_bounds = {}
global_statistics_names = {}
fix_beta = {}
def main():
global N, Y, X_mu, X_S, flat_global_statistics_bounds, fix_beta, global_statistics_names
iterations = 20
# Load data
Y = numpy.concatenate((
numpy.genfromtxt('./easydata/inputs/easy_1', delimiter=','),
numpy.genfromtxt('./easydata/inputs/easy_2', delimiter=','),
numpy.genfromtxt('./easydata/inputs/easy_3', delimiter=','),
numpy.genfromtxt('./easydata/inputs/easy_4', delimiter=',')))
N = Y.shape[0]
# We have several differet possible initialisations for the embeddings
#X_mu = numpy.load(X_file)
#X_mu = PCA(Y_file, Q)
#X_mu = scipy.randn(N, Q)
X_S = numpy.clip(numpy.ones((N, Q)) * 0.5
+ 0.01 * scipy.randn(N, Q),
0.001, 1)
#X_S = numpy.zeros((N, Q))
X_mu = numpy.concatenate((
scipy.load('./easydata/embeddings/easy_1.embedding.npy'),
scipy.load('./easydata/embeddings/easy_2.embedding.npy'),
scipy.load('./easydata/embeddings/easy_3.embedding.npy'),
scipy.load('./easydata/embeddings/easy_4.embedding.npy')))
'''
X_S = numpy.concatenate((
scipy.load('./easydata/embeddings/easy_1.variance.npy'),
scipy.load('./easydata/embeddings/easy_2.variance.npy'),
scipy.load('./easydata/embeddings/easy_3.variance.npy'),
scipy.load('./easydata/embeddings/easy_4.variance.npy')))
'''
# Initialise the inducing points
Z = X_mu[numpy.random.permutation(N)[:M],:]
#Z = X_mu[:M,:]
Z += scipy.randn(M, Q) * 0.1
global_statistics_names = {
'Z' : (M, Q), 'sf2' : (1,1), 'alpha' : (1, Q), 'beta' : (1,1), 'X_mu' : (N, Q), 'X_S' : (N, Q)
}
# Initialise the global statistics
global_statistics = {
'Z' : Z, # see GPy models/bayesian_gplvm.py
'sf2' : numpy.array([[1.0]]), # see GPy kern/rbf.py
'alpha' : scipy.ones((1, Q)), # see GPy kern/rbf.py
'beta' : numpy.array([[1.0]]), # see GPy likelihood/gaussian.py
'X_Zmu' : X_mu,
'X_S' : X_S
}
# Initialise bounds for optimisation
global_statistics_bounds = {
'Z' : [(None, None) for i in range(M * Q)],
'sf2' : [(0, None)],
'alpha' : [(0, None) for i in range(Q)],
'beta' : [(0, None)],
'X_mu' : [(None, None) for i in range(N * Q)],
'X_S' : [(0, None) for i in range(N * Q)]
}
flat_global_statistics_bounds = []
for key, statistic in global_statistics_bounds.items():
flat_global_statistics_bounds = flat_global_statistics_bounds+statistic
'''
Run the optimiser
'''
x0 = flatten_global_statistics(global_statistics)
# Transform the positiv parameters to be in the range (-Inf, Inf)
x0 = numpy.array([transform_back(b, x) for b, x in zip(flat_global_statistics_bounds, x0)])
'''
SCG optimisation (adapted from GPy implementation to reduce function calls)
The number of iterations might be greater than max_f_eval
'''
#fix_beta = True
# We set the flag fixed_embeddings to true because we just need the globals (which now include the embeddings) optimised
x = SCG_adapted(likelihood_and_gradient, x0, './easydata/tmp/', fixed_embeddings=True, display=True, maxiters=iterations)
#fix_beta = False
#x = SC(likelihood_and_gradient, x[0], display=True, maxiters=iterations)
flat_array = x[0]
# Transform the parameters that have to be positive to be positive
flat_array_transformed = numpy.array([transform(b, x) for b, x in zip(flat_global_statistics_bounds, flat_array)])
global_statistics = rebuild_global_statistics(global_statistics_names, flat_array_transformed)
print 'Final global_statistics'
print global_statistics
'''
Likelihood and gradient functions
'''
def likelihood_and_gradient(flat_array, iteration=0, step_size=0):
    """Evaluate the negative log marginal likelihood and its gradient.

    :param flat_array: flattened global statistics, in the unconstrained
        (-inf, inf) representation produced by transform_back()
    :param iteration: unused here; part of the SCG driver's call signature
    :param step_size: unused here; part of the SCG driver's call signature
    :returns: tuple (-F, -dF/dx) suitable for a minimiser
    """
    global Kmm, Kmm_inv, accumulated_statistics, N, Y, flat_global_statistics_bounds, fix_beta, global_statistics_names
    # Transform the parameters that have to be positive to be positive
    flat_array_transformed = numpy.array([transform(b, x) for b, x in zip(flat_global_statistics_bounds, flat_array)])
    global_statistics = rebuild_global_statistics(global_statistics_names, flat_array_transformed)
    #print 'global_statistics'
    #print global_statistics
    Z = global_statistics['Z']
    sf2 = float(global_statistics['sf2'])
    beta = float(global_statistics['beta'])
    alpha = numpy.squeeze(global_statistics['alpha'])
    X_mu = global_statistics['X_mu']
    X_S = global_statistics['X_S']
    # We can calculate the global statistics once
    kernel = kernels.rbf(Q, sf=sf2**0.5, ard=alpha**-0.5)
    Kmm = kernel.K(Z)
    Kmm_inv = numpy.linalg.inv(Kmm)
    # Calculate partial statistics...
    partial_terms = pt.partial_terms(Z, sf2, alpha, beta, M, Q, N, D, update_global_statistics=True)
    partial_terms.set_data(Y, X_mu, X_S, is_set_statistics=True)
    terms = partial_terms.get_local_statistics()
    # NOTE(review): 'sum_exp_K_miY' and 'sum_KL' are filled from the
    # un-prefixed 'exp_K_miY' / 'KL' entries -- presumably because there is
    # only one node in this serial example; confirm against the parallel code.
    accumulated_statistics = {
        'sum_YYT' : terms['sum_YYT'],
        'sum_exp_K_ii' : terms['sum_exp_K_ii'],
        'sum_exp_K_mi_K_im' : terms['sum_exp_K_mi_K_im'],
        'sum_exp_K_miY' : terms['exp_K_miY'],
        'sum_KL' : terms['KL'],
        'sum_d_Kmm_d_Z' : partial_terms.dKmm_dZ(),
        'sum_d_exp_K_miY_d_Z' : partial_terms.dexp_K_miY_dZ(),
        'sum_d_exp_K_mi_K_im_d_Z' : partial_terms.dexp_K_mi_K_im_dZ(),
        'sum_d_Kmm_d_alpha' : partial_terms.dKmm_dalpha(),
        'sum_d_exp_K_miY_d_alpha' : partial_terms.dexp_K_miY_dalpha(),
        'sum_d_exp_K_mi_K_im_d_alpha' : partial_terms.dexp_K_mi_K_im_dalpha(),
        'sum_d_Kmm_d_sf2' : partial_terms.dKmm_dsf2(),
        'sum_d_exp_K_ii_d_sf2' : partial_terms.dexp_K_ii_dsf2(),
        'sum_d_exp_K_miY_d_sf2' : partial_terms.dexp_K_miY_dsf2(),
        'sum_d_exp_K_mi_K_im_d_sf2' : partial_terms.dexp_K_mi_K_im_dsf2()
    }
    '''
    Calculates global statistics such as F and partial derivatives of F
    In the parallel code we create a new partial_terms object and
    load the data into it. Here we use the previous one for debugging.
    '''
    partial_derivatives = {
        'F' : partial_terms.logmarglik(),
        'dF_dsum_exp_K_ii' : partial_terms.dF_dexp_K_ii(),
        'dF_dsum_exp_K_miY' : partial_terms.dF_dexp_K_miY(),
        'dF_dsum_exp_K_mi_K_im' : partial_terms.dF_dexp_K_mi_K_im(),
        'dF_dKmm' : partial_terms.dF_dKmm()
    }
    '''
    Evaluate the gradient for 'Z', 'sf2', 'alpha', and 'beta'
    '''
    grad_Z = partial_terms.grad_Z(partial_derivatives['dF_dKmm'],
        accumulated_statistics['sum_d_Kmm_d_Z'],
        partial_derivatives['dF_dsum_exp_K_miY'],
        accumulated_statistics['sum_d_exp_K_miY_d_Z'],
        partial_derivatives['dF_dsum_exp_K_mi_K_im'],
        accumulated_statistics['sum_d_exp_K_mi_K_im_d_Z'])
    grad_alpha = partial_terms.grad_alpha(partial_derivatives['dF_dKmm'],
        accumulated_statistics['sum_d_Kmm_d_alpha'],
        partial_derivatives['dF_dsum_exp_K_miY'],
        accumulated_statistics['sum_d_exp_K_miY_d_alpha'],
        partial_derivatives['dF_dsum_exp_K_mi_K_im'],
        accumulated_statistics['sum_d_exp_K_mi_K_im_d_alpha'])
    grad_sf2 = partial_terms.grad_sf2(partial_derivatives['dF_dKmm'],
        accumulated_statistics['sum_d_Kmm_d_sf2'],
        partial_derivatives['dF_dsum_exp_K_ii'],
        accumulated_statistics['sum_d_exp_K_ii_d_sf2'],
        partial_derivatives['dF_dsum_exp_K_miY'],
        accumulated_statistics['sum_d_exp_K_miY_d_sf2'],
        partial_derivatives['dF_dsum_exp_K_mi_K_im'],
        accumulated_statistics['sum_d_exp_K_mi_K_im_d_sf2'])
    if fix_beta:
        # beta is held fixed: report a zero gradient so the optimiser
        # leaves it untouched.
        grad_beta = numpy.zeros(1)
    else:
        grad_beta = partial_terms.grad_beta()
    grad_X_mu = partial_terms.grad_X_mu()
    grad_X_S = partial_terms.grad_X_S()
    ####################################################################################################################
    # Debug comparison to GPy
    ####################################################################################################################
    import GPy
    gkern = GPy.kern.rbf(Q, global_statistics['sf2'].squeeze(), global_statistics['alpha'].squeeze()**-0.5, True)
    gpy = GPy.models.BayesianGPLVM(GPy.likelihoods.Gaussian(Y, global_statistics['beta']**-1), Q, X_mu, X_S, num_inducing=M, Z=global_statistics['Z'], kernel=gkern)
    GPy_lml = gpy.log_likelihood()
    GPy_grad = gpy._log_likelihood_gradients()
    # Slice GPy's flat gradient vector back into the individual parameters.
    dF_dmu = GPy_grad[0:(N * Q)].reshape(N, Q)
    dF_ds = GPy_grad[(N * Q):2*(N * Q)].reshape(N, Q)
    dF_dZ = GPy_grad[2*(N * Q):2*(N * Q)+(M*Q)].reshape(M, Q)
    dF_dsigma2 = GPy_grad[2*(N * Q)+(M*Q)]
    dF_dalpha = GPy_grad[2*(N * Q)+(M*Q)+1:2*(N * Q)+(M*Q)+3]
    dF_dbeta = GPy_grad[2*(N * Q)+(M*Q)+3:]
    # Our gradients, converted to GPy's parameterisation for comparison.
    dF_dmu2 = grad_X_mu
    dF_ds2 = grad_X_S
    dF_dZ2 = grad_Z
    dF_dalpha2 = grad_alpha * -2 * global_statistics['alpha']**1.5
    dF_dsigma22 = grad_sf2
    dF_dbeta2 = grad_beta * -1 * global_statistics['beta']**2
    # Print a marker for any gradient that disagrees with GPy beyond 1e-6.
    if not numpy.sum(numpy.abs(dF_dmu - dF_dmu2)) < 10**-6:
        print '1'
    if not numpy.sum(numpy.abs(dF_dZ - dF_dZ2)) < 10**-6:
        print '2'
    if not numpy.sum(numpy.abs(dF_ds - dF_ds2)) < 10**-6:
        print '3'
    if not numpy.sum(numpy.abs(dF_dalpha - dF_dalpha2)) < 10**-6:
        print '4'
    if not numpy.sum(numpy.abs(dF_dsigma2 - dF_dsigma22)) < 10**-6:
        print '5'
    if not numpy.sum(numpy.abs(dF_dbeta - dF_dbeta2)) < 10**-6:
        print '6'
    if not numpy.abs(GPy_lml - partial_derivatives['F']) < 10**-6:
        print '7'
    #print 'gradient'
    #print gradient
    #gradient = {'Z' : dF_dZ,
    #        'sf2' : dF_dsigma2,
    #        'alpha' : dF_dalpha * -0.5 * global_statistics['alpha']**-1.5,
    #        'beta' : dF_dbeta * -1 * global_statistics['beta']**-2,
    #        'X_mu' : dF_dmu,
    #        'X_S' : dF_ds}
    #gradient = flatten_global_statistics(gradient)
    #likelihood = GPy_lml
    gradient = {'Z' : grad_Z,
            'sf2' : grad_sf2,
            'alpha' : grad_alpha,
            'beta' : grad_beta,
            'X_mu' : grad_X_mu,
            'X_S' : grad_X_S}
    gradient = flatten_global_statistics(gradient)
    likelihood = partial_derivatives['F']
    # Transform the gradient parameters that have to be positive by multiplying
    # them by the gradient of the transform f: g(f(x))' = g'(f(x))f'(x)
    gradient = numpy.array([g * transform_grad(b, x) for b, x, g in zip(flat_global_statistics_bounds, flat_array, gradient)])
    return -1 * likelihood, -1 * gradient
def PCA(Y_name, input_dim):
    """
    Principal component analysis: maximum likelihood solution by SVD
    Adapted from GPy.util.linalg

    Arguments
    ---------
    :param Y_name: path of a comma-separated file holding the NxD data
    :param input_dim: int, dimension of projection

    Returns
    -------
    :rval X: - Nxinput_dim np.array of dimensionality reduced data,
        whitened so each retained component has unit standard deviation
    """
    data = numpy.genfromtxt(Y_name, delimiter=',')
    centred = data - data.mean(axis=0)
    U, singular_values, Vt = numpy.linalg.svd(centred, full_matrices=False)
    X = U[:, :input_dim]
    W = numpy.dot(numpy.diag(singular_values), Vt).T[:, :input_dim]
    # Whiten the projection; W carries the compensating scale.
    scale = X.std(axis=0)
    X /= scale
    W *= scale
    return X
def flatten_global_statistics(global_statistics):
    """Concatenate every statistic array into one flat float vector.

    Iterates the dict in its native order; the inverse operation is
    rebuild_global_statistics().
    """
    pieces = [stat.flatten() for stat in global_statistics.values()]
    # Seeding with an empty float array keeps the result float64 even when
    # every statistic is integer-typed, matching the original accumulation.
    return numpy.concatenate([numpy.array([])] + pieces)
def rebuild_global_statistics(global_statistics_names, flat_array):
    """Inverse of flatten_global_statistics().

    :param global_statistics_names: dict mapping name -> (rows, cols)
    :param flat_array: 1-D array holding the statistics back to back, in
        the dict's iteration order
    :returns: dict mapping name -> (rows, cols)-shaped array
    """
    rebuilt = {}
    offset = 0
    for name, (rows, cols) in global_statistics_names.items():
        count = rows * cols
        rebuilt[name] = flat_array[offset:offset + count].reshape((rows, cols))
        offset += count
    return rebuilt
# Threshold beyond which exp() would under/overflow machine precision;
# above it the softplus transform is numerically the identity.
lim_val = -numpy.log(sys.float_info.epsilon)


def transform(b, x):
    """Map an unconstrained value to its constrained range.

    For a (0, None) bound applies the softplus log(1 + e^x); for
    (None, None) the value passes through unchanged.
    """
    if b == (None, None):
        return x
    if b == (0, None):
        if x > lim_val:
            # Softplus saturates to the identity here.
            return x
        if x < -lim_val:
            return numpy.log(1 + numpy.exp(-lim_val))
        return numpy.log(1 + numpy.exp(x))


def transform_back(b, x):
    """Inverse of transform(): map a constrained value back to (-inf, inf)."""
    if b == (None, None):
        return x
    if b == (0, None):
        if x > lim_val:
            return x
        if x <= sys.float_info.epsilon:
            return numpy.log(-1 + numpy.exp(sys.float_info.epsilon))
        return numpy.log(-1 + numpy.exp(x))


def transform_grad(b, x):
    """Derivative of transform() w.r.t. x, for chain-ruling gradients."""
    if b == (None, None):
        return 1
    if b == (0, None):
        if x > lim_val:
            return 1
        if x < -lim_val:
            return numpy.exp(lim_val) / (numpy.exp(lim_val) + 1)
        return 1 / (numpy.exp(-x) + 1)
# Script entry point: run the full optimisation when executed directly.
if __name__ == '__main__':
    main()
|
markvdw/GParML
|
scg_adapted-example.py
|
Python
|
bsd-3-clause
| 13,706
|
[
"Gaussian"
] |
6fb70fdf0c180fcdc9691bb80980030f934411ff1ce31377c57dcd0d24358d10
|
#! /usr/bin/env python
from openturns import *
# OpenTURNS test harness setup; fix the RNG seed for reproducible output.
TESTPREAMBLE()
RandomGenerator().SetSeed(0)
# Build a two-component bivariate Gaussian mixture: one strongly
# anti-correlated component, one strongly correlated.
aCollection = DistributionCollection(0);
R = CorrelationMatrix(2)
R[0, 1] = -0.99
aCollection.add( Normal([-1.0, 1.0], [1.0, 1.0], R) )
R[0, 1] = 0.99
aCollection.add( Normal([1.0, 1.0], [1.0, 1.0], R) )
# Instantiate one distribution object (equal weights for both components)
distribution = Mixture(aCollection, NumericalPoint(aCollection.getSize(), 1.0))
# Create a mixture classifier
classifier = MixtureClassifier(distribution)
# Create local experts: x -> -x and x -> x
experts = Basis()
experts.add(NumericalMathFunction("x", "-x"))
experts.add(NumericalMathFunction("x", "x"))
# Create a mixture of experts
moe = ExpertMixture(experts, classifier)
moeNMF = NumericalMathFunction(moe)
print "Mixture of experts=", moe
# Evaluate the mixture of experts on some points
for i in range(5):
    p = NumericalPoint(1, -0.3 + 0.8 * i / 4.0)
    print "moe ( %.6g )=" % p[0], moe(p)
    print "moeNMF( %.6g )=" % p[0], moeNMF(p)
|
dbarbier/privot
|
python/test/t_ExpertMixture_std.py
|
Python
|
lgpl-3.0
| 975
|
[
"MOE"
] |
da6ccb60fcc96a5e1ab5a91824021fbd1c44d76a825b25fd888f5fe15842772c
|
#!/usr/bin/env python
"""
ugrid classes
set of classes for working with unstructured model grids
The "ugrid" class is the base class: it stores everything in memory
It can read from and write to netcdf files in the UGRID format.
It may be able to reference a netcdf file at some point, rather than storing
directly in memory.
NOTE: only tested for triangular and quad mesh grids at the moment.
"""
from __future__ import (absolute_import, division, print_function)
import hashlib
from collections import OrderedDict
import numpy as np
import gridded.pyugrid.read_netcdf as read_netcdf
from gridded.pyugrid.util import point_in_tri
from gridded.utilities import get_writable_dataset
# from gridded.pyugrid.uvar import UVar
# __all__ = ['UGrid',
# 'UVar']
# datatype used for indexes -- might want to change for 64 bit some day.
IND_DT = np.int32
# datatype used for node coordinates.
NODE_DT = np.float64
class UGrid(object):
"""
A basic class to hold an unstructured grid as defined in the UGrid convention.
The internal structure mirrors the netcdf data standard.
"""
    def __init__(self,
                 nodes=None,
                 node_lon=None,
                 node_lat=None,
                 faces=None,
                 edges=None,
                 boundaries=None,
                 face_face_connectivity=None,
                 face_edge_connectivity=None,
                 edge_coordinates=None,
                 face_coordinates=None,
                 boundary_coordinates=None,
                 data=None,
                 mesh_name="mesh",
                 ):
        """
        ugrid class -- holds, saves, etc. an unstructured grid

        :param nodes=None : the coordinates of the nodes
        :type nodes: (NX2) array of floats

        :param faces=None : the faces of the grid. Indexes for the nodes array.
        :type faces: (NX3) array of integers

        :param edges=None : the edges of the grid. Indexes for the nodes array.
        :type edges: (NX2) array of integers

        :param boundaries=None: specification of the boundaries are usually a
                                subset of edges where boundary condition
                                information, etc is stored.
                                (NX2) integer array of indexes for the nodes
                                array.
        :type boundaries: numpy array of integers

        :param face_face_connectivity=None: connectivity arrays.
        :param face_edge_connectivity=None: connectivity arrays.

        :param edge_coordinates=None: representative coordinate of the edges.
        :param face_coordinates=None: representative coordinate of the faces.
        :param boundary_coordinates=None: representative coordinate of the
                                          boundaries.

        :param edge_coordinates=None: representative coordinate of the edges
        :type edge_coordinates: (NX2) array of floats

        :param face_coordinates=None: representative coordinate of the faces
                                      (NX2) float array
        :type face_coordinates: (NX2) array of floats

        :param boundary_coordinates=None: representative coordinate of the
                                          boundaries
        :type boundary_coordinates: (NX2) array of floats

        :param data = None: associated variables
        :type data: dict of UVar objects

        :param mesh_name = "mesh": optional name for the mesh
        :type mesh_name: string

        Often this is too much data to pass in as literals -- so usually
        specialized constructors will be used instead (load from file, etc).

        The index variables faces can be a masked array. The mask is
        used for so called flexible meshes. Flexible meshes contain
        cells with varying number of nodes per face. See the flexible
        mesh section in the convention for further details.
        """
        # Nodes may be given either as one (N, 2) array or as separate
        # lon/lat columns -- never both.
        if ((nodes is not None) and
            ((node_lon is not None) or
             (node_lat is not None))):
            raise TypeError("You need to provide a single nodes array "
                            "or node_lon and node_lat")
        if nodes is None:
            if node_lon is not None and node_lat is not None:
                nodes = np.ma.column_stack((node_lon, node_lat))
        # Assignments below go through the property setters, which coerce
        # to the canonical dtypes (NODE_DT / IND_DT).
        self.nodes = nodes
        self.faces = faces
        self.edges = edges
        self.boundaries = boundaries
        self.face_face_connectivity = face_face_connectivity
        self.face_edge_connectivity = face_edge_connectivity
        self.edge_coordinates = edge_coordinates
        self.face_coordinates = face_coordinates
        self.boundary_coordinates = boundary_coordinates
        self.mesh_name = mesh_name
        # # the data associated with the grid
        # # should be a dict of UVar objects
        # self._data = {}  # The data associated with the grid.
        # if data is not None:
        #     for dataset in data.values():
        #         self.add_data(dataset)
        # A kdtree is used to locate nodes.
        # It will be created if/when it is needed.
        self._kdtree = None      # lazily-built node locator
        self._cell_tree = None   # lazily-built cell locator
        self._ind_memo_dict = OrderedDict()
        self._alpha_memo_dict = OrderedDict()
    @classmethod
    def from_ncfile(klass, nc_url, mesh_name=None):  # , load_data=False):
        """
        create a UGrid object from a netcdf file name (or opendap url)

        :param nc_url: the filename or OpenDap url you want to load
        :param mesh_name=None: the name of the mesh you want. If None, then
                               you'll get the only mesh in the file. If there
                               is more than one mesh in the file, a ValueError
                               Will be raised
        :returns: a new, populated UGrid instance
        """
        grid = klass()
        # Delegate all parsing to the read_netcdf helper module.
        read_netcdf.load_grid_from_ncfilename(nc_url, grid, mesh_name)  # , load_data)
        return grid
    @classmethod
    def from_nc_dataset(klass, nc, mesh_name=None):  # , load_data=False):
        """
        create a UGrid object from a netcdf file (or opendap url)

        :param nc: An already open Dataset object
        :type nc: netCDF4.DataSet

        :param mesh_name=None: the name of the mesh you want. If None, then
                               you'll get the only mesh in the file. If there
                               is more than one mesh in the file, a ValueError
                               Will be raised

        # :param load_data=False: flag to indicate whether you want to load the
        #                         associated data or not. The mesh will be
        #                         loaded in any case. If False, only the mesh
        #                         will be loaded. If True, then all the data
        #                         associated with the mesh will be loaded.
        #                         This could be huge!
        # :type load_data: boolean

        :returns: a new, populated UGrid instance
        """
        grid = klass()
        # Delegate all parsing to the read_netcdf helper module.
        read_netcdf.load_grid_from_nc_dataset(nc, grid, mesh_name)  # , load_data)
        return grid
@property
def info(self):
"""
summary of information about the grid
"""
msg = ["UGrid object:"]
msg.append("Number of nodes: %i" % len(self.nodes))
msg.append("Number of faces: %i with %i vertices per face" %
(len(self.faces), self.num_vertices))
if self.boundaries is not None:
msg.append("Number of boundaries: %i" % len(self.boundaries))
# if self._data:
# msg.append("Variables: " + ", ".join([str(v) for v in self._data.keys()]))
return "\n".join(msg)
    def check_consistent(self):
        """
        Check if the various data is consistent: the edges and faces reference
        existing nodes, etc.
        """
        # Placeholder: validation has not been implemented yet.
        raise NotImplementedError
@property
def num_vertices(self):
"""
Maximum number of vertices in a face.
"""
if self._faces is None:
return None
else:
return self._faces.shape[1]
    @property
    def nodes(self):
        """(N, 2) array of node coordinates."""
        return self._nodes

    @property
    def node_lon(self):
        """First coordinate column of the nodes."""
        return self._nodes[:, 0]

    @property
    def node_lat(self):
        """Second coordinate column of the nodes."""
        return self._nodes[:, 1]

    @nodes.setter
    def nodes(self, nodes_coords):
        # Room here to do consistency checking, etc.
        # For now -- simply make sure it's a numpy array.
        if nodes_coords is None:
            # NOTE(review): assigns through the property, which re-enters
            # this setter with the empty array (terminating on the second
            # pass); setting self._nodes directly would be clearer.
            self.nodes = np.zeros((0, 2), dtype=NODE_DT)
        else:
            self._nodes = np.asanyarray(nodes_coords, dtype=NODE_DT)

    @nodes.deleter
    def nodes(self):
        # If there are no nodes, there can't be anything else.
        self._nodes = np.zeros((0, 2), dtype=NODE_DT)
        self._edges = None
        self._faces = None
        self._boundaries = None
    @property
    def faces(self):
        """(num_faces, num_vertices) array of node indexes, or None."""
        return self._faces

    @faces.setter
    def faces(self, faces_indexes):
        # Room here to do consistency checking, etc.
        # For now -- simply make sure it's a numpy array.
        if faces_indexes is not None:
            self._faces = np.asanyarray(faces_indexes, dtype=IND_DT)
        else:
            self._faces = None
            # Other things are no longer valid.
            self._face_face_connectivity = None
            self._face_edge_connectivity = None
@faces.deleter
def faces(self):
self._faces = None
self._faces = None
# Other things are no longer valid.
self._face_face_connectivity = None
self._face_edge_connectivity = None
self.edge_coordinates = None
    @property
    def edges(self):
        """(num_edges, 2) array of node indexes, or None."""
        return self._edges

    @edges.setter
    def edges(self, edges_indexes):
        # Room here to do consistency checking, etc.
        # For now -- simply make sure it's a numpy array.
        if edges_indexes is not None:
            self._edges = np.asanyarray(edges_indexes, dtype=IND_DT)
        else:
            self._edges = None
            # Without edges the face-edge mapping is meaningless.
            self._face_edge_connectivity = None

    @edges.deleter
    def edges(self):
        # Drop the edges and everything that references them.
        self._edges = None
        self._face_edge_connectivity = None
        self.edge_coordinates = None
    @property
    def boundaries(self):
        """(num_boundaries, 2) array of node indexes, or None."""
        return self._boundaries

    @boundaries.setter
    def boundaries(self, boundaries_indexes):
        # Room here to do consistency checking, etc.
        # For now -- simply make sure it's a numpy array.
        if boundaries_indexes is not None:
            self._boundaries = np.asanyarray(boundaries_indexes, dtype=IND_DT)
        else:
            self._boundaries = None

    @boundaries.deleter
    def boundaries(self):
        # Drop the boundaries and their representative coordinates.
        self._boundaries = None
        self.boundary_coordinates = None
    @property
    def face_face_connectivity(self):
        """(num_faces, num_vertices) face-to-face index array, or None."""
        return self._face_face_connectivity

    @face_face_connectivity.setter
    def face_face_connectivity(self, face_face_connectivity):
        # Add more checking?
        if face_face_connectivity is not None:
            face_face_connectivity = np.asanyarray(face_face_connectivity,
                                                   dtype=IND_DT)
            # Shape must match the existing faces array.
            if face_face_connectivity.shape != (len(self.faces),
                                                self.num_vertices):
                msg = ("face_face_connectivity must be size "
                       "(num_faces, {})").format
                raise ValueError(msg(self.num_vertices))
        self._face_face_connectivity = face_face_connectivity

    @face_face_connectivity.deleter
    def face_face_connectivity(self):
        self._face_face_connectivity = None
    @property
    def face_edge_connectivity(self):
        """(num_faces, num_vertices) face-to-edge index array, or None."""
        return self._face_edge_connectivity
@face_edge_connectivity.setter
def face_edge_connectivity(self, face_edge_connectivity):
# Add more checking?
if face_edge_connectivity is not None:
face_edge_connectivity = np.asanyarray(face_edge_connectivity,
dtype=IND_DT)
if face_edge_connectivity.shape != (len(self.faces),
self.num_vertices):
msg = ("face_face_connectivity must be size "
"(num_face, {})").format
raise ValueError(msg(self.num_vertices))
self._face_edge_connectivity = face_edge_connectivity
    @face_edge_connectivity.deleter
    def face_edge_connectivity(self):
        # Clear the cached face-to-edge mapping.
        self._face_edge_connectivity = None
# @property
# def data(self):
# """
# dict of data associated with the data arrays
# You can't set this -- must use UGrid.add_data().
# """
# return self._data
def infer_location(self, data):
"""
:param data:
:returns: 'nodes' if data will fit to the nodes,
'faces' if the data will fit to the faces,
'boundaries' if the data will fit the boundaries.
None otherwise.
If data is a netcdf variable, the "location" attribute is checked.
"""
# We should never be calling infer_locations if it was already defined
# try:
# loc = data.location
# if loc == "face":
# # FIXME: should we check the array size in this case?
# return "face"
# except AttributeError:
# pass # try checking array size
# # fixme: should use UGRID compliant nc_attributes if possible
try:
size = data.shape[-1]
except IndexError:
return None # Variable has a size-zero data array
if size == self.nodes.shape[0]:
return 'node'
if self.faces is not None and size == self.faces.shape[0]:
return 'face'
if self.boundaries is not None and size == self.boundaries.shape[0]:
return 'boundary'
return None
# def add_data(self, uvar):
# """
# Add a UVar to the data dict
# :param uvar: the UVar object to add.
# Its name will be the key in the data dict.
# :type uvar: a ugrid.UVar object
# Some sanity checking is done to make sure array sizes are correct.
# """
# # Size check:
# if uvar.location == 'node':
# if self.nodes is None:
# raise ValueError("adding data to nodes "
# "but nodes are None")
# if len(uvar.data) != len(self.nodes):
# raise ValueError("length of data array must match "
# "the number of nodes")
# elif uvar.location == 'edge':
# if self.edges is None:
# raise ValueError("adding data to edges "
# "but edges are None")
# if len(uvar.data) != len(self.edges):
# raise ValueError("length of data array must match "
# "the number of edges")
# elif uvar.location == 'face':
# if self.faces is None:
# raise ValueError("adding data to faces "
# "but faces are None")
# if len(uvar.data) != len(self.faces):
# raise ValueError("length of data array must match "
# "the number of faces")
# elif uvar.location == 'boundary':
# if self.boundaries is None:
# raise ValueError("adding data to boundaries "
# "but boundaries are None")
# if len(uvar.data) != len(self.boundaries):
# raise ValueError("length of data array must match "
# "the number of boundaries")
# else:
# msg = "Can't add data associated with '{}'".format
# raise ValueError(msg(uvar.location))
# self._data[uvar.name] = uvar
# def find_uvars(self, standard_name, location=None):
# """
# Find all :class:`.UVar` objects that match the specified standard name
# :param str standard_name: the standard name attribute.
# Based on the UGRID conventions.
# :keyword location: optional attribute location to narrow the returned
# :py:class:`UVar` objects
# (one of 'node', 'edge', 'face', or 'boundary').
# :return: set of matching :py:class:`UVar` objects
# """
# found = set()
# for ds in self._data.values():
# if not ds.attributes or 'standard_name' not in ds.attributes:
# continue
# if ds.attributes['standard_name'] == standard_name:
# if location is not None and ds.location != location:
# continue
# found.add(ds)
# return found
def locate_nodes(self, points):
"""
Returns the index of the closest nodes to the input locations.
:param points: the lons/lats of locations you want the nodes
closest to.
:type point: a (N, 2) ndarray of points
(or something that can be converted).
:returns: the index of the closest node.
"""
if self._kdtree is None:
self._build_kdtree()
node_inds = self._kdtree.query(points, k=1)[1]
return node_inds
    def _build_kdtree(self):
        """Build the KD-tree of node positions used by locate_nodes."""
        # Only import if it's used -- scipy is an optional dependency.
        try:
            from scipy.spatial import cKDTree
        except ImportError:
            raise ImportError("The scipy package is required to use "
                              "UGrid.locate_nodes\n"
                              " -- nearest neighbor interpolation")
        self._kdtree = cKDTree(self.nodes)
    def _hash_of_pts(self, points):
        """
        Returns a SHA1 hash of the array of points passed in.

        Used as the memoization key for point-based computations.
        """
        return hashlib.sha1(points.tobytes()).hexdigest()
    def _add_memo(self, points, item, D, _copy=False, _hash=None):
        """
        Store a computed result, keyed by a hash of its input points.

        :param points: List of points to be hashed.
        :param item: Result of computation to be stored.
        :param D: Dict that will store hash -> item mapping.
        :param _copy: If True, store a private copy of ``item``.
        :param _hash: If hash is already computed it may be passed in here.
        """
        # NOTE(review): D.popitem(last=False) assumes D is an
        # OrderedDict (plain dict.popitem takes no argument) --
        # confirm at the call sites.
        if _copy:
            item = item.copy()
        item.setflags(write=False)
        if _hash is None:
            _hash = self._hash_of_pts(points)
        if D is not None:
            D[_hash] = item
            # Keep the memo bounded: evict the oldest entry beyond 6.
            if len(D.keys()) > 6:
                D.popitem(last=False)
            D[_hash].setflags(write=False)
def _get_memoed(self, points, D, _copy=False, _hash=None):
if _hash is None:
_hash = self._hash_of_pts(points)
if (D is not None and _hash in D):
return D[_hash].copy() if _copy else D[_hash]
else:
return None
    def locate_faces(self, points, method='celltree', _copy=False, _memo=True, _hash=None):
        """
        Returns the face indices, one per point.

        Points that are not in the mesh will have an index of -1

        If a single point is passed in, a single index will be returned.
        If a sequence of points is passed in an array of indexes will be returned.

        :param points: The points that you want to locate -- (lon, lat). If the shape of point
                       is 1D, function will return a scalar index. If it is 2D, it will return
                       a 1D array of indices.
        :type point: array-like containing one or more points: shape (2,) for one point, shape (N, 2)
                     for more than one point.

        :param method='celltree': method to use. Options are 'celltree', 'simple'.
                                  for 'celltree' the celltree2d pacakge must be installed:
                                  https://github.com/NOAA-ORR-ERD/cell_tree2d/
                                  'simple' is very, very slow for large grids.
        :type simple: str

        This version utilizes the CellTree data structure.
        """
        points = np.asarray(points, dtype=np.float64)
        # Remember whether a single bare (2,) point was passed in, so a
        # scalar index can be returned at the end.
        just_one = (points.ndim == 1)
        points.shape = (-1, 2)
        if _memo:
            if _hash is None:
                _hash = self._hash_of_pts(points)
            # Short-circuit: return any previously computed result.
            result = self._get_memoed(points, self._ind_memo_dict, _copy, _hash)
            if result is not None:
                return result
        if method == 'celltree':
            try:
                import cell_tree2d
            except ImportError:
                raise ImportError("the cell_tree2d package must be installed to use the celltree search:\n"
                                  "https://github.com/NOAA-ORR-ERD/cell_tree2d/")
            if self._cell_tree is None:
                # Build the cell tree lazily on first use.
                self.build_celltree()
            indices = self._cell_tree.locate(points)
        elif method == 'simple':
            # Brute force: test each point against every face.
            indices = np.zeros((points.shape[0]), dtype=IND_DT)
            for n, point in enumerate(points):
                for i, face in enumerate(self._faces):
                    f = self._nodes[face]
                    if point_in_tri(f, point):
                        indices[n] = i
                        break
                else:
                    # for/else: no face contained this point.
                    indices[n] = -1
        else:
            raise ValueError('"method" must be one of: "celltree", "simple"')
        if _memo:
            self._add_memo(points, indices, self._ind_memo_dict, _copy, _hash)
        if just_one:
            return indices[0]
        else:
            return indices
    def build_celltree(self):
        """
        Tries to build the celltree for the current UGrid. Will fail if nodes
        or faces is not defined, or if the cell_tree2d package is not
        installed.
        """
        from cell_tree2d import CellTree
        if self.nodes is None or self.faces is None:
            raise ValueError(
                "Nodes and faces must be defined in order to create and use CellTree")
        self._cell_tree = CellTree(self.nodes, self.faces)
    def interpolation_alphas(self, points, indices=None, _copy=False, _memo=True, _hash=None):
        """
        Given an array of points, this function will return the bilinear interpolation alphas
        for each of the three nodes of the face that the point is located in. If the point is
        not located on the grid, the alphas are set to 0

        :param points: Nx2 numpy array of lat/lon coordinates
        :param indices: If the face indices of the points is already known, it can be passed
                        in to save repeating the effort.

        :return: Nx3 numpy array of interpolation factors

        TODO: mask the indices that aren't on the grid properly.
        """
        if _memo:
            if _hash is None:
                _hash = self._hash_of_pts(points)
            # Short-circuit: return any previously computed alphas.
            result = self._get_memoed(points, self._alpha_memo_dict, _copy, _hash)
            if result is not None:
                return result
        if indices is None:
            indices = self.locate_faces(points, 'celltree', _copy, _memo, _hash)
        # Corner-node coordinates of the containing face for each point.
        node_positions = self.nodes[self.faces[indices]]
        (lon1, lon2, lon3) = node_positions[:, :, 0].T
        (lat1, lat2, lat3) = node_positions[:, :, 1].T
        reflats = points[:, 1]
        reflons = points[:, 0]
        # These are (normalized) triangle area / barycentric-style
        # weights: each alpha is a signed cross product over the shared
        # denominator.
        denoms = (
            (lat3 - lat1) * (lon2 - lon1) - (lon3 - lon1) * (lat2 - lat1))
        # alphas should all add up to 1
        alpha1s = (reflats - lat3) * (lon3 - lon2) - \
            (reflons - lon3) * (lat3 - lat2)
        alpha2s = (reflons - lon1) * (lat3 - lat1) - \
            (reflats - lat1) * (lon3 - lon1)
        alpha3s = (reflats - lat1) * (lon2 - lon1) - \
            (reflons - lon1) * (lat2 - lat1)
        alphas = np.column_stack(
            (alpha1s / denoms, alpha2s / denoms, alpha3s / denoms))
        # Zero out the alphas of points that were not found in any face.
        alphas[indices == -1] *= 0
        if _memo:
            self._add_memo(points, alphas, self._alpha_memo_dict, _copy, _hash)
        return alphas
    def interpolate_var_to_points(self,
                                  points,
                                  variable,
                                  location=None,
                                  fill_value=0,
                                  indices=None,
                                  alphas=None,
                                  slices=None,
                                  _copy=False,
                                  _memo=True,
                                  _hash=None):
        """
        Interpolates a variable on one of the grids to an array of points.

        :param points: Nx2 Array of lon/lat coordinates to be interpolated to.

        :param variable: Array-like of values to associate at location on grid
                         (node, center, edge1, edge2). This may be more than a
                         2-dimensional array, but you must pass 'slices' kwarg
                         with appropriate slice collection to reduce it to
                         2 dimensions.

        :param location: One of ('node', 'center', 'edge1', 'edge2') 'edge1' is
                         conventionally associated with the 'vertical' edges and
                         likewise 'edge2' with the 'horizontal'

        :param fill_value: If masked values are encountered in interpolation, this
                           value takes the place of the masked value

        :param indices: If computed already, array of Nx2 cell indices can be passed
                        in to increase speed.
        :param alphas: If computed already, array of alphas can be passed in to increase
                       speed.

        With a numpy array:

        sgrid.interpolate_var_to_points(points, sgrid.u[time_idx, depth_idx])

        With a raw netCDF Variable:

        sgrid.interpolate_var_to_points(points, nc.variables['u'], slices=[time_idx, depth_idx])

        If you have pre-computed information, you can pass it in to avoid unnecessary
        computation and increase performance.

        - ind = # precomputed indices of points

        - alphas = # precomputed alphas (useful if interpolating to the same points frequently)
        """
        points = np.asarray(points, dtype=np.float64).reshape(-1, 2)
        # location should be already known by the variable
        if hasattr(variable, 'location'):
            location = variable.location
        # But if it's not, then it can be inferred
        # (for compatibility with old code)
        if location is None:
            location = self.infer_location(variable)
            # Cache the inferred location on the variable itself.
            variable.location = location
        if location is None:
            raise ValueError("Data is incompatible with grid nodes or faces")
        if slices is not None:
            if len(slices) == 1:
                slices = slices[0]
            variable = variable[slices]
        _hash = self._hash_of_pts(points)
        inds = self.locate_faces(points, 'celltree', _copy, _memo, _hash)
        if location == 'face':
            # Piecewise-constant: take the value of the containing face,
            # zeroing out points that fell outside the grid (inds == -1).
            vals = variable[inds]
            vals[inds == -1] = vals[inds == -1] * 0
            return vals
            # raise NotImplementedError("Currently does not support interpolation of a "
            #                           "variable defined on the faces")
        if location == 'node':
            # Weighted sum of the three corner-node values, using the
            # interpolation alphas; off-grid points are zeroed out.
            pos_alphas = self.interpolation_alphas(points, inds, _copy, _memo, _hash)
            vals = variable[self.faces[inds]]
            vals[inds == -1] = vals[inds == -1] * 0
            return np.sum(vals * pos_alphas, axis=1)
        return None
    # Short alias for the method above.
    interpolate = interpolate_var_to_points
def build_face_face_connectivity(self):
"""
Builds the face_face_connectivity array: giving the neighbors of each cell.
Note: arbitrary order and CW vs CCW may not be consistent.
"""
num_vertices = self.num_vertices
num_faces = self.faces.shape[0]
face_face = np.zeros((num_faces, num_vertices), dtype=IND_DT)
face_face += -1 # Fill with -1.
# Loop through all the faces to find the matching edges:
edges = {} # dict to store the edges.
for i, face in enumerate(self.faces):
# Loop through edges of the cell:
for j in range(num_vertices):
if j < self.num_vertices - 1:
edge = (face[j], face[j + 1])
else:
edge = (face[-1], face[0])
if edge[0] > edge[1]: # Sort the node numbers.
edge = (edge[1], edge[0])
# see if it is already in there
prev_edge = edges.pop(edge, None)
if prev_edge is not None:
face_num, edge_num = prev_edge
face_face[i, j] = face_num
face_face[face_num, edge_num] = i
else:
edges[edge] = (i, j) # face num, edge_num.
self._face_face_connectivity = face_face
def get_lines(self):
if self.edges is None:
self.build_edges()
return self.nodes[self.edges]
def build_edges(self):
"""
Builds the edges array: all the edges defined by the faces
This will replace the existing edge array, if there is one.
NOTE: arbitrary order -- should the order be preserved?
"""
num_vertices = self.num_vertices
if self.faces is None:
# No faces means no edges
self._edges = None
return
num_faces = self.faces.shape[0]
face_face = np.zeros((num_faces, num_vertices), dtype=IND_DT)
face_face += -1 # Fill with -1.
# Loop through all the faces to find all the edges:
edges = set() # Use a set so no duplicates.
for i, face in enumerate(self.faces):
# Loop through edges:
for j in range(num_vertices):
edge = (face[j - 1], face[j])
if edge[0] > edge[1]: # Flip them
edge = (edge[1], edge[0])
edges.add(edge)
self._edges = np.array(list(edges), dtype=IND_DT)
def build_boundaries(self):
"""
Builds the boundary segments from the cell array.
It is assumed that -1 means no neighbor, which indicates a boundary
This will over-write the existing boundaries array if there is one.
This is a not-very-smart just loop through all the faces method.
"""
boundaries = []
for i, face in enumerate(self.face_face_connectivity):
for j, neighbor in enumerate(face):
if neighbor == -1:
if j == self.num_vertices - 1:
bound = (self.faces[i, -1], self.faces[i, 0])
else:
bound = (self.faces[i, j], self.faces[i, j + 1])
boundaries.append(bound)
self.boundaries = boundaries
def build_face_edge_connectivity(self):
"""
Builds the face-edge connectivity array
Not implemented yet.
"""
try:
from scipy.spatial import cKDTree
except ImportError:
raise ImportError("The scipy package is required to use "
"UGrid.locatbuild_face_edge_connectivity")
faces = self.faces
edges = self.edges.copy()
face_edges = np.dstack([faces, np.roll(faces, 1, 1)])
if np.ma.isMA(faces) and np.ndim(faces.mask):
face_edges.mask = np.dstack([
faces.mask, np.roll(faces.mask, 1, 1)
])
face_edges.sort(axis=-1)
edges.sort(axis=-1)
tree = cKDTree(edges)
face_edge_2d = face_edges.reshape((-1, 2))
if np.ma.isMA(faces) and faces.mask.any():
mask = face_edge_2d.mask.any(-1)
connectivity = np.ma.ones(
len(face_edge_2d), dtype=face_edge_2d.dtype,
)
connectivity.mask = mask
connectivity[~mask] = tree.query(face_edge_2d[~mask])[1]
else:
connectivity = tree.query(face_edge_2d)[1]
self.face_edge_connectivity = np.roll(
connectivity.reshape(faces.shape), -1, -1
)
def build_face_coordinates(self):
"""
Builds the face_coordinates array, using the average of the
nodes defining each face.
Note that you may want a different definition of the face
coordinates than this computes, but this is here to have
an easy default.
This will write-over an existing face_coordinates array.
Useful if you want this in the output file.
"""
self.face_coordinates = self.nodes[self.faces].mean(axis=1)
def build_edge_coordinates(self):
"""
Builds the edge_coordinates array, using the average of the
nodes defining each edge.
Note that you may want a different definition of the edge
coordinates than this computes, but this is here to have
an easy default.
This will write-over an existing edge_coordinates array
Useful if you want this in the output file
"""
self.edge_coordinates = self.nodes[self.edges].mean(axis=1)
def build_boundary_coordinates(self):
"""
Builds the boundary_coordinates array, using the average of the
nodes defining each boundary segment.
Note that you may want a different definition of the boundary
coordinates than this computes, but this is here to have
an easy default.
This will write-over an existing face_coordinates array
Useful if you want this in the output file
"""
self.boundary_coordinates = self.nodes[self.boundaries].mean(axis=1)
def save_as_netcdf(self, filename, format='netcdf4'):
"""
save the dataset to a file
:param filename: full path to file to save to.
:param format: format to save -- 'netcdf3' or 'netcdf4'
are the only options at this point.
"""
self.save(filename, format='netcdf4')
    def save(self, filepath, format='netcdf4', variables={}):
        """
        Save the ugrid object as a netcdf file.

        :param filepath: path to file you want to save to. An existing one
                         will be clobbered if it already exists.

        :param variables: dict of gridded.Variable objects to save to file

        Follows the convention established by the netcdf UGRID working group:

        http://ugrid-conventions.github.io/ugrid-conventions

        NOTE: Variables are saved here, because different conventions do it
              differently.
        """
        # NOTE(review): the mutable default for `variables` is shared
        # across calls; it is only read here, so it is harmless as-is.
        format_options = ('netcdf3', 'netcdf4')
        if format not in format_options:
            raise ValueError("format: {} not supported. Options are: {}".format(format, format_options))
        mesh_name = self.mesh_name
        nclocal = get_writable_dataset(filepath)
        # Dimensions: one per mesh element type that is present.
        nclocal.createDimension(mesh_name + "_num_node", len(self.nodes))
        if self._edges is not None:
            nclocal.createDimension(
                mesh_name + "_num_edge", len(self._edges))
        if self._boundaries is not None:
            nclocal.createDimension(mesh_name + "_num_boundary",
                                    len(self._boundaries))
        if self._faces is not None:
            nclocal.createDimension(
                mesh_name + "_num_face", len(self._faces))
            nclocal.createDimension(mesh_name + "_num_vertices",
                                    self._faces.shape[1])
        nclocal.createDimension("two", 2)
        # mesh topology
        mesh = nclocal.createVariable(mesh_name, IND_DT, (),)
        mesh.cf_role = "mesh_topology"
        mesh.long_name = "Topology data of 2D unstructured mesh"
        mesh.topology_dimension = 2
        mesh.node_coordinates = "{0}_node_lon {0}_node_lat".format(mesh_name) # noqa
        if self._edges is not None:
            # Attribute required if variables will be defined on edges.
            mesh.edge_node_connectivity = mesh_name + "_edge_nodes"
            if self.edge_coordinates is not None:
                # Optional attribute (requires edge_node_connectivity).
                coord = "{0}_edge_lon {0}_edge_lat".format
                mesh.edge_coordinates = coord(mesh_name)
        if self._faces is not None:
            mesh.face_node_connectivity = mesh_name + "_face_nodes"
            if self.face_coordinates is not None:
                # Optional attribute.
                coord = "{0}_face_lon {0}_face_lat".format
                mesh.face_coordinates = coord(mesh_name)
        if self.face_edge_connectivity is not None:
            # Optional attribute (requires edge_node_connectivity).
            mesh.face_edge_connectivity = mesh_name + "_face_edges"
        if self.face_face_connectivity is not None:
            # Optional attribute.
            mesh.face_face_connectivity = mesh_name + "_face_links"
        if self._boundaries is not None:
            mesh.boundary_node_connectivity = mesh_name + "_boundary_nodes"
        # FIXME: This could be re-factored to be more generic, rather than
        # separate for each type of data see the coordinates example below.
        if self._faces is not None:
            nc_create_var = nclocal.createVariable
            face_nodes = nc_create_var(mesh_name + "_face_nodes", IND_DT,
                                       (mesh_name + '_num_face',
                                        mesh_name + '_num_vertices'),)
            face_nodes[:] = self.faces
            face_nodes.cf_role = "face_node_connectivity"
            face_nodes.long_name = ("Maps every triangular face to "
                                    "its three corner nodes.")
            face_nodes.start_index = IND_DT(0)
        if self._edges is not None:
            nc_create_var = nclocal.createVariable
            edge_nodes = nc_create_var(mesh_name + "_edge_nodes", IND_DT,
                                       (mesh_name + '_num_edge', 'two'),)
            edge_nodes[:] = self.edges
            edge_nodes.cf_role = "edge_node_connectivity"
            edge_nodes.long_name = ("Maps every edge to the two "
                                    "nodes that it connects.")
            edge_nodes.start_index = IND_DT(0)
        if self._boundaries is not None:
            nc_create_var = nclocal.createVariable
            boundary_nodes = nc_create_var(mesh_name + "_boundary_nodes",
                                           IND_DT,
                                           (mesh_name + '_num_boundary',
                                            'two'),)
            boundary_nodes[:] = self.boundaries
            boundary_nodes.cf_role = "boundary_node_connectivity"
            boundary_nodes.long_name = ("Maps every boundary segment to "
                                        "the two nodes that it connects.")
            boundary_nodes.start_index = IND_DT(0)
        # Optional "coordinate variables."
        for location in ['face', 'edge', 'boundary']:
            loc = "{0}_coordinates".format(location)
            if getattr(self, loc) is not None:
                for axis, ind in [('lat', 1), ('lon', 0)]:
                    nc_create_var = nclocal.createVariable
                    name = "{0}_{1}_{2}".format(mesh_name, location, axis)
                    dimensions = "{0}_num_{1}".format(mesh_name, location)
                    var = nc_create_var(name, NODE_DT,
                                        dimensions=(dimensions),)
                    loc = "{0}_coordinates".format(location)
                    var[:] = getattr(self, loc)[:, ind]
                    # Attributes of the variable.
                    var.standard_name = ("longitude" if axis == 'lon'
                                         else 'latitude')
                    var.units = ("degrees_east" if axis == 'lon'
                                 else 'degrees_north')
                    name = "Characteristics {0} of 2D mesh {1}".format
                    var.long_name = name(var.standard_name, location)
        # The node data.
        node_lon = nclocal.createVariable(mesh_name + '_node_lon',
                                          self._nodes.dtype,
                                          (mesh_name + '_num_node',),
                                          chunksizes=(len(self.nodes),),
                                          # zlib=False,
                                          # complevel=0,
                                          )
        node_lon[:] = self.nodes[:, 0]
        node_lon.standard_name = "longitude"
        node_lon.long_name = "Longitude of 2D mesh nodes."
        node_lon.units = "degrees_east"
        node_lat = nclocal.createVariable(mesh_name + '_node_lat',
                                          self._nodes.dtype,
                                          (mesh_name + '_num_node',),
                                          chunksizes=(len(self.nodes),),
                                          # zlib=False,
                                          # complevel=0,
                                          )
        node_lat[:] = self.nodes[:, 1]
        node_lat.standard_name = "latitude"
        node_lat.long_name = "Latitude of 2D mesh nodes."
        node_lat.units = "degrees_north"
        self._save_variables(nclocal, variables)
        # Flush to disk; the open dataset is handed back to the caller.
        nclocal.sync()
        return nclocal
    def _save_variables(self, nclocal, variables):
        """
        Save the Variables to the open netcdf dataset, one netcdf variable
        per entry, dimensioned and chunked by the mesh element it lives on.
        """
        mesh_name = self.mesh_name
        for name, var in variables.items():
            # Pick dimension, coordinates attribute and chunking based
            # on where the variable lives on the mesh.
            if var.location == 'node':
                shape = (mesh_name + '_num_node',)
                coordinates = "{0}_node_lon {0}_node_lat".format(mesh_name)
                chunksizes = (len(self.nodes),)
            elif var.location == 'face':
                shape = (mesh_name + '_num_face',)
                coord = "{0}_face_lon {0}_face_lat".format
                coordinates = (coord(mesh_name) if self.face_coordinates
                               is not None else None)
                chunksizes = (len(self.faces),)
            elif var.location == 'edge':
                shape = (mesh_name + '_num_edge',)
                coord = "{0}_edge_lon {0}_edge_lat".format
                coordinates = (coord(mesh_name) if self.edge_coordinates
                               is not None else None)
                chunksizes = (len(self.edges),)
            elif var.location == 'boundary':
                shape = (mesh_name + '_num_boundary',)
                coord = "{0}_boundary_lon {0}_boundary_lat".format
                bcoord = self.boundary_coordinates
                coordinates = (coord(mesh_name) if bcoord
                               is not None else None)
                chunksizes = (len(self.boundaries),)
            else:
                raise ValueError("I don't know how to save a variable located on: {}".format(var.location))
            # NOTE(review): these prints look like debugging leftovers;
            # consider replacing them with logging.
            print("Saving:", var)
            print("name is:", var.name)
            print("var data is:", var.data)
            print("var data shape is:", var.data.shape)
            data_var = nclocal.createVariable(var.name,
                                              var.data.dtype,
                                              shape,
                                              chunksizes=chunksizes,
                                              # zlib=False,
                                              # complevel=0,
                                              )
            print("new dat var shape:", shape)
            data_var[:] = var.data[:]
            # Add the standard attributes:
            data_var.location = var.location
            data_var.mesh = mesh_name
            if coordinates is not None:
                data_var.coordinates = coordinates
            # Add the extra attributes.
            for att_name, att_value in var.attributes.items():
                setattr(data_var, att_name, att_value)
|
NOAA-ORR-ERD/gridded
|
gridded/pyugrid/ugrid.py
|
Python
|
unlicense
| 44,524
|
[
"NetCDF"
] |
ba439e4bcdcccc6fcc28df7f58d14eb09da4cf9e0843d5c94d9e0666e7419ca0
|
# This is a Command Line SWD,BITS PILANI Searcher (Python)
# Make sure you are connected on BITS,Pilani LAN.
# Make Sure you are using LINUX
# Run the Python FIle from the Terminal as : python swd_query.py
from splinter import Browser
import os,splinter
def return_results(browser):
    """Submit the search form and return the results grid element."""
    # Find and click the 'Search' button.
    browser.find_by_name('searchBtn').click()
    # Grab the results table from the refreshed page.
    return browser.find_by_id('searchResultGridView')
# Prompt for the (possibly partial) name and/or ID number to search.
# NOTE(review): raw_input and the print statements below are Python 2
# only; this script will not run under Python 3 as-is.
sname = str(raw_input("Name or Part of Name to search(May Be Empty) : "))
idno = str(raw_input("Id No to Search : (May Be Empty) : "))
with Browser('firefox') as browser:
    # Visit URL -- try the on-LAN short hostname first, then the
    # public fallback address.
    try :
        url = "http://swd/StudentSearch.aspx"
        browser.visit(url)
    except :
        url = "http://www.bits-pilani.ac.in:12349/StudentSearch.aspx"
        browser.visit(url)
    browser.fill('idnoTxt',idno)
    browser.fill('nameTxt',sname)
    # Search with the full name; on no result, retry with the first
    # name token, then with the second.
    # NOTE(review): the bare "except :" clauses below swallow every
    # error (including KeyboardInterrupt) -- consider narrowing them.
    try :
        results = return_results(browser)
        print results[0].value
    except splinter.exceptions.ElementDoesNotExist :
        try :
            browser.fill('idnoTxt',idno)
            browser.fill('nameTxt',sname.split(" ")[0])
            results = return_results(browser)
            print results[0].value
        except splinter.exceptions.ElementDoesNotExist :
            try :
                browser.fill('idnoTxt',idno)
                browser.fill('nameTxt',sname.split(" ")[1])
                results = return_results(browser)
                print results[0].value
            except splinter.exceptions.ElementDoesNotExist :
                print "No Such Search Result Found"
            except :
                print "ERROR Occured !! \nPlease Try Again !!"
        except :
            print "ERROR Occured !! \nPlease Try Again !!"
    except :
        print "ERROR Occured !! \nPlease Try Again !!"
#The Following Code should be uncommented if you want the Output in a HTML File
#f=open('query_results.html',"w");
#f.write("<html><table>"+results.html+"</table></html>")
#f.close()
#Output the Results
#os.system("firefox query_results.html")
|
devenbansod/SWD-Query
|
swd-query.py
|
Python
|
gpl-2.0
| 2,302
|
[
"VisIt"
] |
d0273637c753c551fabea703d6ecff71cfe0f9b767afdfadc444813c8450006e
|
"""annotate fusion outputs from STAR and Tophat
Supported:
oncofuse: http://www.unav.es/genetica/oncofuse.html
github: https://github.com/mikessh/oncofuse
"""
from __future__ import print_function
import os
import pysam
from bcbio.utils import file_exists
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.provenance import do
# ## oncofuse fusion trancript detection
def run(data):
    """Run Oncofuse fusion detection on STAR/Tophat fusion output.

    cmd line: java -Xmx1G -jar Oncofuse.jar input_file input_type tissue_type output_file

    :param data: bcbio sample dictionary (config, genome_build, dirs, ...)
    :returns: path to the oncofuse output file, or None when the genome
              build is unsupported or no fusion input file exists.
    """
    config = data["config"]
    genome_build = data.get("genome_build", "")
    input_type, input_dir, input_file = _get_input_para(data)
    if genome_build == "GRCh37":  # assume genome_build is hg19 otherwise
        # Oncofuse expects hg19-style chromosome names, so GRCh37
        # aligner output has to be rewritten first.
        if config["algorithm"].get("aligner") in ["star"]:
            input_file = _fix_star_junction_output(input_file)
        if config["algorithm"].get("aligner") in ["tophat", "tophat2"]:
            input_file = _fix_tophat_junction_output(input_file)
    elif "hg19" not in genome_build:
        # Unsupported genome build for oncofuse.
        return None
    # Handle cases when the fusion file doesn't exist.
    if not file_exists(input_file):
        return None
    out_file = os.path.join(input_dir, "oncofuse_out.txt")
    if file_exists(out_file):
        return out_file
    oncofuse_jar = config_utils.get_jar("Oncofuse",
                                        config_utils.get_program("oncofuse", config, "dir"))
    tissue_type = _oncofuse_tissue_arg_from_config(data)
    resources = config_utils.get_resources("oncofuse", config)
    cl = ["java"]
    cl += resources.get("jvm_opts", ["-Xms750m", "-Xmx5g"])
    with file_transaction(data, out_file) as tx_out_file:
        cl += ["-jar", oncofuse_jar, input_file, input_type, tissue_type, tx_out_file]
        cmd = " ".join(cl)
        try:
            do.run(cmd, "oncofuse fusion detection", data)
        except Exception:
            # Bug fix: a bare "except:" also swallowed KeyboardInterrupt
            # and SystemExit.  Mark the failure in the output file so
            # downstream steps see a (flagged) result.
            do.run("touch %s && echo '# failed' >> %s" % (tx_out_file, tx_out_file), "oncofuse failed", data)
    return out_file
def is_non_zero_file(fpath):
    """Return True if ``fpath`` is an existing file with non-zero size."""
    # Idiom fix: "True if cond else False" is redundant -- the boolean
    # expression already evaluates to True/False.
    return os.path.isfile(fpath) and os.path.getsize(fpath) > 0
def _get_input_para(data):
    """Work out the oncofuse input type, directory and fusion file.

    :returns: (input_type, input_dir, input_file) for tophat/star
              aligners, or None for anything else.
    """
    TOPHAT_FUSION_OUTFILE = "fusions.out"
    STAR_FUSION_OUTFILE = "Chimeric.out.junction"
    config = data["config"]
    is_disambiguate = len(config["algorithm"].get("disambiguate", [])) > 0
    aligner = config["algorithm"].get("aligner")
    if aligner == "tophat2":
        aligner = "tophat"
    names = data["rgnames"]
    # set some default hard filters:
    N = 2 # min. spanning reads
    M = 4 # min. supporting reads (spanning + encompassing)
    align_dir_parts = os.path.join(data["dirs"]["work"], "align", names["sample"])
    # Disambiguated runs keep per-genome subdirectories.
    align_dir_parts = os.path.join(align_dir_parts, data["genome_build"]) if is_disambiguate else align_dir_parts
    if aligner in ["tophat", "tophat2"]:
        align_dir_parts = os.path.join(data["dirs"]["work"], "align", names["sample"], names["lane"]+"_%s" % aligner)
        return "tophat-%d-%d" % (N,M), align_dir_parts, os.path.join(align_dir_parts, TOPHAT_FUSION_OUTFILE)
    if aligner in ["star"]:
        star_junction_file = os.path.join(align_dir_parts, names["lane"]+STAR_FUSION_OUTFILE)
        if is_disambiguate:
            # Prefer a junction file already filtered against the
            # contamination genome, creating it if both inputs exist.
            contamination_bam = data["disambiguate"][ config["algorithm"]["disambiguate"][0] ]
            disambig_out_file = star_junction_file + "_disambiguated"
            if file_exists(disambig_out_file):
                star_junction_file = disambig_out_file
            elif file_exists(star_junction_file) and file_exists(contamination_bam):
                star_junction_file = _disambiguate_star_fusion_junctions(star_junction_file, contamination_bam,
                                                                         disambig_out_file, data)
        return "rnastar-%d-%d" % (N,M), align_dir_parts, star_junction_file
    return None
def _fix_tophat_junction_output(chimeric_out_junction_file):
    """Rewrite a Tophat fusions.out file with hg19-style chromosome names.

    GRCh37 names (1..22, X, Y, MT) become hg19 names (chr1..chrY, chrM);
    lines whose chromosomes cannot be mapped are dropped.

    :returns: path to the rewritten ``<input>.hg19`` file.
    """
    # for fusions.out
    out_file = chimeric_out_junction_file + ".hg19"
    with open(out_file, "w") as out_handle:
        with open(chimeric_out_junction_file, "r") as in_handle:
            for line in in_handle:
                parts = line.split("\t")
                left, right = parts[0].split("-")
                leftchr = _h37tohg19(left)
                rightchr = _h37tohg19(right)
                if not leftchr or not rightchr:
                    # Unsupported contig (e.g. GL scaffolds): skip line.
                    continue
                # Perf fix: reuse the already-converted names instead of
                # calling _h37tohg19 a second time for each side.
                parts[0] = "%s-%s" % (leftchr, rightchr)
                out_handle.write("\t".join(parts))
    return out_file
def _fix_star_junction_output(chimeric_out_junction_file):
    """Rewrite a STAR Chimeric.out.junction file with hg19-style
    chromosome names; lines with unmappable chromosomes are dropped."""
    # for Chimeric.out.junction
    out_file = chimeric_out_junction_file + ".hg19"
    with open(out_file, "w") as out_handle:
        with open(chimeric_out_junction_file, "r") as in_handle:
            for line in in_handle:
                parts = line.split("\t")
                # Columns 0 and 3 hold the two chromosome names.
                parts[0] = _h37tohg19(parts[0])
                parts[3] = _h37tohg19(parts[3])
                if parts[0] and parts[3]:
                    out_handle.write("\t".join(parts))
    return out_file
def _h37tohg19(chromosome):
MAX_CHROMOSOMES = 23
if chromosome in [str(x) for x in range(1, MAX_CHROMOSOMES)] + ["X", "Y"]:
new_chrom = "chr%s" % chromosome
elif chromosome == "MT":
new_chrom = "chrM"
# not a supported chromosome
else:
return None
return new_chrom
def _oncofuse_tissue_arg_from_config(data):
"""Retrieve oncofuse arguments supplied through input configuration.
tissue_type is the library argument, which tells Oncofuse to use its
own pre-built gene expression libraries. There are four pre-built
libraries, corresponding to the four supported tissue types:
EPI (epithelial origin),
HEM (hematological origin),
MES (mesenchymal origin) and
AVG (average expression, if tissue source is unknown).
"""
SUPPORTED_TISSUE_TYPE = ["EPI", "HEM", "MES", "AVG"]
if data.get("metadata", {}).get("tissue") in SUPPORTED_TISSUE_TYPE:
return data.get("metadata", {}).get("tissue")
else:
return "AVG"
def _disambiguate_star_fusion_junctions(star_junction_file, contamination_bam, disambig_out_file, data):
    """ Disambiguate detected fusions based on alignments to another species.

    Reads whose names have a real (mapped, non-secondary) alignment in the
    contamination BAM are dropped from the STAR junction file.

    :returns: path to the disambiguated junction file.
    """
    out_file = disambig_out_file
    fusiondict = {}
    # Map read name (column 10) -> original junction line.
    # Bug fix: the input file handle used to be left open.
    with open(star_junction_file, "r") as in_handle:
        for my_line in in_handle:
            my_line_split = my_line.strip().split("\t")
            if len(my_line_split) < 10:
                continue
            fusiondict[my_line_split[9]] = my_line.strip("\n")
    # Remove any read that aligns to the contamination genome.
    samfile = pysam.Samfile(contamination_bam, "rb")
    try:
        for my_read in samfile:
            if 0x4 & my_read.flag or my_read.is_secondary:  # flag 0x4 means unaligned
                continue
            if my_read.qname in fusiondict:
                fusiondict.pop(my_read.qname)
    finally:
        # Bug fix: the BAM handle used to be left open.
        samfile.close()
    with file_transaction(data, out_file) as tx_out_file:
        # Bug fix: the output handle used to be left open, so data could
        # still be buffered when file_transaction moved the file.
        with open(tx_out_file, 'w') as myhandle:
            for my_key in fusiondict:
                print(fusiondict[my_key], file=myhandle)
    return out_file
|
guillermo-carrasco/bcbio-nextgen
|
bcbio/rnaseq/oncofuse.py
|
Python
|
mit
| 7,201
|
[
"pysam"
] |
a4a355fbc7d500dadf7f191b5ef2af84dd1aa3bef945d4fb7bdb5c873590e41f
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
"""
Miscellaneous tools used by OpenERP.
"""
import cProfile
import collections
import datetime
import hmac as hmac_lib
import hashlib
import io
import logging
import os
import pickle as pickle_
import re
import socket
import subprocess
import sys
import threading
import time
import traceback
import types
import unicodedata
import zipfile
from collections import OrderedDict
from collections.abc import Iterable, Mapping, MutableMapping, MutableSet
from contextlib import contextmanager
from difflib import HtmlDiff
from functools import wraps
from itertools import islice, groupby as itergroupby
from operator import itemgetter
import babel
import babel.dates
import passlib.utils
import pytz
import werkzeug.utils
from lxml import etree
import odoo
import odoo.addons
# get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# There are moved to loglevels until we refactor tools.
from odoo.loglevels import get_encodings, ustr, exception_to_unicode # noqa
from . import pycompat
from .cache import *
from .config import config
from .parse_version import parse_version
from .which import which
# Module-level logger. NOTE(review): relies on `import logging`, which is not
# visible in this chunk's import block — confirm it is imported at file top.
_logger = logging.getLogger(__name__)
# List of etree._Element subclasses that we choose to ignore when parsing XML.
# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones.
SKIPPED_ELEMENT_TYPES = (etree._Comment, etree._ProcessingInstruction, etree.CommentBase, etree.PIBase, etree._Entity)
# Configure default global parser
# resolve_entities=False avoids entity-expansion (XXE-style) surprises for all
# parses that use the default parser.
etree.set_default_parser(etree.XMLParser(resolve_entities=False))
#----------------------------------------------------------
# Subprocesses
#----------------------------------------------------------
def find_in_path(name):
    """Locate executable *name* on the PATH, also honouring the configured bin_path."""
    search_dirs = os.environ.get('PATH', os.defpath).split(os.pathsep)
    if config.get('bin_path') and config['bin_path'] != 'None':
        search_dirs.append(config['bin_path'])
    return which(name, path=os.pathsep.join(search_dirs))
def _exec_pipe(prog, args, env=None):
cmd = (prog,) + args
# on win32, passing close_fds=True is not compatible
# with redirecting std[in/err/out]
close_fds = os.name=="posix"
pop = subprocess.Popen(cmd, bufsize=-1, stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=close_fds, env=env)
return pop.stdin, pop.stdout
def exec_command_pipe(name, *args):
    """Resolve *name* on the PATH and run it, returning its (stdin, stdout) pipes.

    :raises Exception: if the command cannot be found.
    """
    executable = find_in_path(name)
    if executable:
        return _exec_pipe(executable, args)
    raise Exception('Command `%s` not found.' % name)
#----------------------------------------------------------
# Postgres subprocesses
#----------------------------------------------------------
def find_pg_tool(name):
    """Locate the PostgreSQL client tool *name*, honouring the configured pg_path.

    :raises Exception: if the tool cannot be found.
    """
    pg_dir = None
    if config['pg_path'] and config['pg_path'] != 'None':
        pg_dir = config['pg_path']
    try:
        return which(name, path=pg_dir)
    except IOError:
        raise Exception('Command `%s` not found.' % name)
def exec_pg_environ():
    """Return a copy of os.environ with the PG* variables forced to Odoo's
    database configuration.

    Note: On systems where pg_restore/pg_dump require an explicit password
    (i.e. on Windows where TCP sockets are used), it is necessary to pass the
    postgres user password in the PGPASSWORD environment variable or in a
    special .pgpass file.
    See also http://www.postgresql.org/docs/8.4/static/libpq-envars.html
    """
    env = os.environ.copy()
    for option, var in (('db_host', 'PGHOST'),
                        ('db_port', 'PGPORT'),
                        ('db_user', 'PGUSER'),
                        ('db_password', 'PGPASSWORD')):
        value = odoo.tools.config[option]
        if value:
            # the port may be configured as an int; libpq wants a string
            env[var] = str(value) if option == 'db_port' else value
    return env
def exec_pg_command(name, *args):
    """Run the PostgreSQL tool *name* with *args*, discarding its output.

    :raises Exception: if the tool is missing or exits with a non-zero status.
    """
    prog = find_pg_tool(name)
    env = exec_pg_environ()
    args2 = (prog,) + args
    # Fix: the previous implementation opened os.devnull in *read* mode and
    # used it as the stdout target, which makes child writes fail with EBADF.
    # subprocess.DEVNULL opens it correctly and needs no cleanup.
    rc = subprocess.call(args2, env=env, stdout=subprocess.DEVNULL,
                         stderr=subprocess.STDOUT)
    if rc:
        raise Exception('Postgres subprocess %s error %s' % (args2, rc))
def exec_pg_command_pipe(name, *args):
    """Run the PostgreSQL tool *name* and return its (stdin, stdout) pipes."""
    return _exec_pipe(find_pg_tool(name), args, exec_pg_environ())
#----------------------------------------------------------
# File paths
#----------------------------------------------------------
#file_path_root = os.getcwd()
#file_path_addons = os.path.join(file_path_root, 'addons')
def file_open(name, mode="r", subdir='addons', pathinfo=False):
    """Open a file from the OpenERP root, using a subdir folder.
    Example::
    >>> file_open('hr/report/timesheet.xsl')
    >>> file_open('addons/hr/report/timesheet.xsl')
    @param name name of the file
    @param mode file open mode
    @param subdir subdirectory
    @param pathinfo if True returns tuple (fileobject, filepath)
    @return fileobject if pathinfo is False else (fileobject, filepath)
    """
    # Candidate roots: every addons path plus the configured root path.
    adps = odoo.addons.__path__
    rtp = os.path.normcase(os.path.abspath(config['root_path']))
    basename = name
    if os.path.isabs(name):
        # It is an absolute path
        # Is it below 'addons_path' or 'root_path'?
        name = os.path.normcase(os.path.normpath(name))
        for root in adps + [rtp]:
            root = os.path.normcase(os.path.normpath(root)) + os.sep
            if name.startswith(root):
                # strip the matching root: `name` becomes relative again
                base = root.rstrip(os.sep)
                name = name[len(base) + 1:]
                break
        else:
            # It is outside the OpenERP root: skip zipfile lookup.
            base, name = os.path.split(name)
            return _fileopen(name, mode=mode, basedir=base, pathinfo=pathinfo, basename=basename)
    # NOTE(review): the 'addons/' prefix is checked twice — once on the raw
    # name, once after joining `subdir` — the duplication is intentional in
    # the original logic (the join can introduce the prefix).
    if name.replace(os.sep, '/').startswith('addons/'):
        subdir = 'addons'
        name2 = name[7:]
    elif subdir:
        name = os.path.join(subdir, name)
        if name.replace(os.sep, '/').startswith('addons/'):
            subdir = 'addons'
            name2 = name[7:]
        else:
            name2 = name
    # First, try to locate in addons_path
    if subdir:
        for adp in adps:
            try:
                return _fileopen(name2, mode=mode, basedir=adp,
                                 pathinfo=pathinfo, basename=basename)
            except IOError:
                pass
    # Second, try to locate in root_path
    return _fileopen(name, mode=mode, basedir=rtp, pathinfo=pathinfo, basename=basename)
def _fileopen(path, mode, basedir, pathinfo, basename=None):
    """Open ``basedir/path``, looking first on disk, then inside a zipped module.

    :raises ValueError: if the resolved name lies under none of the known
        addons/root paths
    :raises IOError: if the file cannot be found
    """
    name = os.path.normpath(os.path.normcase(os.path.join(basedir, path)))
    # Refuse any resolved path that escapes the known addons/root directories.
    paths = odoo.addons.__path__ + [config['root_path']]
    for addons_path in paths:
        addons_path = os.path.normpath(os.path.normcase(addons_path)) + os.sep
        if name.startswith(addons_path):
            break
    else:
        raise ValueError("Unknown path: %s" % name)
    if basename is None:
        basename = name
    # Give higher priority to module directories, which is
    # a more common case than zipped modules.
    if os.path.isfile(name):
        if 'b' in mode:
            fo = open(name, mode)
        else:
            # text mode is always read as UTF-8
            fo = io.open(name, mode, encoding='utf-8')
        if pathinfo:
            return fo, name
        return fo
    # Support for loading modules in zipped form.
    # This will not work for zipped modules that are sitting
    # outside of known addons paths.
    # Walk the path upwards, testing "<head>.zip" at each level with the
    # remainder of the path accumulated in `zipname`.
    head = os.path.normpath(path)
    zipname = False
    while os.sep in head:
        head, tail = os.path.split(head)
        if not tail:
            break
        if zipname:
            zipname = os.path.join(tail, zipname)
        else:
            zipname = tail
        zpath = os.path.join(basedir, head + '.zip')
        if zipfile.is_zipfile(zpath):
            zfile = zipfile.ZipFile(zpath)
            try:
                # read the whole member into memory and return it as a stream
                fo = io.BytesIO()
                fo.write(zfile.read(os.path.join(
                    os.path.basename(head), zipname).replace(
                        os.sep, '/')))
                fo.seek(0)
                if pathinfo:
                    return fo, name
                return fo
            except Exception:
                # member missing from this archive: keep walking upwards
                pass
    # Not found
    if name.endswith('.rml'):
        raise IOError('Report %r does not exist or has been deleted' % basename)
    raise IOError('File not found: %s' % basename)
#----------------------------------------------------------
# iterables
#----------------------------------------------------------
def flatten(list):
    """Flatten an arbitrarily nested sequence into a flat list.
    Author: Christophe Simonis (christophe@tinyerp.com)
    Examples::
    >>> flatten(['a'])
    ['a']
    >>> flatten('b')
    ['b']
    >>> flatten( [] )
    []
    >>> flatten( [[], [[]]] )
    []
    >>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] )
    ['a', 'b', 'c', 'd', 'e', 'f']
    >>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]])
    >>> flatten(t)
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
    """
    r = []
    for e in list:
        # Fix: `collections.Iterable` was removed in Python 3.10; use the
        # `Iterable` ABC already imported from collections.abc at module
        # level. Strings/bytes are treated as atoms, not iterables.
        if isinstance(e, (bytes, str)) or not isinstance(e, Iterable):
            r.append(e)
        else:
            r.extend(flatten(e))
    return r
def reverse_enumerate(l):
    """Like enumerate, but walking the sequence from the end to the start.

    >>> list(reverse_enumerate(['a', 'b', 'c']))
    [(2, 'c'), (1, 'b'), (0, 'a')]
    """
    return zip(reversed(range(len(l))), reversed(l))
def partition(pred, elems):
    """Split *elems* into two lists: elements satisfying *pred*, then the rest.

    Equivalent to ``(filter(pred, elems), filter(lambda x: not pred(x), elems))``.
    """
    matching, rest = [], []
    for item in elems:
        bucket = matching if pred(item) else rest
        bucket.append(item)
    return matching, rest
def topological_sort(elems):
    """Sort the keys of *elems* so that every key's dependencies precede it.

    :param elems: dict ``{element: dependencies}``; dependencies not present
        as keys of *elems* are simply omitted from the result.
    :returns: list of the keys of *elems* in dependency order.
    """
    # depth-first traversal, inspired by [Tarjan 1976]
    # http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
    ordered = []
    seen = set()

    def _visit(node):
        if node in seen:
            return
        seen.add(node)
        # emit all dependencies first, then the node itself
        for dep in elems.get(node, ()):
            _visit(dep)
        if node in elems:
            ordered.append(node)

    for node in elems:
        _visit(node)
    return ordered
def merge_sequences(*iterables):
    """Merge several iterables into one list, preserving the partial order
    implied by each iterable, with a bias towards the end for later ones::

        seq = merge_sequences(['A', 'B', 'C'])
        assert seq == ['A', 'B', 'C']
        seq = merge_sequences(
            ['A', 'B', 'C'],
            ['Z'],            # 'Z' can be anywhere
            ['Y', 'C'],       # 'Y' must precede 'C';
            ['A', 'X', 'Y'],  # 'X' must follow 'A' and precede 'Y'
        )
        assert seq == ['A', 'B', 'X', 'Y', 'C', 'Z']
    """
    # an OrderedDict keeps elements in first-seen order by default
    dependencies = OrderedDict()  # {item: elements_before_item}
    for sequence in iterables:
        previous = None
        for position, element in enumerate(sequence):
            before = dependencies.setdefault(element, [])
            if position:
                before.append(previous)
            previous = element
    return topological_sort(dependencies)
try:
    import xlwt
    # add some sanitization to respect the excel sheet name restrictions
    # as the sheet name is often translatable, can not control the input
    class PatchedWorkbook(xlwt.Workbook):
        # Drop-in replacement: sanitizes the sheet name, then delegates.
        def add_sheet(self, name, cell_overwrite_ok=False):
            # invalid Excel character: []:*?/\
            name = re.sub(r'[\[\]:*?/\\]', '', name)
            # maximum size is 31 characters
            name = name[:31]
            return super(PatchedWorkbook, self).add_sheet(name, cell_overwrite_ok=cell_overwrite_ok)
    # monkey-patch the class globally so every user gets sanitized names
    xlwt.Workbook = PatchedWorkbook
except ImportError:
    # optional dependency: None signals "xls export unavailable"
    xlwt = None
try:
    import xlsxwriter
    # add some sanitization to respect the excel sheet name restrictions
    # as the sheet name is often translatable, can not control the input
    class PatchedXlsxWorkbook(xlsxwriter.Workbook):
        # TODO when xlsxwriter bump to 0.9.8, add worksheet_class=None parameter instead of kw
        def add_worksheet(self, name=None, **kw):
            if name:
                # invalid Excel character: []:*?/\
                name = re.sub(r'[\[\]:*?/\\]', '', name)
                # maximum size is 31 characters
                name = name[:31]
            return super(PatchedXlsxWorkbook, self).add_worksheet(name, **kw)
    # monkey-patch the class globally so every user gets sanitized names
    xlsxwriter.Workbook = PatchedXlsxWorkbook
except ImportError:
    # optional dependency: None signals "xlsx export unavailable"
    xlsxwriter = None
def to_xml(s):
    """Escape the XML special characters ``&``, ``<`` and ``>`` in *s*."""
    # '&' must be replaced first so the inserted entities are not re-escaped
    for char, entity in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;')):
        s = s.replace(char, entity)
    return s
def get_iso_codes(lang):
    """Shorten locales like ``fr_FR`` to ``fr`` when the language part equals
    the lowercased country part; other codes are returned untouched."""
    parts = lang.split('_')
    if len(parts) > 1 and parts[0] == parts[1].lower():
        return parts[0]
    return lang
def scan_languages():
    """Return all languages supported by OpenERP for translation.

    :returns: list of (lang_code, lang_name) pairs, sorted by name
    :rtype: [(str, str)]
    """
    csvpath = odoo.modules.module.get_resource_path('base', 'data', 'res.lang.csv')
    try:
        # read (code, name) from languages in base/data/res.lang.csv
        with open(csvpath, 'rb') as csvfile:
            reader = pycompat.csv_reader(csvfile, delimiter=',', quotechar='"')
            header = next(reader)
            code_idx = header.index("code")
            name_idx = header.index("name")
            result = [(row[code_idx], row[name_idx]) for row in reader]
    except Exception:
        _logger.error("Could not read %s", csvpath)
        result = []
    # fall back to English when the csv is missing or empty
    return sorted(result or [('en_US', u'English')], key=itemgetter(1))
def mod10r(number):
    """Append the recursive modulo-10 check digit (Swiss payment references).

    :param number: account or invoice number as a string; non-digit
        characters are copied through but do not affect the checksum
    :return: *number* with its check digit appended
    """
    codec = [0, 9, 4, 6, 8, 2, 7, 1, 3, 5]
    carry = 0
    for char in number:
        if char.isdigit():
            carry = codec[(int(char) + carry) % 10]
    return number + str((10 - carry) % 10)
def str2bool(s, default=None):
    """Convert common truthy/falsy strings to bool.

    :raises ValueError: when *s* is unrecognized and *default* is None;
        otherwise ``bool(default)`` is returned for unrecognized input.
    """
    value = ustr(s).lower()
    truthy = 'y yes 1 true t on'.split()
    falsy = 'n no 0 false f off'.split()
    if value in truthy:
        return True
    if value in falsy:
        return False
    if default is None:
        raise ValueError('Use 0/1/yes/no/true/false/on/off')
    return bool(default)
def human_size(sz):
    """Return *sz* in a human readable form (e.g. ``"1.50 Kb"``).

    A string argument is measured by its length; falsy input returns False.
    """
    if not sz:
        return False
    units = ('bytes', 'Kb', 'Mb', 'Gb', 'Tb')
    size = float(len(sz) if isinstance(sz, str) else sz)
    idx = 0
    while size >= 1024 and idx < len(units) - 1:
        size /= 1024
        idx += 1
    return "%0.2f %s" % (size, units[idx])
def logged(f):
    """Decorator logging each call's arguments, result and duration at DEBUG level."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        from pprint import pformat
        lines = ['Call -> function: %r' % f]
        lines.extend(' arg %02d: %s' % (i, pformat(a)) for i, a in enumerate(args))
        lines.extend(' kwarg %10s: %s' % (k, pformat(v)) for k, v in kwargs.items())
        started = time.time()
        res = f(*args, **kwargs)
        lines.append(' result: %s' % pformat(res))
        lines.append(' time delta: %s' % (time.time() - started))
        _logger.debug('\n'.join(lines))
        return res
    return wrapper
class profile(object):
    """Decorator running the wrapped function under cProfile and dumping the
    collected stats to a file."""

    def __init__(self, fname=None):
        # stats file path; defaults to "<function name>.cprof" at call time
        self.fname = fname

    def __call__(self, f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            profiler = cProfile.Profile()
            result = profiler.runcall(f, *args, **kwargs)
            profiler.dump_stats(self.fname or ("%s.cprof" % (f.__name__,)))
            return result
        return wrapper
def detect_ip_addr():
    """Try a very crude method to figure out a valid external
    IP or hostname for the current machine. Don't rely on this
    for binding to an interface, but it could be used as basis
    for constructing a remote URL to the server.
    """
    def _detect_ip_addr():
        from array import array
        from struct import pack, unpack
        try:
            import fcntl
        except ImportError:
            fcntl = None
        ip_addr = None
        if not fcntl: # not UNIX:
            # fall back to a simple hostname lookup
            host = socket.gethostname()
            ip_addr = socket.gethostbyname(host)
        else: # UNIX:
            # get all interfaces:
            nbytes = 128 * 32
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            # NOTE(review): Python-2-era code — array('B', '\0' * nbytes) and
            # the str-based splits on the bytes buffer below raise TypeError
            # on Python 3; the outer try/except masks this and the function
            # then returns 'localhost'. Confirm before relying on this branch.
            names = array('B', '\0' * nbytes)
            #print 'names: ', names
            # ioctl 0x8912 — presumably SIOCGIFCONF (interface list) on Linux
            outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0]
            namestr = names.tostring()
            # try 64 bit kernel:
            for i in range(0, outbytes, 40):
                name = namestr[i:i+16].split('\0', 1)[0]
                if name != 'lo':
                    ip_addr = socket.inet_ntoa(namestr[i+20:i+24])
                    break
            # try 32 bit kernel:
            if ip_addr is None:
                ifaces = [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)]
                for ifname in [iface for iface in ifaces if iface if iface != 'lo']:
                    # ioctl 0x8915 — presumably SIOCGIFADDR (interface address)
                    ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24])
                    break
        return ip_addr or 'localhost'
    try:
        ip_addr = _detect_ip_addr()
    except Exception:
        # best-effort helper: any failure degrades to 'localhost'
        ip_addr = 'localhost'
    return ip_addr
# Canonical string formats used to (de)serialize dates and datetimes
# server-side, independent of the user's locale.
DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d"
DEFAULT_SERVER_TIME_FORMAT = "%H:%M:%S"
DEFAULT_SERVER_DATETIME_FORMAT = "%s %s" % (
    DEFAULT_SERVER_DATE_FORMAT,
    DEFAULT_SERVER_TIME_FORMAT)
# Length of a date string in the server format (10 for "YYYY-MM-DD").
DATE_LENGTH = len(datetime.date.today().strftime(DEFAULT_SERVER_DATE_FORMAT))
# Python's strftime supports only the format directives
# that are available on the platform's libc, so in order to
# be cross-platform we map to the directives required by
# the C standard (1989 version), always available on platforms
# with a C standard implementation.
# Maps non-portable strftime directives to C89-standard equivalents
# (empty string = directive is dropped entirely).
DATETIME_FORMATS_MAP = {
    '%C': '', # century
    '%D': '%m/%d/%Y', # modified %y->%Y
    '%e': '%d',
    '%E': '', # special modifier
    '%F': '%Y-%m-%d',
    '%g': '%Y', # modified %y->%Y
    '%G': '%Y',
    '%h': '%b',
    '%k': '%H',
    '%l': '%I',
    '%n': '\n',
    '%O': '', # special modifier
    '%P': '%p',
    '%R': '%H:%M',
    '%r': '%I:%M:%S %p',
    '%s': '', #num of seconds since epoch
    '%T': '%H:%M:%S',
    '%t': ' ', # tab
    '%u': ' %w',
    '%V': '%W',
    '%y': '%Y', # Even if %y works, it's ambiguous, so we should use %Y
    '%+': '%Y-%m-%d %H:%M:%S',
    # %Z is a special case that causes 2 problems at least:
    #  - the timezone names we use (in res_user.context_tz) come
    #    from pytz, but not all these names are recognized by
    #    strptime(), so we cannot convert in both directions
    #    when such a timezone is selected and %Z is in the format
    #  - %Z is replaced by an empty string in strftime() when
    #    there is not tzinfo in a datetime value (e.g when the user
    #    did not pick a context_tz). The resulting string does not
    #    parse back if the format requires %Z.
    # As a consequence, we strip it completely from format strings.
    # The user can always have a look at the context_tz in
    # preferences to check the timezone.
    '%z': '',
    '%Z': '',
}
# Maps strftime directive letters (without '%') to their LDML (babel)
# pattern-field equivalents; consumed by posix_to_ldml() below.
POSIX_TO_LDML = {
    'a': 'E',
    'A': 'EEEE',
    'b': 'MMM',
    'B': 'MMMM',
    #'c': '',
    'd': 'dd',
    'H': 'HH',
    'I': 'hh',
    'j': 'DDD',
    'm': 'MM',
    'M': 'mm',
    'p': 'a',
    'S': 'ss',
    'U': 'w',
    'w': 'e',
    'W': 'w',
    'y': 'yy',
    'Y': 'yyyy',
    # see comments above, and babel's format_datetime assumes an UTC timezone
    # for naive datetime objects
    #'z': 'Z',
    #'Z': 'z',
}
def posix_to_ldml(fmt, locale):
    """ Converts a posix/strftime pattern into an LDML date format pattern.

    :param fmt: non-extended C89/C90 strftime pattern
    :param locale: babel locale used for locale-specific conversions (e.g. %x and %X)
    :return: unicode
    """
    out = []
    pending = []  # literal letters awaiting quoting
    percent = False

    def _flush():
        # LDML treats letters as pattern fields, so literal letters collected
        # in `pending` must be emitted inside single quotes
        if pending:
            out.append("'")
            out.append(''.join(pending))
            out.append("'")
            del pending[:]

    for ch in fmt:
        if not percent and ch.isalpha():
            pending.append(ch if ch != "'" else "''")
            continue
        _flush()
        if percent:
            if ch == '%':  # escaped percent
                out.append('%')
            elif ch == 'x':  # date format, short seems to match
                out.append(locale.date_formats['short'].pattern)
            elif ch == 'X':  # time format, seems to include seconds. short does not
                out.append(locale.time_formats['medium'].pattern)
            else:  # look up format char in static mapping
                out.append(POSIX_TO_LDML[ch])
            percent = False
        elif ch == '%':
            percent = True
        else:
            out.append(ch)
    # flush anything remaining in the quoted buffer
    _flush()
    return ''.join(out)
def split_every(n, iterable, piece_maker=tuple):
    """Yield pieces of length *n* from *iterable*; the final piece is shorter
    when ``n`` does not evenly divide the iterable's length.

    :param int n: maximum size of each generated chunk
    :param Iterable iterable: iterable to chunk into pieces
    :param piece_maker: callable collecting each chunk from a slice iterator;
        it *must consume the entire slice*.
    """
    iterator = iter(iterable)
    while True:
        chunk = piece_maker(islice(iterator, n))
        if not chunk:
            break
        yield chunk
def get_and_group_by_field(cr, uid, obj, ids, field, context=None):
    """Read ``field`` for the given ``ids`` on ``obj`` and group the ids by value.

    :param string field: name of the field we want to read and group by
    :return: mapping of field values to the list of ids that have it
    :rtype: dict
    """
    grouped = {}
    for record in obj.read(cr, uid, ids, [field], context=context):
        value = record[field]
        # many2one fields read as (id, display_name) tuples: group by the id
        if isinstance(value, tuple):
            value = value[0]
        grouped.setdefault(value, []).append(record['id'])
    return grouped
def get_and_group_by_company(cr, uid, obj, ids, context=None):
    # Convenience wrapper around get_and_group_by_field for the company_id field.
    return get_and_group_by_field(cr, uid, obj, ids, field='company_id', context=context)
# port of python 2.6's attrgetter with support for dotted notation
def resolve_attr(obj, attr):
    """Resolve a dotted attribute path: ``resolve_attr(o, "a.b")`` -> ``o.a.b``."""
    for part in attr.split("."):
        obj = getattr(obj, part)
    return obj
def attrgetter(*items):
    """Port of Python 2.6's operator.attrgetter with dotted-path support.

    With one item the getter returns a single value; with several it returns
    a tuple of values.
    """
    if len(items) == 1:
        single = items[0]
        return lambda obj: resolve_attr(obj, single)
    return lambda obj: tuple(resolve_attr(obj, item) for item in items)
# ---------------------------------------------
# String management
# ---------------------------------------------
# Inspired by http://stackoverflow.com/questions/517923
def remove_accents(input_str):
    """Suboptimal-but-better-than-nothing replacement of accented latin
    letters by ASCII equivalents. Will obviously change the meaning of
    *input_str* and works only for some cases."""
    if not input_str:
        return input_str
    decomposed = unicodedata.normalize('NFKD', ustr(input_str))
    # drop the combining marks produced by the NFKD decomposition
    return u''.join(ch for ch in decomposed if not unicodedata.combining(ch))
class unquote(str):
    """A str subclass whose repr() is the string itself, without enclosing
    quotes or escaping; the name comes from Lisp's unquote.

    Useful to preserve or inject bare variable names within dicts during
    eval() of a dict's repr(). Use with care.

    >>> unquote('active_id')
    active_id
    >>> d = {'test': unquote('active_id')}
    >>> d
    {'test': active_id}
    """

    def __repr__(self):
        return self
class UnquoteEvalContext(defaultdict):
    """Defaultdict-based evaluation context returning an ``unquote`` string
    for any missing name used during the evaluation.

    Mostly useful for evaluating OpenERP domains/contexts referring to names
    unknown at eval time, so that converting the context/domain back to a
    string preserves the original names.

    **Warning**: used as context for ``eval()`` or ``safe_eval()`` it shadows
    the builtins, which may cause other failures depending on what is
    evaluated.

    >>> context_str = "{'default_user_id': uid, 'default_section_id': section_id}"
    >>> eval(context_str, UnquoteEvalContext(uid=1))
    {'default_user_id': 1, 'default_section_id': section_id}
    """

    def __init__(self, *args, **kwargs):
        # no default_factory: missing keys are handled by __missing__ below
        super(UnquoteEvalContext, self).__init__(None, *args, **kwargs)

    def __missing__(self, key):
        return unquote(key)
class mute_logger(object):
    """Temporarily suppress logging; usable as context manager or decorator.

    @mute_logger('odoo.plic.ploc')
    def do_stuff():
        blahblah()

    with mute_logger('odoo.foo.bar'):
        do_suff()
    """

    def __init__(self, *loggers):
        self.loggers = loggers

    def filter(self, record):
        # installed as a logging filter on entry: reject every record
        return 0

    def __enter__(self):
        for name in self.loggers:
            assert isinstance(name, str), \
                "A logger name must be a string, got %s" % type(name)
            logging.getLogger(name).addFilter(self)

    def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
        for name in self.loggers:
            logging.getLogger(name).removeFilter(self)

    def __call__(self, func):
        @wraps(func)
        def deco(*args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return deco
# private sentinel distinguishing "stream exhausted" from any real value
_ph = object()

class CountingStream(object):
    """Stream wrapper counting the number of elements it has yielded.

    Similar role to ``enumerate``, but for use when the iteration process of
    the stream isn't fully under caller control (the stream can be iterated
    from multiple points, including within a library).

    ``start`` overrides the starting index (the index before the first item
    is returned). Each iteration increases :attr:`~.index` by one.

    .. attribute:: index

        ``int``, index of the last yielded element in the stream. If the
        stream has ended, will give an index 1-past the stream.
    """

    def __init__(self, stream, start=-1):
        self.stream = iter(stream)
        self.index = start
        self.stopped = False

    def __iter__(self):
        return self

    def next(self):
        if self.stopped:
            raise StopIteration()
        self.index += 1
        value = next(self.stream, _ph)
        if value is _ph:
            self.stopped = True
            raise StopIteration()
        return value

    __next__ = next
def stripped_sys_argv(*strip_args):
    """Return sys.argv with some arguments stripped, suitable for reexecution or subprocesses"""
    # Always strip the one-shot options in addition to the caller's list.
    strip_args = sorted(set(strip_args) | set(['-s', '--save', '-u', '--update', '-i', '--init', '--i18n-overwrite']))
    assert all(config.parser.has_option(s) for s in strip_args)
    takes_value = dict((s, config.parser.get_option(s).takes_value()) for s in strip_args)
    # sorted() puts '--long' options before '-short' ones ('-' < letters), so
    # the groupby split yields exactly (longs, shorts) in that order.
    longs, shorts = list(tuple(y) for _, y in itergroupby(strip_args, lambda x: x.startswith('--')))
    longs_eq = tuple(l + '=' for l in longs if takes_value[l])
    args = sys.argv[:]
    def strip(args, i):
        # strip an argument when it is a stripped option (short, long, or
        # '--long=value' form) or the value of the preceding stripped option
        return args[i].startswith(shorts) \
            or args[i].startswith(longs_eq) or (args[i] in longs) \
            or (i >= 1 and (args[i - 1] in strip_args) and takes_value[args[i - 1]])
    return [x for i, x in enumerate(args) if not strip(args, i)]
class ConstantMapping(Mapping):
    """An immutable mapping returning the same value for every single key.

    Useful as a default value for methods.
    """
    __slots__ = ['_value']

    def __init__(self, val):
        self._value = val

    def __len__(self):
        # unlike defaultdict, the mapping always reports itself as empty
        return 0

    def __iter__(self):
        # consistent with __len__: no keys are ever listed
        return iter([])

    def __getitem__(self, item):
        return self._value
def dumpstacks(sig=None, frame=None, thread_idents=None):
    """ Signal handler: dump a stack trace for each existing thread or given
    thread(s) specified through the ``thread_idents`` sequence.
    """
    code = []
    def extract_stack(stack):
        # format one frame stack into human-readable lines
        for filename, lineno, name, line in traceback.extract_stack(stack):
            yield 'File: "%s", line %d, in %s' % (filename, lineno, name)
            if line:
                yield " %s" % (line.strip(),)
    # code from http://stackoverflow.com/questions/132058/getting-stack-trace-from-a-running-python-application#answer-2569696
    # modified for python 2.5 compatibility
    # Odoo worker threads carry uid/dbname/url attributes; default to 'n/a'.
    threads_info = {th.ident: {'repr': repr(th),
                               'uid': getattr(th, 'uid', 'n/a'),
                               'dbname': getattr(th, 'dbname', 'n/a'),
                               'url': getattr(th, 'url', 'n/a')}
                    for th in threading.enumerate()}
    for threadId, stack in sys._current_frames().items():
        if not thread_idents or threadId in thread_idents:
            thread_info = threads_info.get(threadId, {})
            code.append("\n# Thread: %s (db:%s) (uid:%s) (url:%s)" %
                        (thread_info.get('repr', threadId),
                         thread_info.get('dbname', 'n/a'),
                         thread_info.get('uid', 'n/a'),
                         thread_info.get('url', 'n/a')))
            for line in extract_stack(stack):
                code.append(line)
    if odoo.evented:
        # code from http://stackoverflow.com/questions/12510648/in-gevent-how-can-i-dump-stack-traces-of-all-running-greenlets
        import gc
        from greenlet import greenlet
        for ob in gc.get_objects():
            # only live greenlets are interesting
            if not isinstance(ob, greenlet) or not ob:
                continue
            code.append("\n# Greenlet: %r" % (ob,))
            for line in extract_stack(ob.gr_frame):
                code.append(line)
    _logger.info("\n".join(code))
def freehash(arg):
    """hash() that never fails: unhashable mappings and iterables are hashed
    by content, anything else falls back to id()."""
    try:
        return hash(arg)
    except Exception:
        if isinstance(arg, Mapping):
            return hash(frozendict(arg))
        if isinstance(arg, Iterable):
            return hash(frozenset(freehash(item) for item in arg))
        return id(arg)
def clean_context(context):
    """Return *context* without the entries whose key starts with 'default_'."""
    return {
        key: value
        for key, value in context.items()
        if not key.startswith('default_')
    }
class frozendict(dict):
    """ An implementation of an immutable dictionary. """
    # Every mutating dict method raises NotImplementedError; read access is
    # inherited unchanged from dict.
    def __delitem__(self, key):
        raise NotImplementedError("'__delitem__' not supported on frozendict")
    def __setitem__(self, key, val):
        raise NotImplementedError("'__setitem__' not supported on frozendict")
    def clear(self):
        raise NotImplementedError("'clear' not supported on frozendict")
    def pop(self, key, default=None):
        raise NotImplementedError("'pop' not supported on frozendict")
    def popitem(self):
        raise NotImplementedError("'popitem' not supported on frozendict")
    def setdefault(self, key, default=None):
        raise NotImplementedError("'setdefault' not supported on frozendict")
    def update(self, *args, **kwargs):
        raise NotImplementedError("'update' not supported on frozendict")
    def __hash__(self):
        # content-based hash; freehash() copes with unhashable values
        return hash(frozenset((key, freehash(val)) for key, val in self.items()))
class Collector(Mapping):
    """A mapping from keys to lists; essentially a space optimization over
    ``defaultdict(list)``."""
    __slots__ = ['_map']

    def __init__(self):
        self._map = {}

    def add(self, key, val):
        """Append *val* under *key*, skipping duplicates."""
        bucket = self._map.setdefault(key, [])
        if val not in bucket:
            bucket.append(val)

    def __getitem__(self, key):
        # missing keys read as an empty tuple, without creating an entry
        return self._map.get(key, ())

    def __iter__(self):
        return iter(self._map)

    def __len__(self):
        return len(self._map)
class StackMap(MutableMapping):
    """A stack of mappings behaving as a single mapping, used to implement
    nested scopes: lookups search the stack from top to bottom and return the
    first value found; mutations touch only the topmost mapping."""
    __slots__ = ['_maps']

    def __init__(self, m=None):
        self._maps = [] if m is None else [m]

    def __getitem__(self, key):
        for layer in reversed(self._maps):
            try:
                return layer[key]
            except KeyError:
                continue
        raise KeyError(key)

    def __setitem__(self, key, val):
        self._maps[-1][key] = val

    def __delitem__(self, key):
        del self._maps[-1][key]

    def __iter__(self):
        return iter({k for layer in self._maps for k in layer})

    def __len__(self):
        return sum(1 for _key in self)

    def __str__(self):
        return u"<StackMap %s>" % self._maps

    def pushmap(self, m=None):
        self._maps.append({} if m is None else m)

    def popmap(self):
        return self._maps.pop()
class OrderedSet(MutableSet):
    """A set collection that remembers the elements' first insertion order."""
    __slots__ = ['_map']

    def __init__(self, elems=()):
        # backed by an OrderedDict whose values are ignored
        self._map = OrderedDict((element, None) for element in elems)

    def __contains__(self, elem):
        return elem in self._map

    def __iter__(self):
        return iter(self._map)

    def __len__(self):
        return len(self._map)

    def add(self, elem):
        self._map[elem] = None

    def discard(self, elem):
        self._map.pop(elem, None)
class LastOrderedSet(OrderedSet):
    """A set collection that remembers the elements' *last* insertion order."""

    def add(self, elem):
        # re-adding an element moves it to the end
        OrderedSet.discard(self, elem)
        OrderedSet.add(self, elem)
class Callbacks:
    """ A simple queue of callback functions.  Upon run, every function is
    called (in addition order), and the queue is emptied.

        callbacks = Callbacks()

        # add foo
        def foo():
            print("foo")

        callbacks.add(foo)

        # add bar
        @callbacks.add
        def bar():
            print("bar")

        # add foo again
        callbacks.add(foo)

        # call foo(), bar(), foo(), then clear the callback queue
        callbacks.run()

    The queue also provides a ``data`` dictionary, that may be freely used to
    store anything, but is mostly aimed at aggregating data for callbacks.  The
    dictionary is automatically cleared by ``run()`` once all callback functions
    have been called.

        # register foo to process aggregated data
        @callbacks.add
        def foo():
            print(sum(callbacks.data['foo']))

        callbacks.data.setdefault('foo', []).append(1)
        ...
        callbacks.data.setdefault('foo', []).append(2)
        ...
        callbacks.data.setdefault('foo', []).append(3)

        # call foo(), which prints 6
        callbacks.run()

    Given the global nature of ``data``, the keys should identify in a unique
    way the data being stored.  It is recommended to use strings with a
    structure like ``"{module}.{feature}"``.
    """
    __slots__ = ['_funcs', 'data']
    def __init__(self):
        # FIFO of pending callbacks
        self._funcs = collections.deque()
        self.data = {}
    def add(self, func):
        """ Add the given function. """
        self._funcs.append(func)
    def run(self):
        """ Call all the functions (in addition order), then clear associated data.
        """
        while self._funcs:
            func = self._funcs.popleft()
            func()
        self.clear()
    def clear(self):
        """ Remove all callbacks and data from self. """
        self._funcs.clear()
        self.data.clear()
class IterableGenerator:
    """ An iterable object based on a generator function, which is called each
    time the object is iterated over.
    """
    __slots__ = ['func', 'args']
    def __init__(self, func, *args):
        # func: generator function; args: positional arguments for each call
        self.func = func
        self.args = args
    def __iter__(self):
        # calling func anew per iteration makes the object re-iterable
        return self.func(*self.args)
def groupby(iterable, key=None):
    """Return (key, elements) pairs grouping *iterable* by the ``key``
    function. Unlike ``itertools.groupby``, all elements sharing a key are
    aggregated, not only consecutive ones."""
    if key is None:
        key = lambda arg: arg
    buckets = defaultdict(list)
    for item in iterable:
        buckets[key(item)].append(item)
    return buckets.items()
def unique(it):
    """Yield each element of *it* once, preserving first-seen order.

    The iterable's elements must be hashable.

    :param Iterable it:
    :rtype: Iterator
    """
    seen = set()
    for elem in it:
        if elem in seen:
            continue
        seen.add(elem)
        yield elem
class Reverse(object):
    """Wraps a value and reverses its ordering; useful in key functions when
    mixing ascending and descending sorts on non-numeric data, where the
    ``reverse`` flag cannot reorder piecemeal."""
    __slots__ = ['val']

    def __init__(self, val):
        self.val = val

    # every comparison delegates to the wrapped values with the sense flipped
    def __eq__(self, other):
        return self.val == other.val

    def __ne__(self, other):
        return self.val != other.val

    def __ge__(self, other):
        return self.val <= other.val

    def __gt__(self, other):
        return self.val < other.val

    def __le__(self, other):
        return self.val >= other.val

    def __lt__(self, other):
        return self.val > other.val
@contextmanager
def ignore(*exc):
    """ Context manager silently discarding the listed exception classes,
    in the spirit of :func:`contextlib.suppress`. Any other exception
    propagates unchanged.
    """
    try:
        yield
    except exc:
        pass
# Avoid DeprecationWarning while still remaining compatible with werkzeug pre-0.9
if parse_version(getattr(werkzeug, '__version__', '0.0')) < parse_version('0.9.0'):
    # werkzeug < 0.9: escape() only quotes double quotes when asked to.
    def html_escape(text):
        return werkzeug.utils.escape(text, quote=True)
else:
    # werkzeug >= 0.9: quoting is the default behaviour (passing the kwarg
    # would emit a DeprecationWarning on newer releases).
    def html_escape(text):
        return werkzeug.utils.escape(text)
def get_lang(env, lang_code=False):
    """
    Retrieve the first installed lang, checked in this order: the explicit
    ``lang_code`` parameter, the environment context, then the company of the
    current user. When none of those is an installed language, fall back on
    the first lang installed in the system.

    :param str lang_code: the locale (i.e. en_US)
    :return res.lang: the first lang found that is installed on the system.
    """
    installed = [code for code, _ in env['res.lang'].get_installed()]
    selected = installed[0]
    for candidate in (lang_code,
                      env.context.get('lang'),
                      env.user.company_id.partner_id.lang):
        if candidate and candidate in installed:
            selected = candidate
            break
    return env['res.lang']._lang_get(selected)
def babel_locale_parse(lang_code):
    """ Parse ``lang_code`` into a :class:`babel.Locale`, falling back to the
    system default locale and, as a last resort, to ``en_US``.

    :param str lang_code: locale identifier such as ``en_US``
    :rtype: babel.Locale
    """
    try:
        return babel.Locale.parse(lang_code)
    except Exception:
        # Narrowed from a bare ``except:`` which would also swallow
        # KeyboardInterrupt/SystemExit.
        try:
            return babel.Locale.default()
        except Exception:
            return babel.Locale.parse("en_US")
def formatLang(env, value, digits=None, grouping=True, monetary=False, dp=False, currency_obj=False):
    """ Format a numeric ``value`` according to the environment's language.

    Precision resolution (only when ``digits`` is not given explicitly),
    assuming 'Account' decimal.precision=3:

        formatLang(value)                       -> digits=2 (default)
        formatLang(value, digits=4)             -> digits=4
        formatLang(value, dp='Account')         -> digits=3
        formatLang(value, digits=5, dp='Account') -> digits=5

    :param env: environment used to resolve the lang and decimal precision
    :param value: number to format (the empty string returns '')
    :param bool grouping: apply the lang's thousands separators
    :param bool monetary: use the lang's monetary grouping
    :param str dp: name of a decimal.precision record supplying the digits
    :param currency_obj: res.currency record whose symbol is appended/prepended
    :return: formatted string
    """
    if digits is None:
        digits = 2  # default precision when neither digits, dp nor currency supply one
        if dp:
            digits = env['decimal.precision'].precision_get(dp)
        elif currency_obj:
            digits = currency_obj.decimal_places
    # An empty string is passed through untouched (e.g. unset form values).
    if isinstance(value, str) and not value:
        return ''
    lang_obj = get_lang(env)
    res = lang_obj.format('%.' + str(digits) + 'f', value, grouping=grouping, monetary=monetary)
    if currency_obj and currency_obj.symbol:
        if currency_obj.position == 'after':
            res = '%s %s' % (res, currency_obj.symbol)
        elif currency_obj.position == 'before':
            # `currency_obj` is already known truthy here; the redundant
            # re-check was removed.
            res = '%s %s' % (currency_obj.symbol, res)
    return res
def format_date(env, value, lang_code=False, date_format=False):
    '''
    Formats the date in a given format.

    :param env: an environment.
    :param date, datetime or string value: the date to format.
    :param string lang_code: the lang code, if not specified it is extracted from the
        environment context.
    :param string date_format: the format or the date (LDML format), if not specified the
        default format of the lang.
    :return: date formatted in the specified format.
    :rtype: string
    '''
    if not value:
        return ''
    if isinstance(value, str):
        # NOTE(review): DATE_LENGTH is a module-level constant not visible
        # here -- presumably len('YYYY-MM-DD') == 10, confirm.
        if len(value) < DATE_LENGTH:
            return ''
        if len(value) > DATE_LENGTH:
            # a datetime, convert to correct timezone
            value = odoo.fields.Datetime.from_string(value)
            value = odoo.fields.Datetime.context_timestamp(env['res.lang'], value)
        else:
            value = odoo.fields.Datetime.from_string(value)
    lang = get_lang(env, lang_code)
    locale = babel_locale_parse(lang.code)
    if not date_format:
        # Translate the lang's POSIX date format (e.g. %d/%m/%Y) into the
        # LDML pattern expected by babel.
        date_format = posix_to_ldml(lang.date_format, locale=locale)
    return babel.dates.format_date(value, format=date_format, locale=locale)
def parse_date(env, value, lang_code=False):
    '''
    Parse the date from a given format. If it is not a valid format for the
    localization, return the original string.

    :param env: an environment.
    :param string value: the date to parse.
    :param string lang_code: the lang code, if not specified it is extracted from the
        environment context.
    :return: date object from the localized string
    :rtype: datetime.date
    '''
    lang = get_lang(env, lang_code)
    locale = babel_locale_parse(lang.code)
    try:
        return babel.dates.parse_date(value, locale=locale)
    except Exception:
        # Narrowed from a bare ``except:`` (which would also swallow
        # KeyboardInterrupt/SystemExit); babel raises ValueError/IndexError
        # on unparsable input, in which case the raw string is returned as
        # documented.
        return value
def format_datetime(env, value, tz=False, dt_format='medium', lang_code=False):
    """ Formats the datetime in a given format.

    :param {str, datetime} value: naive datetime to format either in string or in datetime
    :param {str} tz: name of the timezone in which the given datetime should be localized
    :param {str} dt_format: one of “full”, “long”, “medium”, or “short”, or a custom date/time pattern compatible with `babel` lib
    :param {str} lang_code: ISO code of the language to use to render the given datetime
    """
    if not value:
        return ''
    if isinstance(value, str):
        timestamp = odoo.fields.Datetime.from_string(value)
    else:
        timestamp = value
    # The input is naive and assumed to be UTC; localize then convert.
    tz_name = tz or env.user.tz or 'UTC'
    utc_datetime = pytz.utc.localize(timestamp, is_dst=False)
    try:
        context_tz = pytz.timezone(tz_name)
        localized_datetime = utc_datetime.astimezone(context_tz)
    except Exception:
        # Unknown timezone name: fall back to displaying the UTC value.
        localized_datetime = utc_datetime
    lang = get_lang(env, lang_code)
    locale = babel_locale_parse(lang.code or lang_code)  # lang can be inactive, so `lang` is empty
    if not dt_format:
        # Build a combined "date time" pattern from the lang's POSIX formats.
        date_format = posix_to_ldml(lang.date_format, locale=locale)
        time_format = posix_to_ldml(lang.time_format, locale=locale)
        dt_format = '%s %s' % (date_format, time_format)
    # Babel allows to format datetime in a specific language without change locale
    # So month 1 = January in English, and janvier in French
    # Be aware that the default value for format is 'medium', instead of 'short'
    #     medium:  Jan 5, 2016, 10:20:31 PM |   5 janv. 2016 22:20:31
    #     short:   1/5/16, 10:20 PM         |   5/01/16 22:20
    # Formatting available here : http://babel.pocoo.org/en/latest/dates.html#date-fields
    return babel.dates.format_datetime(localized_datetime, dt_format, locale=locale)
def format_time(env, value, tz=False, time_format='medium', lang_code=False):
    """ Format the given time (hour, minute and second) with the current user preference (language, format, ...)

    :param value: the time to format
    :type value: `datetime.time` instance. Could be timezoned to display tzinfo according to format (e.i.: 'full' format)
    :param tz: currently unused -- kept for signature symmetry with
        :func:`format_datetime` (the tzinfo carried by ``value`` is used instead)
    :param time_format: one of “full”, “long”, “medium”, or “short”, or a custom date/time pattern
    :param lang_code: ISO code of the language to use
    :rtype: str
    """
    if not value:
        return ''
    lang = get_lang(env, lang_code)
    locale = babel_locale_parse(lang.code)
    if not time_format:
        # Translate the lang's POSIX time format into babel's LDML pattern.
        time_format = posix_to_ldml(lang.time_format, locale=locale)
    return babel.dates.format_time(value, format=time_format, locale=locale)
def _format_time_ago(env, time_delta, lang_code=False, add_direction=True):
    """ Localize ``time_delta`` as a human-readable relative time string.

    :param env: environment used to resolve the language when ``lang_code``
        is not given (context lang, else company lang, else first installed).
    :param datetime.timedelta time_delta: the delta is negated before being
        handed to babel, so a positive delta reads as being in the past.
    :param str lang_code: optional explicit locale code.
    :param bool add_direction: have babel include "ago" / "in" wording.
    """
    if not lang_code:
        langs = [code for code, _ in env['res.lang'].get_installed()]
        lang_code = env.context['lang'] if env.context.get('lang') in langs else (env.user.company_id.partner_id.lang or langs[0])
    locale = babel_locale_parse(lang_code)
    return babel.dates.format_timedelta(-time_delta, add_direction=add_direction, locale=locale)
def format_decimalized_number(number, decimal=1):
    """Render ``number`` with the nearest metric suffix (k/M/G/T) next to it.

    ``%g`` drops the fractional digits when they are all zero. Units above
    "Tera" are deliberately not used because most people don't know what a
    "Yotta" is.

    >>> format_decimalized_number(123_456.789)
    123.5k
    >>> format_decimalized_number(123_000.789)
    123k
    >>> format_decimalized_number(-123_456.789)
    -123.5k
    >>> format_decimalized_number(0.789)
    0.8
    """
    units = ('', 'k', 'M', 'G', 'T')
    index = 0
    # Scale down by thousands until the value fits, or the largest unit is hit.
    while abs(number) >= 1000.0 and index < len(units) - 1:
        number /= 1000.0
        index += 1
    return "%g%s" % (round(number, decimal), units[index])
def format_decimalized_amount(amount, currency=None):
    """Format ``amount`` with a metric suffix and, when a currency is given,
    its symbol placed according to the currency's position.

    >>> format_decimalized_amount(123_456.789, res.currency("$"))
    $123.5k
    """
    formated_amount = format_decimalized_number(amount)
    if not currency:
        return formated_amount
    symbol = currency.symbol or ''
    if currency.position == 'before':
        return "%s%s" % (symbol, formated_amount)
    return "%s %s" % (formated_amount, symbol)
def format_amount(env, amount, currency, lang_code=False):
    """ Format a monetary ``amount`` in ``currency`` for the resolved lang.

    The amount is rounded with the currency's rounding and formatted with
    monetary grouping. No-break spaces replace plain spaces so the grouped
    digits and the symbol never wrap apart; the zero-width no-break space
    inserted after '-' prevents a line break right after the sign.

    :param env: environment used to resolve the lang
    :param float amount: value to format
    :param currency: res.currency record supplying rounding, symbol, position
    :param str lang_code: optional explicit locale code
    :rtype: str
    """
    fmt = "%.{0}f".format(currency.decimal_places)
    lang = get_lang(env, lang_code)
    formatted_amount = lang.format(fmt, currency.round(amount), grouping=True, monetary=True)\
        .replace(r' ', u'\N{NO-BREAK SPACE}').replace(r'-', u'-\N{ZERO WIDTH NO-BREAK SPACE}')
    pre = post = u''
    if currency.position == 'before':
        pre = u'{symbol}\N{NO-BREAK SPACE}'.format(symbol=currency.symbol or '')
    else:
        post = u'\N{NO-BREAK SPACE}{symbol}'.format(symbol=currency.symbol or '')
    return u'{pre}{0}{post}'.format(formatted_amount, pre=pre, post=post)
def format_duration(value):
    """ Render a float number of hours as a zero-padded HH:MM string,
    e.g. 1.5 -> "01:30". Negative values keep a leading minus sign.
    """
    hours, minutes = divmod(abs(value) * 60, 60)
    minutes = round(minutes)
    # Rounding the minutes can overflow into the next hour (e.g. 1.9999h).
    if minutes == 60:
        hours, minutes = hours + 1, 0
    sign = '-' if value < 0 else ''
    return '%s%02d:%02d' % (sign, hours, minutes)
def _consteq(str1, str2):
""" Constant-time string comparison. Suitable to compare bytestrings of fixed,
known length only, because length difference is optimized. """
return len(str1) == len(str2) and sum(ord(x)^ord(y) for x, y in zip(str1, str2)) == 0
# Prefer passlib's battle-tested constant-time comparison when available,
# falling back to the local implementation above.
consteq = getattr(passlib.utils, 'consteq', _consteq)
# forbid globals entirely: str/unicode, int/long, float, bool, tuple, list, dict, None
class Unpickler(pickle_.Unpickler, object):
    # Setting the class-resolution hooks to None makes any pickle that
    # references a global class or function fail to load, restricting the
    # accepted payloads to plain builtin data types.
    find_global = None # Python 2
    find_class = None # Python 3
def _pickle_load(stream, encoding='ASCII', errors=False):
    """ Unpickle ``stream`` with the restricted :class:`Unpickler` above; on
    any failure, log a warning and return ``errors`` (the caller-supplied
    fallback value, despite the name).
    """
    if sys.version_info[0] == 3:
        unpickler = Unpickler(stream, encoding=encoding)
    else:
        # Python 2's Unpickler does not take an encoding argument.
        unpickler = Unpickler(stream)
    try:
        return unpickler.load()
    except Exception:
        _logger.warning('Failed unpickling data, returning default: %r',
                        errors, exc_info=True)
        return errors
# Hardened pickle facade: a module-like namespace whose load/loads use the
# restricted Unpickler above (no global lookups), while dump/dumps are the
# stock implementations.
pickle = types.ModuleType(__name__ + '.pickle')
pickle.load = _pickle_load
pickle.loads = lambda text, encoding='ASCII': _pickle_load(io.BytesIO(text), encoding=encoding)
pickle.dump = pickle_.dump
pickle.dumps = pickle_.dumps
class DotDict(dict):
    """Dictionary subclass exposing its keys as attributes.

    Missing keys resolve to ``None`` rather than raising, and nested plain
    dict values are wrapped on access so chained lookups work. E.g.

        foo = DotDict({'bar': False})
        return foo.bar
    """
    def __getattr__(self, attrib):
        value = self.get(attrib)
        if type(value) is dict:
            return DotDict(value)
        return value
def get_diff(data_from, data_to, custom_style=False):
    """
    Return, in an HTML table, the diff between two texts.

    :param tuple data_from: tuple(text, name), name will be used as table header
    :param tuple data_to: tuple(text, name), name will be used as table header
    :param str custom_style: css style, including the <style> tag, appended to
        the table instead of the built-in default.
    :return: a string containing the diff in an HTML table format.
    """
    def handle_style(html_diff, custom_style):
        """ The HtmlDiff lib will add some usefull classes on the DOM to
        identify elements. Simply append to those classes some BS4 ones.
        For the table to fit the modal width, some custom style is needed.
        """
        # Map difflib's generated class names to Bootstrap utility classes.
        to_append = {
            'diff_header': 'bg-600 text-center align-top px-2',
            'diff_next': 'd-none',
            'diff_add': 'bg-success',
            'diff_chg': 'bg-warning',
            'diff_sub': 'bg-danger',
        }
        for old, new in to_append.items():
            html_diff = html_diff.replace(old, "%s %s" % (old, new))
        # Drop the nowrap attributes difflib emits so long lines can wrap.
        html_diff = html_diff.replace('nowrap', '')
        html_diff += custom_style or '''
            <style>
                table.diff { width: 100%; }
                table.diff th.diff_header { width: 50%; }
                table.diff td.diff_header { white-space: nowrap; }
                table.diff td { word-break: break-all; }
            </style>
        '''
        return html_diff
    diff = HtmlDiff(tabsize=2).make_table(
        data_from[0].splitlines(),
        data_to[0].splitlines(),
        data_from[1],
        data_to[1],
        context=True,  # Show only diff lines, not all the code
        numlines=3,
    )
    return handle_style(diff, custom_style)
def traverse_containers(val, type_):
    """ Recursively yield the atoms of ``val`` matching ``type_`` (a type or
    tuple of types). Standard containers (non-string mappings and
    sequences) are walked into -- unless the container itself matches the
    type filter, in which case it is yielded and not descended into.
    """
    from odoo.models import BaseModel
    if isinstance(val, type_):
        yield val
        return
    if isinstance(val, (str, bytes, BaseModel)):
        # Atoms that are technically sequences but must never be traversed.
        return
    if isinstance(val, Mapping):
        for key, value in val.items():
            yield from traverse_containers(key, type_)
            yield from traverse_containers(value, type_)
    elif isinstance(val, collections.abc.Sequence):
        for item in val:
            yield from traverse_containers(item, type_)
def hmac(env, scope, message, hash_function=hashlib.sha256):
    """Compute an HMAC over ``message`` keyed with the ``database.secret``
    config parameter.

    :param env: sudo environment used for retrieving the config parameter
    :param scope: non-empty scope of the authentication, so the same message
        gets different signatures in different usages
    :param message: message to authenticate (repr()-ed together with the
        scope before hashing)
    :param hash_function: hash function to use for HMAC (default: SHA-256)
    :raises ValueError: when ``scope`` is falsy
    :return: hex digest string
    """
    if not scope:
        raise ValueError('Non-empty scope required')
    secret = env['ir.config_parameter'].get_param('database.secret')
    payload = repr((scope, message))
    return hmac_lib.new(secret.encode(), payload.encode(), hash_function).hexdigest()
|
rven/odoo
|
odoo/tools/misc.py
|
Python
|
agpl-3.0
| 54,452
|
[
"VisIt"
] |
1d733e45291c7ec89d4e40f421f339e230fa371099a639a92567deef320e06eb
|
"""
Copyright (c) 2009 John Markus Bjoerndalen <jmb@cs.uit.no>,
Brian Vinter <vinter@nbi.dk>, Rune M. Friborg <rune.m.friborg@gmail.com>.
See LICENSE.txt for licensing details (MIT License).
"""
import sys
from pycsp_import import *
# Total number of processes in the ring: one root plus (elements - 1) relays.
elements = 64
@process
def element(this_read, next_write):
    """ Ring relay process: forever take the token from the upstream
    channel and forward it downstream incremented by one. Terminates when
    the channels are retired (pycsp poisons the blocking call). """
    while True:
        next_write(this_read() + 1)
@process
def root(cycles, this_read, next_write):
    """ Ring master process: injects the first token, circulates it
    ``cycles`` times around the ring (printing start/end markers and the
    final token value), then retires its channel ends so the whole ring
    shuts down. """
    next_write(1)
    token = this_read()
    sys.stdout.write("start\n")
    sys.stdout.flush()
    while cycles:
        next_write(token + 1)
        token = this_read()
        cycles = cycles - 1
    sys.stdout.write("end\n")
    sys.stdout.flush()
    sys.stdout.write(str(token) + "\n")
    # Retiring both ends propagates shutdown to every element process.
    retire(next_write, this_read)
def ring(args):
    """ Build a token ring of ``elements`` processes and run it.

    :param args: command-line style argument list; args[0], when present,
        is the number of cycles the token should make around the ring.
    """
    # `elements` is only read, so the former `global` declaration was
    # unnecessary and has been dropped.
    cycles = int(args[0]) if args else 0
    head = Channel()
    this = head
    # Keep references so the Channel() data structures are not garbage
    # collected while still used by the element processes.
    chanlist = [head]
    for _ in range(elements - 1):
        nxt = Channel()  # renamed from `next`, which shadowed the builtin
        chanlist.append(nxt)
        Spawn(element(this.reader(), nxt.writer()))
        this = nxt
    # The root closes the ring: reads from the last element, writes to head.
    Parallel(root(cycles, this.reader(), head.writer()))
if __name__ == "__main__":
    # NOTE(review): argv[2:] skips argv[1]; unless the runner inserts an
    # extra argument before the cycle count, this looks like it should be
    # argv[1:] -- confirm against how the example is invoked.
    ring(sys.argv[2:])
    shutdown()
|
runefriborg/pycsp
|
examples/TokenRing.py
|
Python
|
mit
| 1,454
|
[
"Brian"
] |
3b14d6540fc684b5aed441f54bf3338b7b2303e44f469763db782ac444e1aabc
|
"""
CP decomposition by classic alternating least squares (ALS).
Author: N. Benjamin Erichson <erichson@uw.edu> and Alex H. Williams
"""
import numpy as np
from tensortools.operations import unfold, khatri_rao
from tensortools.tensors import KTensor
from tensortools.optimize import FitResult, optim_utils
def ncp_bcd(
        X, rank, random_state=None, init='rand',
        skip_modes=[], negative_modes=[], **options):
    """
    Fits nonnegative CP Decomposition using the Block Coordinate Descent (BCD)
    Method.

    Parameters
    ----------
    X : (I_1, ..., I_N) array_like
        A real array with nonnegative entries and ``X.ndim >= 3``.
    rank : integer
        The `rank` sets the number of components to be computed.
    random_state : integer, RandomState instance or None, optional (default ``None``)
        If integer, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used by np.random.
    init : str, or KTensor, optional (default ``'rand'``).
        Specifies initial guess for KTensor factor matrices.
        If ``'randn'``, Gaussian random numbers are used to initialize.
        If ``'rand'``, uniform random numbers are used to initialize.
        If KTensor instance, a copy is made to initialize the optimization.
    skip_modes : iterable, optional (default ``[]``).
        Specifies modes of the tensor that are not fit. This can be
        used to fix certain factor matrices that have been previously
        fit.  (Note: the default is a mutable list, but it is never
        mutated here, only read.)
    negative_modes : iterable, optional (default ``[]``).
        Specifies modes of the tensor whose factors are not constrained
        to be nonnegative.
    options : dict, specifying fitting options.
        tol : float, optional (default ``tol=1E-5``)
            Stopping tolerance for reconstruction error.
        max_iter : integer, optional (default ``max_iter = 500``)
            Maximum number of iterations to perform before exiting.
        min_iter : integer, optional (default ``min_iter = 1``)
            Minimum number of iterations to perform before exiting.
        max_time : integer, optional (default ``max_time = np.inf``)
            Maximum computational time before exiting.
        verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
            Display progress.

    Returns
    -------
    result : FitResult instance
        Object which holds the fitted results. It provides the factor matrices
        in form of a KTensor, ``result.factors``.

    Notes
    -----
    This implementation is using the Block Coordinate Descent Method.

    References
    ----------
    Xu, Yangyang, and Wotao Yin. "A block coordinate descent method for
    regularized multiconvex optimization with applications to
    nonnegative tensor factorization and completion."
    SIAM Journal on imaging sciences 6.3 (2013): 1758-1789.

    Examples
    --------
    """
    # Check inputs.
    optim_utils._check_cpd_inputs(X, rank)
    # Number of tensor modes. (The original comment said "norm of X", but
    # the norm is actually returned by _get_initial_ktensor below.)
    N = X.ndim
    # Initialize problem.
    U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state)
    result = FitResult(U, 'NCP_BCD', **options)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Block coordinate descent
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Um = U.copy()  # Extrapolations of components
    extraw = 1  # Used for extrapolation weight update
    weights_U = np.ones(N)  # Extrapolation weights
    L = np.ones(N)  # Lipschitz constants
    obj_bcd = 0.5 * normX**2  # Initial objective value
    # Main optimization loop.
    while result.still_optimizing:
        obj_bcd_old = obj_bcd  # Old objective value
        U_old = U.copy()
        extraw_old = extraw
        for n in range(N):
            # Skip modes that are specified as fixed.
            if n in skip_modes:
                continue
            # Select all components, but U_n
            components = [U[j] for j in range(N) if j != n]
            # i) compute the N-1 gram matrices
            grams = np.prod([arr.T.dot(arr) for arr in components], axis=0)
            # Update gradient Lipschitz constant.
            # NOTE(review): `L0 = L` binds the SAME array (no copy), so after
            # the next line L0[n] == L[n] and the weight ratio sqrt(L0[n]/L[n])
            # below is always 1 -- confirm whether `L0 = L.copy()` was intended.
            L0 = L  # Lipschitz constants
            L[n] = np.linalg.norm(grams, 2)
            # ii) Compute Khatri-Rao product
            kr = khatri_rao(components)
            p = unfold(X, n).dot(kr)
            # Compute Gradient.
            grad = Um[n].dot(grams) - p
            # Enforce nonnegativity (project onto nonnegative orthant).
            U[n] = Um[n] - grad / L[n]
            if n not in negative_modes:
                U[n] = np.maximum(0.0, U[n])
        # Compute objective function and update optimization result.
        # grams *= U[X.ndim - 1].T.dot(U[X.ndim - 1])
        # obj = np.sqrt(np.sum(grams) - 2 * np.sum(U[X.ndim - 1] * p) + normX**2) / normX
        obj = np.linalg.norm(X - U.full()) / normX
        result.update(obj)
        # Correction and extrapolation.
        # `grams` and `p` still hold the values computed for the last fitted
        # mode, which is exactly the max of the non-skipped modes selected here.
        n = np.setdiff1d(np.arange(X.ndim), skip_modes).max()
        grams *= U[n].T.dot(U[n])
        obj_bcd = 0.5 * (np.sum(grams) - 2 * np.sum(U[n] * p) + normX**2)
        extraw = (1 + np.sqrt(1 + 4 * extraw_old**2)) / 2.0
        if obj_bcd >= obj_bcd_old:
            # restore previous A to make the objective nonincreasing
            Um = U_old
        else:
            # apply extrapolation
            w = (extraw_old - 1.0) / extraw  # Extrapolation weight
            for n in range(N):
                if n not in skip_modes:
                    weights_U[n] = min(w, 1.0 * np.sqrt(L0[n] / L[n]))  # choose smaller weights for convergence
                    Um[n] = U[n] + weights_U[n] * (U[n] - U_old[n])  # extrapolation
    # Finalize and return the optimization result.
    return result.finalize()
|
ahwillia/tensortools
|
tensortools/optimize/ncp_bcd.py
|
Python
|
mit
| 6,015
|
[
"Gaussian"
] |
01d658cd440c3a89f5f0a4062947da44527a39a2aad385989360cd90cc651b7d
|
import unittest
from pyramid import testing
class TutorialViewTests(unittest.TestCase):
    """Unit checks for the tutorial's view callables."""

    def setUp(self):
        # Build a minimal Pyramid configuration for each test.
        self.config = testing.setUp()

    def tearDown(self):
        testing.tearDown()

    def test_home(self):
        from tutorial.views import home
        response = home(testing.DummyRequest())
        self.assertIsNotNone(response)
        self.assertEqual(response.status_code, 200)
        self.assertIn(b'Visit', response.body)

    def test_hello(self):
        from tutorial.views import hello
        response = hello(testing.DummyRequest())
        self.assertIsNotNone(response)
        self.assertEqual(response.status_code, 200)
        self.assertIn(b'Hello World!', response.body)
        self.assertIn(b'Go back', response.body)
if __name__ == "__main__":
    # Allow running this module directly instead of via a test runner.
    unittest.main()
|
FedericoStra/pyramid_tutorial
|
07_views/tests/test_views.py
|
Python
|
mit
| 869
|
[
"VisIt"
] |
43d62fe5635249bfc1bbd35e0354cf4ef3cf2f886df740afe333a3fa8c8d4aa1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016
# Author(s):
# Thomas Leppelt <thomas.leppelt@dwd.de>
# This file is part of sauventory.
# Spatial Autocorrelated Uncertainty of Inventories
# sauventory is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# sauventory is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# sauventory comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
# This is free software, and you are welcome to redistribute it
# under certain conditions; type `show c' for details.
"""
This module perform unittests for variogram functions within the
sauventory package.
"""
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import unittest
from sauventory import spatialinventory
from sauventory import variogram
class VariogramTest(unittest.TestCase):
    """Unit tests for sauventory's variogram functions on raster, vector
    and plain tabular inventories.

    Fixes over the previous revision: the soil data file is opened with a
    context manager (it was leaked before), is located relative to this
    test module instead of the working directory (consistent with the
    raster/vector fixtures), and the ``map()`` objects are materialized so
    ``np.array`` builds a numeric array on Python 3 as well.
    """

    def setUp(self):
        self.r = np.array([random.randrange(1, 1000) for _ in range(0, 1000)])
        # Setup test raster file names and location.
        self.invin = os.path.join(os.path.dirname(__file__),
                                  "data/model_peat_examp_1.tiff")
        self.uncertin = os.path.join(os.path.dirname(__file__),
                                     "data/uncert_peat_examp_1.tiff")
        # Setup test vector file names and location.
        self.invvector = os.path.join(os.path.dirname(__file__),
                                      "data/n2o_eu_2010_inventory/"
                                      "n2o_eu_2010_inventory.shp")
        # Import soil test dataset
        # Calcium and magnesium contents in soil samples at the 0-20cm and
        # 20-40 soil layers. Source: Capeche, C. L. et. al. (1997).
        # Used also in Diggle & Ribeiro Jr (2007) Model Based Geostatistics
        soilpath = os.path.join(os.path.dirname(__file__),
                                "data/soildata.txt")
        with open(soilpath, 'r') as soilfile:
            sdata = soilfile.readlines()
        self.sdata = np.array([[float(v) for v in line.split()]
                               for line in sdata[1:]])

    def test_soil_variogram(self):
        """Variogram and covariance on the plain tabular soil dataset."""
        v = variogram.Variogram()
        si = spatialinventory.SpatialInventory("Soil data", "g/m2",
                                               "Example soil data",
                                               ("1990-01-01 00:00:00",
                                                "1990-01-01 00:00:00"))
        data = self.sdata[:, :3]
        si.inv_coord = data[:, :2]
        si.inv_array = data[:, 2]
        # Define variogram parameters
        bw = 100  # Bandwidth
        hmax = 1200  # Maximum distance
        hs = np.arange(0, 600, bw)  # Distance intervals
        sv, svmodel, c, c0 = si.get_variogram(bw, hmax, model=True,
                                              type=v.nugget)
        cov = si.get_covariance(bw, hmax)
        si.plot_variogram(file="/tmp/sauventory_test_soil_vario.png",
                          caxis=True, naxis=True)

    def test_single_variogram_raster(self):
        """Single semivariance values at two lags on the raster inventory."""
        si = spatialinventory.RasterInventory("N2O-Agrar-2012", "g/m2",
                                              "Example N2O inventory of "
                                              "organic soils",
                                              ("2012-01-01 00:00:00",
                                               "2013-01-01 00:00:00"),
                                              creator="Tester")
        si.import_inventory_as_raster(self.invin, self.uncertin)
        v = variogram.Variogram()
        coords = si.get_coord()
        data = np.hstack((coords, si.inv_array.reshape((si.inv_array.size,
                                                        1))))
        sv1 = v.semivvarh(data, 5, 5)
        sv2 = v.semivvarh(data, 50, 5)
        self.assertEqual(round(sv1, 3), 0.168)
        self.assertEqual(round(sv2, 3), 0.217)

    def test_single_variogram_vector(self):
        """Single semivariance values at two lags on the vector inventory."""
        si = spatialinventory.VectorInventory("N2O-Agrar-EU-2010", "Gg",
                                              "N2O inventory for EU-27"
                                              "emissions from agriculture",
                                              ("2010-01-01 00:00:00",
                                               "2011-01-01 00:00:00"),
                                              creator="Tester")
        si.import_inventory_as_vector(self.invvector, 'n2o_Gg',
                                      uncert='uncert_Gg', index='NUTS_ID',
                                      relative=True)
        v = variogram.Variogram()
        coords = si.get_coord()
        data = np.hstack((coords, si.inv_array.reshape((si.inv_array.size,
                                                        1))))
        sv1 = v.semivvarh(data, 10, 8)
        sv2 = v.semivvarh(data, 20, 8)
        self.assertEqual(round(sv1, 3), 136703201.471)
        self.assertEqual(round(sv2, 3), 145110190.277)

    def test_empirical_variogram_raster(self):
        """Empirical semivariogram over a range of lags (raster)."""
        si = spatialinventory.RasterInventory("N2O-Agrar-2012", "g/m2",
                                              "Example N2O inventory of "
                                              "organic soils",
                                              ("2012-01-01 00:00:00",
                                               "2013-01-01 00:00:00"),
                                              creator="Tester")
        si.import_inventory_as_raster(self.invin, self.uncertin)
        v = variogram.Variogram()
        coords = si.get_coord()
        data = np.hstack((coords, si.inv_array.reshape((si.inv_array.size,
                                                        1))))
        # Define variogram parameters
        bw = 10  # Bandwidth
        hs = np.arange(0, 80, bw)  # Distance intervals
        svario = v.semivvar(data, hs, bw)
        self.assertEqual(round(np.max(svario[1]), 3), 0.245)
        self.assertEqual(round(np.min(svario[1]), 3), 0.168)

    def test_empirical_variogram_vector(self):
        """Empirical semivariogram over a range of lags (vector)."""
        si = spatialinventory.VectorInventory("N2O-Agrar-EU-2010", "Gg",
                                              "N2O inventory for EU-27"
                                              "emissions from agriculture",
                                              ("2010-01-01 00:00:00",
                                               "2011-01-01 00:00:00"),
                                              creator="Tester")
        si.import_inventory_as_vector(self.invvector, 'n2o_Gg',
                                      uncert='uncert_Gg', index='NUTS_ID',
                                      relative=True)
        v = variogram.Variogram()
        coords = si.get_coord()
        data = np.hstack((coords, si.inv_array.reshape((si.inv_array.size,
                                                        1))))
        # Define variogram parameters
        bw = 10  # Bandwidth
        hs = np.arange(0, 80, bw)  # Distance intervals
        svario = v.semivvar(data, hs, bw)
        self.assertEqual(round(np.max(svario[1]), 3), 142924111.258)
        self.assertEqual(round(np.min(svario[1]), 3), 16520.533)

    def test_nugget_variogram(self):
        """Fit of the nugget covariance model."""
        si = spatialinventory.RasterInventory("N2O-Agrar-2012", "g/m2",
                                              "Example N2O inventory of "
                                              "organic soils",
                                              ("2012-01-01 00:00:00",
                                               "2013-01-01 00:00:00"),
                                              creator="Tester")
        si.import_inventory_as_raster(self.invin, self.uncertin)
        v = variogram.Variogram()
        coords = si.get_coord()
        data = np.hstack((coords, si.inv_array.reshape((si.inv_array.size,
                                                        1))))
        # Define variogram parameters
        bw = 10  # Bandwidth
        hs = np.arange(0, 80, bw)  # Distance intervals
        svmodel, svario, c, c0 = v.cvmodel(data, hs, bw, model=v.nugget)
        self.assertEqual(round(svmodel(svario[0][0]), 3), 0.167)
        self.assertEqual(round(svmodel(svario[0][1]), 3), 0.340)

    def test_spherical_variogram_raster(self):
        """Fit of the spherical covariance model."""
        si = spatialinventory.RasterInventory("N2O-Agrar-2012", "g/m2",
                                              "Example N2O inventory of "
                                              "organic soils",
                                              ("2012-01-01 00:00:00",
                                               "2013-01-01 00:00:00"),
                                              creator="Tester")
        si.import_inventory_as_raster(self.invin, self.uncertin)
        v = variogram.Variogram()
        coords = si.get_coord()
        data = np.hstack((coords, si.inv_array.reshape((si.inv_array.size,
                                                        1))))
        # Define variogram parameters
        bw = 10  # Bandwidth
        hs = np.arange(0, 80, bw)  # Distance intervals
        svmodel, svario, c, c0 = v.cvmodel(data, hs, bw, model=v.spherical)
        self.assertEqual(round(svmodel(svario[0][0]), 3), 0.)
        self.assertEqual(round(svmodel(svario[0][7]), 3), 0.340)

    def test_gaussian_variogram_raster(self):
        """Fit of the Gaussian covariance model."""
        si = spatialinventory.RasterInventory("N2O-Agrar-2012", "g/m2",
                                              "Example N2O inventory of "
                                              "organic soils",
                                              ("2012-01-01 00:00:00",
                                               "2013-01-01 00:00:00"),
                                              creator="Tester")
        si.import_inventory_as_raster(self.invin, self.uncertin)
        v = variogram.Variogram()
        coords = si.get_coord()
        data = np.hstack((coords, si.inv_array.reshape((si.inv_array.size,
                                                        1))))
        # Define variogram parameters
        bw = 10  # Bandwidth
        hs = np.arange(0, 80, bw)  # Distance intervals
        svmodel, svario, c, c0 = v.cvmodel(data, hs, bw, model=v.gaussian)
        self.assertEqual(round(svmodel(svario[0][0]), 3), 0.)
        self.assertEqual(round(svmodel(svario[0][7]), 3), 0.340)

    def test_exponential_variogram_raster(self):
        """Fit of the exponential covariance model."""
        si = spatialinventory.RasterInventory("N2O-Agrar-2012", "g/m2",
                                              "Example N2O inventory of "
                                              "organic soils",
                                              ("2012-01-01 00:00:00",
                                               "2013-01-01 00:00:00"),
                                              creator="Tester")
        si.import_inventory_as_raster(self.invin, self.uncertin)
        v = variogram.Variogram()
        coords = si.get_coord()
        data = np.hstack((coords, si.inv_array.reshape((si.inv_array.size,
                                                        1))))
        # Define variogram parameters
        bw = 10  # Bandwidth
        hs = np.arange(0, 80, bw)  # Distance intervals
        svmodel, svario, c, c0 = v.cvmodel(data, hs, bw, model=v.exponential)
        self.assertEqual(round(svmodel(svario[0][0]), 3), 0.)
        self.assertEqual(round(svmodel(svario[0][7]), 3), 0.323)

    def test_plot_variogram_raster(self):
        """Plotting of fitted variograms for three model types."""
        si = spatialinventory.RasterInventory("N2O-Agrar-2012", "g/m2",
                                              "Example N2O inventory of "
                                              "organic soils",
                                              ("2012-01-01 00:00:00",
                                               "2013-01-01 00:00:00"),
                                              creator="Tester")
        v = variogram.Variogram()
        si.import_inventory_as_raster(self.invin, self.uncertin)
        sv, svm, c, c0 = si.get_variogram(10, 80, True)
        si.plot_variogram("/tmp/sauventory_test_spherical_vario.png")
        sv, svm, c, c0 = si.get_variogram(10, 80, True, type=v.gaussian)
        si.plot_variogram("/tmp/sauventory_test_gaussian_vario.png")
        sv, svm, c, c0 = si.get_variogram(10, 80, True, type=v.exponential)
        si.plot_variogram("/tmp/sauventory_test_exponential_vario.png")
        self.assertEqual(round(np.max(sv[1]), 3), 0.245)
        self.assertEqual(round(np.min(si.inv_sv[1]), 3), 0.168)
def suite():
    """Assemble and return the variogram test suite."""
    tests = unittest.TestLoader().loadTestsFromTestCase(VariogramTest)
    bundle = unittest.TestSuite()
    bundle.addTest(tests)
    return bundle
if __name__ == "__main__":
    # verbosity=2 prints one line per executed test.
    unittest.TextTestRunner(verbosity=2).run(suite())
|
m4sth0/sauventory
|
tests/test_variogram.py
|
Python
|
gpl-3.0
| 13,270
|
[
"Gaussian"
] |
45a9a11eb05e3160c3526184095c95ec16b84e90c224d2ffc703a72c4aad84e0
|
from brian import *
class AbstractNeuronGroup(NeuronGroup):
    '''
    Extends NeuronGroup to include additional methods for parameter
    introspection and persistence.
    '''
    def get_parameters(self):
        '''Return a dictionary of parameter names and values'''
        raise NotImplementedError('Not implemented yet')

    def save_parameters(self, out_f):
        '''Save parameters to out_f file object with write permission'''
        # items() instead of the Python-2-only iteritems() so the class also
        # works under Python 3; on Python 2 this merely builds a small list.
        for name, value in self.get_parameters().items():
            out_f.write('%s\t%s\n' % (name, str(value)))
|
blennon/MLI_PKJ_net
|
MLI_PKJ_net/abstract_neuron_group.py
|
Python
|
mit
| 509
|
[
"Brian"
] |
02e7e171fc4bddeb30eccb67d9b62a8d502f2c5c497ef2eb7805a7729ed3d3b8
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkBlankStructuredGridWithImage(SimpleVTKClassModuleBase):
    # Thin DeVIDE module wrapping VTK's vtkBlankStructuredGridWithImage
    # filter: two inputs (vtkStructuredGrid, vtkImageData), one output
    # (vtkStructuredGrid). replaceDoc=True takes the help text from the
    # wrapped VTK class.
    def __init__(self, module_manager):
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkBlankStructuredGridWithImage(), 'Processing.',
            ('vtkStructuredGrid', 'vtkImageData'), ('vtkStructuredGrid',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
|
chrisidefix/devide
|
modules/vtk_basic/vtkBlankStructuredGridWithImage.py
|
Python
|
bsd-3-clause
| 544
|
[
"VTK"
] |
66e646e4e3b758074ae7bed9a4f32cb1a7bec6a94fdaad83be089ec8fbe58e04
|
import sys
import numpy as np
import time
import moose
print('Using moose from %s' % moose.__file__)

# One well-stirred cubic compartment of fixed volume.
compt = moose.CubeMesh('/compt')
compt.volume = 1e-20

# Build 10 identical reversible reactions a1 + a2 <-> b1 + b2.
pools = []
for idx in range(10):
    a1 = moose.Pool('/compt/a1%s' % idx)
    a1.concInit = 10
    a2 = moose.Pool('/compt/a2%s' % idx)
    a2.concInit = 5
    b1 = moose.Pool('/compt/b1%s' % idx)
    b1.concInit = 0.054
    b2 = moose.Pool('/compt/b2%s' % idx)
    b2.concInit = 3.9
    reac = moose.Reac('/compt/reac%s' % idx)
    moose.connect(reac, 'sub', a1, 'reac')
    moose.connect(reac, 'sub', a2, 'reac')
    moose.connect(reac, 'prd', b1, 'reac')
    moose.connect(reac, 'prd', b2, 'reac')
    reac.Kf = 2.9
    reac.Kb = 4.5
    pools.extend([a1, a2, b1, b2])

# Attach a deterministic kinetic solver over everything in the compartment.
ksolve = moose.Ksolve('/compt/ksolve')
stoich = moose.Stoich('/compt/stoich')
stoich.compartment = compt
stoich.ksolve = ksolve
stoich.path = '/compt/##'
moose.reinit()
print('[INFO] Using method = %s' % ksolve.method)

t1 = time.time()
moose.start(10)
print('[INFO] Time taken %s' % (time.time() - t1))

# Expected steady-state concentrations: the first replica differs in its
# last printed digit (7.77859), the remaining nine repeat the same quad.
expected = ([7.77859, 2.77858, 2.27541, 6.12141]
            + [7.77858, 2.77858, 2.27541, 6.12141] * 9)

concs = [p.conc for p in pools]
if not np.isclose(concs, expected).all():
    print(" Expected %s" % expected)
    print(" Got %s" % concs)
    quit(1)
print('Test passed')
|
subhacom/moose-core
|
tests/python/test_ksolve.py
|
Python
|
gpl-3.0
| 1,677
|
[
"MOOSE"
] |
06f594d2b51d42d405c739ea56cf602b0fdc6eb40588efba41eb3309a332f2f3
|
#!/usr/bin/env python3
# License MIT
# Copyright 2016-2021 Alex Winkler
# Version 3.0.0
# Base URL prepended to wiki slugs when building links.
wikibaseurl = 'https://liquipedia.net/'

# URL slugs of all Liquipedia wikis the bot knows about.
wikis = [
    'ageofempires',
    'apexlegends',
    'arenafps',
    'arenaofvalor',
    'artifact',
    'autochess',
    'battalion',
    'battlerite',
    'brawlstars',
    'callofduty',
    'clashroyale',
    'commons',
    'counterstrike',
    'criticalops',
    'crossfire',
    'dota2',
    'fifa',
    'fighters',
    'fortnite',
    'freefire',
    'halo',
    'hearthstone',
    'heroes',
    'leagueoflegends',
    'magic',
    'mobilelegends',
    'overwatch',
    'paladins',
    'pokemon',
    'pubg',
    'rainbowsix',
    'rocketleague',
    'runeterra',
    'sideswipe',
    'simracing',
    'smash',
    'splitgate',
    'squadrons',
    'starcraft',
    'starcraft2',
    'teamfortress',
    'tft',
    'trackmania',
    'underlords',
    'valorant',
    'warcraft',
    'wildrift',
    'worldofwarcraft',
]

# Joke strings served by the bot's "lie" command.
lies = [
    'Liquipedia is not awesome... (good that this is a lie ^^)',
    'salle is a young girl',
    'Pizza is bad and no one likes it',
    'salle\'s ideas are always realistic',
    'Chrome is a decent browser',
    'blame swampflare',
    'The revision system of Liquipedia is useless, just kill the history',
    'I played Half Life 3 recently, it sucked',
    'WarCraft 4 is just about to be released',
    'Dota 2 is so tiny, we should focus on big esports like Nokia Snake instead',
    'https://files.catbox.moe/o8tify.gif',
    'Swampflare never laundered memory in his Lithuanian bakery!',
]

# Maps internal wiki-permission keys to their display names.
wikiroles = {
    'editor': 'Editor',
    'reviewer': 'Reviewer'
}

# Wiki roles
# Display names of the per-wiki Discord roles the bot can assign.
botroleswikis = [
    'Age of Empires',
    'Apex Legends',
    'Arena FPS',
    'Arena of Valor',
    'Artifact',
    'Auto Chess',
    'Battalion',
    'Battlerite',
    'Brawl Stars',
    'Call of Duty',
    'Clash Royale',
    'Commons',
    'Counter-Strike',
    'Critical Ops',
    'CrossFire',
    'Dota 2',
    'FIFA',
    'Fighters',
    'Fortnite',
    'Free Fire',
    'Halo',
    'Hearthstone',
    'Heroes',
    'League of Legends',
    'Magic',
    'Overwatch',
    'Paladins',
    'Pokémon',
    'PUBG',
    'Rainbow Six',
    'Rocket League',
    'Runeterra',
    'Sim Racing',
    'Smash',
    'Starcraft 2',
    'Starcraft',
    'Team Fortress',
    'Teamfight Tactics',
    'TrackMania',
    'Underlords',
    'VALORANT',
    'Warcraft',
    'Wild Rift',
    'World of Warcraft',
]

# Language roles
# Display names of the self-assignable language roles.
botroleslanguages = [
    'Arabic',
    'Belarusian',
    'Bosnian',
    'Bulgarian',
    'Chinese (Mandarin)',
    'Croatian',
    'Czech',
    'Danish',
    'English (native)',
    'French',
    'German',
    'Hindi',
    'Hungarian',
    'Italian',
    'Japanese',
    'Korean',
    'Macedonian',
    'Mongolian',
    'Norwegian',
    'Polish',
    'Portuguese',
    'Russian',
    'Serbian',
    'Slovak',
    'Slovene',
    'Spanish',
    'Swedish',
    'Tagalog',
    'Thai',
    'Turkish',
    'Ukrainian',
    'Vietnamese',
]

# Misc roles
# Display names of miscellaneous opt-in notification/activity roles.
botrolesmisc = [
    'Announcements',
    'CS Predictions',
    'Game Night',
    'Please Ping Me',
    'Random Stats of the Day',
    'Templates',
]
|
FO-nTTaX/Liquipedia-Discord-Bot
|
ftsbot/data.py
|
Python
|
mit
| 2,734
|
[
"MOE"
] |
9737b9d4c5721e91dc5835b26c8d2f0cea284cfba7ffffd94ebed239d394b22e
|
../../../../../../../share/pyshared/orca/scripts/apps/ekiga/script.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/ekiga/script.py
|
Python
|
gpl-3.0
| 69
|
[
"ORCA"
] |
68a676fe914f9c00e236e33f39a055c69abb2b00e4140f9be0058b9106e7118b
|
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
# Python 2 and 3 support
try:
# Python 2
unicode
str = unicode
except NameError:
# Python 3
unicode = str
import codecs
import errno
import logging
import os
import ntpath
import posixpath
import shutil
import stat
import tempfile
from commoncode import system
from commoncode import text
from commoncode import filetype
from commoncode.filetype import is_rwx
# this exception is not available on posix
try:
WindowsError # @UndefinedVariable
except NameError:
WindowsError = None # @ReservedAssignment
DEBUG = False
logger = logging.getLogger(__name__)
"""
File, paths and directory utility functions.
"""
#
# DIRECTORIES
#
def create_dir(location):
    """
    Create directory and all sub-directories recursively at location ensuring these
    are readable and writeable.
    Raise Exceptions if it fails to create the directory.
    """
    if os.path.exists(location):
        if not os.path.isdir(location):
            err = ('Cannot create directory: existing file '
                   'in the way ''%(location)s.')
            raise OSError(err % locals())
    else:
        # may fail on win if the path is too long
        # FIXME: consider using UNC ?\\ paths
        try:
            os.makedirs(location)
            chmod(location, RW, recurse=False)

        # avoid multi-process TOCTOU conditions when creating dirs
        # the directory may have been created since the exist check
        # NOTE: fixed Python-2-only `except E, name` syntax (a SyntaxError on
        # Python 3) to `except E as name`, matching the module's py2/3 shim.
        except WindowsError as e:
            # [Error 183] Cannot create a file when that file already exists
            if e and e.winerror == 183:
                if not os.path.isdir(location):
                    raise
            else:
                raise
        except (IOError, OSError) as o:
            if o.errno == errno.EEXIST:
                if not os.path.isdir(location):
                    raise
            else:
                raise
def system_temp_dir():
    """
    Return the global temp directory for the current user.
    """
    temp_dir = os.getenv('SCANCODE_TMP')
    if not temp_dir:
        # Default to a per-user scancode subdir of the system temp dir.
        safe_name = text.python_safe_name('scancode_' + system.username)
        temp_dir = os.path.join(tempfile.gettempdir(), safe_name)
    create_dir(temp_dir)
    return temp_dir
def get_temp_dir(base_dir, prefix=''):
    """
    Return the path to a new unique temporary directory, created under
    the system-wide `system_temp_dir` temp directory as a subdir of the
    base_dir path (a path relative to the `system_temp_dir`).
    """
    parent = os.path.join(system_temp_dir(), base_dir)
    create_dir(parent)
    return tempfile.mkdtemp(prefix=prefix, dir=parent)
#
# FILE READING
#
def file_chunks(file_object, chunk_size=1024):
    """
    Yield a file piece by piece. Default chunk size: 1k.
    """
    while True:
        data = file_object.read(chunk_size)
        if not data:
            # empty read means end of file: stop
            break
        yield data
# FIXME: reading a whole file could be an issue: could we stream by line?
def _text(location, encoding, universal_new_lines=True):
    """
    Read file at `location` as a text file with the specified `encoding`. If
    `universal_new_lines` is True, update lines endings to be posix LF \n.
    Return a unicode string.
    Note: Universal newlines in the codecs package was removed in
    Python2.6 see http://bugs.python.org/issue691291
    """
    with codecs.open(location, 'r', encoding) as f:
        content = f.read()
    if universal_new_lines:
        # normalize every line ending to a single posix LF
        content = u'\n'.join(content.splitlines(False))
    return content
def read_text_file(location, universal_new_lines=True):
    """
    Return the text content of file at `location` trying to find the best
    encoding.
    """
    try:
        text = _text(location, 'utf-8', universal_new_lines)
    # NOTE: narrowed the original bare `except:` (which also swallowed
    # KeyboardInterrupt/SystemExit) to `except Exception`, keeping the
    # deliberate best-effort fallback to latin-1, which accepts any byte.
    except Exception:
        text = _text(location, 'latin-1', universal_new_lines)
    return text
#
# PATHS AND NAMES MANIPULATIONS
#
# TODO: move these functions to paths.py
def is_posixpath(location):
    """
    Return True if the `location` path is likely a POSIX-like path using POSIX path
    separators (slash or "/")or has no path separator.
    Return False if the `location` path is likely a Windows-like path using backslash
    as path separators (e.g. "\").
    """
    # a path with a Windows drive letter is never POSIX
    if location:
        drive, _ = ntpath.splitdrive(location)
        if drive:
            return False
    # a path is POSIX unless it contains ONLY backslashes; this is a rough
    # approximation (it could still be posix)
    only_backslashes = '\\' in location and '/' not in location
    return not only_backslashes
def as_posixpath(location):
    """
    Return a POSIX-like path using POSIX path separators (slash or "/") for a
    `location` path. This converts Windows paths to look like POSIX paths: Python
    accepts gracefully POSIX paths on Windows.
    """
    # ntpath.sep is backslash, posixpath.sep is slash
    return location.replace('\\', '/')
def as_winpath(location):
    """
    Return a Windows-like path using Windows path separators (backslash or "\") for a
    `location` path.
    """
    # posixpath.sep is slash, ntpath.sep is backslash
    return location.replace('/', '\\')
def split_parent_resource(path, force_posix=False):
    """
    Return a tuple of (parent directory path, resource name).
    """
    if force_posix or is_posixpath(path):
        splitter = posixpath
    else:
        splitter = ntpath
    # strip any trailing separators so the name is the last real segment
    return splitter.split(path.rstrip('/\\'))
def resource_name(path, force_posix=False):
    """
    Return the resource name (file name or directory name) from `path` which
    is the last path segment.
    """
    # the second element of the split is the last segment; never return None
    return split_parent_resource(path, force_posix)[1] or ''
def file_name(path, force_posix=False):
    """
    Return the file name (or directory name) of a path.
    """
    # thin alias kept for readability at call sites
    return resource_name(path, force_posix=force_posix)
def parent_directory(path, force_posix=False):
    """
    Return the parent directory path of a file or directory `path`.
    """
    parent, _name = split_parent_resource(path, force_posix)
    sep = '/' if (force_posix or is_posixpath(path)) else '\\'
    # append a trailing separator unless the parent IS the root separator
    if parent == sep:
        return parent
    return parent + sep
def file_base_name(path, force_posix=False):
    """
    Return the file base name for a path. The base name is the base name of
    the file minus the extension. For a directory return an empty string.
    """
    base, _ext = splitext(path, force_posix)
    return base
def file_extension(path, force_posix=False):
    """
    Return the file extension for a path.
    """
    _base, ext = splitext(path, force_posix)
    return ext
def splitext(path, force_posix=False):
    """
    Return a tuple of strings (basename, extension) for a path. The basename is
    the file name minus its extension. Return an empty extension string for a
    directory. A directory is identified by ending with a path separator. Not
    the same as os.path.splitext.
    For example:
    >>> expected = 'path', '.ext'
    >>> assert expected == splitext('C:\\dir\path.ext')
    Directories even with dotted names have no extension:
    >>> import ntpath
    >>> expected = 'path.ext', ''
    >>> assert expected == splitext('C:\\dir\\path.ext' + ntpath.sep)
    >>> expected = 'path.ext', ''
    >>> assert expected == splitext('/dir/path.ext/')
    >>> expected = 'file', '.txt'
    >>> assert expected == splitext('/some/file.txt')
    Composite extensions for tarballs are properly handled:
    >>> expected = 'archive', '.tar.gz'
    >>> assert expected == splitext('archive.tar.gz')
    """
    if not path:
        return '', ''

    name = resource_name(path, force_posix).strip('\\/')

    if as_posixpath(path).endswith('/'):
        # trailing separator: a directory, which never has an extension
        return name, ''

    if name.startswith('.') and '.' not in name[1:]:
        # a .dot file: the full name is the base and there is no extension
        return name, ''

    base_name, extension = posixpath.splitext(name)
    # handle composed extensions of tar.gz, bz, zx,etc
    if base_name.endswith('.tar'):
        base_name, inner_ext = posixpath.splitext(base_name)
        extension = inner_ext + extension
    return base_name, extension
#
# DIRECTORY AND FILES WALKING/ITERATION
#
def ignore_nothing(_):
    """Default `ignored` callable: never ignore anything."""
    return False


def walk(location, ignored=ignore_nothing):
    """
    Walk location returning the same tuples as os.walk but with a different
    behavior:
    - always walk top-down, breadth-first.
    - always ignore and never follow symlinks, .
    - always ignore special files (FIFOs, etc.)
    - optionally ignore files and directories by invoking the `ignored`
    callable on files and directories returning True if it should be ignored.
    - location is a directory or a file: for a file, the file is returned.
    """
    # TODO: consider using the new "scandir" module for some speed-up.
    if DEBUG:
        ign = ignored(location)
        # FIX: the message had no placeholders, so passing extra args made
        # logging raise a formatting error; use lazy %-style args instead.
        logger.debug('walk: ignored: %(location)r %(ign)r' % locals())
    if ignored(location):
        return

    if filetype.is_file(location) :
        yield parent_directory(location), [], [file_name(location)]

    elif filetype.is_dir(location):
        dirs = []
        files = []
        # TODO: consider using scandir
        for name in os.listdir(location):
            loc = os.path.join(location, name)
            if filetype.is_special(loc) or ignored(loc):
                if DEBUG:
                    ign = ignored(loc)
                    logger.debug('walk: ignored: %(loc)r %(ign)r' % locals())
                continue
            # special files and symlinks are always ignored
            if filetype.is_dir(loc):
                dirs.append(name)
            elif filetype.is_file(loc):
                files.append(name)
        yield location, dirs, files

        for dr in dirs:
            for tripple in walk(os.path.join(location, dr), ignored):
                yield tripple
def file_iter(location, ignored=ignore_nothing):
    """
    Return an iterable of files at `location` recursively.
    :param location: a file or a directory.
    :param ignored: a callable accepting a location argument and returning True
    if the location should be ignored.
    :return: an iterable of file locations.
    """
    # delegate to resource_iter, excluding directories
    return resource_iter(location, ignored=ignored, with_dirs=False)
def dir_iter(location, ignored=ignore_nothing):
    """
    Return an iterable of directories at `location` recursively.
    :param location: a directory.
    :param ignored: a callable accepting a location argument and returning True
    if the location should be ignored.
    :return: an iterable of directory locations.
    """
    # delegate to resource_iter, excluding plain files
    return resource_iter(location, ignored=ignored, with_files=False)
def resource_iter(location, ignored=ignore_nothing, with_files=True, with_dirs=True):
    """
    Return an iterable of resources at `location` recursively.
    :param location: a file or a directory.
    :param ignored: a callable accepting a location argument and returning True
    if the location should be ignored.
    :param with_dirs: If True, include the directories.
    :param with_files: If True, include the files.
    :return: an iterable of file and directory locations.
    """
    assert with_dirs or with_files, "fileutils.resource_iter: One or both of 'with_dirs' and 'with_files' is required"
    for parent, dirnames, filenames in walk(location, ignored):
        # files first, then directories, matching the historical order
        selected = list(filenames) if with_files else []
        if with_dirs:
            selected.extend(dirnames)
        for name in selected:
            yield os.path.join(parent, name)
#
# COPY
#
def copytree(src, dst):
    """
    Copy recursively the `src` directory to the `dst` directory. If `dst` is an
    existing directory, files in `dst` may be overwritten during the copy.
    Preserve timestamps.
    Ignores:
    -`src` permissions: `dst` files are created with the default permissions.
    - all special files such as FIFO or character devices and symlinks.
    Raise an shutil.Error with a list of reasons.
    This function is similar to and derived from the Python shutil.copytree
    function. See fileutils.py.ABOUT for details.
    """
    if not filetype.is_readable(src):
        chmod(src, R, recurse=False)
    names = os.listdir(src)
    if not os.path.exists(dst):
        os.makedirs(dst)
    errors = []
    errors.extend(copytime(src, dst))
    for name in names:
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        # skip anything that is not a regular file, dir or link
        if not filetype.is_regular(srcname):
            continue
        if not filetype.is_readable(srcname):
            chmod(srcname, R, recurse=False)
        try:
            if os.path.isdir(srcname):
                copytree(srcname, dstname)
            elif filetype.is_file(srcname):
                copyfile(srcname, dstname)
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        # NOTE: fixed Python-2-only `except E, name` / `raise E, args`
        # syntax (a SyntaxError on Python 3) per the module's py2/3 shim.
        except shutil.Error as err:
            errors.extend(err.args[0])
        except EnvironmentError as why:
            errors.append((srcname, dstname, str(why)))
    if errors:
        raise shutil.Error(errors)
def copyfile(src, dst):
    """
    Copy src file to dst file preserving timestamps.
    Ignore permissions and special files.
    Similar to and derived from Python shutil module. See fileutils.py.ABOUT
    for details.
    """
    # silently skip anything that is not a regular file
    if not filetype.is_regular(src):
        return
    if not filetype.is_readable(src):
        chmod(src, R, recurse=False)
    target = dst
    if os.path.isdir(target):
        # copying INTO a directory: keep the source file name
        target = os.path.join(target, os.path.basename(src))
    shutil.copyfile(src, target)
    copytime(src, target)
def copytime(src, dst):
    """
    Copy timestamps from `src` to `dst`.
    Return a list of (src, dst, error message) tuples for failures (empty on
    success).
    Similar to and derived from Python shutil module. See fileutils.py.ABOUT
    for details.
    """
    errors = []
    st = os.stat(src)
    if hasattr(os, 'utime'):
        try:
            os.utime(dst, (st.st_atime, st.st_mtime))
        # NOTE: fixed Python-2-only `except OSError, why` syntax (a
        # SyntaxError on Python 3) per the module's py2/3 shim.
        except OSError as why:
            if WindowsError is not None and isinstance(why, WindowsError):
                # File access times cannot be copied on Windows
                pass
            else:
                errors.append((src, dst, str(why)))
    return errors
#
# PERMISSIONS
#
# modes: read, write, executable
R = stat.S_IRUSR
RW = stat.S_IRUSR | stat.S_IWUSR
RX = stat.S_IRUSR | stat.S_IXUSR
RWX = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
# FIXME: This was an expensive operation that used to recurse of the parent directory
def chmod(location, flags, recurse=False):
    """
    Update permissions for `location` with `flags`. `flags` is one of R,
    RW, RX or RWX with the same semantics as in the chmod command. Update is
    done recursively if `recurse`.
    Missing or empty locations are silently ignored. Existing permission bits
    are preserved: the new flags are OR-ed in, never cleared.
    """
    if not location or not os.path.exists(location):
        return
    location = os.path.abspath(location)

    new_flags = flags
    if filetype.is_dir(location):
        # POSIX dirs need to be executable to be readable,
        # and to be writable so we can change perms of files inside
        new_flags = RWX

    # FIXME: do we really need to change the parent directory perms?
    # FIXME: may just check them instead?
    parent = os.path.dirname(location)
    current_stat = stat.S_IMODE(os.stat(parent).st_mode)
    if not is_rwx(parent):
        os.chmod(parent, current_stat | RWX)

    if filetype.is_regular(location):
        # OR the requested bits into the current mode so nothing is removed
        current_stat = stat.S_IMODE(os.stat(location).st_mode)
        os.chmod(location, current_stat | new_flags)

    if recurse:
        chmod_tree(location, flags)
def chmod_tree(location, flags):
    """
    Update permissions recursively in a directory tree `location`.
    """
    if not filetype.is_dir(location):
        return
    for top, subdirs, filenames in walk(location):
        # directories first, then files, as walk lists them per directory
        for entry in subdirs + filenames:
            chmod(os.path.join(top, entry), flags, recurse=False)
#
# DELETION
#
def _rm_handler(function, path, excinfo):  # @UnusedVariable
    """
    shutil.rmtree handler invoked on error when deleting a directory tree.
    This retries deleting once before giving up.
    """
    if function == os.rmdir:
        try:
            # force-writable then retry the whole subtree removal
            chmod(path, RW, recurse=True)
            shutil.rmtree(path, True)
        except Exception:
            pass
        if os.path.exists(path):
            logger.warning('Failed to delete directory %s', path)
    elif function == os.remove:
        try:
            delete(path, _err_handler=None)
        # NOTE: was a bare `except:` (also trapping KeyboardInterrupt and
        # SystemExit); narrowed to Exception to match the branch above.
        except Exception:
            pass
        if os.path.exists(path):
            logger.warning('Failed to delete file %s', path)
def delete(location, _err_handler=_rm_handler):
    """
    Delete a directory or file at `location` recursively. Similar to "rm -rf"
    in a shell or a combo of os.remove and shutil.rmtree.
    """
    # guard clauses: nothing to do for empty or non-existing locations
    # (broken symlinks do not "exist" but must still be removed)
    if not location:
        return
    if not (os.path.exists(location) or filetype.is_broken_link(location)):
        return
    # make sure the parent dir lets us remove the entry
    chmod(os.path.dirname(location), RW, recurse=False)
    if filetype.is_dir(location):
        shutil.rmtree(location, False, _rm_handler)
    else:
        os.remove(location)
|
yashdsaraf/scancode-toolkit
|
src/commoncode/fileutils.py
|
Python
|
apache-2.0
| 18,906
|
[
"VisIt"
] |
90bbd276555c94cbe366651aa7b8ebffc1a070b27efef295490d39beb47ff647
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Privacy accountant that uses Renyi differential privacy."""
import math
from typing import Collection, Optional
import numpy as np
from scipy import special
from tensorflow_privacy.privacy.analysis import dp_event
from tensorflow_privacy.privacy.analysis import privacy_accountant
NeighborRel = privacy_accountant.NeighboringRelation
def _log_add(logx, logy):
  """Adds two numbers in the log space."""
  lo, hi = sorted((logx, logy))
  if lo == -np.inf:  # adding 0
    return hi
  # exp(lo) + exp(hi) = (exp(lo - hi) + 1) * exp(hi), and log1p keeps
  # precision for the small exp(lo - hi) term.
  return math.log1p(math.exp(lo - hi)) + hi
def _log_sub(logx, logy):
  """Subtracts two numbers in the log space. Answer must be non-negative.

  Args:
    logx: log of the minuend; must be >= logy.
    logy: log of the subtrahend.

  Returns:
    log(exp(logx) - exp(logy)); -np.inf represents zero.

  Raises:
    ValueError: If logx < logy, i.e. the difference would be negative.
  """
  if logx < logy:
    raise ValueError('The result of subtraction must be non-negative.')
  if logy == -np.inf:  # subtracting 0
    return logx
  if logx == logy:
    return -np.inf  # 0 is represented as -np.inf in the log space.

  try:
    # Use exp(x) - exp(y) = (exp(x - y) - 1) * exp(y).
    return math.log(math.expm1(logx - logy)) + logy  # expm1(x) = exp(x) - 1
  except OverflowError:
    # expm1 overflows when logx - logy is very large; exp(logy) is then
    # negligible relative to exp(logx), so the answer is logx.
    return logx
def _log_sub_sign(logx, logy):
  """Returns log(exp(logx)-exp(logy)) and its sign."""
  # Equal inputs: the difference is exactly zero (sign reported positive).
  if logx == logy:
    return True, -np.inf
  # Positive difference: factor out the larger term logx.
  if logx > logy:
    return True, logx + np.log(1 - np.exp(logy - logx))
  # Negative difference: magnitude is exp(logy) - exp(logx).
  return False, logy + np.log(1 - np.exp(logx - logy))
def _log_comb(n, k):
  """Computes log of binomial coefficient."""
  # log C(n, k) = log n! - log k! - log (n-k)! via the log-gamma function.
  lgamma = special.gammaln
  return lgamma(n + 1) - lgamma(k + 1) - lgamma(n - k + 1)
def _compute_log_a_int(q, sigma, alpha):
  """Computes log(A_alpha) for integer alpha, 0 < q < 1."""
  assert isinstance(alpha, int)

  # Accumulate the binomial expansion in log space; -inf is log(0).
  log_a = -np.inf
  log_q = math.log(q)
  log_1mq = math.log(1 - q)
  for i in range(alpha + 1):
    term = (_log_comb(alpha, i) + i * log_q + (alpha - i) * log_1mq
            + (i * i - i) / (2 * (sigma**2)))
    log_a = _log_add(log_a, term)

  return float(log_a)
def _compute_log_a_frac(q, sigma, alpha):
  """Computes log(A_alpha) for fractional alpha, 0 < q < 1.

  Expands A_alpha as a generalized binomial series and sums it term by term
  in log space, splitting the underlying integral at z0 into the two
  half-lines (-inf, z0] and [z0, +inf). Iteration stops once both current
  terms drop below exp(-30).
  """
  # The two parts of A_alpha, integrals over (-inf,z0] and [z0, +inf), are
  # initialized to 0 in the log space:
  log_a0, log_a1 = -np.inf, -np.inf
  i = 0

  # Split point of the two integrals.
  z0 = sigma**2 * math.log(1 / q - 1) + .5

  while True:  # do ... until loop
    # Generalized binomial coefficient; alternates sign once i > alpha.
    coef = special.binom(alpha, i)
    log_coef = math.log(abs(coef))
    j = alpha - i

    log_t0 = log_coef + i * math.log(q) + j * math.log(1 - q)
    log_t1 = log_coef + j * math.log(q) + i * math.log(1 - q)

    # Gaussian tail masses of each half-line, via log(erfc) for stability.
    log_e0 = math.log(.5) + _log_erfc((i - z0) / (math.sqrt(2) * sigma))
    log_e1 = math.log(.5) + _log_erfc((z0 - j) / (math.sqrt(2) * sigma))

    log_s0 = log_t0 + (i * i - i) / (2 * (sigma**2)) + log_e0
    log_s1 = log_t1 + (j * j - j) / (2 * (sigma**2)) + log_e1

    # Add or subtract the term according to the coefficient's sign.
    if coef > 0:
      log_a0 = _log_add(log_a0, log_s0)
      log_a1 = _log_add(log_a1, log_s1)
    else:
      log_a0 = _log_sub(log_a0, log_s0)
      log_a1 = _log_sub(log_a1, log_s1)

    i += 1
    # Terminate when both terms are negligible (< exp(-30)).
    if max(log_s0, log_s1) < -30:
      break

  return _log_add(log_a0, log_a1)
def _log_erfc(x):
  """Computes log(erfc(x)) with high accuracy for large x."""
  try:
    # log_ndtr is the log of the standard normal CDF; erfc(x) =
    # 2 * ndtr(-x * sqrt(2)), so this stays accurate even when erfc
    # underflows to 0.
    return math.log(2) + special.log_ndtr(-x * 2**.5)
  except NameError:
    # If log_ndtr is not available, approximate as follows:
    r = special.erfc(x)
    if r == 0.0:
      # Using the Laurent series at infinity for the tail of the erfc function:
      # erfc(x) ~ exp(-x^2-.5/x^2+.625/x^4)/(x*pi^.5)
      # To verify in Mathematica:
      # Series[Log[Erfc[x]] + Log[x] + Log[Pi]/2 + x^2, {x, Infinity, 6}]
      return (-math.log(math.pi) / 2 - math.log(x) - x**2 - .5 * x**-2 +
              .625 * x**-4 - 37. / 24. * x**-6 + 353. / 64. * x**-8)
    else:
      return math.log(r)
def _compute_delta(orders, rdp, epsilon):
  """Compute delta given a list of RDP values and target epsilon.

  Args:
    orders: An array of orders.
    rdp: An array of RDP guarantees.
    epsilon: The target epsilon.

  Returns:
    Optimal delta.

  Raises:
    ValueError: If input is malformed.
  """
  if epsilon < 0:
    raise ValueError(f'Epsilon cannot be negative. Found {epsilon}.')
  if len(orders) != len(rdp):
    raise ValueError('Input lists must have the same length.')

  # Basic bound (see https://arxiv.org/abs/1702.07476 Proposition 3 in v3):
  #   delta = min( np.exp((rdp - epsilon) * (orders - 1)) )
  # Improved bound from https://arxiv.org/abs/2004.00010 Proposition 12 (in v4).
  # Work in log space to avoid overflows.
  logdeltas = []
  for a, r in zip(orders, rdp):
    if a < 1:
      raise ValueError(f'Renyi divergence order must be at least 1. Found {a}.')
    if r < 0:
      raise ValueError(f'Renyi divergence cannot be negative. Found {r}.')

    # KL-based bound delta <= sqrt(1-exp(-KL)), better for small alpha.
    logdelta = -np.inf if r == 0 else 0.5 * math.log1p(-math.exp(-r))

    if a > 1.01:
      # The improved bound is not numerically stable (nor useful) as
      # alpha -> 1, hence the cutoff; take the min of the two bounds.
      rdp_bound = (a - 1) * (r - epsilon + math.log1p(-1 / a)) - math.log(a)
      logdelta = min(logdelta, rdp_bound)

    logdeltas.append(logdelta)

  return min(math.exp(np.min(logdeltas)), 1.)
def _compute_epsilon(orders, rdp, delta):
  """Compute epsilon given a list of RDP values and target delta.

  Args:
    orders: An array of orders.
    rdp: An array of RDP guarantees.
    delta: The target delta. Must be >= 0.

  Returns:
    Optimal epsilon.

  Raises:
    ValueError: If input is malformed.
  """
  if delta < 0:
    raise ValueError(f'Delta cannot be negative. Found {delta}.')

  if delta == 0:
    # Pure DP (delta = 0) is only possible when no privacy was spent at all.
    return 0 if all(r == 0 for r in rdp) else np.inf

  if len(orders) != len(rdp):
    raise ValueError('Input lists must have the same length.')

  # Basic bound (see https://arxiv.org/abs/1702.07476 Proposition 3 in v3):
  #   epsilon = min( rdp - math.log(delta) / (orders - 1) )
  # Improved bound from https://arxiv.org/abs/2004.00010 Proposition 12 (in
  # v4); also appears in https://arxiv.org/abs/2001.05990 Equation 20 (in v1).
  eps = []
  for a, r in zip(orders, rdp):
    if a < 1:
      raise ValueError(f'Renyi divergence order must be at least 1. Found {a}.')
    if r < 0:
      raise ValueError(f'Renyi divergence cannot be negative. Found {r}.')

    if delta**2 + math.expm1(-r) > 0:
      # KL bound delta <= sqrt(1-exp(-KL)) is already met: epsilon = 0.
      epsilon = 0
    elif a > 1.01:
      # The improved bound; not numerically stable (nor useful) as
      # alpha -> 1, hence the cutoff.
      epsilon = r + math.log1p(-1 / a) - math.log(delta * a) / (a - 1)
    else:
      # Nothing can be concluded in this regime (e.g. delta = 0 requests).
      epsilon = np.inf
    eps.append(epsilon)

  return max(0, np.min(eps))
def _stable_inplace_diff_in_log(vec, signs, n=-1):
  """Replaces the first n-1 dims of vec with the log of abs difference operator.

  Args:
    vec: numpy array of floats with size larger than 'n'
    signs: Optional numpy array of bools with the same size as vec in case one
      needs to compute partial differences vec and signs jointly describe a
      vector of real numbers' sign and abs in log scale.
    n: Optonal upper bound on number of differences to compute. If negative, all
      differences are computed.

  Returns:
    The first n-1 dimension of vec and signs will store the log-abs and sign of
    the difference.

  Raises:
    ValueError: If input is malformed.
  """
  assert vec.shape == signs.shape
  if n < 0:
    n = np.max(vec.shape) - 1
  else:
    assert np.max(vec.shape) >= n + 1
  for j in range(0, n, 1):
    if signs[j] == signs[j + 1]:  # When the signs are the same
      # if the signs are both positive, then we can just use the standard one
      # (the magnitude is |x[j+1]| - |x[j]| either way)
      signs[j], vec[j] = _log_sub_sign(vec[j + 1], vec[j])
      # otherwise, we do that but toggle the sign
      # (subtracting two negatives flips the sign of the difference)
      if not signs[j + 1]:
        signs[j] = ~signs[j]
    else:  # When the signs are different.
      # Opposite signs: magnitudes add, and the result takes the sign
      # of the (j+1)-th entry.
      vec[j] = _log_add(vec[j], vec[j + 1])
      signs[j] = signs[j + 1]
def _get_forward_diffs(fun, n):
  """Computes up to nth order forward difference evaluated at 0.

  See Theorem 27 of https://arxiv.org/pdf/1808.00087.pdf

  Args:
    fun: Function to compute forward differences of.
    n: Number of differences to compute.

  Returns:
    Pair (deltas, signs_deltas) of the log deltas and their signs.
  """
  # Sampled function values fun(0..n+2) in log scale, all initially
  # marked positive.
  func_vec = np.zeros(n + 3)
  signs_func_vec = np.ones(n + 3, dtype=bool)

  # ith coordinate of deltas stores log(abs(ith order discrete derivative))
  deltas = np.zeros(n + 2)
  signs_deltas = np.zeros(n + 2, dtype=bool)
  for i in range(1, n + 3, 1):
    func_vec[i] = fun(1.0 * (i - 1))
  for i in range(0, n + 2, 1):
    # Diff in log scale: each pass shortens the usable prefix by one, and
    # the 0th entry then holds the ith-order forward difference at 0.
    _stable_inplace_diff_in_log(func_vec, signs_func_vec, n=n + 2 - i)
    deltas[i] = func_vec[0]
    signs_deltas[i] = signs_func_vec[0]
  return deltas, signs_deltas
def _compute_log_a(q, noise_multiplier, alpha):
  """Dispatches to the integer or fractional log(A_alpha) computation."""
  if float(alpha).is_integer():
    return _compute_log_a_int(q, noise_multiplier, int(alpha))
  return _compute_log_a_frac(q, noise_multiplier, alpha)
def _compute_rdp_poisson_subsampled_gaussian(q, noise_multiplier, orders):
  """Computes RDP of the Poisson sampled Gaussian mechanism.

  Args:
    q: The sampling rate.
    noise_multiplier: The ratio of the standard deviation of the Gaussian noise
      to the l2-sensitivity of the function to which it is added.
    orders: An array of RDP orders.

  Returns:
    The RDPs at all orders. Can be `np.inf`.
  """

  def _one_order(alpha):
    # Degenerate cases first: infinite order or no noise, then no
    # sampling, then full-batch (plain Gaussian mechanism).
    if np.isinf(alpha) or noise_multiplier == 0:
      return np.inf
    if q == 0:
      return 0
    if q == 1.:
      return alpha / (2 * noise_multiplier**2)
    return _compute_log_a(q, noise_multiplier, alpha) / (alpha - 1)

  return np.array([_one_order(order) for order in orders])
def _compute_rdp_sample_wor_gaussian(q, noise_multiplier, orders):
  """Computes RDP of Gaussian mechanism using sampling without replacement.

  This function applies to the following schemes:
  1. Sampling w/o replacement: Sample a uniformly random subset of size m = q*n.
  2. ``Replace one data point'' version of differential privacy, i.e., n is
     considered public information.

  Reference: Theorem 27 of https://arxiv.org/pdf/1808.00087.pdf (A strengthened
  version applies subsampled-Gaussian mechanism.)
  - Wang, Balle, Kasiviswanathan. "Subsampled Renyi Differential Privacy and
  Analytical Moments Accountant." AISTATS'2019.

  Args:
    q: The sampling proportion = m / n. Assume m is an integer <= n.
    noise_multiplier: The ratio of the standard deviation of the Gaussian noise
      to the l2-sensitivity of the function to which it is added.
    orders: An array of RDP orders.

  Returns:
    The RDPs at all orders, can be np.inf.
  """
  per_order = [
      _compute_rdp_sample_wor_gaussian_scalar(q, noise_multiplier, order)
      for order in orders
  ]
  return np.array(per_order)
def _compute_rdp_sample_wor_gaussian_scalar(q, sigma, alpha):
"""Compute RDP of the Sampled Gaussian mechanism at order alpha.
Args:
q: The sampling proportion = m / n. Assume m is an integer <= n.
sigma: The std of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
RDP at alpha, can be np.inf.
"""
assert (q <= 1) and (q >= 0) and (alpha >= 1)
if q == 0:
return 0
if q == 1.:
return alpha / (2 * sigma**2)
if np.isinf(alpha):
return np.inf
if float(alpha).is_integer():
return _compute_rdp_sample_wor_gaussian_int(q, sigma, int(alpha)) / (
alpha - 1)
else:
# When alpha not an integer, we apply Corollary 10 of [WBK19] to interpolate
# the CGF and obtain an upper bound
alpha_f = math.floor(alpha)
alpha_c = math.ceil(alpha)
x = _compute_rdp_sample_wor_gaussian_int(q, sigma, alpha_f)
y = _compute_rdp_sample_wor_gaussian_int(q, sigma, alpha_c)
t = alpha - alpha_f
return ((1 - t) * x + t * y) / (alpha - 1)
def _compute_rdp_sample_wor_gaussian_int(q, sigma, alpha):
  """Compute log(A_alpha) for integer alpha, subsampling without replacement.

  When alpha is smaller than max_alpha, compute the bound Theorem 27 exactly,
  otherwise compute the bound with Stirling approximation.

  Args:
    q: The sampling proportion = m / n. Assume m is an integer <= n.
    sigma: The std of the additive Gaussian noise.
    alpha: The order at which RDP is computed.

  Returns:
    RDP at alpha, can be np.inf.
  """
  # Above this order the O(alpha**2) exact computation becomes too expensive;
  # fall back to the cheaper Stirling-approximation bound.
  max_alpha = 256
  assert isinstance(alpha, int)
  if np.isinf(alpha):
    return np.inf
  elif alpha == 1:
    # RDP of order 1 is KL divergence; log(A_1) is 0 by normalization.
    return 0
  def cgf(x):
    # Return rdp(x+1)*x, the rdp of Gaussian mechanism is alpha/(2*sigma**2)
    return x * 1.0 * (x + 1) / (2.0 * sigma**2)
  def func(x):
    # Return the rdp of Gaussian mechanism
    return 1.0 * x / (2.0 * sigma**2)
  # Initialize with 1 in the log space.
  log_a = 0
  # Calculates the log term when alpha = 2
  log_f2m1 = func(2.0) + np.log(1 - np.exp(-func(2.0)))
  if alpha <= max_alpha:
    # We need forward differences of exp(cgf)
    # The following line is the numerically stable way of implementing it.
    # The output is in polar form with logarithmic magnitude
    deltas, _ = _get_forward_diffs(cgf, alpha)
    # Compute the bound exactly requires book keeping of O(alpha**2)
    for i in range(2, alpha + 1):
      if i == 2:
        s = 2 * np.log(q) + _log_comb(alpha, 2) + np.minimum(
            np.log(4) + log_f2m1,
            func(2.0) + np.log(2))
      elif i > 2:
        # Geometric mean (in log space) of the two nearest even-indexed
        # forward differences bounds the odd-indexed term.
        delta_lo = deltas[int(2 * np.floor(i / 2.0)) - 1]
        delta_hi = deltas[int(2 * np.ceil(i / 2.0)) - 1]
        s = np.log(4) + 0.5 * (delta_lo + delta_hi)
        s = np.minimum(s, np.log(2) + cgf(i - 1))
        s += i * np.log(q) + _log_comb(alpha, i)
      log_a = _log_add(log_a, s)
    # NOTE(review): this branch returns a Python float while the Stirling
    # branch below may return a NumPy scalar; callers treat both identically.
    return float(log_a)
  else:
    # Compute the bound with stirling approximation. Everything is O(x) now.
    for i in range(2, alpha + 1):
      if i == 2:
        s = 2 * np.log(q) + _log_comb(alpha, 2) + np.minimum(
            np.log(4) + log_f2m1,
            func(2.0) + np.log(2))
      else:
        s = np.log(2) + cgf(i - 1) + i * np.log(q) + _log_comb(alpha, i)
      log_a = _log_add(log_a, s)
    return log_a
def _effective_gaussian_noise_multiplier(event: dp_event.DpEvent):
  """Determines the effective noise multiplier of nested structure of Gaussians.

  A series of Gaussian queries on the same data can be reexpressed as a single
  query with pre- and post- processing. For details, see section 3 of
  https://arxiv.org/pdf/1812.06210.pdf.

  Args:
    event: A `dp_event.DpEvent`. In order for conversion to be successful it
      must consist of a single `dp_event.GaussianDpEvent`, or a nested structure
      of `dp_event.ComposedDpEvent` and/or `dp_event.SelfComposedDpEvent`
      bottoming out in `dp_event.GaussianDpEvent`s.

  Returns:
    The noise multiplier of the equivalent `dp_event.GaussianDpEvent`, or None
    if the input event was not a `dp_event.GaussianDpEvent` or a nested
    structure of `dp_event.ComposedDpEvent` and/or
    `dp_event.SelfComposedDpEvent` bottoming out in `dp_event.GaussianDpEvent`s.
  """
  # Leaf case: the event itself carries the noise multiplier.
  if isinstance(event, dp_event.GaussianDpEvent):
    return event.noise_multiplier
  # Composition: inverse variances (sigma^-2) add across composed Gaussians.
  if isinstance(event, dp_event.ComposedDpEvent):
    total_inv_var = 0
    for sub_event in event.events:
      sub_sigma = _effective_gaussian_noise_multiplier(sub_event)
      if sub_sigma is None:
        return None
      total_inv_var += sub_sigma**-2
    return total_inv_var**-0.5
  # Self-composition: the same event repeated `count` times.
  if isinstance(event, dp_event.SelfComposedDpEvent):
    sub_sigma = _effective_gaussian_noise_multiplier(event.event)
    if sub_sigma is None:
      return None
    return (event.count * sub_sigma**-2)**-0.5
  # Anything else cannot be expressed as a single Gaussian.
  return None
class RdpAccountant(privacy_accountant.PrivacyAccountant):
  """Privacy accountant that uses Renyi differential privacy."""
  def __init__(
      self,
      orders: Optional[Collection[float]] = None,
      neighboring_relation: NeighborRel = NeighborRel.ADD_OR_REMOVE_ONE,
  ):
    super().__init__(neighboring_relation)
    if orders is None:
      # Default orders chosen to give good coverage for Gaussian mechanism in
      # the privacy regime of interest. In the future, more orders might be
      # added, in particular, fractional orders between 1.0 and 10.0 or so.
      orders = [
          2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 20, 24, 28, 32, 48, 64, 128,
          256, 512, 1024
      ]
    self._orders = np.array(orders)
    # Accumulated RDP at each order; RDP composes additively, so _compose
    # only ever adds to this vector.
    self._rdp = np.zeros_like(orders, dtype=np.float64)
  def supports(self, event: dp_event.DpEvent) -> bool:
    # Dry run of composition (do_compose=False) — checks supportability
    # without mutating self._rdp.
    return self._maybe_compose(event, 0, False)
  def _compose(self, event: dp_event.DpEvent, count: int = 1):
    self._maybe_compose(event, count, True)
  def _maybe_compose(self, event: dp_event.DpEvent, count: int,
                     do_compose: bool) -> bool:
    """Traverses `event` and performs composition if `do_compose` is True.

    If `do_compose` is False, can be used to check whether composition is
    supported.

    Args:
      event: A `DpEvent` to process.
      count: The number of times to compose the event.
      do_compose: Whether to actually perform the composition.

    Returns:
      True if event is supported, otherwise False.
    """
    if isinstance(event, dp_event.NoOpDpEvent):
      return True
    elif isinstance(event, dp_event.NonPrivateDpEvent):
      if do_compose:
        # A non-private release destroys all privacy at every order.
        self._rdp += np.inf
      return True
    elif isinstance(event, dp_event.SelfComposedDpEvent):
      return self._maybe_compose(event.event, event.count * count, do_compose)
    elif isinstance(event, dp_event.ComposedDpEvent):
      return all(
          self._maybe_compose(e, count, do_compose) for e in event.events)
    elif isinstance(event, dp_event.GaussianDpEvent):
      if do_compose:
        # q=1.0 means no subsampling: the plain Gaussian mechanism.
        self._rdp += count * _compute_rdp_poisson_subsampled_gaussian(
            q=1.0, noise_multiplier=event.noise_multiplier, orders=self._orders)
      return True
    elif isinstance(event, dp_event.PoissonSampledDpEvent):
      # The Poisson-subsampling analysis is only valid under add/remove-one
      # adjacency.
      if self._neighboring_relation is not NeighborRel.ADD_OR_REMOVE_ONE:
        return False
      gaussian_noise_multiplier = _effective_gaussian_noise_multiplier(
          event.event)
      if gaussian_noise_multiplier is None:
        return False
      if do_compose:
        self._rdp += count * _compute_rdp_poisson_subsampled_gaussian(
            q=event.sampling_probability,
            noise_multiplier=gaussian_noise_multiplier,
            orders=self._orders)
      return True
    elif isinstance(event, dp_event.SampledWithoutReplacementDpEvent):
      # Sampling without replacement is analyzed under replace-one adjacency.
      if self._neighboring_relation is not NeighborRel.REPLACE_ONE:
        return False
      gaussian_noise_multiplier = _effective_gaussian_noise_multiplier(
          event.event)
      if gaussian_noise_multiplier is None:
        return False
      if do_compose:
        self._rdp += count * _compute_rdp_sample_wor_gaussian(
            q=event.sample_size / event.source_dataset_size,
            noise_multiplier=gaussian_noise_multiplier,
            orders=self._orders)
      return True
    else:
      # Unsupported event (including `UnsupportedDpEvent`).
      return False
  def get_epsilon(self, target_delta: float) -> float:
    return _compute_epsilon(self._orders, self._rdp, target_delta)
  def get_delta(self, target_epsilon: float) -> float:
    return _compute_delta(self._orders, self._rdp, target_epsilon)
|
tensorflow/privacy
|
tensorflow_privacy/privacy/analysis/rdp_privacy_accountant.py
|
Python
|
apache-2.0
| 20,608
|
[
"Gaussian"
] |
924c09bbfd4d8602085a9757a393353163b13baf743f7103832cfa95f7e43d96
|
"""Bayesian Gaussian Mixture Models and
Dirichlet Process Gaussian Mixture Models"""
from __future__ import print_function
# Author: Alexandre Passos (alexandre.tp@gmail.com)
# Bertrand Thirion <bertrand.thirion@inria.fr>
#
# Based on mixture.py by:
# Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# Important note for the deprecation cleaning of 0.20 :
# All the function and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_dpgmm.py'
# - 'sklearn/mixture/test_gmm.py'
import numpy as np
from scipy.special import digamma as _digamma, gammaln as _gammaln
from scipy import linalg
from scipy.linalg import pinvh
from scipy.spatial.distance import cdist
from ..externals.six.moves import xrange
from ..utils import check_random_state, check_array, deprecated
from ..utils.fixes import logsumexp
from ..utils.extmath import squared_norm, stable_cumsum
from ..utils.validation import check_is_fitted
from .. import cluster
from .gmm import _GMMBase
@deprecated("The function digamma is deprecated in 0.18 and "
"will be removed in 0.20. Use scipy.special.digamma instead.")
def digamma(x):
return _digamma(x + np.finfo(np.float32).eps)
@deprecated("The function gammaln is deprecated in 0.18 and "
"will be removed in 0.20. Use scipy.special.gammaln instead.")
def gammaln(x):
return _gammaln(x + np.finfo(np.float32).eps)
@deprecated("The function log_normalize is deprecated in 0.18 and "
"will be removed in 0.20.")
def log_normalize(v, axis=0):
"""Normalized probabilities from unnormalized log-probabilites"""
v = np.rollaxis(v, axis)
v = v.copy()
v -= v.max(axis=0)
out = logsumexp(v)
v = np.exp(v - out)
v += np.finfo(np.float32).eps
v /= np.sum(v, axis=0)
return np.swapaxes(v, 0, axis)
@deprecated("The function wishart_log_det is deprecated in 0.18 and "
"will be removed in 0.20.")
def wishart_log_det(a, b, detB, n_features):
"""Expected value of the log of the determinant of a Wishart
The expected value of the logarithm of the determinant of a
wishart-distributed random variable with the specified parameters."""
l = np.sum(digamma(0.5 * (a - np.arange(-1, n_features - 1))))
l += n_features * np.log(2)
return l + detB
@deprecated("The function wishart_logz is deprecated in 0.18 and "
"will be removed in 0.20.")
def wishart_logz(v, s, dets, n_features):
"The logarithm of the normalization constant for the wishart distribution"
z = 0.
z += 0.5 * v * n_features * np.log(2)
z += (0.25 * (n_features * (n_features - 1)) * np.log(np.pi))
z += 0.5 * v * np.log(dets)
z += np.sum(gammaln(0.5 * (v - np.arange(n_features) + 1)))
return z
def _bound_wishart(a, B, detB):
    """Returns a function of the dof, scale matrix and its determinant
    used as an upper bound in variational approximation of the evidence"""
    n_features = B.shape[0]
    # Log-partition of the posterior minus that of the identity-scale prior.
    bound = wishart_logz(a, B, detB, n_features)
    bound -= wishart_logz(n_features, np.identity(n_features), 1, n_features)
    # Expected log-determinant and trace terms of the Wishart density.
    bound += 0.5 * (a - 1) * wishart_log_det(a, B, detB, n_features)
    bound += 0.5 * a * np.trace(B)
    return bound
##############################################################################
# Variational bound on the log likelihood of each class
##############################################################################
def _sym_quad_form(x, mu, A):
"""helper function to calculate symmetric quadratic form x.T * A * x"""
q = (cdist(x, mu[np.newaxis], "mahalanobis", VI=A) ** 2).reshape(-1)
return q
def _bound_state_log_lik(X, initial_bound, precs, means, covariance_type):
"""Update the bound with likelihood terms, for standard covariance types"""
n_components, n_features = means.shape
n_samples = X.shape[0]
bound = np.empty((n_samples, n_components))
bound[:] = initial_bound
if covariance_type in ['diag', 'spherical']:
for k in range(n_components):
d = X - means[k]
bound[:, k] -= 0.5 * np.sum(d * d * precs[k], axis=1)
elif covariance_type == 'tied':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs)
elif covariance_type == 'full':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs[k])
return bound
class _DPGMMBase(_GMMBase):
    """Variational Inference for the Infinite Gaussian Mixture Model.

    DPGMM stands for Dirichlet Process Gaussian Mixture Model, and it
    is an infinite mixture model with the Dirichlet Process as a prior
    distribution on the number of clusters. In practice the
    approximate inference algorithm uses a truncated distribution with
    a fixed maximum number of components, but almost always the number
    of components actually used depends on the data.

    Stick-breaking Representation of a Gaussian mixture model
    probability distribution. This class allows for easy and efficient
    inference of an approximate posterior distribution over the
    parameters of a Gaussian mixture model with a variable number of
    components (smaller than the truncation parameter n_components).

    Initialization is with normally-distributed means and identity
    covariance, for proper convergence.

    Read more in the :ref:`User Guide <dpgmm>`.

    Parameters
    ----------
    n_components : int, default 1
        Number of mixture components.

    covariance_type : string, default 'diag'
        String describing the type of covariance parameters to
        use. Must be one of 'spherical', 'tied', 'diag', 'full'.

    alpha : float, default 1
        Real number representing the concentration parameter of
        the dirichlet process. Intuitively, the Dirichlet Process
        is as likely to start a new cluster for a point as it is
        to add that point to a cluster with alpha elements. A
        higher alpha means more clusters, as the expected number
        of clusters is ``alpha*log(N)``.

    tol : float, default 1e-3
        Convergence threshold.

    n_iter : int, default 10
        Maximum number of iterations to perform before convergence.

    params : string, default 'wmc'
        Controls which parameters are updated in the training
        process. Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars.

    init_params : string, default 'wmc'
        Controls which parameters are updated in the initialization
        process. Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars. Defaults to 'wmc'.

    verbose : int, default 0
        Controls output verbosity.

    Attributes
    ----------
    covariance_type : string
        String describing the type of covariance parameters used by
        the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.

    n_components : int
        Number of mixture components.

    weights_ : array, shape (`n_components`,)
        Mixing weights for each mixture component.

    means_ : array, shape (`n_components`, `n_features`)
        Mean parameters for each mixture component.

    precs_ : array
        Precision (inverse covariance) parameters for each mixture
        component. The shape depends on `covariance_type`::

            (`n_components`, 'n_features')                if 'spherical',
            (`n_features`, `n_features`)                  if 'tied',
            (`n_components`, `n_features`)                if 'diag',
            (`n_components`, `n_features`, `n_features`)  if 'full'

    converged_ : bool
        True when convergence was reached in fit(), False otherwise.

    See Also
    --------
    GMM : Finite Gaussian mixture model fit with EM

    VBGMM : Finite Gaussian mixture model fit with a variational
        algorithm, better for situations where there might be too little
        data to get a good estimate of the covariance matrix.
    """
    def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
                 random_state=None, tol=1e-3, verbose=0, min_covar=None,
                 n_iter=10, params='wmc', init_params='wmc'):
        # alpha is the DP concentration parameter; everything else is
        # handled by the finite-mixture base class.
        self.alpha = alpha
        super(_DPGMMBase, self).__init__(n_components, covariance_type,
                                         random_state=random_state,
                                         tol=tol, min_covar=min_covar,
                                         n_iter=n_iter, params=params,
                                         init_params=init_params,
                                         verbose=verbose)
    def _get_precisions(self):
        """Return precisions as a full matrix."""
        if self.covariance_type == 'full':
            return self.precs_
        elif self.covariance_type in ['diag', 'spherical']:
            return [np.diag(cov) for cov in self.precs_]
        elif self.covariance_type == 'tied':
            # A single shared precision, replicated once per component.
            return [self.precs_] * self.n_components
    def _get_covars(self):
        # Covariances are the (pseudo-)inverses of the precision matrices.
        return [pinvh(c) for c in self._get_precisions()]
    def _set_covars(self, covars):
        raise NotImplementedError("""The variational algorithm does
        not support setting the covariance parameters.""")
    def score_samples(self, X):
        """Return the likelihood of the data under the model.

        Compute the bound on log probability of X under the model
        and return the posterior distribution (responsibilities) of
        each mixture component for each element of X.

        This is done by computing the parameters for the mean-field of
        z for each observation.

        Parameters
        ----------
        X : array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        logprob : array_like, shape (n_samples,)
            Log probabilities of each data point in X

        responsibilities : array_like, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation
        """
        check_is_fitted(self, 'gamma_')
        X = check_array(X)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        z = np.zeros((X.shape[0], self.n_components))
        # Expected log stick-breaking proportions from the Beta posteriors
        # stored in gamma_ (columns 1 and 2 are the Beta parameters).
        sd = digamma(self.gamma_.T[1] + self.gamma_.T[2])
        dgamma1 = digamma(self.gamma_.T[1]) - sd
        dgamma2 = np.zeros(self.n_components)
        dgamma2[0] = digamma(self.gamma_[0, 2]) - digamma(self.gamma_[0, 1] +
                                                          self.gamma_[0, 2])
        # Cumulative expected log of "remaining stick" for components > 0.
        for j in range(1, self.n_components):
            dgamma2[j] = dgamma2[j - 1] + digamma(self.gamma_[j - 1, 2])
            dgamma2[j] -= sd[j - 1]
        dgamma = dgamma1 + dgamma2
        # Free memory and developers cognitive load:
        del dgamma1, dgamma2, sd
        if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
            raise NotImplementedError("This ctype is not implemented: %s"
                                      % self.covariance_type)
        p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
                                 self.precs_, self.means_,
                                 self.covariance_type)
        z = p + dgamma
        z = log_normalize(z, axis=-1)
        bound = np.sum(z * p, axis=-1)
        return bound, z
    def _update_concentration(self, z):
        """Update the concentration parameters for each cluster"""
        # sz[k] is the expected number of points assigned to component k.
        sz = np.sum(z, axis=0)
        self.gamma_.T[1] = 1. + sz
        self.gamma_.T[2].fill(0)
        # Column 2 accumulates, for each component, the mass assigned to the
        # components after it in the stick-breaking order.
        # NOTE(review): the accumulation uses sz[i] rather than sz[i + 1];
        # presumably intentional in this historical implementation — confirm
        # against the stick-breaking derivation before changing.
        for i in range(self.n_components - 2, -1, -1):
            self.gamma_[i, 2] = self.gamma_[i + 1, 2] + sz[i]
        self.gamma_.T[2] += self.alpha
    def _update_means(self, X, z):
        """Update the variational distributions for the means"""
        n_features = X.shape[1]
        for k in range(self.n_components):
            if self.covariance_type in ['spherical', 'diag']:
                # Responsibility-weighted mean, shrunk towards 0 by the
                # standard-normal prior on the means.
                num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
                num *= self.precs_[k]
                den = 1. + self.precs_[k] * np.sum(z.T[k])
                self.means_[k] = num / den
            elif self.covariance_type in ['tied', 'full']:
                if self.covariance_type == 'tied':
                    cov = self.precs_
                else:
                    cov = self.precs_[k]
                den = np.identity(n_features) + cov * np.sum(z.T[k])
                num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
                num = np.dot(cov, num)
                # Solve den @ mean = num in the least-squares sense.
                self.means_[k] = linalg.lstsq(den, num)[0]
    def _update_precisions(self, X, z):
        """Update the variational distributions for the precisions"""
        n_features = X.shape[1]
        if self.covariance_type == 'spherical':
            # Gamma posterior per component: dof_ (shape) and scale_ (rate).
            self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
            for k in range(self.n_components):
                # could be more memory efficient ?
                sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
                self.scale_[k] = 1.
                self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
                self.bound_prec_[k] = (
                    0.5 * n_features * (
                        digamma(self.dof_[k]) - np.log(self.scale_[k])))
            self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T
        elif self.covariance_type == 'diag':
            for k in range(self.n_components):
                self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
                sq_diff = (X - self.means_[k]) ** 2  # see comment above
                self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
                    z.T[k], (sq_diff + 1))
                self.precs_[k] = self.dof_[k] / self.scale_[k]
                self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
                                                   - np.log(self.scale_[k]))
                self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])
        elif self.covariance_type == 'tied':
            # Wishart posterior shared by all components.
            self.dof_ = 2 + X.shape[0] + n_features
            self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
            for k in range(self.n_components):
                diff = X - self.means_[k]
                self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
            self.scale_ = pinvh(self.scale_)
            self.precs_ = self.dof_ * self.scale_
            self.det_scale_ = linalg.det(self.scale_)
            self.bound_prec_ = 0.5 * wishart_log_det(
                self.dof_, self.scale_, self.det_scale_, n_features)
            self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
        elif self.covariance_type == 'full':
            # Independent Wishart posterior per component.
            for k in range(self.n_components):
                sum_resp = np.sum(z.T[k])
                self.dof_[k] = 2 + sum_resp + n_features
                self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
                diff = X - self.means_[k]
                self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
                self.scale_[k] = pinvh(self.scale_[k])
                self.precs_[k] = self.dof_[k] * self.scale_[k]
                self.det_scale_[k] = linalg.det(self.scale_[k])
                self.bound_prec_[k] = 0.5 * wishart_log_det(
                    self.dof_[k], self.scale_[k], self.det_scale_[k],
                    n_features)
                self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
                    self.scale_[k])
    def _monitor(self, X, z, n, end=False):
        """Monitor the lower bound during iteration

        Debug method to help see exactly when it is failing to converge as
        expected.

        Note: this is very expensive and should not be used by default."""
        if self.verbose > 0:
            print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
            if end:
                print("Cluster proportions:", self.gamma_.T[1])
                print("covariance_type:", self.covariance_type)
    def _do_mstep(self, X, z, params):
        """Maximize the variational lower bound

        Update each of the parameters to maximize the lower bound."""
        self._monitor(X, z, "z")
        self._update_concentration(z)
        self._monitor(X, z, "gamma")
        if 'm' in params:
            self._update_means(X, z)
        self._monitor(X, z, "mu")
        if 'c' in params:
            self._update_precisions(X, z)
        self._monitor(X, z, "a and b", end=True)
    def _initialize_gamma(self):
        "Initializes the concentration parameters"
        # One row per component: column 0 unused by the updates above,
        # columns 1 and 2 are the Beta posterior parameters.
        self.gamma_ = self.alpha * np.ones((self.n_components, 3))
    def _bound_concentration(self):
        """The variational lower bound for the concentration parameter."""
        # KL-style terms between the Beta posteriors (gamma_) and the
        # Beta(1, alpha) stick-breaking prior.
        logprior = gammaln(self.alpha) * self.n_components
        logprior += np.sum((self.alpha - 1) * (
            digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
                                                self.gamma_.T[2])))
        logprior += np.sum(- gammaln(self.gamma_.T[1] + self.gamma_.T[2]))
        logprior += np.sum(gammaln(self.gamma_.T[1]) +
                           gammaln(self.gamma_.T[2]))
        logprior -= np.sum((self.gamma_.T[1] - 1) * (
            digamma(self.gamma_.T[1]) - digamma(self.gamma_.T[1] +
                                                self.gamma_.T[2])))
        logprior -= np.sum((self.gamma_.T[2] - 1) * (
            digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
                                                self.gamma_.T[2])))
        return logprior
    def _bound_means(self):
        "The variational lower bound for the mean parameters"
        # Standard-normal prior on the means.
        logprior = 0.
        logprior -= 0.5 * squared_norm(self.means_)
        logprior -= 0.5 * self.means_.shape[1] * self.n_components
        return logprior
    def _bound_precisions(self):
        """Returns the bound term related to precisions"""
        logprior = 0.
        if self.covariance_type == 'spherical':
            logprior += np.sum(gammaln(self.dof_))
            logprior -= np.sum(
                (self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
            logprior += np.sum(- np.log(self.scale_) + self.dof_
                               - self.precs_[:, 0])
        elif self.covariance_type == 'diag':
            logprior += np.sum(gammaln(self.dof_))
            logprior -= np.sum(
                (self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
            logprior += np.sum(- np.log(self.scale_) + self.dof_ - self.precs_)
        elif self.covariance_type == 'tied':
            logprior += _bound_wishart(self.dof_, self.scale_, self.det_scale_)
        elif self.covariance_type == 'full':
            for k in range(self.n_components):
                logprior += _bound_wishart(self.dof_[k],
                                           self.scale_[k],
                                           self.det_scale_[k])
        return logprior
    def _bound_proportions(self, z):
        """Returns the bound term related to proportions"""
        dg12 = digamma(self.gamma_.T[1] + self.gamma_.T[2])
        dg1 = digamma(self.gamma_.T[1]) - dg12
        dg2 = digamma(self.gamma_.T[2]) - dg12
        # cz[:, k] = mass assigned to components strictly after k.
        cz = stable_cumsum(z[:, ::-1], axis=-1)[:, -2::-1]
        logprior = np.sum(cz * dg2[:-1]) + np.sum(z * dg1)
        del cz  # Save memory
        # Entropy of z, skipping near-zero entries to avoid log(0).
        z_non_zeros = z[z > np.finfo(np.float32).eps]
        logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
        return logprior
    def _logprior(self, z):
        logprior = self._bound_concentration()
        logprior += self._bound_means()
        logprior += self._bound_precisions()
        logprior += self._bound_proportions(z)
        return logprior
    def lower_bound(self, X, z):
        """returns a lower bound on model evidence based on X and membership"""
        check_is_fitted(self, 'means_')
        if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
            raise NotImplementedError("This ctype is not implemented: %s"
                                      % self.covariance_type)
        X = np.asarray(X)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        c = np.sum(z * _bound_state_log_lik(X, self._initial_bound +
                                            self.bound_prec_, self.precs_,
                                            self.means_, self.covariance_type))
        return c + self._logprior(z)
    def _set_weights(self):
        # Expected stick lengths from the Beta posteriors, renormalized so
        # the truncated weights sum to one.
        for i in xrange(self.n_components):
            self.weights_[i] = self.gamma_[i, 1] / (self.gamma_[i, 1]
                                                    + self.gamma_[i, 2])
        self.weights_ /= np.sum(self.weights_)
    def _fit(self, X, y=None):
        """Estimate model parameters with the variational
        algorithm.

        For a full derivation and description of the algorithm see
        doc/modules/dp-derivation.rst
        or
        http://scikit-learn.org/stable/modules/dp-derivation.html

        A initialization step is performed before entering the em
        algorithm. If you want to avoid this step, set the keyword
        argument init_params to the empty string '' when creating
        the object. Likewise, if you would like just to do an
        initialization, set n_iter=0.

        Parameters
        ----------
        X : array_like, shape (n, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        responsibilities : array, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation.
        """
        self.random_state_ = check_random_state(self.random_state)
        # initialization step
        X = check_array(X)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        n_samples, n_features = X.shape
        # Start from uniform responsibilities.
        z = np.ones((n_samples, self.n_components))
        z /= self.n_components
        self._initial_bound = - 0.5 * n_features * np.log(2 * np.pi)
        self._initial_bound -= np.log(2 * np.pi * np.e)
        if (self.init_params != '') or not hasattr(self, 'gamma_'):
            self._initialize_gamma()
        if 'm' in self.init_params or not hasattr(self, 'means_'):
            self.means_ = cluster.KMeans(
                n_clusters=self.n_components,
                random_state=self.random_state_).fit(X).cluster_centers_[::-1]
        if 'w' in self.init_params or not hasattr(self, 'weights_'):
            self.weights_ = np.tile(1.0 / self.n_components, self.n_components)
        if 'c' in self.init_params or not hasattr(self, 'precs_'):
            if self.covariance_type == 'spherical':
                self.dof_ = np.ones(self.n_components)
                self.scale_ = np.ones(self.n_components)
                self.precs_ = np.ones((self.n_components, n_features))
                self.bound_prec_ = 0.5 * n_features * (
                    digamma(self.dof_) - np.log(self.scale_))
            elif self.covariance_type == 'diag':
                self.dof_ = 1 + 0.5 * n_features
                self.dof_ *= np.ones((self.n_components, n_features))
                self.scale_ = np.ones((self.n_components, n_features))
                self.precs_ = np.ones((self.n_components, n_features))
                self.bound_prec_ = 0.5 * (np.sum(digamma(self.dof_) -
                                                 np.log(self.scale_), 1))
                self.bound_prec_ -= 0.5 * np.sum(self.precs_, 1)
            elif self.covariance_type == 'tied':
                self.dof_ = 1.
                self.scale_ = np.identity(n_features)
                self.precs_ = np.identity(n_features)
                self.det_scale_ = 1.
                self.bound_prec_ = 0.5 * wishart_log_det(
                    self.dof_, self.scale_, self.det_scale_, n_features)
                self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
            elif self.covariance_type == 'full':
                self.dof_ = (1 + self.n_components + n_samples)
                self.dof_ *= np.ones(self.n_components)
                self.scale_ = [2 * np.identity(n_features)
                               for _ in range(self.n_components)]
                self.precs_ = [np.identity(n_features)
                               for _ in range(self.n_components)]
                self.det_scale_ = np.ones(self.n_components)
                self.bound_prec_ = np.zeros(self.n_components)
                for k in range(self.n_components):
                    self.bound_prec_[k] = wishart_log_det(
                        self.dof_[k], self.scale_[k], self.det_scale_[k],
                        n_features)
                    self.bound_prec_[k] -= (self.dof_[k] *
                                            np.trace(self.scale_[k]))
                self.bound_prec_ *= 0.5
        # EM algorithms
        current_log_likelihood = None
        # reset self.converged_ to False
        self.converged_ = False
        for i in range(self.n_iter):
            prev_log_likelihood = current_log_likelihood
            # Expectation step
            curr_logprob, z = self.score_samples(X)
            current_log_likelihood = (
                curr_logprob.mean() + self._logprior(z) / n_samples)
            # Check for convergence.
            if prev_log_likelihood is not None:
                change = abs(current_log_likelihood - prev_log_likelihood)
                if change < self.tol:
                    self.converged_ = True
                    break
            # Maximization step
            self._do_mstep(X, z, self.params)
        if self.n_iter == 0:
            # Need to make sure that there is a z value to output
            # Output zeros because it was just a quick initialization
            z = np.zeros((X.shape[0], self.n_components))
        self._set_weights()
        return z
@deprecated("The `DPGMM` class is not working correctly and it's better "
"to use `sklearn.mixture.BayesianGaussianMixture` class with "
"parameter `weight_concentration_prior_type='dirichlet_process'` "
"instead. DPGMM is deprecated in 0.18 and will be "
"removed in 0.20.")
class DPGMM(_DPGMMBase):
"""Dirichlet Process Gaussian Mixture Models
.. deprecated:: 0.18
This class will be removed in 0.20.
Use :class:`sklearn.mixture.BayesianGaussianMixture` with
parameter ``weight_concentration_prior_type='dirichlet_process'``
instead.
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0, min_covar=None,
n_iter=10, params='wmc', init_params='wmc'):
super(DPGMM, self).__init__(
n_components=n_components, covariance_type=covariance_type,
alpha=alpha, random_state=random_state, tol=tol, verbose=verbose,
min_covar=min_covar, n_iter=n_iter, params=params,
init_params=init_params)
@deprecated("The `VBGMM` class is not working correctly and it's better "
"to use `sklearn.mixture.BayesianGaussianMixture` class with "
"parameter `weight_concentration_prior_type="
"'dirichlet_distribution'` instead. "
"VBGMM is deprecated in 0.18 and will be removed in 0.20.")
class VBGMM(_DPGMMBase):
"""Variational Inference for the Gaussian Mixture Model
.. deprecated:: 0.18
This class will be removed in 0.20.
Use :class:`sklearn.mixture.BayesianGaussianMixture` with parameter
``weight_concentration_prior_type='dirichlet_distribution'`` instead.
Variational inference for a Gaussian mixture model probability
distribution. This class allows for easy and efficient inference
of an approximate posterior distribution over the parameters of a
Gaussian mixture model with a fixed number of components.
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Read more in the :ref:`User Guide <vbgmm>`.
Parameters
----------
n_components : int, default 1
Number of mixture components.
covariance_type : string, default 'diag'
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
alpha : float, default 1
Real number representing the concentration parameter of
the dirichlet distribution. Intuitively, the higher the
value of alpha the more likely the variational mixture of
Gaussians model will use all components it can.
tol : float, default 1e-3
Convergence threshold.
n_iter : int, default 10
Maximum number of iterations to perform before convergence.
params : string, default 'wmc'
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars.
init_params : string, default 'wmc'
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default 0
Controls output verbosity.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussians.
n_components : int (read-only)
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False
otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
DPGMM : Infinite Gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
"""
    def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
                 random_state=None, tol=1e-3, verbose=0,
                 min_covar=None, n_iter=10, params='wmc', init_params='wmc'):
        # All shared mixture options are delegated to the parent class
        # initializer; only the Dirichlet concentration `alpha` is specific
        # to this variational model and stored here.
        super(VBGMM, self).__init__(
            n_components, covariance_type, random_state=random_state,
            tol=tol, verbose=verbose, min_covar=min_covar,
            n_iter=n_iter, params=params, init_params=init_params)
        self.alpha = alpha
    def _fit(self, X, y=None):
        """Estimate model parameters with the variational algorithm.

        For a full derivation and description of the algorithm see
        doc/modules/dp-derivation.rst
        or
        http://scikit-learn.org/stable/modules/dp-derivation.html

        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, set the keyword
        argument init_params to the empty string '' when creating
        the object. Likewise, if you just would like to do an
        initialization, set n_iter=0.

        Parameters
        ----------
        X : array_like, shape (n, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        responsibilities : array, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation.
        """
        # Symmetric Dirichlet prior: the user-supplied concentration `alpha`
        # is split evenly across components before delegating to the parent
        # class's variational EM loop.
        self.alpha_ = float(self.alpha) / self.n_components
        return super(VBGMM, self)._fit(X, y)
    def score_samples(self, X):
        """Return the likelihood of the data under the model.

        Compute the bound on log probability of X under the model
        and return the posterior distribution (responsibilities) of
        each mixture component for each element of X.

        This is done by computing the parameters for the mean-field of
        z for each observation.

        Parameters
        ----------
        X : array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        logprob : array_like, shape (n_samples,)
            Log probabilities of each data point in X
        responsibilities : array_like, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation
        """
        # Requires a fitted model: gamma_ is created during fit().
        check_is_fitted(self, 'gamma_')

        X = check_array(X)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        # Expected log mixing proportions E[log pi_k] under the Dirichlet
        # posterior Dir(gamma_): digamma(gamma_k) - digamma(sum(gamma_)).
        dg = digamma(self.gamma_) - digamma(np.sum(self.gamma_))

        if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
            raise NotImplementedError("This ctype is not implemented: %s"
                                      % self.covariance_type)
        # Per-sample, per-component bound on the log-likelihood term.
        p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
                                 self.precs_, self.means_,
                                 self.covariance_type)

        z = p + dg
        z = log_normalize(z, axis=-1)
        bound = np.sum(z * p, axis=-1)
        return bound, z
def _update_concentration(self, z):
for i in range(self.n_components):
self.gamma_[i] = self.alpha_ + np.sum(z.T[i])
def _initialize_gamma(self):
self.gamma_ = self.alpha_ * np.ones(self.n_components)
def _bound_proportions(self, z):
logprior = 0.
dg = digamma(self.gamma_)
dg -= digamma(np.sum(self.gamma_))
logprior += np.sum(dg.reshape((-1, 1)) * z.T)
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _bound_concentration(self):
logprior = 0.
logprior = gammaln(np.sum(self.gamma_)) - gammaln(self.n_components
* self.alpha_)
logprior -= np.sum(gammaln(self.gamma_) - gammaln(self.alpha_))
sg = digamma(np.sum(self.gamma_))
logprior += np.sum((self.gamma_ - self.alpha_)
* (digamma(self.gamma_) - sg))
return logprior
    def _monitor(self, X, z, n, end=False):
        """Monitor the lower bound during iteration

        Debug method to help see exactly when it is failing to converge as
        expected.

        Note: this is very expensive and should not be used by default."""
        # `n` is a label naming the update that was just applied (e.g. a
        # parameter name), not an iteration count.
        if self.verbose > 0:
            print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
            if end:
                # Final call: also dump the concentration parameters.
                print("Cluster proportions:", self.gamma_)
                print("covariance_type:", self.covariance_type)
def _set_weights(self):
self.weights_[:] = self.gamma_
self.weights_ /= np.sum(self.weights_)
|
MechCoder/scikit-learn
|
sklearn/mixture/dpgmm.py
|
Python
|
bsd-3-clause
| 35,901
|
[
"Gaussian"
] |
bddfe03e1ba3526cbbcc9aae9f1612d460a053726d248f1bec82288294b17452
|
import matplotlib
matplotlib.use('Agg')
import numpy as np
import netCDF4
from datetime import datetime
import pyroms
import pyroms_toolbox
import sys
def create_HYCOM_file(name, time, lon, lat, var):
    """Write one day of a 2-D HYCOM field to a NETCDF3_64BIT file.

    name : output filename
    time : time value written as ocean_time (days since 1900-01-01)
    lon, lat : 2-D coordinate arrays of shape (Mp, Lp)
    var : 2-D data array for the single time level

    NOTE(review): relies on the module-level globals ``outvarname``,
    ``long_name``, ``units`` and ``spval`` being defined before the call --
    they are set in the download loop below. Verify before reusing this
    function elsewhere.
    """
    #create netCDF file
    nc = netCDF4.Dataset(name, 'w', format='NETCDF3_64BIT')
    nc.Author = sys._getframe().f_code.co_name
    nc.Created = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    nc.title = 'HYCOM + NCODA Global 1/12 Analysis (GLBa0.08)'
    #create dimensions
    Mp, Lp = lon.shape
    nc.createDimension('lon', Lp)
    nc.createDimension('lat', Mp)
    nc.createDimension('ocean_time', None)
    #create variables
    nc.createVariable('lon', 'f', ('lat', 'lon'))
    nc.variables['lon'].long_name = 'longitude'
    nc.variables['lon'].units = 'degrees_east'
    nc.variables['lon'][:] = lon
    nc.createVariable('lat', 'f', ('lat', 'lon'))
    nc.variables['lat'].long_name = 'latitude'
    nc.variables['lat'].units = 'degrees_north'
    nc.variables['lat'][:] = lat
    nc.createVariable('ocean_time', 'f', ('ocean_time'))
    nc.variables['ocean_time'].units = 'days since 1900-01-01 00:00:00'
    nc.variables['ocean_time'].calendar = 'LEAP'
    nc.variables['ocean_time'][0] = time
    nc.createVariable(outvarname, 'f', ('ocean_time', 'lat', 'lon'), fill_value=spval)
    nc.variables[outvarname].long_name = long_name
    nc.variables[outvarname].units = units
    nc.variables[outvarname].coordinates = 'lon lat'
    nc.variables[outvarname][0] = var
    nc.close()
    print 'Done with file %s' %name
# get HYCOM Northeast Pacific data from 2007 to 2009
year = 2014
# NOTE(review): string flag compared with == 'True' below; a real boolean
# would be clearer.
retry='True'
invarname = 'ssh'
outvarname = 'ssh'
#read grid and variable attributes from the first file
url='http://tds.hycom.org/thredds/dodsC/datasets/GLBa0.08/expt_90.6/2009/2d/archv.2009_001_00_2d.nc'
dataset = netCDF4.Dataset(url)
lon = dataset.variables['Longitude'][2100:,550:4040]
lat = dataset.variables['Latitude'][2100:,550:4040]
#spval = dataset.variables[invarname]._FillValue
units = dataset.variables[invarname].units
long_name = dataset.variables[invarname].long_name
dataset.close()
retry_day = []
# loop over daily files
if year%4 == 0:
    daysinyear = 366
else:
    daysinyear = 365
# NOTE(review): hard-coded override -- only days 1..94 are processed and the
# leap-year computation above is dead code. Remove to process the full year.
daysinyear = 94
for day in range(1,daysinyear+1):
#for day in range(95,daysinyear+1):
    print 'Processing file for day %03d, year %04d' %(day, year)
    url='http://tds.hycom.org/thredds/dodsC/datasets/GLBa0.08/expt_91.0/2014/2d/archv.%04d_%03d_00_2d.nc' %(year,day)
#    url='http://tds.hycom.org/thredds/dodsC/datasets/GLBa0.08/expt_91.1/2014/2d/archv.%04d_%03d_00_2d.nc' %(year,day)
    #get data from server
    try:
        dataset = netCDF4.Dataset(url)
        var = dataset.variables[invarname][0,2100:,550:4040]
        spval = var.get_fill_value()
        dataset.close()
    except:
        # NOTE(review): bare except hides all errors (typos, interrupts),
        # not just missing files on the server.
        print 'No file on the server... We skip this day.'
        retry_day.append(day)
        continue
    #create netCDF file
    outfile = 'data/HYCOM_GLBa0.08_%s_%04d_%03d.nc' %(outvarname,year,day)
    jday = pyroms_toolbox.date2jday(datetime(year, 1, 1)) + day - 1
    create_HYCOM_file(outfile, jday, lon, lat, var)
if retry == 'True':
if len(retry_day) != 0:
print "Some file have not been downloded... Let's try again"
while len(retry_day) != 0:
for day in retry_day:
print 'Retry file for day %03d, year %04d' %(day, year)
url='http://tds.hycom.org/thredds/dodsC/datasets/GLBa0.08/expt_91.0/2014/2d/archv.%04d_%03d_00_2d.nc' %(year,day)
# url='http://tds.hycom.org/thredds/dodsC/datasets/GLBa0.08/expt_91.1/2014/2d/archv.%04d_%03d_00_2d.nc' %(year,day)
#get data from server
try:
dataset = netCDF4.Dataset(url)
var = dataset.variables[invarname][0,2100:,550:4040]
spval = var.get_fill_value()
dataset.close()
except:
print 'No file on the server... We skip this day.'
continue
#create netCDF file
outfile = 'data/HYCOM_GLBa0.08_%s_%04d_%03d.nc' %(outvarname,year,day)
jday = pyroms_toolbox.date2jday(datetime(year, 1, 1)) + day - 1
create_HYCOM_file(outfile, jday, lon, lat, var)
retry_day.remove(day)
|
dcherian/pyroms
|
examples/Arctic_HYCOM/get_hycom_GLBa0.08_ssh_2014.py
|
Python
|
bsd-3-clause
| 4,297
|
[
"NetCDF"
] |
2f9d8da07c1c826296b7ea7efdd629fb542a5f3f194cbdc5067da87ab9b81b66
|
import numpy as np
from ase.optimize.optimize import Optimizer
class MDMin(Optimizer):
    """Molecular-dynamics-style structure optimizer.

    Performs damped dynamics: after each half-step velocity update the
    velocity is zeroed if it points against the force, otherwise projected
    onto the force direction, so the system slides downhill toward a local
    minimum.
    """

    def __init__(self, atoms, restart=None, logfile='-', trajectory=None,
                 dt=None):
        # dt: integration time step. initialize() (called via the parent
        # constructor) provides the default; an explicit value overrides it.
        Optimizer.__init__(self, atoms, restart, logfile, trajectory)

        if dt is not None:
            self.dt = dt

    def initialize(self):
        # Velocities are created lazily on the first step().
        self.v = None
        self.dt = 0.2

    def read(self):
        # Restore velocities and time step from the restart file.
        self.v, self.dt = self.load()

    def step(self, f):
        atoms = self.atoms

        if self.v is None:
            self.v = np.zeros((len(atoms), 3))
        else:
            self.v += 0.5 * self.dt * f
            # Correct velocities:
            vf = np.vdot(self.v, f)
            if vf < 0.0:
                # Moving uphill against the force: reset the velocity.
                self.v[:] = 0.0
            else:
                # Keep only the velocity component along the force direction.
                self.v[:] = f * vf / np.vdot(f, f)

        self.v += 0.5 * self.dt * f
        r = atoms.get_positions()
        atoms.set_positions(r + self.dt * self.v)
        self.dump((self.v, self.dt))
|
grhawk/ASE
|
tools/ase/optimize/mdmin.py
|
Python
|
gpl-2.0
| 986
|
[
"ASE"
] |
0f3be96a2dc108cd79c1e4000ebf7b9d5d1659c315b605546e8069a02296d5ed
|
import logging
import logging.config
import struct
import threading
import traceback
import signal
from ambercommon.common import runtime
import os
from amberdriver.common import drivermsg_pb2
__author__ = 'paoolo'
LEN_SIZE = 2
LOGGER_NAME = 'AmberPipes'
pwd = os.path.dirname(os.path.abspath(__file__))
logging.config.fileConfig('%s/amber.ini' % pwd)
class AmberException(Exception):
    """Error raised for failures in Amber driver/mediator communication.

    The original exception (if any) is kept on ``self.cause`` and embedded
    in the message text.
    """

    def __init__(self, message=None, cause=None):
        # BUG FIX: the original did `message + u', caused by ' + repr(cause)`
        # unconditionally, which raised TypeError when `message` was None --
        # the default, and exactly how this file's only call site invokes it
        # (`raise AmberException(cause=e)`). Substitute an empty string.
        text = message if message is not None else u''
        super(AmberException, self).__init__(text + u', caused by ' + repr(cause))
        self.cause = cause
class AmberPipes(object):
    """Message pump between a driver and the Amber mediator over a pipe pair.

    Reads length-prefixed protobuf messages (a DriverHdr followed by a
    DriverMsg, each preceded by a 2-byte big-endian length -- see LEN_SIZE
    and the '!h' struct format below) from ``pipe_in``, dispatches them to
    ``message_handler``, and serializes outgoing replies to ``pipe_out``.
    """

    def __init__(self, message_handler, pipe_in, pipe_out):
        self.__message_handler = message_handler
        self.__pipe_in, self.__pipe_out = pipe_in, pipe_out
        self.__is_alive = True
        # Writes are serialized so header/message byte pairs never interleave
        # when multiple threads send replies.
        self.__write_lock = threading.Lock()
        self.__logger = logging.getLogger(LOGGER_NAME)
        runtime.add_shutdown_hook(self.terminate)

    def __call__(self, *args, **kwargs):
        # The instance itself is used as the pipes thread's target callable.
        self.__logger.info('Pipes thread started.')
        self.__amber_pipes_loop()

    def is_alive(self):
        # True until terminate() is called or the mediator pipe breaks.
        return self.__is_alive

    def __amber_pipes_loop(self):
        # Main receive loop. struct.error from the unpack below signals a
        # closed/broken pipe to the mediator.
        try:
            while self.__is_alive:
                header, message = self.__read_header_and_message_from_pipe()
                self.__handle_header_and_message(header, message)
        except struct.error:
            self.__logger.warning('amber_pipes: stop due to error on pipe with mediator')
        # NOTE(review): reached on any loop exit (pipe error or terminate());
        # SIGTERM is sent so the whole process shuts down with the pipe.
        self.__is_alive = False
        os.kill(os.getpid(), signal.SIGTERM)
        self.__logger.warning('amber_pipes: stop')

    def __read_header_and_message_from_pipe(self):
        """
        Read and parse header and message from pipe.

        :return: header and message
        """
        header = drivermsg_pb2.DriverHdr()
        message = drivermsg_pb2.DriverMsg()
        header = self.__read_data_from_pipe(header)
        message = self.__read_data_from_pipe(message)
        return header, message

    def __read_data_from_pipe(self, container):
        # Reads one length-prefixed blob and parses it into the given
        # protobuf container.
        data = self.__read_and_unpack_data_from_pipe(LEN_SIZE)
        container.ParseFromString(data)
        return container

    def __read_and_unpack_data_from_pipe(self, size):
        """
        Read and unpack data from pipe.

        :param size: size of length data
        :return: binary string
        """
        data = self.__read_from_pipe(size)
        # FIXME: can generate error, why?
        # '!h' = big-endian signed 16-bit length prefix.
        size = struct.unpack('!h', data)
        data = self.__read_from_pipe(size[0])
        return data

    def __read_from_pipe(self, size):
        """
        Read binary string from pipe.

        :param size: size of read string
        :return: binary string
        """
        return self.__pipe_in.read(size)

    def __handle_header_and_message(self, header, message):
        """
        Handle any message. Not serviced message are PONG and DRIVER_DIED.

        :param header: object of DriverHdr
        :param message: object of DriverMsg
        :return: nothing
        """
        # Dispatch on the protobuf message type; DATA/SUBSCRIBE/UNSUBSCRIBE
        # are delegated to the injected message handler.
        if message.type == drivermsg_pb2.DriverMsg.DATA:
            self.__logger.debug('Received DATA message')
            self.__message_handler.handle_data_message(header, message)

        elif message.type == drivermsg_pb2.DriverMsg.SUBSCRIBE:
            self.__logger.debug('Received SUBSCRIBE message')
            self.__message_handler.handle_subscribe_message(header, message)

        elif message.type == drivermsg_pb2.DriverMsg.UNSUBSCRIBE:
            self.__logger.debug('Received UNSUBSCRIBE message')
            self.__message_handler.handle_unsubscribe_message(header, message)

        elif message.type == drivermsg_pb2.DriverMsg.CLIENT_DIED:
            self.__logger.debug('Received CLIENT_DIED message')
            self.__handle_client_died_message(header, message)

        elif message.type == drivermsg_pb2.DriverMsg.PING:
            self.__logger.debug('Received PING message')
            self.__handle_ping_message(header, message)

        else:
            self.__logger.warning('Received unknown type message, ignoring.')

    def __handle_client_died_message(self, header, _):
        """
        Handle CLIENT_DIED message which came from mediator.
        Handling message delegated to message handler.

        :param header: object of DriverHdr
        :return: nothing
        """
        if len(header.clientIDs) < 1:
            self.__logger.warning('CLIENT_DIED\'s clientID not set, ignoring.')

        else:
            self.__message_handler.handle_client_died_message(header.clientIDs[0])

    def __handle_ping_message(self, ping_header, ping_message):
        """
        Handle PING message which came from mediator.

        :param ping_header: object of DriverHdr
        :param ping_message: object of DriverMsg
        :return: nothing
        """
        if not ping_message.HasField('synNum'):
            self.__logger.warning('PING\'s synNum is not set, ignoring.')

        else:
            # Reply with a PONG echoing the PING's synNum and client IDs.
            pong_message = drivermsg_pb2.DriverMsg()
            pong_message.type = drivermsg_pb2.DriverMsg.PONG
            pong_message.ackNum = ping_message.synNum

            pong_header = drivermsg_pb2.DriverHdr()
            pong_header.clientIDs.extend(ping_header.clientIDs)

            self.__logger.debug('Send PONG message')

            self.write_header_and_message_to_pipe(pong_header, pong_message)

    def write_header_and_message_to_pipe(self, header, message):
        """
        Serialize and write header and message to pipe.

        :param header: object of DriverHdr
        :param message: object of DriverMsg
        :return: nothing
        """
        self.__logger.debug('Write header and message to pipe:\nHEADER:\n%s\n---\nMESSAGE:\n%s\n---',
                            str(header).strip(), str(message).strip()[:200])

        self.__write_lock.acquire()
        try:
            header_data = header.SerializeToString()
            message_data = message.SerializeToString()
            # '!h' length prefix: serialized blobs are limited to 32767 bytes.
            header_binary_data = struct.pack('!h', len(header_data)) + header_data
            message_binary_data = struct.pack('!h', len(message_data)) + message_data
            self.__write_to_pipe(header_binary_data + message_binary_data)
        except BaseException as e:
            traceback.print_exc(e)
            raise AmberException(cause=e)
        finally:
            self.__write_lock.release()

    def __write_to_pipe(self, binary_string):
        """
        Write string binary to pipe.

        :param binary_string: binary string
        :return: nothing
        """
        self.__pipe_out.write(binary_string)
        self.__pipe_out.flush()

    def terminate(self):
        # Only takes effect once the current blocking pipe read returns.
        self.__is_alive = False
|
showmen15/testEEE
|
src/amberdriver/common/amber_pipes.py
|
Python
|
mit
| 6,730
|
[
"Amber"
] |
c0121bcff2940868e7be54a741e2e0ad72812f59074721bd8465349754cec710
|
from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from past.builtins import basestring
import os
import re
import tempfile
import moldesign as mdt
from . import ambertools
from .. import units as u
from .. import compute
from .. import utils
from .. import forcefields
from ..compute import packages
IMAGE = 'ambertools'
@utils.kwargs_from(mdt.compute.run_job)
def create_ff_parameters(mol, charges='esp', baseff='gaff2', **kwargs):
    """Parameterize ``mol``, typically using GAFF parameters.

    This will both assign a forcefield to the molecule (at ``mol.ff``) and produce the parameters
    so that they can be used in other systems (e.g., so that this molecule can be simulated
    embedded in a larger protein)

    Note:
        'am1-bcc' and 'gasteiger' partial charges will be automatically computed if necessary.
        Other charge types must be precomputed.

    Args:
        mol (moldesign.Molecule):
        charges (str or dict): what partial charges to use? Can be a dict (``{atom:charge}``) OR
            a string, in which case charges will be read from
            ``mol.properties.[charges name]``; typical values will be 'esp', 'mulliken',
            'am1-bcc', etc. Use 'zero' to set all charges to 0 (for QM/MM and testing)
        baseff (str): Name of the gaff-like forcefield file (default: gaff2)

    Returns:
        TLeapForcefield: Forcefield object for this residue
    """
    # Check that there's only 1 residue, give it a name
    assert mol.num_residues == 1
    if mol.residues[0].resname is None:
        mol.residues[0].resname = 'UNL'
        print('Assigned residue name "UNL" to %s' % mol)
    resname = mol.residues[0].resname

    # check that atoms have unique names
    if len(set(atom.name for atom in mol.atoms)) != mol.num_atoms:
        raise ValueError('This molecule does not have uniquely named atoms, cannot assign FF')

    # Compute the requested charges on demand where a method is available.
    if charges == 'am1-bcc' and 'am1-bcc' not in mol.properties:
        ambertools.calc_am1_bcc_charges(mol)
    elif charges == 'gasteiger' and 'gasteiger' not in mol.properties:
        ambertools.calc_gasteiger_charges(mol)
    elif charges == 'esp' and 'esp' not in mol.properties:
        # TODO: use NWChem ESP to calculate
        raise NotImplementedError()

    if charges == 'zero':
        charge_array = [0.0 for atom in mol.atoms]
    elif isinstance(charges, basestring):
        charge_array = u.array([mol.properties[charges][atom] for atom in mol.atoms])
        if not charge_array.dimensionless:  # implicitly convert floats to fundamental charge units
            charge_array = charge_array.to(u.q_e).magnitude
    else:
        # Assume a mapping of atom -> charge.
        charge_array = [charges[atom] for atom in mol.atoms]

    inputs = {'mol.mol2': mol.write(format='mol2'),
              'mol.charges': '\n'.join(map(str, charge_array))}

    # antechamber reads the precomputed charges ('-c rc'); tleap then builds
    # the library/parameter files, renamed to the real residue name via sed.
    cmds = ['antechamber -i mol.mol2 -fi mol2 -o mol_charged.mol2 '
            ' -fo mol2 -c rc -cf mol.charges -rn %s' % resname,
            'parmchk -i mol_charged.mol2 -f mol2 -o mol.frcmod',
            'tleap -f leap.in',
            'sed -e "s/tempresname/%s/g" mol_rename.lib > mol.lib' % resname]

    base_forcefield = forcefields.TLeapLib(baseff)
    inputs['leap.in'] = '\n'.join(["source leaprc.%s" % baseff,
                                   "tempresname = loadmol2 mol_charged.mol2",
                                   "fmod = loadamberparams mol.frcmod",
                                   "check tempresname",
                                   "saveoff tempresname mol_rename.lib",
                                   "saveamberparm tempresname mol.prmtop mol.inpcrd",
                                   "quit\n"])

    def finish_job(j):
        # Collect the generated library and parameter files into a
        # forcefield object, stack it on the base forcefield, and assign
        # it to the molecule.
        leapcmds = ['source leaprc.gaff2']
        files = {}
        for fname, f in j.glob_output("*.lib").items():
            leapcmds.append('loadoff %s' % fname)
            files[fname] = f
        for fname, f in j.glob_output("*.frcmod").items():
            leapcmds.append('loadAmberParams %s' % fname)
            files[fname] = f

        param = forcefields.TLeapForcefield(leapcmds, files)
        param.add_ff(base_forcefield)
        param.assign(mol)
        return param

    job = packages.tleap.make_job(command=' && '.join(cmds),
                                  inputs=inputs,
                                  when_finished=finish_job,
                                  name="GAFF assignment: %s" % mol.name)

    return mdt.compute.run_job(job, _return_result=True, **kwargs)
class AmberParameters(object):
    """Forcefield parameters for a system in amber ``prmtop`` format.

    Holds references to the parameter (``prmtop``) and coordinate
    (``inpcrd``) files plus, optionally, the job that produced them.
    """

    def __init__(self, prmtop, inpcrd, job=None):
        self.prmtop = prmtop
        self.inpcrd = inpcrd
        self.job = job

    def __getstate__(self):
        # Drop the job reference when pickling; it is not portable.
        state = dict(self.__dict__)
        state['job'] = None
        return state

    def to_parmed(self):
        """Write the prmtop to a scratch directory and load it with ParmEd."""
        import parmed
        scratch_path = os.path.join(tempfile.mkdtemp(), 'prmtop')
        self.prmtop.put(scratch_path)
        return parmed.load_file(scratch_path)
@utils.kwargs_from(compute.run_job)
def _run_tleap_assignment(mol, leapcmds, files=None, **kwargs):
    """
    Drives tleap to create a prmtop and inpcrd file. Specifically uses the AmberTools 16
    tleap distribution.

    Defaults are as recommended in the ambertools manual.

    Args:
        mol (moldesign.Molecule): Molecule to set up
        leapcmds (List[str]): list of the commands to load the forcefields
        files (List[pyccc.FileReference]): (optional) list of additional files
            to send
        **kwargs: keyword arguments to :meth:`compute.run_job`

    Returns:
        The value returned by :meth:`compute.run_job` for the submitted job.

    References:
        Ambertools Manual, http://ambermd.org/doc12/Amber16.pdf. See page 33 for forcefield
        recommendations.
    """
    # Copy so the caller's command list is not mutated by the append below.
    leapstr = leapcmds[:]
    inputs = {}
    if files is not None:
        inputs.update(files)
    inputs['input.pdb'] = mol.write(format='pdb')

    leapstr.append('mol = loadpdb input.pdb\n'
                   "check mol\n"
                   "saveamberparm mol output.prmtop output.inpcrd\n"
                   "savepdb mol output.pdb\n"
                   "quit\n")
    inputs['input.leap'] = '\n'.join(leapstr)

    job = packages.tleap.make_job(command='tleap -f input.leap',
                                  inputs=inputs,
                                  name="tleap, %s" % mol.name)

    return compute.run_job(job, **kwargs)
def _prep_for_tleap(mol):
    """ Returns a modified *copy* that's been modified for input to tleap

    Makes the following modifications:

    1. Reassigns all residue IDs
    2. Assigns tleap-appropriate cysteine resnames

    Args:
        mol (moldesign.Molecule): molecule to prepare

    Returns:
        moldesign.Molecule: the prepared copy (the input is not modified)
    """
    # Removed the original's `change` flag -- it was set but never read.
    clean = mdt.Molecule(mol.atoms)
    for residue in clean.residues:
        # tleap expects 1-based residue numbering.
        residue.pdbindex = residue.index+1

        if residue.resname == 'CYS':  # deal with cysteine states
            if 'SG' not in residue.atoms or 'HG' in residue.atoms:
                continue  # sulfur's missing, we'll let tleap create it
            else:
                sulfur = residue.atoms['SG']
                if sulfur.formal_charge == -1*u.q_e:
                    # Deprotonated, negatively charged cysteine.
                    residue.resname = 'CYM'
                    continue

                # check for a reasonable hybridization state
                if sulfur.formal_charge != 0 or sulfur.num_bonds not in (1, 2):
                    raise ValueError("Unknown sulfur hybridization state for %s"
                                     % sulfur)

                # check for a disulfide bond
                for otheratom in sulfur.bonded_atoms:
                    if otheratom.residue is not residue:
                        if otheratom.name != 'SG' or otheratom.residue.resname not in ('CYS', 'CYX'):
                            raise ValueError('Unknown bond from cysteine sulfur (%s)' % sulfur)

                        # if we're here, this is a cystine with a disulfide bond
                        print('INFO: disulfide bond detected. Renaming %s from CYS to CYX' % residue)
                        sulfur.residue.resname = 'CYX'

    # Rebuild internal structures so the renames/renumbering take effect.
    clean._rebuild_from_atoms()
    return clean
# Matches tleap atom specs like ".R<DC5 81>.A<P 9>":
# groups = (resname, residx, atomname, atomidx)
ATOMSPEC = re.compile(r'\.R<(\S+) ([\-0-9]+)>\.A<(\S+) ([\-0-9]+)>')


def _parse_tleap_errors(job, molin):
    """Translate tleap's stdout into structured forcefield message objects.

    Args:
        job: finished tleap job whose ``stdout`` is scanned
        molin (moldesign.Molecule): molecule the tleap input was built from,
            used to resolve residue/atom references in the log

    Returns:
        List of ``forcefields.errors`` message objects.
    """
    # TODO: special messages for known problems (e.g. histidine)
    msg = []
    unknown_res = set()  # so we can print only one error per unknown residue

    lineiter = iter(job.stdout.split('\n'))
    # tleap reports residues by PDB index; map those back to residue objects.
    offset = utils.if_not_none(molin.residues[0].pdbindex, 1)
    reslookup = {str(i+offset): r for i,r in enumerate(molin.residues)}

    def _atom_from_re(s):
        # s is one ATOMSPEC match tuple; resolve it to an atom object.
        resname, residx, atomname, atomidx = s
        r = reslookup[residx]
        a = r[atomname]
        return a

    def unusual_bond(l):
        atomre1, atomre2 = ATOMSPEC.findall(l)
        try:
            a1, a2 = _atom_from_re(atomre1), _atom_from_re(atomre2)
        except KeyError:
            a1 = a2 = None
        r1 = reslookup[atomre1[1]]
        r2 = reslookup[atomre2[1]]
        return forcefields.errors.UnusualBond(l, (a1, a2), (r1, r2))

    def _parse_tleap_logline(line):
        fields = line.split()
        if fields[0:2] == ['Unknown', 'residue:']:
            # EX: "Unknown residue: 3TE number: 499 type: Terminal/beginning"
            res = molin.residues[int(fields[4])]
            unknown_res.add(res)
            return forcefields.errors.UnknownResidue(line, res)

        elif fields[:4] == 'Warning: Close contact of'.split():
            # EX: "Warning: Close contact of 1.028366 angstroms between .R<DC5 1>.A<HO5' 1> and .R<DC5 81>.A<P 9>"
            return unusual_bond(line)

        elif fields[:6] == 'WARNING: There is a bond of'.split():
            # Matches two lines, EX:
            # "WARNING: There is a bond of 34.397700 angstroms between:"
            # "-------  .R<DG 92>.A<O3' 33> and .R<DG 93>.A<P 1>"
            nextline = next(lineiter)
            return unusual_bond(line+nextline)

        elif fields[:5] == 'Created a new atom named:'.split():
            # EX: "Created a new atom named: P within residue: .R<DC5 81>"
            residue = reslookup[fields[-1][:-1]]
            if residue in unknown_res:
                return None  # suppress atoms from an unknown res ...
            atom = residue[fields[5]]
            return forcefields.errors.UnknownAtom(line, residue, atom)

        # BUG FIX: the original compared `fields[:2]` (a list) with the
        # tuple ('FATAL:', 'Atom'); a list never equals a tuple in Python,
        # so FATAL lines were never recognized. Compare with a list.
        elif fields[:2] == ['FATAL:', 'Atom']:
            # EX: "FATAL: Atom .R<ARQ 1>.A<C30 6> does not have a type."
            assert fields[-5:] == "does not have a type.".split()
            atom = _atom_from_re(ATOMSPEC.findall(line)[0])
            return forcefields.errors.UnknownAtom(line, atom.residue, atom)

        elif (fields[:5] == '** No torsion terms for'.split() or
              fields[:5] == 'Could not find angle parameter:'.split() or
              fields[:5] == 'Could not find bond parameter for:'.split()):
            # EX: " ** No torsion terms for ca-ce-c3-hc"
            # EX: "Could not find bond parameter for: -"
            # EX: "Could not find angle parameter: - -"
            return forcefields.errors.MissingTerms(line.strip())

        else:  # ignore this line
            return None

    while True:
        try:
            line = next(lineiter)
        except StopIteration:
            break

        try:
            errmsg = _parse_tleap_logline(line)
        except (KeyError, ValueError):
            print("WARNING: failed to process TLeap message '%s'" % line)
            msg.append(forcefields.errors.ForceFieldMessage(line))
        else:
            if errmsg is not None:
                msg.append(errmsg)

    return msg
|
Autodesk/molecular-design-toolkit
|
moldesign/interfaces/tleap_interface.py
|
Python
|
apache-2.0
| 12,300
|
[
"Amber",
"NWChem"
] |
002610ecf7e306763beca6adc57ba1eda0c4d93e113a1d6202bf8f3686177125
|
# Mask Module
import numpy as np
def mask_classic(center, r, shape):
    """Return the classic binary mask in the image space I defined by shape (see MIS).

    The classic mask takes the center of a circle ``center = (xc, yc)`` (in
    matplotlib event coordinates, where x is the column and y the row) and its
    radius ``r``, and puts 1 inside the circle and 0 outside. In addition, the
    center of the mask is used to return ``g_0``, a first estimation of the
    unstrained reference.

    Args:
        center: (xc, yc) circle center in matplotlib event coordinates.
        r: circle radius in pixels.
        shape: (n_rows, n_cols) of the image space.

    Returns:
        (mask, g_0) tuple of float arrays; mask has shape ``shape``.
    """
    """Do not forget event coordinate (x,y) from matplotlib should be switched compared to numpy array indexing"""
    g_0 = np.array([(center[1] - 0.5 * shape[0]) / shape[0] * np.ones(shape),
                    (center[0] - 0.5 * shape[1]) / shape[1] * np.ones(shape)])
    # BUG FIX: the original looped `i` over range(shape[1]) and `j` over
    # range(shape[0]) while indexing mask[i, j], which raises IndexError for
    # non-square shapes. The row index must span shape[0] and the column
    # index shape[1]; row is compared against center[1] and column against
    # center[0] (matplotlib coordinate switch). Square images are unchanged.
    rows, cols = np.ogrid[0:shape[0], 0:shape[1]]
    inside = (rows - center[1]) ** 2 + (cols - center[0]) ** 2 < r ** 2
    mask = inside.astype(float)
    return mask, g_0
def mask_gaussian(center, r, shape):
    """Return the Gaussian mask in the image space I defined by shape (see MIS).

    The Gaussian mask takes the center of a circle ``center = (xc, yc)`` (in
    matplotlib event coordinates, where x is the column and y the row) and its
    radius ``r`` to generate a 2D Gaussian centered on the circle. In addition,
    the center of the mask is used to return ``g_0``, the unstrained reference.

    Args:
        center: (xc, yc) circle center in matplotlib event coordinates.
        r: circle radius; corresponds to 3 * sigma, so ~99% of the Gaussian
            lies inside the circle.
        shape: (n_rows, n_cols) of the image space.

    Returns:
        (mask, g_0) tuple of float arrays; mask has shape ``shape``.
    """
    """Do not forget event coordinate (x,y) from matplotlib should be switched compared to numpy array indexing"""
    g_0 = np.array([(center[1] - 0.5 * shape[0]) / shape[0] * np.ones(shape),
                    (center[0] - 0.5 * shape[1]) / shape[1] * np.ones(shape)])
    # r corresponds to 3 * sigma => 99% gaussian mask included in circle
    const = 1 / (2 * (r / 3) ** 2)
    # BUG FIX: np.meshgrid(arange(shape[0]), arange(shape[1])) uses 'xy'
    # indexing and returns arrays of shape (shape[1], shape[0]), so for
    # non-square images the mask came out transposed and inconsistent with
    # g_0 and mask_classic. Swapping the arguments yields a
    # (shape[0], shape[1]) mask with columns compared against center[0] and
    # rows against center[1], matching mask_classic. Square images are
    # unaffected.
    mesh_x, mesh_y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
    delta_x = (mesh_x - center[0]) ** 2
    delta_y = (mesh_y - center[1]) ** 2
    mask = np.exp(-(delta_x + delta_y) * const)
    return mask, g_0
|
slimpotatoes/STEM_Moire_GPA
|
src/mask.py
|
Python
|
bsd-3-clause
| 2,214
|
[
"Gaussian"
] |
e4383c6d48d37259b50aa188b5fbe17adcd904fe6e65f627b2eaa727fdc109ab
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
import cifar10_input
parser = argparse.ArgumentParser()

# Basic model parameters.
parser.add_argument('--batch_size', type=int, default=128,
                    help='Number of images to process in a batch.')

parser.add_argument('--data_dir', type=str, default='/tmp/cifar10_data',
                    help='Path to the CIFAR-10 data directory.')

parser.add_argument('--use_fp16', type=bool, default=False,
                    help='Train the model using fp16.')

# NOTE(review): parse_args() runs at import time, consuming sys.argv of any
# program that imports this module. `type=bool` is also a known argparse
# pitfall: any non-empty string (including "False") parses as True.
FLAGS = parser.parse_args()

# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL


# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999     # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0      # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1  # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1       # Initial learning rate.

# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'

DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
  """Helper to create summaries for activations.

  Creates a summary that provides a histogram of activations.
  Creates a summary that measures the sparsity of activations.

  Args:
    x: Tensor
  Returns:
    nothing
  """
  # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
  # session. This helps the clarity of presentation on tensorboard.
  tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
  tf.summary.histogram(tensor_name + '/activations', x)
  # Sparsity = fraction of zero entries in the activation tensor.
  tf.summary.scalar(tensor_name + '/sparsity',
                                       tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
  """Helper to create a Variable stored on CPU memory.

  Args:
    name: name of the variable
    shape: list of ints
    initializer: initializer for Variable

  Returns:
    Variable Tensor
  """
  with tf.device('/cpu:0'):
    # dtype follows the --use_fp16 flag so the whole model switches
    # precision consistently.
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
  return var
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
  var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
  if wd is not None:
    # The decay term is registered in the 'losses' collection; the total
    # loss is presumably assembled by summing that collection elsewhere.
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
def distorted_inputs():
  """Construct distorted input for CIFAR training using the Reader ops.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: If no data_dir
  """
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')
  data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
  images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
                                                  batch_size=FLAGS.batch_size)
  # Cast down to fp16 when requested so the input pipeline matches the
  # variables created by _variable_on_cpu.
  if FLAGS.use_fp16:
    images = tf.cast(images, tf.float16)
    labels = tf.cast(labels, tf.float16)
  return images, labels
def inputs(eval_data):
  """Construct undistorted input for CIFAR evaluation via the Reader ops.

  Args:
    eval_data: bool, indicating if one should use the train or eval data set.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: If no data_dir
  """
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')
  images, labels = cifar10_input.inputs(
      eval_data=eval_data,
      data_dir=os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin'),
      batch_size=FLAGS.batch_size)
  # Optionally down-cast to half precision to match the model variables.
  if FLAGS.use_fp16:
    images = tf.cast(images, tf.float16)
    labels = tf.cast(labels, tf.float16)
  return images, labels
def inference(images):
  """Build the CIFAR-10 model.

  Architecture: conv1 -> pool1 -> norm1 -> conv2 -> norm2 -> pool2 ->
  local3 (fully connected) -> local4 (fully connected) -> linear logits.

  Args:
    images: Images returned from distorted_inputs() or inputs().
  Returns:
    Logits.
  """
  # We instantiate all variables using tf.get_variable() instead of
  # tf.Variable() in order to share variables across multiple GPU training runs.
  # If we only ran this model on a single GPU, we could simplify this function
  # by replacing all instances of tf.get_variable() with tf.Variable().
  #
  # conv1: 5x5 convolution, 3 -> 64 channels, stride 1, no weight decay.
  with tf.variable_scope('conv1') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 3, 64],
                                         stddev=5e-2,
                                         wd=0.0)
    conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
    pre_activation = tf.nn.bias_add(conv, biases)
    conv1 = tf.nn.relu(pre_activation, name=scope.name)
    _activation_summary(conv1)
  # pool1: 3x3 max pooling with stride 2 (overlapping pooling).
  pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                         padding='SAME', name='pool1')
  # norm1: local response normalization across 2*4+1 adjacent channels.
  norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm1')
  # conv2: 5x5 convolution, 64 -> 64 channels; biases start at 0.1.
  with tf.variable_scope('conv2') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 64, 64],
                                         stddev=5e-2,
                                         wd=0.0)
    conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
    pre_activation = tf.nn.bias_add(conv, biases)
    conv2 = tf.nn.relu(pre_activation, name=scope.name)
    _activation_summary(conv2)
  # norm2: note norm comes BEFORE pooling here, the reverse of layer 1.
  norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm2')
  # pool2
  pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1], padding='SAME', name='pool2')
  # local3: first fully connected layer, flattened conv output -> 384 units.
  with tf.variable_scope('local3') as scope:
    # Move everything into depth so we can perform a single matrix multiply.
    reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
    dim = reshape.get_shape()[1].value
    # These dense layers carry L2 weight decay (wd=0.004), unlike the convs.
    weights = _variable_with_weight_decay('weights', shape=[dim, 384],
                                          stddev=0.04, wd=0.004)
    biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
    local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
    _activation_summary(local3)
  # local4: second fully connected layer, 384 -> 192 units.
  with tf.variable_scope('local4') as scope:
    weights = _variable_with_weight_decay('weights', shape=[384, 192],
                                          stddev=0.04, wd=0.004)
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
    local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
    _activation_summary(local4)
  # linear layer(WX + b),
  # We don't apply softmax here because
  # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
  # and performs the softmax internally for efficiency.
  with tf.variable_scope('softmax_linear') as scope:
    weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
                                          stddev=1/192.0, wd=0.0)
    biases = _variable_on_cpu('biases', [NUM_CLASSES],
                              tf.constant_initializer(0.0))
    softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
    _activation_summary(softmax_linear)
  return softmax_linear
def loss(logits, labels):
  """Build the total loss: batch-mean cross entropy plus all L2 decay terms.

  Add summary for "Loss" and "Loss/avg".

  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # sparse_softmax_cross_entropy_with_logits expects int64 class indices.
  per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=tf.cast(labels, tf.int64), logits=logits,
      name='cross_entropy_per_example')
  batch_mean = tf.reduce_mean(per_example, name='cross_entropy')
  tf.add_to_collection('losses', batch_mean)
  # The 'losses' collection also holds the weight-decay terms registered by
  # _variable_with_weight_decay, so summing it yields the full objective.
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
  """Attach moving-average tracking and scalar summaries to all losses.

  Generates moving average for all losses and associated summaries for
  visualizing the performance of the network.

  Args:
    total_loss: Total loss from loss().

  Returns:
    loss_averages_op: op for generating moving averages of losses.
  """
  # Track an exponential moving average (decay 0.9) of every individual
  # loss plus the total loss.
  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
  all_losses = tf.get_collection('losses') + [total_loss]
  loss_averages_op = loss_averages.apply(all_losses)
  for l in all_losses:
    # The raw value gets a ' (raw)' suffix; the smoothed value keeps the
    # original op name so it reads as the primary curve on tensorboard.
    tf.summary.scalar(l.op.name + ' (raw)', l)
    tf.summary.scalar(l.op.name, loss_averages.average(l))
  return loss_averages_op
def train(total_loss, global_step):
  """Train CIFAR-10 model.

  Create an optimizer and apply to all trainable variables. Add moving
  average for all trainable variables.

  Args:
    total_loss: Total loss from loss().
    global_step: Integer Variable counting the number of training steps
      processed.
  Returns:
    train_op: op for training.
  """
  # Variables that affect learning rate.
  num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
  decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
  # Decay the learning rate exponentially based on the number of steps.
  # staircase=True makes the rate drop in discrete steps every decay_steps.
  lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                                  global_step,
                                  decay_steps,
                                  LEARNING_RATE_DECAY_FACTOR,
                                  staircase=True)
  tf.summary.scalar('learning_rate', lr)
  # Generate moving averages of all losses and associated summaries.
  loss_averages_op = _add_loss_summaries(total_loss)
  # Compute gradients. The control dependency forces the loss moving
  # averages to be updated before each optimization step runs.
  with tf.control_dependencies([loss_averages_op]):
    opt = tf.train.GradientDescentOptimizer(lr)
    grads = opt.compute_gradients(total_loss)
  # Apply gradients; this also increments global_step.
  apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
  # Add histograms for trainable variables.
  for var in tf.trainable_variables():
    tf.summary.histogram(var.op.name, var)
  # Add histograms for gradients. grad can be None for variables that do
  # not influence total_loss.
  for grad, var in grads:
    if grad is not None:
      tf.summary.histogram(var.op.name + '/gradients', grad)
  # Track the moving averages of all trainable variables (used at eval time
  # to read the smoothed weights instead of the raw ones).
  variable_averages = tf.train.ExponentialMovingAverage(
      MOVING_AVERAGE_DECAY, global_step)
  variables_averages_op = variable_averages.apply(tf.trainable_variables())
  # train_op is a no-op whose control deps run the actual update + averaging.
  with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
    train_op = tf.no_op(name='train')
  return train_op
def maybe_download_and_extract():
  """Download and extract the tarball from Alex's website."""
  dest_directory = FLAGS.data_dir
  if not os.path.exists(dest_directory):
    os.makedirs(dest_directory)
  filename = DATA_URL.split('/')[-1]
  filepath = os.path.join(dest_directory, filename)
  # Only download when the tarball is not already on disk.
  if not os.path.exists(filepath):
    def _progress(count, block_size, total_size):
      # urlretrieve reporthook: render an in-place percentage counter.
      percent = float(count * block_size) / float(total_size) * 100.0
      sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename, percent))
      sys.stdout.flush()
    filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
    print()
    print('Successfully downloaded', filename, os.stat(filepath).st_size,
          'bytes.')
  # Likewise only unpack when the extracted directory is missing.
  extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
  if not os.path.exists(extracted_dir_path):
    tarfile.open(filepath, 'r:gz').extractall(dest_directory)
|
zlpmichelle/crackingtensorflow
|
cifar10/cifar10.py
|
Python
|
apache-2.0
| 14,729
|
[
"Gaussian"
] |
88ac565279110f6b5a36ac41d0136b4f567f3fd18acda5473d0b6efaf49c294d
|
#!/usr/bin/python3
# Doom "basic" scenario driven by a deep_feedback_learning network: the
# screen is split into left/right halves, their brightness difference
# steers the aim, and the network learns a correction signal from it.
from __future__ import print_function
from vizdoom import *
import sys
import threading
import math
from random import choice
from time import sleep
from matplotlib import pyplot as plt
sys.path.append('../../deep_feedback_learning')
import numpy as np
import cv2
import deep_feedback_learning
# Create DoomGame instance. It will run the game and communicate with you.
game = DoomGame()
# Now it's time for configuration!
# load_config could be used to load configuration instead of doing it here with code.
# If load_config is used in-code configuration will also work - most recent changes will add to previous ones.
# game.load_config("../../scenarios/basic.cfg")
# Sets path to additional resources wad file which is basically your scenario wad.
# If not specified default maps will be used and it's pretty much useless... unless you want to play good old Doom.
game.set_doom_scenario_path("./basic.wad")
# Sets map to start (scenario .wad files can contain many maps).
game.set_doom_map("map01")
# Sets resolution. Default is 320X240
game.set_screen_resolution(ScreenResolution.RES_640X480)
# create masks for left and right visual fields - note that these only cover the upper half of the image
# this is to help prevent the tracking getting confused by the floor pattern
# The network input is half the screen resolution in each dimension.
width = 640
widthNet = 320
height = 480
heightNet = 240
# Sets the screen buffer format. Not used here but now you can change it. Defalut is CRCGCB.
game.set_screen_format(ScreenFormat.RGB24)
# Enables depth buffer.
game.set_depth_buffer_enabled(True)
# Enables labeling of in game objects labeling.
game.set_labels_buffer_enabled(True)
# Enables buffer with top down map of the current episode/level.
game.set_automap_buffer_enabled(True)
# Sets other rendering options
game.set_render_hud(False)
game.set_render_minimal_hud(False)  # If hud is enabled
game.set_render_crosshair(True)
game.set_render_weapon(False)
game.set_render_decals(False)
game.set_render_particles(False)
game.set_render_effects_sprites(False)
game.set_render_messages(False)
game.set_render_corpses(False)
# Adds buttons that will be allowed.
# game.add_available_button(Button.MOVE_LEFT)
# game.add_available_button(Button.MOVE_RIGHT)
game.add_available_button(Button.MOVE_LEFT_RIGHT_DELTA, 50)
game.add_available_button(Button.ATTACK)
game.add_available_button(Button.TURN_LEFT_RIGHT_DELTA)
# Adds game variables that will be included in state.
game.add_available_game_variable(GameVariable.AMMO2)
# Causes episodes to finish after 200 tics (actions)
# NOTE(review): the timeout actually set below is 500 tics, not 200.
game.set_episode_timeout(500)
# Makes episodes start after 10 tics (~after raising the weapon)
game.set_episode_start_time(10)
# Makes the window appear (turned on by default)
game.set_window_visible(True)
# Turns on the sound. (turned off by default)
game.set_sound_enabled(True)
# Sets the livin reward (for each move) to -1
game.set_living_reward(-1)
# Sets ViZDoom mode (PLAYER, ASYNC_PLAYER, SPECTATOR, ASYNC_SPECTATOR, PLAYER mode is default)
game.set_mode(Mode.PLAYER)
# Enables engine output to console.
#game.set_console_enabled(True)
# Network topology: convolutional input layer over the half-res frame,
# two small hidden layers, one output neuron.
nFiltersInput = 3
nFiltersHidden = 3
minT = 3
maxT = 30
nHidden0 = 4
nHidden1 = 2
net = deep_feedback_learning.DeepFeedbackLearning(widthNet*heightNet,[nHidden0*nHidden0,nHidden1*nHidden1], 1, nFiltersInput, nFiltersHidden, minT,maxT)
net.getLayer(0).setConvolution(widthNet,heightNet)
net.getLayer(1).setConvolution(nHidden0,nHidden0)
net.initWeights(0.5,0,deep_feedback_learning.Neuron.MAX_OUTPUT_RANDOM);
net.setLearningRate(0)
net.setAlgorithm(deep_feedback_learning.DeepFeedbackLearning.backprop);
# net.getLayer(0).setInputNorm2ZeroMean(128,256)
# Per-layer learning rates; layer 0 is effectively frozen (1E-10).
net.getLayer(0).setLearningRate(1E-10)
net.getLayer(1).setLearningRate(0.00001)
net.getLayer(2).setLearningRate(0.001)
#net.getLayer(1).setNormaliseWeights(True)
#net.getLayer(2).setNormaliseWeights(True)
net.setUseDerivative(0)
net.setBias(1)
# Initialize the game. Further configuration won't take any effect from now on.
game.init()
# Run this many episodes
episodes = 1000
# Sets time that will pause the engine after each action (in seconds)
# Without this everything would go too fast for you to keep track of what's happening.
sleep_time = 1.0 / DEFAULT_TICRATE # = 0.028
# State carried across frames: previous delta, shooting cooldown, and a
# counter that zeroes the error signal for a few frames after a shot.
delta2 = 0
dontshoot = 1
deltaZeroCtr = 1
inp = np.zeros(widthNet*heightNet)
# 3x3 kernels: 'sharpen' boosts local contrast, 'edge' is a Laplacian.
sharpen = np.array((
	[0, 1, 0],
	[1, 4, 1],
	[0, 1, 0]), dtype="int")
edge = np.array((
	[0, 1, 0],
	[1, -4, 1],
	[0, 1, 0]), dtype="int")
plt.ion()
plt.show()
# Handles of the weight-image plots so they can be replaced each refresh.
ln1 = False
ln2 = [False,False,False,False]
def getWeights2D(neuron):
    """Return layer-0 weights of one neuron as a (heightNet, widthNet) array.

    Inputs that are masked out for this neuron are set to NaN so they can
    be skipped / rendered as gaps when the weights are plotted.
    """
    cell = net.getLayer(0).getNeuron(neuron)
    n_inputs = cell.getNinputs()
    weights = np.zeros(n_inputs)
    for idx in range(n_inputs):
        if cell.getMask(idx):
            weights[idx] = cell.getAvgWeight(idx)
        else:
            weights[idx] = np.nan
    return weights.reshape(heightNet, widthNet)
def getWeights1D(layer,neuron):
    """Return all averaged input weights of one neuron in the given layer."""
    cell = net.getLayer(layer).getNeuron(neuron)
    n_inputs = cell.getNinputs()
    return np.array([cell.getAvgWeight(i) for i in range(n_inputs)], dtype=float)
def plotWeights():
    """Endlessly redraw weight images; runs on a background thread.

    Figure 1 shows the layer-0 receptive fields merged into one image
    (per-neuron NaN masks let each neuron contribute its own pixels);
    figures 2 and 3 show the weight matrices of layers 1 and 2.
    """
    global ln1
    global ln2
    while True:
        # Remove the previous image artist before drawing a fresh one.
        if ln1:
            ln1.remove()
        plt.figure(1)
        w1 = getWeights2D(0)
        for i in range(1,net.getLayer(0).getNneurons()):
            w2 = getWeights2D(i)
            # Overlay: keep w1 where w2 is masked (NaN), else take w2.
            w1 = np.where(np.isnan(w2),w1,w2)
        ln1 = plt.imshow(w1,cmap='gray')
        plt.draw()
        plt.pause(0.1)
        # One figure per hidden/output layer: rows = neurons, cols = inputs.
        for j in range(1,3):
            if ln2[j]:
                ln2[j].remove()
            plt.figure(j+1)
            w1 = np.zeros( (net.getLayer(j).getNneurons(),net.getLayer(j).getNeuron(0).getNinputs()) )
            for i in range(0,net.getLayer(j).getNneurons()):
                w1[i,:] = getWeights1D(j,i)
            ln2[j] = plt.imshow(w1,cmap='gray')
            plt.draw()
            plt.pause(0.1)
# Live weight visualization runs concurrently with the game loop.
t1 = threading.Thread(target=plotWeights)
t1.start()
for i in range(episodes):
    print("Episode #" + str(i + 1))
    # Starts a new episode. It is not needed right after init() but it doesn't cost much. At least the loop is nicer.
    game.new_episode()
    while not game.is_episode_finished():
        # Gets the state
        state = game.get_state()
        # Which consists of:
        n = state.number
        vars = state.game_variables
        screen_buf = state.screen_buffer
        depth_buf = state.depth_buffer
        labels_buf = state.labels_buffer
        automap_buf = state.automap_buffer
        labels = state.labels
        midlinex = int(width/2);
        midliney = int(height*0.75);
        crcb = screen_buf
        # Blue channel of the upper-left and upper-right visual fields
        # (rows below 100 and under midliney, skipping the floor pattern).
        screen_left = screen_buf[100:midliney,0:midlinex-1,2]
        screen_right = screen_buf[100:midliney,midlinex+1:(width-1),2]
        screen_left = cv2.filter2D(screen_left, -1, sharpen);
        screen_right = cv2.filter2D(screen_right, -1, sharpen);
        # cv2.imwrite('/tmp/left.png',screen_left)
        # cv2.imwrite('/tmp/right.png',screen_right)
        # Brightness imbalance between the half-fields is the steering
        # error signal; dd is its frame-to-frame change.
        lavg = np.average(screen_left)
        ravg = np.average(screen_right)
        delta = (lavg - ravg)*15
        dd = delta - delta2
        delta2 = delta
        # print(delta)
        # Fire only when the aim is steady (small dd) and the cooldown
        # from the previous shot has expired.
        shoot = 0
        if (dontshoot > 1) :
            dontshoot = dontshoot - 1
        else :
            if (abs(dd) < 10) :
                shoot = 1
                dontshoot = 60
                deltaZeroCtr = 4
        # Suppress the error signal for a few frames right after a shot.
        if deltaZeroCtr>0:
            deltaZeroCtr = deltaZeroCtr - 1
            delta = 0
        # Network input: edge-filtered blue channel at half resolution.
        blue = cv2.resize(crcb, (widthNet,heightNet));
        blue = blue[:,:,2]
        blue = cv2.filter2D(blue, -1, edge);
        err = np.linspace(delta,delta,nHidden0*nHidden0);
        net.doStep(blue.flatten()/512-0.5,err[:1])
        #weightsplot.set_xdata(np.append(weightsplot.get_xdata(),n))
        #weightsplot.set_ydata(np.append(weightsplot.get_ydata(),net.getLayer(0).getWeightDistanceFromInitialWeights()))
        output = net.getOutput(0)*5
        print(delta,output,
              net.getLayer(0).getWeightDistanceFromInitialWeights(),"\t",
              net.getLayer(1).getWeightDistanceFromInitialWeights(),"\t",
              net.getLayer(2).getWeightDistanceFromInitialWeights())
        # action[0] is translating left/right; action[2] is rotating/aiming
        # action = [ delta+output , shoot, 0. ]
        action = [ 0., shoot, (delta+output)*0.1 ]
        r = game.make_action(action)
        # if sleep_time > 0:
        #     sleep(sleep_time)
    # Check how the episode went.
    print("Episode finished.")
    print("Total reward:", game.get_total_reward())
    print("************************")
    sleep(1)
# It will be done automatically anyway but sometimes you need to do it in the middle of the program...
game.close()
|
nlholdem/icodoom
|
ICO1/deep_feedback_learning_old/vizdoom/backprop1.py
|
Python
|
gpl-3.0
| 8,991
|
[
"NEURON"
] |
96072eb3af3685288814fb3e7bfe3576f83c4e6c3f8614791ab2a8b419f2cf88
|
"""Labels PDB file with site entropies.
To run this script, open ``pymol`` in the directory where the script is found.
Then at the ``pymol`` command line type::
run entropy_label_PDB.py
This script was written by Jesse Bloom, 2014."""
import mapmuts.io
import mapmuts.bayesian
def main():
    """Main body of script.

    Computes per-site entropies from amino-acid preferences, writes them
    into the B-factor column of a PDB structure, and renders entropy-colored
    images of the full HA and of several residue subsets.

    NOTE(review): relies on the PyMOL builtin `cmd` being in scope, so it
    must be run from inside pymol (via `run entropy_label_PDB.py`).
    Python 2 syntax (print statements, list .sort on dict keys).
    """
    # input / output files
    aapreferencesfile = 'average_equilibriumpreferences.txt'
    pdbfile = 'PDB_structure/1RVX_trimer_renumbered.pdb'
    subsets = ['allRBS',
               'conservedRBS',
               'antigenicsites',
               'nearantigenicsites',
              ]
    # subset name -> file listing the residue numbers of that subset
    files = {
            'allRBS':'allRBS_residues.txt',
            'conservedRBS':'receptor_binding_residues.txt',
            'antigenicsites':'Caton_H1_HA_antigenic_sites.txt',
            'nearantigenicsites':'nearby_antigenic_sites.txt',
            }
    imagefile = 'PDB_structure/entropy_colored_structure.png'
    dpi = 350
    # compute entropies
    print "\nComputing entropies from amino-acid preferences in %s..." % aapreferencesfile
    aapreferences = mapmuts.io.ReadEntropyAndEquilFreqs(aapreferencesfile)
    mapmuts.bayesian.PreferencesRemoveStop(aapreferences)
    sites = aapreferences.keys()
    sites.sort()
    entropies = {}
    # Site entropy from the 'PI_<aa>' preference of each amino acid.
    # NOTE(review): uses mapmuts.sequtils, which is not imported at the top
    # of this file -- presumably pulled in by the mapmuts package; verify.
    for r in sites:
        entropies[r] = mapmuts.bayesian.SiteEntropy(dict([(aa, aapreferences[r]['PI_%s' % aa]) for aa in mapmuts.sequtils.AminoAcids()]))
    # load the pdbfile and replace residue b-factors with entropies
    cmd.delete('all')
    cmd.load(pdbfile, 'fullPDB')
    cmd.show('cartoon', 'fullPDB')
    for r in sites:
        cmd.alter("fullPDB and resi %d" % r, "b = %g" % entropies[r])
    # color the entire HA molecule by entropy
    print "Coloring entire HA..."
    cmd.hide('everything')
    cmd.bg_color('white')
    cmd.show('surface', 'fullPDB')
    cmd.select(None)
    # spectrum over the b-factor column, which now holds the entropies
    cmd.spectrum('b', 'blue_red', 'fullPDB')
    cmd.color('gray', 'chain B+C')
    # make image (fixed camera so all renders are comparable)
    cmd.set_view (\
        '0.856079578, 0.142146140, -0.496918648,' +\
        '0.514902651, -0.151199639, 0.843809366,' +\
        '0.044809252, -0.978229046, -0.202629298,' +\
        '0.000000000, 0.000000000, -426.549072266,' +\
        '75.983520508, 0.051200867, 14.679933548,' +\
        '340.855773926, 512.242492676, -20.000000000')
    cmd.ray(1024 * 2, 768 * 2)
    cmd.png(imagefile, dpi=dpi)
    print "Wrote image to %s" % imagefile
    # color the HA subsets
    for subset in subsets:
        cmd.hide('everything')
        cmd.bg_color('white')
        cmd.show('cartoon', 'fullPDB')
        cmd.color('gray', 'fullPDB')
        cmd.select('HA1_m1', 'chain A and resi 1-365')
        cmd.select(None)
        # residue numbers, skipping blank and '#'-comment lines
        selectedsites = [line.split()[0] for line in open(files[subset]).readlines() if (not line.isspace()) and line[0] != '#']
        cmd.spectrum('b', 'blue_red', 'chain A and resi %s' % '+'.join([r for r in selectedsites]))
        cmd.show('spheres', 'chain A and resi %s' % '+'.join([r for r in selectedsites]))
        # make image
        cmd.set_view (\
            '0.856079578, 0.142146140, -0.496918648,' +\
            '0.514902651, -0.151199639, 0.843809366,' +\
            '0.044809252, -0.978229046, -0.202629298,' +\
            '0.000000000, 0.000000000, -426.549072266,' +\
            '75.983520508, 0.051200867, 14.679933548,' +\
            '340.855773926, 512.242492676, -20.000000000')
        cmd.ray(1024 * 2, 768 * 2)
        ifile = 'PDB_structure/%s_entropy_colored_structure.png' % subset
        cmd.png(ifile, dpi=dpi)
        print "Wrote image to %s" % ifile
    print "Script complete"
main() # run the script
|
jbloom/mapmuts
|
examples/WSN_HA_2014Analysis/entropy_label_PDB.py
|
Python
|
gpl-3.0
| 3,685
|
[
"PyMOL"
] |
1cd2d35405b36bc603e4506cc71204a7c84096bcec5c313a67298ab3e7225905
|
import numpy as np
from rdkit import Chem
import os.path as osp
from mastic.selection import CoordArray, IndexedSelection, Selection, \
GenericSelection
from mastic.molecule import Bond, Molecule, Atom, \
MoleculeType, AtomType, BondType
from mastic.interfaces.rdkit import RDKitMoleculeWrapper
import mastic.config.molecule as masticmolconfig
### Making AtomTypes, BondTypes, and MoleculeTypes
# Demonstration script: builds toy types by hand, then builds real types
# from a PDB file via the RDKit wrapper, then instantiates Atoms/Bonds/
# Molecules from both.
print("making AtomTypes")
atom1_attrs = {'pdb_name' : "FAKE1"}
Atom1Type = AtomType("Atom1Type", **atom1_attrs)
atom2_attrs = {'pdb_name' : "FAKE2"}
Atom2Type = AtomType("Atom2Type", **atom2_attrs)
atom3_attrs = {'pdb_name' : "FAKE3"}
Atom3Type = AtomType("Atom3Type", **atom3_attrs)
print("making BondType")
bond1_attrs = {'bond_type' : "TRIPLE"}
Bond1Type = BondType("Bond1Type", atom_types=(Atom1Type, Atom2Type), **bond1_attrs)
print("making MoleculeType")
molecule1_attrs = {'name' : "FAKE"}
# bond_map: bond index -> (atom index, atom index)
Molecule1Type = MoleculeType("Molecule1Type",
                             atom_types=[Atom1Type, Atom2Type],
                             bond_types=[Bond1Type], bond_map = {0 : (0,1)},
                             **molecule1_attrs)
### From an external representation
print("Read in an external representation")
# NOTE(review): hard-coded personal path; the script only runs where this
# PDB file exists.
tspo_dir = osp.expanduser("~/Dropbox/lab/tspo")
PKA_pdb_path = osp.join(tspo_dir, "PKA.pdb")
pka_rdkit = Chem.MolFromPDBFile(PKA_pdb_path, removeHs=False)
# cast external representation to a wrapper
pka_rdkit_wrapper = RDKitMoleculeWrapper(pka_rdkit, mol_name="PKA")
print(pka_rdkit_wrapper)
# extract data from it
# atom type data
pka_atom_data = pka_rdkit_wrapper.atoms_data()
print(pka_atom_data)
# features
pka_features = pka_rdkit_wrapper.find_features()
print(pka_features)
# bond types
pka_bond_data = pka_rdkit_wrapper.bonds_data()
print(pka_bond_data)
# bond map
pka_bond_map = pka_rdkit_wrapper.bonds_map()
# molecule data
pka_molecule_data = pka_rdkit_wrapper.molecule_data()
print(pka_molecule_data)
# get the coordinates from a conformer
pka_coords = pka_rdkit_wrapper.get_conformer_coords(0)
# create types from data sources
# AtomTypes
pka_atom_types = []
for atom_data in pka_atom_data:
    atom_type_name = "PKAAtom{0}Type".format(atom_data['name'])
    atom_type = AtomType(atom_type_name, **atom_data)
    pka_atom_types.append(atom_type)
# BondTypes: each bond's endpoints are looked up by rdkit atom index
pka_bond_types = []
for bond_data in pka_bond_data:
    bond_type_name = "PKABond{0}Type".format(bond_data['name'])
    atom_types = (pka_atom_types[bond_data['rdkit_atom_idxs'][0]],
                  pka_atom_types[bond_data['rdkit_atom_idxs'][1]])
    bond_type = BondType(bond_type_name, atom_types=atom_types, **bond_data)
    pka_bond_types.append(bond_type)
# MoleculeType, assembled manually from the pieces above...
PKA1Type = MoleculeType("PKAType", atom_types=pka_atom_types,
                        bond_types=pka_bond_types, bond_map=pka_bond_map,
                        **pka_molecule_data)
# ...or using the rdkit wrapper converter, which does the same in one call
PKA2Type = pka_rdkit_wrapper.make_molecule_type()
# find features
PKA3Type = pka_rdkit_wrapper.make_molecule_type(find_features=True)
### making actual Atoms, Bonds, and Molecules
# the CoordArray: shared storage that atoms can register coordinates in
array = np.array([[0,0,0], [0,0,1], [1,0,0]])
atom_array = CoordArray(array)
print(atom_array)
# the Atoms
atom1 = Atom(np.array([5,5,5]), atom_type=Atom1Type)
print(atom1)
print(atom1.coords)
atom2 = Atom(np.array([6,6,6]), atom_array=atom_array, atom_type=Atom2Type)
print(atom2)
print(atom2.coords)
print("testing overlap of two atoms")
print(atom2.overlaps(atom1))
# an atom referencing an existing row of the coordinate array
atom3 = Atom(atom_array=atom_array, array_idx=0, atom_type=Atom3Type)
print(atom3)
print(atom3.coords)
atoms = [atom1, atom2, Atom(np.array([0,1,0]), atom_type=Atom2Type)]
# selection of atoms is fun
atomsel = IndexedSelection(atoms, [0,1])
# Bonds
bond = Bond(atoms, atom_ids=(0,1), bond_type=Bond1Type)
# consecutive atom pairs: (0,1), (1,2), ...
# NOTE(review): on Python 3, zip() yields a one-shot iterator; `idx` is
# consumed entirely by the comprehension below, which is its only use.
idx_a = range(len(atoms))[:-1]
idx_b = [a+1 for a in idx_a]
idx = zip(idx_a, idx_b)
bonds = [Bond(atoms, bond_idx, bond_type=Bond1Type) for bond_idx in idx]
# getting the atoms out of bonds
print("accessing bonds from an atom")
print("Is an atom in a bond?")
print(atoms[0].isin_bond)
print("how many bonds is it in")
print("first atom", len(atoms[0].bonds))
print("second atom", len(atoms[1].bonds))
print("get the bonds themselves")
print(atoms[0].bonds)
print("get the other atom in the bond")
bond = atoms[0].bonds[0]
other_atom = next((a for a in bond.atoms if a is not atoms[0]))
# using the class's method
print(atoms[0].adjacent_atoms)
# Molecule
mol = Molecule(atoms, bonds, mol_type=Molecule1Type)
print(mol)
print("atom_types in mol")
print(mol.atom_types)
print("Making a mastic.Molecule from the RDKitMoleculeWrapper data")
pka_mol = PKA3Type.to_molecule(pka_coords)
# pka_mol = Molecule(mol_type=pka_type, coords=pka_coords)
print(pka_mol)
print(pka_mol.molecule_type)
print("testing overlap of two molecules")
print(pka_mol.overlaps(mol))
# grab representative objects for inspection
pka_atom_type = PKA3Type.atom_types[0]
pka_atom = pka_mol.atoms[0]
pka_bond = pka_mol.bonds[0]
pka_feature = pka_mol.features[1]
salotz/mast
|
prototypes/molecule_example.py
|
Python
|
mit
| 5,003
|
[
"RDKit"
] |
513bba67ad258a571e804c4a2778a8c1b5b737112018bcdf1ae0b0414805f25d
|
from openbabel import OBMol, OBConversion, pybel
import re
# Accepts "InChI=<digit>[S]/" at the start of the string, e.g. "InChI=1S/".
inchi_validator = re.compile('InChI=[0-9]S?\\/')

# This function only validates the first part. It does not guarantee
# that the entire InChI is valid.
def validate_start_of_inchi(inchi):
    """Raise if *inchi* does not begin with a plausible 'InChI=<ver>/' prefix."""
    if inchi_validator.match(inchi) is None:
        raise Exception('Invalid InChI: "' + inchi + '"')
# gen3d should be true for 2D input formats such as inchi or smiles
def convert_str(str_data, in_format, out_format, gen3d=False,
                add_hydrogens=False, perceive_bonds=False, out_options=None,
                gen3d_forcefield='mmff94', gen3d_steps=100):
    """Convert a chemical-structure string between formats with Open Babel.

    Returns a (converted_string, mime_type) tuple.
    """
    # Make sure that the start of InChI is valid before passing it to
    # Open Babel, or Open Babel will crash the server.
    if in_format.lower() == 'inchi':
        validate_start_of_inchi(str_data)
    out_options = {} if out_options is None else out_options

    conv = OBConversion()
    conv.SetInFormat(in_format)
    conv.SetOutFormat(out_format)

    obMol = OBMol()
    conv.ReadString(obMol, str_data)

    if add_hydrogens:
        obMol.AddHydrogens()

    if gen3d:
        # Generate 3D coordinates for the input
        pybel.Molecule(obMol).make3D(gen3d_forcefield, gen3d_steps)

    if perceive_bonds:
        obMol.ConnectTheDots()
        obMol.PerceiveBondOrders()

    for option, value in out_options.items():
        conv.AddOption(option, conv.OUTOPTIONS, value)

    return (conv.WriteString(obMol), conv.GetOutFormat().GetMIMEType())
def to_inchi(str_data, in_format):
    """Return (inchi, inchikey) for the given structure string."""
    conv = OBConversion()
    conv.SetInFormat(in_format)
    mol = OBMol()
    conv.ReadString(mol, str_data)

    conv.SetOutFormat('inchi')
    inchi = conv.WriteString(mol).rstrip()
    # With the 'K' output option the inchi writer emits the InChIKey instead.
    conv.SetOptions('K', conv.OUTOPTIONS)
    inchikey = conv.WriteString(mol).rstrip()
    return (inchi, inchikey)
def to_smiles(str_data, in_format):
    """Return (canonical_smiles, mime_type) for the given structure string."""
    # The smiles has returns at the end of it, and may contain
    # a return in the middle with a common name. Get rid of
    # all of these by keeping only the first whitespace-separated field.
    # Use canonical smiles ('can' output format).
    smiles, mime = convert_str(str_data, in_format, 'can')
    return (smiles.strip().split()[0], mime)
def atom_count(str_data, in_format):
    """Return the number of atoms in the given structure string."""
    conv = OBConversion()
    conv.SetInFormat(in_format)
    mol = OBMol()
    conv.ReadString(mol, str_data)
    return mol.NumAtoms()
def get_formula(str_data, in_format):
    """Return the molecular formula in "Hill Order" (C first, then H,
    then alphabetical) for the given structure string."""
    if in_format == 'inchi':
        # Inchi must start with 'InChI='
        if not str_data.startswith('InChI='):
            str_data = 'InChI=' + str_data
        # Validate even when the prefix was already present: previously
        # validation was skipped in that case, and an unchecked InChI can
        # crash Open Babel (see validate_start_of_inchi / convert_str).
        validate_start_of_inchi(str_data)
    mol = OBMol()
    conv = OBConversion()
    conv.SetInFormat(in_format)
    conv.ReadString(mol, str_data)
    return mol.GetFormula()
def properties(str_data, in_format, add_hydrogens=False):
    """Return a dict with the atom count, formula, heavy atom count,
    mass, and spaced formula of the given structure string."""
    if in_format == 'inchi':
        # Inchi must start with 'InChI='
        if not str_data.startswith('InChI='):
            str_data = 'InChI=' + str_data
        # Validate even when the prefix was already present: previously
        # validation was skipped in that case, and an unchecked InChI can
        # crash Open Babel (see validate_start_of_inchi / convert_str).
        validate_start_of_inchi(str_data)
    mol = OBMol()
    conv = OBConversion()
    conv.SetInFormat(in_format)
    conv.ReadString(mol, str_data)
    if add_hydrogens:
        mol.AddHydrogens()
    props = {}
    props['atomCount'] = mol.NumAtoms()
    props['formula'] = mol.GetFormula()
    props['heavyAtomCount'] = mol.NumHvyAtoms()
    props['mass'] = mol.GetMolWt()
    props['spacedFormula'] = mol.GetSpacedFormula()
    return props
def to_svg(str_data, in_format):
    """Render the structure as SVG (transparent background, black bonds).

    Returns a (svg_string, mime_type) tuple.
    """
    svg_options = {
        'b': 'none', # transparent background color
        'B': 'black' # black bonds color
    }
    return convert_str(str_data, in_format, 'svg', out_options=svg_options)
|
OpenChemistry/mongochemserver
|
flask/openbabel/src/openbabel_api.py
|
Python
|
bsd-3-clause
| 3,803
|
[
"Open Babel",
"Pybel"
] |
f72408618243eddec47e5e18e70d3e9da9025b565b11c2e08d011bac5646ac48
|
# -*-python-*-
#
# Copyright (C) 1999-2006 The ViewCVS Group. All Rights Reserved.
#
# By using this file, you agree to the terms and conditions set forth in
# the LICENSE.html file which can be found at the top level of the ViewVC
# distribution or at http://viewvc.org/license-1.html.
#
# For more information, visit http://viewvc.org/
#
# -----------------------------------------------------------------------
import string
# note: this will raise an ImportError if it isn't available. the rcsparse
# package will recognize this and switch over to the default parser.
from mx import TextTools
import common
# for convenience
_tt = TextTools
# Characters legal inside an RCS identifier: printable ASCII plus Latin-1,
# minus RCS syntax characters. (Python 2: map() returns a list here.)
_idchar_list = map(chr, range(33, 127)) + map(chr, range(160, 256))
_idchar_list.remove('$')
_idchar_list.remove(',')
#_idchar_list.remove('.') leave as part of 'num' symbol
_idchar_list.remove(':')
_idchar_list.remove(';')
_idchar_list.remove('@')
_idchar = string.join(_idchar_list, '')
_idchar_set = _tt.set(_idchar)
_onechar_token_set = _tt.set(':;')
_not_at_set = _tt.invset('@')
# tag-object codes appended to the taglist by the TextTools tag tables
_T_TOKEN = 30
_T_STRING_START = 40
_T_STRING_SPAN = 60
_T_STRING_END = 70
# codes describing HOW a chunk's parse ended (always the last taglist item)
_E_COMPLETE = 100 # ended on a complete token
_E_TOKEN = 110 # ended mid-token
_E_STRING_SPAN = 130 # ended within a string
_E_STRING_END = 140 # ended with string-end ('@') (could be mid-@@)
_SUCCESS = +100
_EOF = 'EOF'
_CONTINUE = 'CONTINUE'
_UNUSED = 'UNUSED'
# continuation of a token over a chunk boundary
_c_token_table = (
  (_T_TOKEN, _tt.AllInSet, _idchar_set),
  )
class _mxTokenStream:
# the algorithm is about the same speed for any CHUNK_SIZE chosen.
# grab a good-sized chunk, but not too large to overwhelm memory.
# note: we use a multiple of a standard block size
CHUNK_SIZE = 192 * 512 # about 100k
# CHUNK_SIZE = 5 # for debugging, make the function grind...
  def __init__(self, file):
    # file: the open RCS ",v" file to tokenize
    self.rcsfile = file
    # completed tokens; _parse_chunk keeps this in reverse order so the
    # next token can be popped from the end
    self.tokens = [ ]
    # (token_type, chunk_list) for a token cut off at a chunk boundary,
    # or None when the last chunk ended cleanly
    self.partial = None
    # index just past a string's closing '@' (set via _set_end callback)
    self.string_end = None
  def _parse_chunk(self, buf, start=0):
    """Tokenize one buffer of RCS data.

    Appends all complete tokens found in buf[start:] to self.tokens (kept
    in reverse order), and records any token cut off at the end of the
    buffer in self.partial so the next chunk can continue it.
    """
    buflen = len(buf)
    assert start < buflen
    # construct a tag table which refers to the buffer we need to parse.
    # (mx.TextTools tag table: each row is (tagobj, command, argument,
    # jump-on-fail, jump-on-success); the final _E_* AppendTagobj entry
    # records how the chunk ended.)
    table = (
      # ignore whitespace. with or without whitespace, move to the next rule.
      (None, _tt.AllInSet, _tt.whitespace_set, +1),
      (_E_COMPLETE, _tt.EOF + _tt.AppendTagobj, _tt.Here, +1, _SUCCESS),
      # accumulate token text and exit, or move to the next rule.
      (_UNUSED, _tt.AllInSet + _tt.AppendMatch, _idchar_set, +2),
      (_E_TOKEN, _tt.EOF + _tt.AppendTagobj, _tt.Here, -3, _SUCCESS),
      # single character tokens exit immediately, or move to the next rule
      (_UNUSED, _tt.IsInSet + _tt.AppendMatch, _onechar_token_set, +2),
      (_E_COMPLETE, _tt.EOF + _tt.AppendTagobj, _tt.Here, -5, _SUCCESS),
      # if this isn't an '@' symbol, then we have a syntax error (go to a
      # negative index to indicate that condition). otherwise, suck it up
      # and move to the next rule.
      (_T_STRING_START, _tt.Is + _tt.AppendTagobj, '@'),
      (None, _tt.Is, '@', +4, +1),
      (buf, _tt.Is, '@', +1, -1),
      (_T_STRING_END, _tt.Skip + _tt.AppendTagobj, 0, 0, +1),
      (_E_STRING_END, _tt.EOF + _tt.AppendTagobj, _tt.Here, -10, _SUCCESS),
      (_E_STRING_SPAN, _tt.EOF + _tt.AppendTagobj, _tt.Here, +1, _SUCCESS),
      # suck up everything that isn't an AT. go to next rule to look for EOF
      (buf, _tt.AllInSet, _not_at_set, 0, +1),
      # go back to look for double AT if we aren't at the end of the string
      (_E_STRING_SPAN, _tt.EOF + _tt.AppendTagobj, _tt.Here, -6, _SUCCESS),
      )
    success, taglist, idx = _tt.tag(buf, table, start)
    if not success:
      ### need a better way to report this error
      raise common.RCSIllegalCharacter()
    assert idx == buflen
    # pop off the last item: the _E_* code saying how the chunk ended
    last_which = taglist.pop()
    # Collapse each _T_STRING_START.._T_STRING_END run in the taglist into
    # a single joined string token ('@@' escapes were split into pieces).
    i = 0
    tlen = len(taglist)
    while i < tlen:
      if taglist[i] == _T_STRING_START:
        j = i + 1
        while j < tlen:
          if taglist[j] == _T_STRING_END:
            s = _tt.join(taglist, '', i+1, j)
            del taglist[i:j]
            tlen = len(taglist)
            taglist[i] = s
            break
          j = j + 1
        else:
          # string runs off the end of the chunk: stash pieces in partial
          assert last_which == _E_STRING_SPAN
          s = _tt.join(taglist, '', i+1)
          del taglist[i:]
          self.partial = (_T_STRING_SPAN, [ s ])
          break
      i = i + 1
    # figure out whether we have a partial last-token
    if last_which == _E_TOKEN:
      self.partial = (_T_TOKEN, [ taglist.pop() ])
    elif last_which == _E_COMPLETE:
      pass
    elif last_which == _E_STRING_SPAN:
      assert self.partial
    else:
      # chunk ended exactly on a string's '@' -- could be a doubled '@@'
      assert last_which == _E_STRING_END
      self.partial = (_T_STRING_END, [ taglist.pop() ])
    # prepend new tokens (reversed) so get()-style pops come off the end
    taglist.reverse()
    taglist.extend(self.tokens)
    self.tokens = taglist
  def _set_end(self, taglist, text, l, r, subtags):
    # CallTag callback used by the string-scanning tag table in
    # _handle_partial: records the offset at which the string terminator
    # (an '@' not doubled as '@@') was seen.
    self.string_end = l
  def _handle_partial(self, buf):
    """Finish the token or @-string left dangling at the end of the
    previous chunk, using the start of BUF.

    self.partial is a (kind, chunks) pair where chunks are the pieces
    accumulated so far.  Returns the index into BUF at which normal
    parsing should resume.
    """
    which, chunks = self.partial
    if which == _T_TOKEN:
      success, taglist, idx = _tt.tag(buf, _c_token_table)
      if not success:
        # The start of this buffer was not a token. So the end of the
        # prior buffer was a complete token.
        self.tokens.insert(0, string.join(chunks, ''))
      else:
        assert len(taglist) == 1 and taglist[0][0] == _T_TOKEN \
               and taglist[0][1] == 0 and taglist[0][2] == idx
        if idx == len(buf):
          #
          # The whole buffer was one huge token, so we may have a
          # partial token again.
          #
          # Note: this modifies the list of chunks in self.partial
          #
          chunks.append(buf)
          # consumed the whole buffer
          return len(buf)
        # got the rest of the token.
        chunks.append(buf[:idx])
        self.tokens.insert(0, string.join(chunks, ''))
      # no more partial token
      self.partial = None
      return idx
    # Otherwise we were inside an @-string.  _T_STRING_END means the prior
    # chunk ended on an '@': if the next char is another '@' it was an
    # escaped '@@' and the string continues; otherwise the string is done.
    if which == _T_STRING_END:
      if buf[0] != '@':
        self.tokens.insert(0, string.join(chunks, ''))
        return 0
      chunks.append('@')
      start = 1
    else:
      start = 0
    # Scan forward for the closing '@'; _set_end records where it was.
    self.string_end = None
    string_table = (
      (None, _tt.Is, '@', +3, +1),
      (_UNUSED, _tt.Is + _tt.AppendMatch, '@', +1, -1),
      (self._set_end, _tt.Skip + _tt.CallTag, 0, 0, _SUCCESS),
      (None, _tt.EOF, _tt.Here, +1, _SUCCESS),
      # suck up everything that isn't an AT. move to next rule to look
      # for EOF
      (_UNUSED, _tt.AllInSet + _tt.AppendMatch, _not_at_set, 0, +1),
      # go back to look for double AT if we aren't at the end of the string
      (None, _tt.EOF, _tt.Here, -5, _SUCCESS),
      )
    success, unused, idx = _tt.tag(buf, string_table,
                                   start, len(buf), chunks)
    # must have matched at least one item
    assert success
    if self.string_end is None:
      # Ran off the end of the buffer while still inside the string.
      assert idx == len(buf)
      self.partial = (_T_STRING_SPAN, chunks)
    elif self.string_end < len(buf):
      # Saw a definite terminator within this buffer: string complete.
      self.partial = None
      self.tokens.insert(0, string.join(chunks, ''))
    else:
      # Buffer ended exactly on an '@'; need the next chunk to decide.
      self.partial = (_T_STRING_END, chunks)
    return idx
def _parse_more(self):
buf = self.rcsfile.read(self.CHUNK_SIZE)
if not buf:
return _EOF
if self.partial:
idx = self._handle_partial(buf)
if idx is None:
return _CONTINUE
if idx < len(buf):
self._parse_chunk(buf, idx)
else:
self._parse_chunk(buf)
return _CONTINUE
def get(self):
try:
return self.tokens.pop()
except IndexError:
pass
while not self.tokens:
action = self._parse_more()
if action == _EOF:
return None
return self.tokens.pop()
# _get = get
# def get(self):
token = self._get()
print 'T:', `token`
return token
def match(self, match):
if self.tokens:
token = self.tokens.pop()
if token != match:
raise RuntimeError, ('Unexpected parsing error in RCS file.\n'
'Expected token: %s, but saw: %s'
% (match, token))
else:
token = self.get()
if token != match:
raise RuntimeError, ('Unexpected parsing error in RCS file.\n'
'Expected token: %s, but saw: %s'
% (match, token))
  def unget(self, token):
    # Push TOKEN back so the next get() returns it again.
    self.tokens.append(token)
  def mget(self, count):
    "Return multiple tokens. 'next' is at the end."
    # Keep parsing until COUNT tokens are buffered; tokens are stored in
    # reverse order, so the slice below yields them with 'next' last.
    while len(self.tokens) < count:
      action = self._parse_more()
      if action == _EOF:
        ### fix this
        raise RuntimeError, 'EOF hit while expecting tokens'
    result = self.tokens[-count:]
    del self.tokens[-count:]
    return result
class Parser(common._Parser):
  # Concrete RCS parser wired to the mx.TextTools-based token stream.
  stream_class = _mxTokenStream
|
foresthz/fusion5.1
|
www/scm/viewvc/lib/vclib/ccvs/rcsparse/texttools.py
|
Python
|
gpl-2.0
| 8,816
|
[
"VisIt"
] |
370033af1c150e6582b28afaa5cacbc7fcbf1b1f38d9bebd66856f71ae59989d
|
###############################################################################
# Implementation of kernel multi-view spectral algorithm of Song et al. (2014)
#
# Author: E.D. Gutierrez (edg@icsi.berkeley.edu)
# Created: 24 March 2016
# Last modified: 29 March 2016
#
# Sample usage: see knbTest.py. The main functions are kernHMM and kernXMM
#
###############################################################################
import numpy as np
import scipy
import scipy.sparse.linalg
import scipy.stats as stats
from scipy.spatial.distance import pdist,cdist,squareform
# NOTE(review): machine-specific path hack (author's Windows home dir);
# breaks portability -- consider removing or making configurable.
import sys; sys.path.append('c:/users/e4gutier/documents/')
import tentopy
import itertools
inner, outer = 100,100 #number of inner and outer iterations for tensor power method
# NOTE(review): pdist2 accepts exactly (X, metric); callers passing extra
# metric parameters (e.g. a Minkowski order) will get a TypeError.
pdist2 = lambda X,Y: squareform(pdist(X,Y)) #compute square form of pairwise distances
# <codecell>
def kernHMM(X,k,kernel='gaussian',symmetric=False,var=1, xRange=None):
  """
  Main function for learning Hidden Markov Model with D-dimensional obs.
  Inputs:
    X: D x (m+2) matrix with m+2 samples of dimension D
    k: rank of model (number of hidden states)
    symmetric: whether the model has symmetric views or not (should be False
      for HMM)
    xRange: range of X's for which to compute p(x|h). each row is a point.
    var: variance of the kernel (for most kernels). for beta kernel, var is a
      list with var[0] = numObs and var[1] = desired dimensionality of
      output feature map.
  Outputs:
    pXbarH: probability densities of hidden states (i.e., p(x|h)) at points
      specified by xRange (or at the training points when xRange is None).
  """
  if xRange is not None:
    # Only coerce/reshape when a query range was actually supplied;
    # np.matrix(None) is not valid.
    xRange = np.matrix(xRange)
    if xRange.shape[0]==1:
      xRange = xRange.T
  (K,L,G) = computeKerns(X,kernel,symmetric,var)
  (A,pi) = kernSpecAsymm(K,L,G,k)
  # BUG FIX: this previously tested `xrange is None` (the Python 2 builtin,
  # never None), so the else branch always ran -- and then called
  # crossComputeKerns without its required kernel/symmetric/var arguments.
  if xRange is None:
    pXbarH = G*A
  else:
    pXbarH = crossComputeKerns(xRange,np.matrix(X)[2:,:],kernel,symmetric,var)*A
  return pXbarH
# <codecell>
def kernXMM(X,k,queryX=None, kernel='gaussian', var=1, symmetric=False):
  """
  Main function for learning three-view mixture model
  Inputs:
    X: D x 3 matrix with m observations from each of 3 views
    k: rank of model (number of hidden states)
  Optional inputs:
    queryX: D_test x 1 matrix of test points for which to compute p(x|h). If
      queryX is None, queryX is set to X[:,2]
    kernel: kernel to use for smoothing probability distributions p(x|h)
    var: variance of kernel (smoothing).
    symmetric: whether to use the symmetric version of algorithm (set to False)
  Outputs:
    O_h: probability densities p(x|h); a discretized finite-dimensional
      version of observation operator (observation matrix)
    T_h: probability densities p(h_2|h_1); a discretized finite-dimensional
      version of transition operator (transition matrix)
  """
  (K,L,G) = computeKerns3(X,kernel,symmetric,var)
  if symmetric:
    # np.hstack takes a tuple of arrays (was np.hstack(K,L) -> TypeError).
    (A,pi) = kernSpecSymm(np.hstack((K,L)), np.hstack((L,K)),k)
  else:
    (A,pi) = kernSpecAsymm(K,L,G,k,view=2)
  # Removed a dead `if compute_T:` block: compute_T was never defined
  # (guaranteed NameError) and its result A3 was never used.
  pinvA = np.linalg.pinv(A)
  if queryX is None:
    O_h = G*A
    T_h = pinvA*L*G*pinvA.T
  else:
    KL = crossComputeKerns(queryX,X[:,2].T,kernel,symmetric,var)
    O_h = KL*A
    T_h = pinvA*L*KL*pinvA.T
  return O_h, T_h
# <codecell>
def returnKernel(kernel, var=1):
  """
  Return a kernel function XX -> (m x m) Gram matrix for the kernel named
  by `kernel` (case-insensitive).

  var: bandwidth/variance for most kernels; the covariance matrix for
    'mahalanobis'; a pair (N, n) for the beta kernels.
  Raises ValueError for an unrecognised kernel name (previously the
  function fell through and raised UnboundLocalError).
  """
  kernel = kernel.lower()
  if kernel in ['gaussian','normal','l2']:
    kern = lambda XX: np.exp(-pdist2(XX,'sqeuclidean')/var)
  elif kernel in ['laplace','laplacian','l1']:
    # pdist2 only accepts (X, metric); pass the Minkowski order to pdist
    # directly (the old 3-arg pdist2 call raised TypeError).
    kern = lambda XX: np.exp(-squareform(pdist(XX,'minkowski',p=1))/var)
  elif kernel in ['dirac', 'delta','kronecker']:
    equals = lambda u,v: 1 - (np.array(u)==np.array(v)).all()
    kern = lambda XX: pdist2(XX,equals)
  elif kernel in ['mahalanobis']:
    VInv = np.linalg.inv(var)  # was np.inv, which does not exist
    kern = lambda XX: np.exp(-np.power(squareform(pdist(XX,'mahalanobis',VI=VInv)),2))
  elif kernel in ['beta']:
    dot = lambda XX: XX.dot(XX.T)
    kern = lambda XX: dot(np.matrix(phi_beta(XX,var[0],var[1])))
  elif kernel in ['beta_shifted','beta shifted']:
    dot = lambda XX: XX.dot(XX.T)
    kern = lambda XX: dot(np.matrix(phi_beta_shifted(XX,var[0],var[1])))
  else:
    raise ValueError('unknown kernel: %s' % kernel)
  return kern
# <codecell>
def crossComputeKerns(X,Y,kernel,symmetric,var=1):
  """
  Compute the pairwise cross-kernel matrix between the rows of X and Y.
  Inputs:
    X: m x D matrix with m samples of dimension D
    Y: n x D matrix with n samples of dimension D
    kernel: (string) name of kernel being computed
    symmetric: unused here; kept for signature compatibility with the
      other kernel helpers
    var: for gaussian kernel, the variance (sigma^2)
         for laplacian kernel, the bandwidth
         for mahalanobis kernel, the covariance matrix
         for delta kernel, None
         for the beta kernels, the pair (N, n)
  Outputs:
    K: cross-kernel matrix of dimension m x n
  Raises ValueError for an unrecognised kernel name.
  """
  kernel = kernel.lower()
  if kernel in ['gaussian','normal','l2']:
    kern = lambda XX,YY: np.exp(-cdist(XX,YY,'sqeuclidean')/var)
  elif kernel in ['laplace','laplacian','l1']:
    # Pass the Minkowski order as a keyword; modern scipy rejects it as a
    # bare 4th positional argument.
    kern = lambda XX,YY: np.exp(-cdist(XX,YY,'minkowski',p=1)/var)
  elif kernel in ['dirac', 'delta','kronecker']:
    equals = lambda u,v: 1 - (np.array(u)==np.array(v)).all()
    kern = lambda XX,YY: cdist(XX,YY,equals)
  elif kernel in ['mahalanobis']:
    VInv = np.linalg.inv(var)  # was np.inv, which does not exist
    kern = lambda XX,YY: np.exp(-np.power(cdist(XX,YY,'mahalanobis',VI=VInv),2))
  elif kernel in ['beta','betadot']:
    kern = lambda XX,YY: np.matrix(phi_beta(XX,var[0],var[1]).dot(
      phi_beta(YY,var[0],var[1]).T))
  elif kernel in ['beta_shifted','beta shifted']:
    kern = lambda XX,YY: np.matrix(phi_beta_shifted(XX,var[0],var[1]).dot(
      phi_beta_shifted(YY,var[0],var[1]).T))
  else:
    raise ValueError('unknown kernel: %s' % kernel)
  K = kern(X,Y)
  return K
# <codecell>
def computeKerns(X,kernel,symmetric, var=1):
  """
  Build the per-view Gram matrices for an HMM from consecutive samples.

  Inputs:
    X: m x D matrix with m samples of dimension D
    kernel: (string) name of the kernel (see returnKernel)
    symmetric: True for the two-view symmetric model, False for the
      three-view (HMM) model
    var: kernel parameter -- variance / bandwidth / covariance matrix /
      None, depending on the kernel
  Outputs: tuple (K, L, G) of view Gram matrices; G is None in the
    symmetric case.
  """
  gram = returnKernel(kernel, var)(X)
  if symmetric:
    # Two overlapping views: samples 0..m-2 and 1..m-1.
    return (gram[:-1,:-1], gram[1:,1:], None)
  # Three overlapping views shifted by one sample each.
  return (gram[:-2,:-2], gram[1:-1,1:-1], gram[2:,2:])
# <codecell>
def computeKerns3(X,kernel,symmetric, var=1):
  """
  Build one Gram matrix per view for three-view data.

  Inputs:
    X: matrix whose three columns hold the samples of views 1..3
    kernel: (string) name of the kernel (see returnKernel)
    symmetric: unused here; kept for signature compatibility
    var: kernel parameter (variance / bandwidth / covariance / None)
  Outputs: tuple (K, L, G) -- the Gram matrix of each view.
  """
  kern = returnKernel(kernel, var)
  views = [np.matrix(X[:,j]).T for j in range(3)]
  return tuple(kern(v) for v in views)
# <codecell>
def kernSpecAsymm(K,L,G,k,view=2,lambda0=1e-2):
  """
  Kernel spectral algorithm (Algorithm 1 of Song et al. 2014), adapted to
  asymmetric views.
  Inputs:
   *K: kernel matrix from view 1 of shape (m x m)
   *L: kernel matrix from view 2 of shape (m x m)
   *G: kernel matrix from view 3 of shape (m x m)
   *k: desired rank (number of hidden states)
   *view: (int) which view to learn.  NOTE(review): only view=2 and view=3
     are implemented; any other value leaves S/beta/T unbound and raises
     at the tentopy.eig call below -- confirm whether view=1 was intended.
   *lambda0: (float) regularization parameter for the generalized
     eigenproblems (also reused below as the tensor eigenvalue output)
  Outputs:
   *A: matrix of shape m x k
   *pi: vector of shape k
  """
  K,L,G = np.matrix(K), np.matrix(L), np.matrix(G)
  m = K.shape[1] # number of samples per view
  if view==2:
    # Whiten the other two views via generalized eigendecompositions,
    # then symmetrize with H before the tensor decomposition.
    S, beta = sortedEig(K*G*K/(m**2),K,k,lambda0)
    t1 = np.matrix(beta.real)*np.matrix(np.diag(np.power(S,-0.5)).real)
    S, beta = sortedEig(G*K*G/(m**2),G,k,lambda0)#Lnk = L*np.matrix(sortedEig(L*K*L,L,k)[1].real)
    t2 = np.matrix(beta.real)*np.matrix(np.diag(np.power(S,-0.5)).real)
    Knk = K*t1; Gnk = G*t2
    H = Gnk*np.linalg.inv(Knk.T*Gnk)*Knk.T #NEEDS TO BE VERIFIED
    (S,beta) = sortedEig(L*H.T*L*H*L/(m**2),L,k,lambda0) #find generalized eigenvectors
    S,beta=S.real,np.matrix(beta.real)
    Sroot = np.matrix(np.diag(np.power(S,-0.5)))
    term1 = L*beta*Sroot
    T = trilinear('I', H.T*term1,term1, H*term1)/m
  elif view==3:
    # Same construction with the roles of L and G exchanged.
    S, beta = sortedEig(K*L*K/(m**2),K,k,lambda0)
    t1 = np.matrix(beta.real)*np.matrix(np.diag(np.power(S,-0.5)).real)
    S, beta = sortedEig(L*K*L/(m**2),L,k,lambda0)#Lnk = L*np.matrix(sortedEig(L*K*L,L,k)[1].real)
    t2 = np.matrix(beta.real)*np.matrix(np.diag(np.power(S,-0.5)).real)
    Knk = K*t1; Lnk = L*t2
    H = Knk*np.linalg.inv(Lnk.T*Knk)*Lnk.T #Symmetrization matrix
    (S,beta) = sortedEig(G*H.T*G*H*G/(m**2),G,k,lambda0) #find generalized eigenvectors
    S,beta=S.real,np.matrix(beta.real)
    Sroot = np.matrix(np.diag(np.power(S,-0.5)))
    term1 = G*beta*Sroot
    T = trilinear('I', H*term1,H.T*term1,term1)/m
  # Robust tensor power method (tentopy) extracts k eigenpairs of T.
  (M,lambda0) = tentopy.eig(T,inner,outer) ;
  M = np.matrix(M[:,:k])
  lambda0 = np.array(lambda0[:k]).flatten()
  A = beta*Sroot*M*np.diag(lambda0)
  pi = np.power(lambda0,-2).T
  return (A,pi)
# <codecell>
def sortedEig(X,M=None,k=None, lambda0=0):
  """
  Solve the (generalized) eigenproblem X*u = b*M*u.

  Inputs:
    X: matrix
    M: optional right-hand-side matrix; when given it is regularized as
      M + lambda0*I so the sparse solver's factorization succeeds
    k: number of eigenpairs to keep.  When None, the dense solver is used
      and all eigenpairs are returned sorted by descending eigenvalue;
      otherwise scipy's sparse eigsh returns k pairs (in eigsh's order).
    lambda0: (float) regularization parameter
  Outputs:
    b: vector of eigenvalues
    U: matrix of eigenvectors; each column is an eigenvector
  """
  if k is None:
    # Dense path: full decomposition, then sort descending by eigenvalue.
    k = X.shape[0]
    if M is None:
      vals, vecs = scipy.linalg.eig(X)
    else:
      vals, vecs = scipy.linalg.eig(X, M+lambda0*np.eye(M.shape[0]))
    order = vals.argsort()[-k:][::-1]
    return vals[order], vecs[:,order]
  # Sparse path: delegate ordering to eigsh, as the original did.
  if M is None:
    return scipy.sparse.linalg.eigsh(X, k)
  return scipy.sparse.linalg.eigsh(X, k, M+lambda0*np.eye(M.shape[0]))
# <codecell>
def kernSpecSymm(K,L,k):
  """
  Kernel Spectral Algorithm (Algorithm 1 from Song et al. (2014)) for
  symmetric views.
  Inputs:
   *K, L: kernel matrices as defined in Sec 5.2 of Song et al. (2014).
     K is indexed as K[:, view*m + i], i.e. it is expected to hold the
     three views stacked column-wise (so m = K.shape[1]/3).
   *k: desired rank of model
  Outputs:
   *A: matrix
   *pi: vector of prior probabilities
  NOTE(review): `m = K.shape[1]/3` relies on Python 2 integer division;
  under Python 3 the float result breaks the K[:,m+kk] indexing below.
  """
  m = K.shape[1]/3
  (S, beta) = scipy.linalg.eig(K*L*K,K)
  S = np.diag(S[:k])
  beta = beta[:,:k]
  qq = np.size(np.power(S,-0.5)*beta.T*K[:,1])
  term1 = np.power(S,-0.5)*beta.T
  # Accumulate the symmetrised third-order moment tensor over the k states.
  T = tentopy.tensor_outer(np.zeros(qq),3)
  for kk in range(k):
    chi_1 = term1*K[:,kk]
    chi_2 = term1*K[:,m+kk]
    chi_3 = term1*K[:,2*m+kk]
    T += symmetricTensor(chi_1,chi_2,chi_3)
  (M, lambda0) = tentopy.eig(T,inner,outer)
  A = beta*np.power(S,-0.5)*M*np.diag(lambda0)
  pi = np.power(lambda0,-2).T
  return (A,pi)
# <codecell>
def trilinear(T,W1,W2,W3):
  """
  Compute the trilinear form T(W1,W2,W3).  If T == 'I', it is taken to be
  the third-order identity tensor I with I[i,j,k] = delta_{(i==j==k)}.
  Inputs:
    T: 3rd-order tensor of shape (N_T x N_T x N_T), or the string 'I'
    W1,W2,W3: each Wi is a vector or matrix of shape (N_T x N_i); vectors
      are promoted to single-column matrices
  Returns:
    X3: trilinear form of shape (N_1 x N_2 x N_3)
  """
  def matrix(W):
    # Promote a 1-d vector to an (N_T x 1) column matrix.
    if len(W.shape)==1:
      return np.matrix(W).T
    else:
      return W
  W1,W2,W3 = matrix(W1),matrix(W2),matrix(W3)
  # range() instead of xrange so this runs on Python 2 and 3 alike.
  N1 = range(W1.shape[1])
  N2 = range(W2.shape[1])
  N3 = range(W3.shape[1])
  NT = range(W1.shape[0])
  X3 = np.zeros((W1.shape[1], W2.shape[1], W3.shape[1]))
  # TODO: figure out the equivalent numpy routines (see fast_trilinear)
  if type(T) is str:
    if T == 'I':
      for (i1,i2,i3,j) in itertools.product(N1,N2,N3,NT):
        X3[i1,i2,i3] += W1[j,i1] * W2[j,i2] * W3[j,i3]
  else:
    for (i1,i2,i3,j1,j2,j3) in itertools.product(N1,N2,N3,NT,NT,NT):
      X3[i1,i2,i3] += T[j1,j2,j3] * W1[j1,i1] * W2[j2,i2] * W3[j3,i3]
  return X3
def fast_trilinear(T, W1, W2, W3):
  """
  Vectorized equivalent of trilinear(): contract the 3rd-order tensor T
  with W1, W2, W3 along its three modes via dim_stable_tpm/tensordot.

  T may be the string 'I', meaning the third-order identity tensor.
  """
  NT = W1.shape[0]
  # Materialize T if it is given implicitly.
  if type(T) is str:
    if T == 'I':
      M = np.zeros((NT, NT, NT))
      # range() instead of xrange for Python 2/3 compatibility.
      for i in range(NT):
        M[i,i,i] = 1
    else:
      # Previously any other string left M unbound (UnboundLocalError).
      raise ValueError('unknown implicit tensor: %s' % T)
  else:
    M = T
  M1 = dim_stable_tpm(M, W1, 0)
  M12 = dim_stable_tpm(M1, W2, 1)
  M123 = dim_stable_tpm(M12, W3, 2)
  return M123
# <codecell>
def dim_stable_tpm(M, W, axis):
  """
  Tensor-times-matrix product along AXIS, keeping mode order stable.

  np.tensordot puts the new mode last; the transpose moves it back to
  AXIS so the result's mode ordering matches the input's.
  """
  dim = len(np.shape(M))
  T = np.tensordot(M, W, axes = ([axis], [0]))
  # list(range(...)) so the concatenation works on Python 3, where range
  # objects cannot be added (this was py2-only `range(...) + [...]`).
  rear_dims = tuple(list(range(0, axis)) + [dim-1] + list(range(axis, dim-1)))
  T = np.transpose(T, rear_dims)
  return T
def medianTrick(X,kernel='gaussian'):
  """
  Bandwidth selection heuristic of Gretton et al. (2006):
  var = 0.5 * median of the pairwise squared Euclidean distances.

  Implicitly returns None for kernels other than 'gaussian', matching the
  original behaviour.
  """
  if kernel != 'gaussian':
    return None
  squared_dists = pdist(X, 'sqeuclidean')
  return 0.5 * np.median(squared_dists)
# <codecell>
def symmetricTensor(a,b,c):
  """
  Symmetrised triple outer product from Sec. 5.2 of Song et al. (2014):
  a (x) b (x) c  +  c (x) a (x) b  +  b (x) c (x) a.
  """
  triple_outer = lambda u, v, w: np.tensordot(np.tensordot(u, v, 0), w, 0)
  return (triple_outer(a, b, c)
          + triple_outer(c, a, b)
          + triple_outer(b, c, a))
# <codecell>
def phi_beta_shifted(x, N, n):
  '''
  Beta-distribution feature encoding of x.

  Input:
    x: scalar or 1-d sequence; each value packs a pair (i, k) as
       x = i*(N+1) + k
    N: values range over [0..N]
    n: dimensionality of the feature map
  Output:
    normalized beta-distribution encoding phi(x); shape (n,) for a scalar
    input, (len(x), n) otherwise.  Rows with k > i are left as zeros.
  '''
  if len(np.shape(x)) == 0:
    i = int(x / (N+1))
    k = int(x) % (N+1)
    if k > i:
      return np.zeros(n)
    # List comprehension instead of bare map(): under Python 3,
    # np.asarray(map(...)) wraps the iterator as a 0-d object array.
    p = np.asarray([beta_interval(t, k, i-k, n)
                    for t in unif_partition(n).tolist()])
    return p / sum(p)
  else:
    p = np.zeros((np.size(x), n))
    for j, item in enumerate(x):
      i = int(item / (N+1))
      k = int(item) % (N+1)
      if k <= i:
        p[j,:] = np.asarray([beta_interval(t, k, i-k, n)
                             for t in unif_partition(n).tolist()])
        p[j,:] = p[j,:] / sum(p[j,:])  # TODO: vectorize this
    return p
#p = np.asarray(map(lambda t: (t ** k) * ( (1-t) ** (i - k) ), unif_partition(n).tolist()));
def phi_beta(x, N, n):
  '''
  Beta-distribution feature encoding of an observed count.

  Input:
    x: observed value in [0..N]
    N: maximum value x can take
    n: dimensionality of the feature map
  Output:
    length-n normalized beta-distribution encoding of phi(x)
  '''
  # List comprehension instead of bare map() for Python 3 compatibility
  # (np.asarray(map(...)) yields a 0-d object array on py3).
  p = np.asarray([beta_interval(t, x, N-x, n)
                  for t in unif_partition(n).tolist()])
  return p / sum(p)
def beta_interval(t, k, l, n):
  """Probability mass of a Beta(k+1, l+1) distribution on the width-1/n
  interval centred at t."""
  a, b = k + 1, l + 1
  upper = stats.beta.cdf(t + 0.5/n, a, b)
  lower = stats.beta.cdf(t - 0.5/n, a, b)
  return upper - lower
def unif_partition(n):
  """Midpoints of n equal-width bins covering [0, 1]."""
  half_width = 1.0 / (2 * n)
  return np.linspace(half_width, 1.0 - half_width, n)
if __name__ == '__main__':
  # NOTE(review): the body below is a bare string literal, so running this
  # module as a script is currently a no-op.  Unquote it (and port the
  # Python 2 print statements) to enable the fast_trilinear demo.
  '''
  a = np.arange(6).reshape(3,2)
  b = np.arange(18).reshape(3,6)
  c = np.arange(24).reshape(3,8)
  m = fast_trilinear('I', a, b, c)
  n = trilinear('I', a, b, c)
  print 'm = '
  print m.shape
  print m
  print 'n = '
  print n.shape
  print n
  print n == m
  '''
|
anapophenic/knb
|
kernelNaiveBayes.py
|
Python
|
cc0-1.0
| 16,306
|
[
"DIRAC",
"Gaussian"
] |
a01bf52fc35f05e86566a98fd1842e9ca66a58b18197b430c4b65b6be2879710
|
import sys
import os
import deepchem
import tempfile, shutil
import numpy as np
import numpy.random
from bace_datasets import load_bace
from deepchem.utils.save import load_from_disk
from deepchem.splits import SpecifiedSplitter
from deepchem.data import Dataset
from deepchem.hyper import HyperparamOpt
from deepchem import metrics
from deepchem.metrics import Metric
from deepchem.utils.evaluate import Evaluator
from deepchem.models.keras_models.fcnet import MultiTaskDNN
from deepchem.models.keras_models import KerasModel
def bace_dnn_model(mode="classification", verbosity="high", split="20-80"):
  """Train fully-connected DNNs on the BACE dataset.

  mode: "classification" or "regression" (anything else raises ValueError)
  verbosity: passed through to the deepchem Metric objects
  split: train/test split identifier understood by load_bace

  Performs a random hyperparameter search, then evaluates the best model
  on the train/valid/test/crystal splits, writing per-split CSV and stats
  files and printing the scores.
  """
  (bace_tasks, train_dataset, valid_dataset, test_dataset, crystal_dataset,
   transformers) = load_bace(mode=mode, transform=True, split=split)
  if mode == "regression":
    r2_metric = Metric(metrics.r2_score, verbosity=verbosity)
    rms_metric = Metric(metrics.rms_score, verbosity=verbosity)
    mae_metric = Metric(metrics.mae_score, verbosity=verbosity)
    all_metrics = [r2_metric, rms_metric, mae_metric]
    metric = r2_metric
  elif mode == "classification":
    roc_auc_metric = Metric(metrics.roc_auc_score, verbosity=verbosity)
    accuracy_metric = Metric(metrics.accuracy_score, verbosity=verbosity)
    mcc_metric = Metric(metrics.matthews_corrcoef, verbosity=verbosity)
    # Note sensitivity = recall
    recall_metric = Metric(metrics.recall_score, verbosity=verbosity)
    all_metrics = [accuracy_metric, mcc_metric, recall_metric, roc_auc_metric]
    metric = roc_auc_metric
  else:
    raise ValueError("Invalid mode %s" % mode)
  # Random search over learning rate and decay (5 samples each).
  params_dict = {"learning_rate": np.power(10., np.random.uniform(-5, -3, size=5)),
                 "decay": np.power(10, np.random.uniform(-6, -4, size=5)),
                 "nb_epoch": [40] }
  n_features = train_dataset.get_data_shape()[0]
  def model_builder(model_params, model_dir):
    # BUG FIX: pass the requested mode through -- this was hard-coded to
    # "classification", which silently broke regression runs.
    keras_model = MultiTaskDNN(
        len(bace_tasks), n_features, mode, dropout=.5,
        **model_params)
    return KerasModel(keras_model, model_dir)
  optimizer = HyperparamOpt(model_builder, verbosity="low")
  best_dnn, best_hyperparams, all_results = optimizer.hyperparam_search(
      params_dict, train_dataset, valid_dataset, transformers,
      metric=metric)
  def _report(dataset, label, title):
    # Evaluate best_dnn on one non-empty split; write CSV/stats and print.
    if len(dataset) == 0:
      return
    evaluator = Evaluator(best_dnn, dataset, transformers)
    csv_out = "dnn_%s_%s_%s.csv" % (mode, split, label)
    stats_out = "dnn_%s_%s_%s_stats.txt" % (mode, split, label)
    score = evaluator.compute_model_performance(
        all_metrics, csv_out=csv_out, stats_out=stats_out)
    print("DNN %s set %s: %s" % (title, metric.name, str(score)))
  _report(train_dataset, "train", "Train")
  _report(valid_dataset, "valid", "Valid")
  _report(test_dataset, "test", "Test")
  _report(crystal_dataset, "crystal", "Crystal")
if __name__ == "__main__":
  # Sweep both task types across both train/test splits.
  for label, mode in (("Classifier", "classification"),
                      ("Regressor", "regression")):
    for split in ("20-80", "80-20"):
      print("%s DNN %s:" % (label, split))
      print("--------------------------------")
      bace_dnn_model(mode=mode, verbosity="high", split=split)
|
bowenliu16/deepchem
|
examples/bace/bace_dnn.py
|
Python
|
gpl-3.0
| 4,706
|
[
"CRYSTAL"
] |
69ffbddf7aef1ba1f702f153c1eb708c5df89d42e227d68cc2c88a13c7faa287
|
from __future__ import unicode_literals
from test import TestCase
from web import app
from db import session, User
from nose.tools import eq_
class TestSignup(TestCase):
    """Browser-level (splinter) tests for the signup form."""

    def test_sign_up(self):
        # Direct POST to the endpoint persists the address...
        app.test_client().post('/', data={'email': 'andrew@lorente.name'})
        users = session().query(User.email).all()
        eq_(users, [('andrew@lorente.name',)])
        # ...and the same flow works through the browser UI.
        self.visit('/')
        self.browser.fill('email', 'joe@lewis.name')
        self.browser.find_by_name('go').click()
        assert self.browser.is_text_present('Thanks'), 'rude!'
        users = session().query(User.email).all()
        eq_(users, [('andrew@lorente.name',), ('joe@lewis.name',)])

    def test_valid_emails_get_validated(self):
        # NOTE(review): nothing is submitted after fill(); this assumes
        # client-side validation fires on input/keyup -- confirm.
        self.visit('/')
        self.browser.fill('email', 'eric@holscher.name')
        assert self.browser.is_text_present('valid'), "didn't get validated"

    def test_invalid_emails_get_yelled_about(self):
        self.visit('/')
        self.browser.fill('email', 'aghlaghlaghl')
        assert self.browser.is_text_present('invalid'), "didn't get yelled at"
|
ErinCall/splinter_demo
|
flask/test/test_signup.py
|
Python
|
mit
| 1,095
|
[
"VisIt"
] |
102cdbc3a85a41bba14ace69b329a323147333a9e03f094833be847bef7edbe3
|
#!/usr/bin/env python
from itertools import groupby
import os
import re
import sys
from bokeh.io import output_file, show, save
from bokeh.layouts import gridplot, column
from bokeh.models import Range1d, ColumnDataSource, HoverTool, CrosshairTool
from bokeh.palettes import Category20, brewer, viridis
from bokeh.plotting import figure
import click
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
import pandas as pd
import rasterio
from rasterio.plot import show
import pdb
import projections.utils as utils
import osr
def earth_radius(authalic=False):
  """Return the Earth radius in metres.

  With authalic=True the fixed authalic (equal-area) sphere radius is
  returned; otherwise the WGS84 (EPSG:4326) semi-major axis is queried
  from osr.
  """
  if authalic:
    return 6371007.2
  wgs84 = osr.SpatialReference()
  wgs84.ImportFromEPSG(4326)
  return wgs84.GetSemiMajor()
def rcs2(ds, authalic=False):
  """Per-row cell areas (km^2) of raster DS as a (height, 1) column.

  Cell area depends only on latitude, so one value per raster row
  suffices; callers broadcast it across columns.
  """
  # Latitudes of the first and last cell centres.
  first_centre = ds.affine * (0.5, 0.5)
  last_centre = ds.affine * (ds.width - 0.5, ds.height - 0.5)
  lats = np.linspace(first_centre[1], last_centre[1], ds.height)
  half = ds.res[1] / 2.0
  lat_band = np.sin(np.radians(lats + half)) - np.sin(np.radians(lats - half))
  areas = lat_band * (ds.res[0] * np.pi/180) * earth_radius(authalic) ** 2 / 1e6
  return areas.reshape((areas.shape[0], 1))
def rcs(height, res, left, bottom, right, top):
  """Per-row cell areas (km^2) for a raster given its shape and bounds.

  Same computation as rcs2 but from explicit geometry instead of an open
  dataset.  left/right are accepted for bounds-unpacking compatibility
  but unused -- cell area depends only on latitude.
  """
  half = res[1] / 2.0
  lats = np.linspace(top, bottom + half, height)
  lat_band = np.sin(np.radians(lats + half)) - np.sin(np.radians(lats - half))
  areas = lat_band * (res[0] * np.pi/180) * earth_radius(False) ** 2 / 1e6
  return areas.reshape((areas.shape[0], 1))
def _one(fname, scale=True, band=1):
  """Sum one raster band, optionally weighting each cell by its km^2 area."""
  ds = rasterio.open(fname)
  cell_areas = rcs(ds.height, ds.res, *ds.bounds)
  grid = ds.read(band, masked=True).filled(0)
  # filled() handles nodata; NaNs may still occur in valid cells.
  grid[np.where(np.isnan(grid))] = 0
  weighted = grid * cell_areas if scale else grid
  return weighted.sum()
def one(name, fname, scale=True, band=1):
  # Print NAME and the band total for FNAME; with scale=True each cell is
  # weighted by its area in km^2 (see rcs()).  Duplicates _one() so that
  # `data` and `area` stay available for the density report below.
  ds = rasterio.open(fname)
  area = rcs(ds.height, ds.res, *ds.bounds)
  data = ds.read(band, masked=True).filled(0)
  # Nodata is filled above; clear any remaining NaNs in valid cells.
  data[np.where(np.isnan(data))] = 0
  adj = data * area if scale else data
  print("%-30s: %e" % (name, adj.sum()))
  #print("%10s: %e" % ('max', data.max()))
  if not scale:
    # For unscaled rasters also report the peak per-cell density.
    print("%30s: %e" % ('max', (data / area).max()))
def get_files(dname, what):
  """Group '<scenario>-<what>-<year>.tif' files in DNAME by scenario.

  Returns {scenario: [filename, ...]} with each list in sorted
  (i.e. year) order.
  """
  pattern = re.compile(r'-%s-\d+.tif$' % what)
  stems = [os.path.splitext(f)[0]
           for f in sorted(os.listdir(dname))
           if pattern.search(f)]
  grouped = {}
  for scenario, parts in groupby((s.rsplit('-', 2) for s in stems),
                                 key=lambda s: s[0]):
    grouped[scenario] = ['%s-%s-%s.tif' % (a, b, c)
                         for a, b, c in tuple(parts)]
  return grouped
def pline(p, df, column, legend=None, color='black', line_width=3):
  """Add a labelled line for DF[column] to bokeh figure P.

  The hover-tool 'name' field and the legend share the same label, which
  defaults to the column name.
  """
  if legend is None:
    # BUG FIX: default the label BEFORE building the data source; the
    # 'name' hover column was previously filled with None whenever legend
    # was omitted, because the default was applied too late.
    legend = column
  src = ColumnDataSource(data={
    'year': df.index,
    'data': df[column],
    'name': [legend for n in range(len(df))]
  })
  p.line('year', 'data', source=src, line_width=line_width,
         legend=legend, color=color)
def bokeh_plot(dfs, title=''):
  """Build a bokeh figure with one population line per scenario in DFS."""
  fig = figure(title='Worldwide Human Population (%s)' % title)
  # Category20 needs 3..20 entries; clamp to that range.
  palette = Category20[min(max(3, len(dfs)), 20)]
  for idx, scenario in enumerate(dfs):
    frame = dfs[scenario]
    # Legend label: scenario name up to the first underscore.
    label = scenario.split('_', 1)[0]
    pline(fig, frame, scenario, label, palette[idx], 3)
  fig.add_tools(HoverTool(tooltips=[('Year', '@year'),
                                    ('Population', '@data'),
                                    ('Scenario', '@name')]))
  fig.legend.location = "top_left"
  return fig
def do_plot(dfs, title, bokeh, out):
  # Render the scenario series either via bokeh (saved to OUT when given)
  # or interactively with matplotlib.
  if bokeh:
    p = bokeh_plot(dfs, title)
    if out:
      output_file(out)
      save(p)
    # NOTE(review): with bokeh=True and no output file the figure is built
    # but neither shown nor saved -- confirm this is intended.
  else:
    # Overlay all frames on one matplotlib axes.
    ax = None
    for key in dfs:
      ax = dfs[key].plot(ax=ax)
    ax.set_title('Worldwide Human Population (%s)' % title)
    plt.show()
@click.group(invoke_without_command=True)
@click.pass_context
def cli(ctx):
  # Top-level click group; falls back to a default action when invoked
  # without a subcommand.
  if ctx.invoked_subcommand is None:
    click.echo('I was invoked without subcommand')
    # NOTE(review): `projections` is never bound in this module -- the
    # import is `import projections.utils as utils`, which binds only
    # `utils` -- so this call raises NameError.  Probably meant to invoke
    # a default subcommand; verify intent.
    projections()
@cli.command()
@click.pass_context
@click.argument('series', type=click.Choice(('hyde', 'sps', 'projected', 'all')))
@click.option('--outdir', type=click.Path(dir_okay=True, file_okay=False),
              default='/out/luh2')
@click.option('-o', '--out', type=click.Path(dir_okay=False, file_okay=True))
@click.option('-b', '--bokeh', is_flag=True, default=False)
def plot(ctx, series, outdir, out, bokeh):
  # Plot worldwide population for one data source, or all three sources
  # stacked in a single saved bokeh column when SERIES == 'all'.
  if series == 'hyde':
    dfs = hyde(outdir)
  elif series == 'sps':
    dfs = sps(outdir)
  elif series == 'projected':
    dfs = projected(outdir, 'hpd')
  elif series == 'all':
    # 'all' is bokeh-only: build the three figures and save them stacked.
    p1 = bokeh_plot(hyde(outdir), 'hyde')
    p2 = bokeh_plot(sps(outdir), 'sps')
    p3 = bokeh_plot(projected(outdir), 'projected')
    col = column(p1, p2, p3)
    if out:
      output_file(out)
      save(col)
    return
  do_plot(dfs, series, bokeh, out)
def hyde(outdir):
  """Load the HYDE historical population series as {'historical': frame}."""
  print('hyde')
  ds = rasterio.open('netcdf:' + os.path.join(outdir, 'hyde.nc:popd'))
  # Band index -> calendar year, from the NetCDF time-dimension tags.
  years = tuple(int(ds.tags(band)['NETCDF_DIM_time'])
                for band in ds.indexes)
  frame = pd.DataFrame()
  frame['historical'] = tuple(_one(ds.name, True, years.index(y) + 1)
                              for y in years)
  frame.index = years
  return {'historical': frame}
def sps(outdir, raw=False):
  """Load SSP1-5 population series; one single-column frame per scenario."""
  result = {}
  for scenario in ('ssp%d' % n for n in range(1, 6)):
    print(scenario)
    if raw:
      raise NotImplementedError('SPS unscaled support not implemented')
    ds = rasterio.open('netcdf:' +
                       os.path.join(outdir, 'sps.nc:%s' % scenario))
    # Band index -> calendar year, from the NetCDF time-dimension tags.
    years = tuple(int(ds.tags(band)['NETCDF_DIM_time'])
                  for band in ds.indexes)
    frame = pd.DataFrame()
    frame[scenario] = tuple(ds.read(years.index(y) + 1, masked=True).sum()
                            for y in years)
    frame.index = years
    result[scenario] = frame
  return result
def projected(outdir, what='hpd'):
  """Area-weighted global totals per scenario from projected GeoTIFFs."""
  result = {}
  for scenario, fnames in get_files(outdir, what).items():
    print(scenario)
    frame = pd.DataFrame()
    frame[scenario] = tuple(_one(os.path.join(outdir, f), True, 1)
                            for f in fnames)
    # Index rows by the trailing year in each file name.
    frame.index = tuple(int(os.path.splitext(f)[0].rsplit('-', 1)[-1])
                        for f in fnames)
    result[scenario] = frame
  return result
@cli.command()
def old_school():
  # Ad-hoc sanity checks comparing population totals across datasets.
  # Only the first loop runs -- everything after the early `return` below
  # is disabled scratch code kept for reference.
  for year in (2011, 2012, 2013, 2014):
    one(str(year), '/out/luh2/historical-hpd-%d.tif' %year, True)
  return
  for year in (2010, 2099):
    scenario = 'ssp3'
    one('%s/%d' % (scenario, year),
        'netcdf:%s/luh2/sps.nc:%s' % (utils.outdir(), scenario),
        False, year - 2009)
  for name, fname, scale in (('gluds qd', '/out/luh2/gluds00ag.tif', True),
                             ('v4 qd', '/out/luh2/grumps4.tif', True),
  #                           ('1950', '/Volumes/Vagrant 155/playground/ds/luh2/historical-hpd-1950.tif', True),
                             ('sps3/2015', 'netcdf:/out/luh2/sps.nc:ssp3', False),
                             ('v4', utils.grumps4(), True),
                             ('gluds', utils.grumps1(), True),):
    one(name, fname, scale)
  # ('sps1', '/data/sps/SSP1_NetCDF/total/NetCDF/ssp1_2010.nc', False),
  # ('sps2', '/data/sps/SSP2_NetCDF/total/NetCDF/ssp2_2010.nc', False),
  # ('sps3', '/data/sps/SSP3_NetCDF/total/NetCDF/ssp3_2010.nc', False),
  # ('sps4', '/data/sps/SSP4_NetCDF/total/NetCDF/ssp4_2010.nc', False),
  # ('sps5', '/data/sps/SSP5_NetCDF/total/NetCDF/ssp5_2010.nc', False),
if __name__ == '__main__':
  # Script entry point: dispatch to the click command group.
  cli()
|
ricardog/raster-project
|
attic/hpd-check.py
|
Python
|
apache-2.0
| 7,605
|
[
"NetCDF"
] |
9a046499fac20fc28849f5db0e5aa25ca63bbc6d36ae194d3bd20bffb6b5a063
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 Johnathan "Shaggytwodope" Jenkins <twodopeshaggy@gmail.com>
#
# Distributed under terms of the GPL2 license.
import os
import sys
import urllib.request
import webbrowser
import subprocess
import fcntl
import tkinter
from configparser import ConfigParser
import gi
gi.require_version('WebKit', '3.0')
from gi.repository import WebKit as webkit
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk as gtk
from gi.repository.GdkPixbuf import Pixbuf
from os import stat as os_stat
import datetime
import apt
def run_once():
    """Enforce single-instance execution via an exclusive flock.

    The locked file object is kept in a global so the lock is held for
    the lifetime of the process.  If the lock is already held by another
    instance, run_once_dialog() warns the user and exits.
    """
    global fh
    fh = open(os.path.realpath(__file__), 'r')
    try:
        fcntl.flock(fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except OSError:
        # Lock held by another instance.  (Was a bare `except:`, which
        # also swallowed unrelated errors such as KeyboardInterrupt.)
        run_once_dialog()
def run_once_dialog():
    # Modal warning shown when another instance holds the single-instance
    # lock (see run_once); exits the process once acknowledged.
    # Relies on the module-level `appname` global.
    window = gtk.Window()
    dialog = gtk.MessageDialog(None, 0, gtk.MessageType.WARNING,
                               gtk.ButtonsType.OK, appname + ' - Error')
    dialog.set_default_size(400, 250)
    dialog.set_transient_for(window)
    dialog.format_secondary_text("There is another instance of " + appname +
                                 " already running.")
    response = dialog.run()
    if response == gtk.ResponseType.OK:
        dialog.destroy()
        sys.exit()
    dialog.destroy()
def execute(command, ret=True):
    """Run a shell command.

    With ret identically True (identity check preserved from the
    original), return the first line of the command's stdout; otherwise
    return the subprocess stdout stream with stderr merged in.
    """
    if ret is True:
        pipe = os.popen(command)
        return pipe.readline()
    proc = subprocess.Popen(command,
                            shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    return proc.stdout
def functions(view, frame, req, data=None):
    """WebKit navigation handler: dispatch pseudo-scheme links to actions.

    Frontend links use schemes like about://, admin://, script:// and so
    on.  Returns False for plain file:// URIs so WebKit loads them
    normally, True for everything else to mark the request as handled.

    @param view: WebView emitting the signal (unused)
    @param frame: web frame of the request (unused)
    @param req: navigation request carrying the URI
    @param data: signal user-data (unused)
    """
    uri = req.get_uri()
    # "scheme://path" — the scheme selects the action, path the payload.
    lllink, path = uri.split('://', 1)
    path = path.replace("%20", " ")
    if lllink == "file":
        # Ordinary page load: let WebKit handle it.
        return False
    elif lllink == "about":
        about = gtk.AboutDialog()
        about.set_program_name(appname)
        about.set_version(appver)
        about.set_copyright('Copyright Linux Lite 2016')
        # NOTE(review): missing "()" — this attribute access is a no-op;
        # presumably about.set_wrap_license(True) was intended.
        about.set_wrap_license
        about.set_license(
            '''This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. ''')
        about.set_authors([
            "Johnathan 'ShaggyTwoDope'" +
            " Jenkins\n<shaggytwodope@linuxliteos.com>\n",
            "Jerry Bezencon\n<valtam@linuxliteos.com>\n",
            "Milos Pavlovic\n<mpsrbija@gmail.com>\n",
            "Brian 'DarthLukan' Tomlinson\n<brian.tomlinson@linux.com>\n",
            "Josh Erickson\n<josh@snoj.us>"
        ])
        about.set_comments("Designed for Linux Lite")
        about.set_website("https://www.linuxliteos.com")
        about.set_logo(Pixbuf.new_from_file(app_icon))
        about.set_transient_for(window)
        about.run()
        about.destroy()
    elif lllink == "admin":
        # Launch a module command (may itself invoke gksudo).
        subprocess.Popen(path, shell=True, executable='/bin/bash')
    elif lllink == "script":
        execute("{0}/scripts/{1}".format(app_dir, path))
    elif lllink == "help":
        webbrowser.open('file:///usr/share/doc/litemanual/index.html')
    elif lllink == "forum":
        webbrowser.open('http://www.linuxliteos.com/forums/')
    elif lllink == "website":
        webbrowser.open('http://www.linuxliteos.com/')
    elif lllink == "facebook":
        webbrowser.open('https://www.facebook.com/linuxliteos')
    elif lllink == "twitter":
        webbrowser.open('http://www.twitter.com/linuxlite/')
    elif lllink == "google":
        webbrowser.open('https://plus.google.com/+linuxliteos/')
    elif lllink == "linkedin":
        webbrowser.open('http://www.linkedin.com/in/jerrybezencon')
    elif lllink == "screenshot":
        # Grab the focused window, then hand the image to the helper script.
        os.system("/bin/bash -c 'scrot -u $HOME/liteccshot.png'")
        subprocess.Popen(['/bin/bash', '-c',
                          '/usr/share/litecc/scripts/screenshot'])
    elif lllink == "report":
        subprocess.Popen(['/bin/bash', '-c', 'gksudo /usr/scripts/systemreport'
                          ])
    elif lllink == "update":
        subprocess.Popen(['/bin/bash', '-c', 'gksudo /usr/scripts/updates-gui'
                          ])
    elif lllink == "refresh":
        # Rebuild the page with fresh system information.
        reload()
    return True
def reload():
    """Rebuild the frontend HTML and load it into the embedded browser.

    Called from the 'refresh://' link handler.  Always returns True so
    the navigation request is treated as handled.
    """
    # The original also called get_info("") here; "" matches no info key,
    # so the call had no effect and has been removed.
    frontend = frontend_fill()
    browser.load_html_string(frontend, "file://{0}/frontend/".format(app_dir))
    return True
def connected(host='http://google.com'):
    """Return True if *host* is reachable over HTTP, else False.

    @param str host: URL probed to test internet connectivity.
    """
    try:
        # A timeout keeps the UI from hanging for minutes when packets
        # are silently dropped; any failure means "offline".
        urllib.request.urlopen(host, timeout=10)
        return True
    except Exception:
        return False
def mem_info():
    """Parse /proc/meminfo and return memory figures in bytes.

    @return tuple: (total, active, inactive, free, cached, buffers) —
            each converted from the kB values in /proc/meminfo to bytes.
    """
    # Use a context manager so the file is closed even if parsing raises.
    with open('/proc/meminfo') as f:
        for line in f:
            # /proc/meminfo reports kB; multiply by 1024 for bytes.
            if line.startswith('MemTotal:'):
                mem_total = (int(line.split()[1]) * 1024.0)
            elif line.startswith('Active:'):
                mem_active = (int(line.split()[1]) * 1024.0)
            elif line.startswith('Inactive:'):
                mem_inactive = (int(line.split()[1]) * 1024.0)
            elif line.startswith('MemFree:'):
                mem_free = (int(line.split()[1]) * 1024.0)
            elif line.startswith('Cached:'):
                mem_cached = (int(line.split()[1]) * 1024.0)
            elif line.startswith('Buffers:'):
                mem_buffers = (int(line.split()[1]) * 1024.0)
    return (mem_total, mem_active, mem_inactive, mem_free, mem_cached,
            mem_buffers)
def apt_info():
    """Count packages that a (safe, non-dist) upgrade would change.

    @return int: number of upgradable packages (0 if none).
    """
    cache = apt.Cache()
    # Re-open to make sure the cache reflects the current package state.
    cache.close()
    cache.open()
    upgrades = 0
    # Mark the upgrade in the in-memory cache only; nothing is installed.
    cache.upgrade(dist_upgrade=False)
    changes = cache.get_changes()
    if changes:
        counter = [change.name for change in changes]
        upgrades = (len(counter))
    return upgrades
def get_info(info):
    """Collect one piece of system information, selected by key.

    @param str info: one of 'os', 'desk', 'arc', 'host', 'kernel',
                     'updates', 'processor', 'mem', 'gfx', 'audio',
                     'disk', 'netstatus', 'netip', 'gateway'
    @return str: human-readable value (may contain HTML markup for the
                 frontend); a single space on any failure.
    """
    try:
        if info == "os":
            try:
                # Linux Lite writes its version string to /etc/llver.
                osin = open('/etc/llver', 'r').read().split('\\n')[0]
            except:
                # Fall back to the generic distribution description.
                infocmd = "lsb_release -d | sed 's/Description:[\t]//g'"
                osin = execute(infocmd).split('\\n')[0]
            return osin
        if info == "desk":
            desk_ses = os.environ.get("XDG_SESSION_DESKTOP")
            if desk_ses is None:
                desk_ses = os.environ.get("XDG_CURRENT_DESKTOP")
            if "XFCE" in desk_ses or desk_ses.startswith("xfce"):
                # Extract the version from "xfce4-session x.y.z (Xfce a.b)".
                xfcev = "xfce4-session -V | grep xfce4-session"
                return execute(xfcev).split('(')[1].split(')')[0].split(',')[0]
            elif "ubuntu" in desk_ses:
                return "Unity"
            else:
                return desk_ses
            # NOTE(review): the following is unreachable — every branch
            # above returns first.
            if desk_ses is None:
                desk_ses = "Desktop Unknown"
            return desk_ses
        if info == "arc":
            # Machine hardware name, e.g. x86_64.
            return os.uname()[4]
        if info == "host":
            return os.uname()[1]
        if info == "kernel":
            return "{0} {1}".format(os.uname()[0], os.uname()[2])
        if info == "updates":
            pkgcache = '/var/cache/apt/pkgcache.bin'
            aptcount = apt_info()
            if aptcount == 0:
                count = ''
            elif aptcount == 1:
                count = ' (<font style=\"color: red;\">{0}</font> update available)'.format(
                    aptcount)
            else:
                count = ' (<font style=\"color: red;\">{0}</font> updates available)'.format(
                    aptcount)
            if os.path.isfile(pkgcache):
                # Use the apt package cache's mtime as "last checked" time;
                # green when checked today, red otherwise.
                mtime = os_stat(pkgcache).st_mtime
                modtime = datetime.datetime.fromtimestamp(mtime).strftime(
                    '%Y-%m-%d %H:%M')
                modday = datetime.datetime.fromtimestamp(mtime).strftime(
                    '%Y-%m-%d')
                today = datetime.datetime.today().strftime('%Y-%m-%d')
                if modday == today:
                    updaters = '''<section class="gradient">Last checked on <font style=\"color: green;\">{0}</font>{1} <button style=\"padding-bottom:0px;padding-left:50pxi\" onclick=\"location.href=('update://')\">Run Updates</button></section>'''.format(
                        modtime, count)
                else:
                    updaters = '''<section class="gradient">Last checked on <font style=\"color: red;\">{0}</font>{1} <button style=\"padding-bottom:0px;padding-left:50pxi\" onclick=\"location.href=('update://')\">Run Updates</button></section>'''.format(
                        modtime, count)
            else:
                updaters = '''<section class="gradient">No Update History <button style=\"padding-bottom:0px;padding-left:50pxi\" onclick=\"location.href=('update://')\">Run Updates</button></section>'''
            return updaters
        if info == "processor":
            proc = execute("grep 'model name' /proc/cpuinfo").split(':')[1]
            return proc
        if info == "mem":
            total, active, inactive, free, cached, buffers, = mem_info()
            # "Used" excludes buffers/cache, matching free(1) semantics.
            pie = ((int(total) - int(free)) - (int(buffers) + int(cached)))
            mem_usage = float(pie) * 100 / float(total)
            ramdis = "%14dMB (Used: %8dMB %7.2f%%)" % (
                int(total) / 1048576, pie / 1024 / 1024, mem_usage)
            return ramdis
        if info == "gfx":
            return execute("lspci | grep VGA").split('controller:')[1].split(
                '(rev')[0].split(',')[0]
        if info == "audio":
            audio = execute("lspci | grep 'Audio device:'")
            if len(audio) == 0:
                # Older chips report "audio controller" instead.
                return execute("lspci | grep audio").split('controller:')[
                    1].split('(rev')[0].split(',')[0]
            else:
                return execute("lspci | grep Audio").split('device:')[1].split(
                    '(rev')[0].split(',')[0]
        if info == "disk":
            # Sum usage over all local filesystems df knows about.
            p1 = subprocess.Popen(
                ['df', '-Tlh', '--total', '-t', 'ext4', '-t', 'ext3', '-t',
                 'ext2', '-t', 'reiserfs', '-t'
                 'jfs', '-t', 'ntfs', '-t', 'fat32', '-t', 'btrfs', '-t',
                 'fuseblk', '-t', 'xfs'],
                stdout=subprocess.PIPE).communicate()[0].decode("Utf-8")
            total = p1.splitlines()[-1]
            # Turn "42G" into "42 GB" for display.
            used = total.split()[3].replace(total.split()[3][-1:],
                                            " " + total.split()[3][-1:] + "B")
            size = total.split()[2].replace(total.split()[2][-1:],
                                            " " + total.split()[2][-1:] + "B")
            disk = "{0} (Used: {1})".format(size, used)
            return disk
        if info == "netstatus":
            if connected():
                status = '<font color=green>Active</font>'
            else:
                status = '<font color=red>Not connected</font>'
            return status
        if info == "netip":
            ip = execute("hostname -I").split(' ')
            # hostname -I may list several addresses; show the first.
            if len(ip) > 1:
                ip = ip[0]
            elif ip == "":
                ip = 'None'
            else:
                ip = 'None'
            return ip
        if info == "gateway":
            gateway = execute("route -n | grep 'UG[ \t]' | awk '{print $2}'")
            if len(gateway) == 0:
                gateway = 'None'
            return gateway
    except (OSError, TypeError, Exception) as e:
        # Best-effort: any failure renders as a blank value in the page.
        print(e)
        return " "
def which(program):
    """Locate *program* like the shell `which` command.

    @param str program: bare command name or an explicit path
    @return str/None: path of the executable, or None when not found
    """
    def _runnable(candidate):
        # Must exist as a regular file and carry the execute bit.
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, _basename = os.path.split(program)
    if directory:
        # An explicit path was given: accept it only if it is executable.
        if _runnable(program):
            return program
        return None
    # Bare command name: scan every entry of $PATH in order.
    for entry in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(entry.strip('"'), program)
        if _runnable(candidate):
            return candidate
    return None
def get_modules(section):
    """Render the HTML launcher grid for one settings section.

    Reads every config file in modules/<section>/, keeps only entries
    whose command is actually installed, and returns the concatenated
    launcher <div> markup.

    @param str section: subdirectory name under modules/
    @return str: HTML fragment (or an error paragraph when empty)
    """
    window = gtk.Window()
    try:
        mod_dir = os.listdir("{0}/modules/{1}/".format(app_dir, section))
        mod_dir.sort()
    except Exception:
        # A missing module tree is fatal: tell the user and quit.
        dialog = gtk.MessageDialog(None, 0, gtk.MessageType.WARNING,
                                   gtk.ButtonsType.OK,
                                   'Error Importing Module Data')
        dialog.set_default_size(400, 250)
        dialog.format_secondary_text("No modules could be found." +
                                     " Please reinstall " + appname)
        dialog.set_transient_for(window)
        response = dialog.run()
        if response == gtk.ResponseType.OK:
            dialog.destroy()
            sys.exit()
        dialog.destroy()
    if isinstance(mod_dir, list) and len(mod_dir) < 1:
        return "<p>\"no modules found!\"</p>"
    else:
        parser = ConfigParser()
        admin = ""
        mod_dir.sort()
        for i in mod_dir:
            parser.read("{0}/modules/{1}/{2}".format(app_dir, section, i))
            command = parser.get('module', 'command')
            # Check that the real binary exists, not the gksu(do) wrapper.
            chk = command.split(' ')[0]
            if chk == "gksudo":
                chk = command.split(' ')[1]
            elif chk == "gksu":
                chk = command.split(' ')[1]
            checking = which(chk)
            if checking is not None:
                ico = parser.get('module', 'ico')
                ico = "{0}/frontend/icons/modules/{1}".format(app_dir, ico)
                name = parser.get('module', 'name')
                desc = parser.get('module', 'desc')
                # Escape single quotes so the inline JS onclick stays valid.
                command = command.replace("'", ''' \\' ''')
                admin += '''<div class="launcher" onclick="location.href='admin://{0}'" >
<img src="{1}" onerror='this.src = "/usr/share/litecc/frontend/icons/modules/notfound.png"'/>
<h3>{2}</h3>
<span>{3}</span>
</div>'''.format(command, ico, name, desc)
        return admin
def frontend_fill():
    """Build the main HTML page from the template and live system data.

    Substitutes every "{key}" placeholder with the value from get_info()
    and every "{section_list}" placeholder with the rendered module
    launchers for that section.

    @return str: the complete HTML document.
    """
    # Read the template inside a context manager: the original held the
    # file open through all substitutions and leaked it on exception.
    with open("{0}/frontend/default.html".format(app_dir), "r") as template:
        page = template.read()
    for i in ['os', 'desk', 'arc', 'processor', 'mem', 'gfx', 'audio', 'disk',
              'kernel', 'updates', 'host', 'netstatus', 'netip', 'gateway']:
        page = page.replace("{%s}" % i, str(get_info(i)))
    sections = ['software', 'system', 'desktop', 'hardware', 'networking']
    sections.sort()
    for i in sections:
        page = page.replace("{%s_list}" % i, get_modules(i))
    return page
def main():
    """Build the GTK window, embed the WebKit frontend and run the loop."""
    global browser
    global window
    frontend = frontend_fill()
    window = gtk.Window()
    window.connect('destroy', gtk.main_quit)
    window.set_title(appname)
    window.set_icon(Pixbuf.new_from_file(app_icon))
    # tkinter is used only to query the screen height for sizing.
    rootsize = tkinter.Tk()
    if rootsize.winfo_screenheight() > 700:
        window.set_resizable(False)
        window.set_size_request(880, 660)
    else:
        # Small screens get a shorter, resizable window.
        window.set_resizable(True)
        window.set_size_request(880, 500)
    window.set_position(gtk.WindowPosition.CENTER),
    browser = webkit.WebView()
    swindow = gtk.ScrolledWindow()
    window.add(swindow)
    swindow.add(browser)
    window.show_all()
    # Route all pseudo-scheme links through the dispatcher functions().
    browser.connect("navigation-requested", functions)
    browser.load_html_string(frontend, "file://{0}/frontend/".format(app_dir))
    settings = browser.get_settings()
    settings.set_property('enable-default-context-menu', False)
    browser.set_settings(settings)
    gtk.main()
if __name__ == '__main__':
    # Application constants used throughout the module.
    appname = 'Linux Lite Control Center'
    appver = '1.0-0310'
    app_dir = '/usr/share/litecc'
    app_icon = "/usr/share/pixmaps/lite-controlcenter.png"
    # Placeholder for the lock-file handle set by run_once().
    fh = 0
    try:
        run_once()
        main()
    except (Exception, AttributeError) as e:
        print("Exiting due to error: {0}".format(e))
        sys.exit(1)
|
linuxlite/litecontrolcenter
|
usr/share/litecc/lite-controlcenter.py
|
Python
|
gpl-2.0
| 15,981
|
[
"Brian"
] |
19a89de19b09c4dfb52fa1d5ae27c4554ee925b00b89b1005fe74ea123da873a
|
# -*- coding: utf-8 -*-
"""
This file contains methods for lorentzian-like fitting, these methods
are imported by class FitLogic.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Developed from PI3diamond code Copyright (C) 2009 Helmut Rathgen <helmut.rathgen@gmail.com>
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
import logging
logger = logging.getLogger(__name__)
import numpy as np
from lmfit.models import ConstantModel, LorentzianModel
from lmfit import Parameters
from scipy.ndimage import filters
from scipy.interpolate import InterpolatedUnivariateSpline
############################################################################
# #
# Lorentzian Model #
# #
############################################################################
"""
Information about the general Lorentzian Model
==============================================
The lorentzian has the following general form:
_ _
A | sigma |
f(x; A, x_0, sigma) = ----- | ---------------------- |
pi |_ (x_0 - x)^2 + sigma^2 _|
which can be redefined with
! A
f(x=x_0) = I = -----------
pi * sigma
_ _
| (sigma)^2 |
L(x; I, x_0, sigma) = I * | -------------------------- |
|_ (x_0 - x)^2 + (sigma)^2 _|
Note that the fitting algorithm is using the equation f(x; A, x_0, sigma) and
not L(x; I, x_0, sigma), therefore all the parameters are defined according to
f(x; A, x_0, sigma). The full width at half maximum is therefore 2*sigma.
The indefinite Integral of the Lorentzian is
int(f(x),x) = A/pi *Arctan( (x-x0)/sigma)
Plugging in the limits [0 to inf] we get:
int(f(x), {x,0,inf}) = (A * sigma/pi) *( pi/(2*sigma) + Arctan(x_0/sigma)/sigma) ) = F
(You can confirm that with Mathematica.) For the assumption that
x_0 >> sigma
we can take the limit of Arctan to which it converges: pi/2
That simplifies the formula further to
F = (A * sigma/pi) * ( pi/(2*sigma) + pi/(2*sigma) ) = A
Using the formula for I (above) we can solve the equation for sigma:
sigma = A / (pi* I) = F /(pi * I)
The parameter I can be really easy determined, since it will be just the
maximal/minimal value of the Lorentzian. If the area F is calculated
numerically, then the parameter sigma can be estimated.
"""
def make_lorentzian_model(self):
    """ Create a lorentzian-plus-constant-offset model.

    The composite model exposes the parameters 'amplitude', 'center',
    'sigma', 'fwhm' and the constant offset 'c'. For the function see:
    http://cars9.uchicago.edu/software/python/lmfit/builtin_models.html#models.LorentzianModel

    @return lmfit.model.CompositeModel model: the composed model object
    @return lmfit.parameter.Parameters params: parameter container with
                                               all lorentzian parameters
    """
    composite = LorentzianModel() + ConstantModel()
    return composite, composite.make_params()
def estimate_lorentz(self, x_axis=None, data=None):
    """ Estimate start parameters for a lorentzian dip fit.

    @param array x_axis: x values
    @param array data: value of each data point corresponding to x values

    @return int error: error code (0:OK, -1:error)
    @return float amplitude: estimated amplitude (negative — a dip)
    @return float x_zero: estimated x position of the dip
    @return float sigma: estimated half width at half maximum
    @return float offset: estimated offset
    """
    # TODO: make sigma and amplitude good, this is only a dirty fast solution
    error = 0
    # check if parameters make sense
    parameters = [x_axis, data]
    for var in parameters:
        if not isinstance(var, (frozenset, list, set, tuple, np.ndarray)):
            logger.error('Given parameter is no array.')
            error = -1
        elif len(np.shape(var)) != 1:
            logger.error('Given parameter is no one dimensional array.')
            # BUGFIX: the dimensionality failure previously logged but did
            # not flag the error code.
            error = -1
    # set parameters
    data_smooth, offset = self.find_offset_parameter(x_axis, data)
    data_level = data - data_smooth.mean()
    data_min = data_level.min()
    data_max = data_level.max()

    # Integrate the (interpolated) levelled data to estimate sigma from
    # the area under the curve: F = pi * I * sigma  =>  sigma = F/(pi*I).
    smoothing_spline = 1  # must be 1 <= smoothing_spline <= 5
    function = InterpolatedUnivariateSpline(x_axis, data_level,
                                            k=smoothing_spline)
    numerical_integral = function.integral(x_axis[0], x_axis[-1])

    if data_max > abs(data_min):
        logger.warning('The lorentzian estimator set the peak to the '
                       'minimal value, if you want to fit a peak instead '
                       'of a dip rewrite the estimator.')

    amplitude_median = data_min
    x_zero = x_axis[np.argmin(data_smooth)]

    # For the fitting procedure it is much better to start with a larger
    # sigma than with a smaller one: a small sigma is prone to larger
    # instabilities in the fit.
    oversize_sigma = 8
    sigma = numerical_integral * oversize_sigma / (np.pi * amplitude_median)
    # The original computed a positive amplitude first and immediately
    # overwrote it (dead code); only the negative dip amplitude is used.
    amplitude = -1 * abs(amplitude_median * np.pi * sigma)

    return error, amplitude, x_zero, sigma, offset
def make_lorentzian_fit(self, axis=None, data=None,
                        add_parameters=None):
    """ This method performs a 1D lorentzian (dip) fit on the provided data.

    @param array axis: axis values
    @param array data: data values
    @param dict add_parameters: additional parameters that override the
                                estimated ones

    @return object model: lmfit.model.ModelFit object, all parameters
                          provided about the fitting, like: success,
                          initial fitting values, best fitting values, data
                          with best fit with given axis,...
    """
    error, amplitude, x_zero, sigma, offset = self.estimate_lorentz(axis, data)
    model, params = self.make_lorentzian_model()
    # auxiliary variables
    stepsize = axis[1]-axis[0]
    n_steps = len(axis)
    # TODO: Make sigma amplitude and x_zero better
    # Bounds depend on whether the axis is ascending or descending.
    if axis[1]-axis[0]>0:
        # (Name, Value, Vary, Min, Max, Expr)
        params.add_many(('amplitude', amplitude, True, None, -1e-12, None),
                        ('sigma', sigma, True, (axis[1]-axis[0])/2 , (axis[-1]-axis[0])*10, None),
                        ('center', x_zero, True, (axis[0])-n_steps*stepsize, (axis[-1])+n_steps*stepsize, None),
                        ('c', offset, True, None, None, None))
    if axis[0]-axis[1]>0:
        # (Name, Value, Vary, Min, Max, Expr)
        params.add_many(('amplitude', amplitude, True, None, -1e-12, None),
                        ('sigma', sigma, True, (axis[0]-axis[1])/2, (axis[0]-axis[1])*10, None),
                        ('center', x_zero, True, (axis[-1]), (axis[0]), None),
                        ('c', offset, True, None, None, None))
    # redefine values of additional parameters
    if add_parameters is not None :
        params = self._substitute_parameter(parameters=params,
                                            update_dict=add_parameters)
    try:
        result = model.fit(data, x=axis,params=params)
    except:
        # NOTE(review): if the first fit raised, this identical retry will
        # most likely raise again and the warning below is never reached —
        # TODO confirm intended behaviour.
        result = model.fit(data, x=axis,params=params)
        logger.warning('The 1D lorentzian fit did not work. Error '
                       'message: {0}\n'.format(result.message))
    return result
############################################################################
# #
# Lorentz fit for peak instead of dip #
# #
############################################################################
def estimate_lorentzpeak (self, x_axis=None, data=None):
    """ This method provides a lorentzian function to fit a peak.

    @param array x_axis: x values
    @param array data: value of each data point corresponding to x values

    @return int error: error code (0:OK, -1:error)
    @return float amplitude: estimated amplitude
    @return float x_zero: estimated x value of maximum
    @return float sigma: estimated half width at half maximum
    @return float offset: estimated offset
    """
    # TODO: make sigma and amplitude good, this is only a dirty fast solution
    error = 0
    # check if parameters make sense
    parameters = [x_axis, data]
    for var in parameters:
        if not isinstance(var, (frozenset, list, set, tuple, np.ndarray)):
            logger.error('Given parameter is no array.')
            error = -1
        elif len(np.shape(var)) != 1:
            logger.error('Given parameter is no one dimensional array.')
    # set parameters
    data_smooth, offset = self.find_offset_parameter(x_axis, data)
    data_level = data-offset
    data_min = data_level.min()
    data_max = data_level.max()
    # Area under the curve ~ pi * amplitude * sigma => estimates sigma.
    numerical_integral = np.sum(data_level) * \
                         (np.abs(x_axis[0] - x_axis[-1])) / len(x_axis)
    if data_max<abs(data_min):
        logger.warning('This lorentzian estimator set the peak to the '
                       'maximum value, if you want to fit a dip '
                       'instead of a peak use estimate_lorentz.')
    amplitude_median = data_max
    x_zero = x_axis[np.argmax(data)]
    sigma = np.abs(numerical_integral / (np.pi * amplitude_median))
    amplitude = amplitude_median * np.pi * sigma
    return error, amplitude, x_zero, sigma, offset
def make_lorentzianpeak_fit(self, axis=None, data=None,
                            add_parameters=None):
    """ Perform a 1D lorentzian peak fit on the provided data.

    @param array axis: axis values
    @param array data: data values
    @param dict add_parameters: additional parameters that override the
                                estimated ones

    @return lmfit.model.ModelFit result: All parameters provided about
                                         the fitting, like: success,
                                         initial fitting values, best
                                         fitting values, data with best
                                         fit with given axis,...
    """
    error, \
    amplitude, \
    x_zero, \
    sigma, \
    offset = self.estimate_lorentzpeak(axis, data)

    model, params = self.make_lorentzian_model()

    # auxiliary variables:
    stepsize = np.abs(axis[1]-axis[0])
    n_steps = len(axis)

    # TODO: Make sigma amplitude and x_zero better
    # Bounds depend on whether the axis is ascending or descending.
    if axis[1]-axis[0] > 0:
        # (Name, Value, Vary, Min, Max, Expr)
        params.add_many(('amplitude', amplitude, True, 2e-12, None, None),
                        ('sigma', sigma, True, (axis[1]-axis[0])/2, (axis[-1]-axis[0])*10, None),
                        ('center', x_zero, True, (axis[0])-n_steps*stepsize, (axis[-1])+n_steps*stepsize, None),
                        ('c', offset, True, None, None, None))
    if axis[0]-axis[1] > 0:
        # (Name, Value, Vary, Min, Max, Expr)
        params.add_many(('amplitude', amplitude, True, 2e-12, None, None),
                        ('sigma', sigma, True, (axis[0]-axis[1])/2, (axis[0]-axis[1])*10, None),
                        ('center', x_zero, True, (axis[-1]), (axis[0]), None),
                        ('c', offset, True, None, None, None))

    # redefine values of additional parameters
    if add_parameters is not None:
        params = self._substitute_parameter(parameters=params,
                                            update_dict=add_parameters)
    try:
        result = model.fit(data, x=axis, params=params)
    except Exception:
        # BUGFIX: the original warning wrongly said "gaussian" and was
        # placed after the retry, so it was never reached when the retry
        # raised too. Warn first, then retry once.
        logger.warning('The 1D lorentzian peak fit did not work with the '
                       'estimated parameters; retrying once.')
        result = model.fit(data, x=axis, params=params)
    return result
############################################################################
# #
# Double Lorentzian Model #
# #
############################################################################
def make_multiplelorentzian_model(self, no_of_lor=None):
    """ Create a model that sums several lorentzians plus a constant offset.

    Each lorentzian contributes the parameters 'lorentz<i>_amplitude',
    'lorentz<i>_center', 'lorentz<i>_sigma' and 'lorentz<i>_fwhm'; the
    constant offset is 'c'. For the function see:
    http://cars9.uchicago.edu/software/python/lmfit/builtin_models.html#models.LorentzianModel

    @param int no_of_lor: number of lorentzians to include

    @return lmfit.model.CompositeModel model: the composed model object
    @return lmfit.parameter.Parameters params: parameter container with
                                               all lorentzian parameters
    """
    composite = ConstantModel()
    for index in range(no_of_lor):
        # Prefix each lorentzian so its parameter names stay unique.
        composite = composite + LorentzianModel(prefix='lorentz{0}_'.format(index))
    return composite, composite.make_params()
def estimate_doublelorentz(self, x_axis=None, data=None,
                           threshold_fraction=0.3,
                           minimal_threshold=0.01,
                           sigma_threshold_fraction=0.3):
    """ Estimate start parameters for a double lorentzian dip fit.

    @param array x_axis: x values
    @param array data: value of each data point corresponding to x values
    @param float threshold_fraction: fraction of the dip depth used as
                                     threshold in the double-dip search
    @param float minimal_threshold: lower bound for that threshold
    @param float sigma_threshold_fraction: threshold fraction used for
                                           the sigma estimation

    @return int error: error code (0:OK, -1:error)
    @return float lorentz0_amplitude: estimated amplitude of 1st dip
    @return float lorentz1_amplitude: estimated amplitude of 2nd dip
    @return float lorentz0_center: estimated x value of 1st minimum
    @return float lorentz1_center: estimated x value of 2nd minimum
    @return float lorentz0_sigma: estimated sigma of 1st dip
    @return float lorentz1_sigma: estimated sigma of 2nd dip
    @return float offset: estimated offset
    """
    error = 0
    # check if parameters make sense
    parameters = [x_axis,data]
    for var in parameters:
        if not isinstance(var,(frozenset, list, set, tuple, np.ndarray)):
            logger.error('Given parameter is no array.')
            error=-1
        elif len(np.shape(var)) != 1:
            logger.error('Given parameter is no one dimensional array.')
    # set parameters
    data_smooth,offset=self.find_offset_parameter(x_axis,data)
    data_level=data_smooth-offset
    # search for double lorentzian
    error, \
    sigma0_argleft, dip0_arg, sigma0_argright, \
    sigma1_argleft, dip1_arg , sigma1_argright = \
    self._search_double_dip(x_axis, data_level, threshold_fraction,
                            minimal_threshold, sigma_threshold_fraction)
    if dip0_arg == dip1_arg:
        # Both searches found the same dip: split its depth between the
        # two lorentzians.
        lorentz0_amplitude = data_level[dip0_arg]/2.
        lorentz1_amplitude = lorentz0_amplitude
    else:
        lorentz0_amplitude=data_level[dip0_arg]
        lorentz1_amplitude=data_level[dip1_arg]
    lorentz0_center = x_axis[dip0_arg]
    lorentz1_center = x_axis[dip1_arg]
    # Both sigmas are set to the same value
    numerical_integral_0=(np.sum(data_level[sigma0_argleft:sigma0_argright]) *
                       (x_axis[sigma0_argright] - x_axis[sigma0_argleft]) /
                       len(data_level[sigma0_argleft:sigma0_argright]))
    lorentz0_sigma = abs(numerical_integral_0 /
                         (np.pi * lorentz0_amplitude) )
    numerical_integral_1=numerical_integral_0
    lorentz1_sigma = abs( numerical_integral_1
                          / (np.pi * lorentz1_amplitude) )
    # estimate amplitude (negative: both lorentzians describe dips)
    lorentz0_amplitude = -1*abs(lorentz0_amplitude*np.pi*lorentz0_sigma)
    lorentz1_amplitude = -1*abs(lorentz1_amplitude*np.pi*lorentz1_sigma)
    if lorentz1_center < lorentz0_center :
        # Swap all three parameter pairs so lorentz0 is always the
        # leftmost dip.
        lorentz0_amplitude_temp = lorentz0_amplitude
        lorentz0_amplitude = lorentz1_amplitude
        lorentz1_amplitude = lorentz0_amplitude_temp
        lorentz0_center_temp = lorentz0_center
        lorentz0_center = lorentz1_center
        lorentz1_center = lorentz0_center_temp
        lorentz0_sigma_temp= lorentz0_sigma
        lorentz0_sigma = lorentz1_sigma
        lorentz1_sigma = lorentz0_sigma_temp
    return error, lorentz0_amplitude,lorentz1_amplitude, \
           lorentz0_center,lorentz1_center, lorentz0_sigma, \
           lorentz1_sigma, offset
def make_doublelorentzian_fit(self, axis=None, data=None,
                              add_parameters=None):
    """ This method performs a double lorentzian fit on the provided data.

    @param array axis: axis values
    @param array data: data values
    @param dict add_parameters: additional parameters that override the
                                estimated ones

    @return lmfit.model.ModelFit result: All parameters provided about
                                         the fitting, like: success,
                                         initial fitting values, best
                                         fitting values, data with best
                                         fit with given axis,...
    """
    error, \
    lorentz0_amplitude, \
    lorentz1_amplitude, \
    lorentz0_center, \
    lorentz1_center, \
    lorentz0_sigma, \
    lorentz1_sigma, \
    offset = self.estimate_doublelorentz(axis, data)
    model, params = self.make_multiplelorentzian_model(no_of_lor=2)
    # Auxiliary variables:
    stepsize=axis[1]-axis[0]
    n_steps=len(axis)
    # Defining standard parameters
    # (Name, Value, Vary, Min, Max, Expr)
    params.add('lorentz0_amplitude', lorentz0_amplitude, True, None, -0.01, None)
    params.add('lorentz0_sigma', lorentz0_sigma, True, (axis[1]-axis[0])/2 , (axis[-1]-axis[0])*4, None)
    params.add('lorentz0_center', lorentz0_center, True, (axis[0])-n_steps*stepsize, (axis[-1])+n_steps*stepsize, None)
    params.add('lorentz1_amplitude', lorentz1_amplitude, True, None, -0.01, None)
    params.add('lorentz1_sigma', lorentz1_sigma, True, (axis[1]-axis[0])/2 , (axis[-1]-axis[0])*4, None)
    params.add('lorentz1_center', lorentz1_center, True, (axis[0])-n_steps*stepsize, (axis[-1])+n_steps*stepsize, None)
    params.add('c', offset, True, None, None, None)
    # redefine values of additional parameters
    if add_parameters is not None:
        params=self._substitute_parameter(parameters=params,
                                          update_dict=add_parameters)
    try:
        result=model.fit(data, x=axis,params=params)
    except:
        # NOTE(review): if the first fit raised, this identical retry most
        # likely raises again and the warning below is never emitted —
        # TODO confirm intended behaviour.
        result=model.fit(data, x=axis,params=params)
        logger.warning('The double lorentzian fit did not '
                       'work: {0}'.format(result.message))
    return result
############################################################################
# #
# N14 fitting #
# #
############################################################################
def estimate_N14(self, x_axis=None, data=None):
    """ Provide an estimation of all fitting parameters for fitting the
    three equidistant lorentzian dips of the hyperfine interaction of a
    N14 nuclear spin.

    The splitting is fixed by parameter expressions; if the splitting is
    not exactly 2.15 MHz the fit will not work.

    @param array x_axis: x values in Hz
    @param array data: value of each data point corresponding to x values

    @return lmfit.parameter.Parameters parameters: object containing the
            offset, the three sigmas, and the three amplitudes and centers
    """
    # find the offset parameter, which should be in the fit the zero level:
    data_smooth_lorentz, offset = self.find_offset_parameter(x_axis, data)

    # Create a 5 MHz-wide step-wise filter with three "dips". Convolving
    # it with the smoothed data gives the strongest response where the
    # three hyperfine dips line up with the filter, which locates the
    # triplet accurately.

    # filter of one dip should always have a length of approx linewidth 1MHz
    points_within_1MHz = len(x_axis)/(x_axis.max()-x_axis.min()) * 1e6

    # filter should have a width of 5MHz.
    # BUGFIX: np.linspace requires an integer sample count; passing the
    # float 5*points_within_1MHz raises a TypeError on numpy >= 1.18.
    num_filter_points = int(round(5 * points_within_1MHz))
    x_filter = np.linspace(0, 5*points_within_1MHz, num_filter_points)
    lorentz = np.piecewise(x_filter, [(x_filter >= 0) * (x_filter < len(x_filter)*1/5),
                                      (x_filter >= len(x_filter)*1/5) * (x_filter < len(x_filter)*2/5),
                                      (x_filter >= len(x_filter)*2/5) * (x_filter < len(x_filter)*3/5),
                                      (x_filter >= len(x_filter)*3/5) * (x_filter < len(x_filter)*4/5),
                                      (x_filter >= len(x_filter)*4/5)],
                           [1, 0, 1, 0, 1])

    # if the filter is smaller than 5 points a convolution does not make sense
    if len(lorentz) >= 5:
        data_convolved = filters.convolve1d(data_smooth_lorentz, lorentz/lorentz.sum(), mode='constant', cval=data_smooth_lorentz.max())
        # Shift from the centre of the three-dip pattern to the first dip.
        x_axis_min = x_axis[data_convolved.argmin()]-2.15*1e6
    else:
        x_axis_min = x_axis[data_smooth_lorentz.argmin()]-2.15*1e6

    # Create the parameter container, with the estimated values, which
    # should be passed to the fit algorithm:
    parameters = Parameters()

    # level of the data, that means the offset is subtracted and the real
    # data are present
    data_level = data_smooth_lorentz - data_smooth_lorentz.mean()
    minimum_level = data_level.min()

    # In order to perform a smooth integral to obtain the area under the
    # curve make an interpolation of the passed data, in case they are
    # very sparse. That increases the accuracy of the calculated Integral.
    smoothing_spline = 1  # must be 1 <= smoothing_spline <= 5
    function = InterpolatedUnivariateSpline(x_axis, data_level, k=smoothing_spline)
    integrated_area = function.integral(x_axis[0], x_axis[-1])

    sigma = abs(integrated_area / (minimum_level/np.pi))

    amplitude = -1*abs(minimum_level*np.pi*sigma)

    # Since the total amplitude of the lorentzian depends on sigma it
    # makes sense to vary sigma within an interval, which is smaller than
    # the minimal distance between two points. Then the fit algorithm will
    # have a larger range to determine the amplitude properly. That is the
    # main issue with the fit!
    linewidth = sigma
    minimal_linewidth = (x_axis[1]-x_axis[0])/4
    maximal_linewidth = x_axis[-1]-x_axis[0]

    # All three linewidths are constrained to be equal (physical
    # constraint for the N14 hyperfine triplet); the two outer centers are
    # tied to lorentz0_center via expressions (+2.15 MHz and +4.3 MHz).
    # (Name, Value, Vary, Min, Max, Expr)
    parameters.add('lorentz0_amplitude', value=amplitude, max=-1e-6)
    parameters.add('lorentz0_center', value=x_axis_min)
    parameters.add('lorentz0_sigma', value=linewidth, min=minimal_linewidth, max=maximal_linewidth)
    parameters.add('lorentz1_amplitude', value=parameters['lorentz0_amplitude'].value, max=-1e-6)
    parameters.add('lorentz1_center', value=parameters['lorentz0_center'].value+2.15*1e6, expr='lorentz0_center+2.15*1e6')
    parameters.add('lorentz1_sigma', value=parameters['lorentz0_sigma'].value, min=minimal_linewidth, max=maximal_linewidth, expr='lorentz0_sigma')
    parameters.add('lorentz2_amplitude', value=parameters['lorentz0_amplitude'].value, max=-1e-6)
    parameters.add('lorentz2_center', value=parameters['lorentz1_center'].value+2.15*1e6, expr='lorentz0_center+4.3*1e6')
    parameters.add('lorentz2_sigma', value=parameters['lorentz0_sigma'].value, min=minimal_linewidth, max=maximal_linewidth, expr='lorentz0_sigma')
    parameters.add('c', value=data_smooth_lorentz.max())

    return parameters
def make_N14_fit(self, axis=None, data=None, add_parameters=None):
    """ Fit the data with three lorentzians spaced by the 2.15 MHz N14
    hyperfine splitting.

    @param array axis: axis values
    @param array data: data values
    @param dict add_parameters: additional parameters that override the
                                estimated ones

    @return lmfit.model.ModelFit result: All parameters provided about
                                         the fitting, like: success,
                                         initial fitting values, best
                                         fitting values, data with best
                                         fit with given axis,...
    """
    estimated = self.estimate_N14(axis, data)
    # Let caller-supplied values override the estimated ones.
    if add_parameters is not None:
        estimated = self._substitute_parameter(parameters=estimated,
                                               update_dict=add_parameters)
    model, _unused_params = self.make_multiplelorentzian_model(no_of_lor=3)
    return model.fit(data=data, x=axis, params=estimated)
############################################################################
# #
# N15 fitting #
# #
############################################################################
def estimate_N15(self, x_axis=None, data=None):
    """ This method provides an estimation of all fitting parameters for
    fitting the two equidistant lorentzian dips of the hyperfine interaction
    of a N15 nuclear spin. Here the splitting is set as an expression, if the
    splitting is not exactly 3.03MHz the fit will not work.

    @param array x_axis: x values in Hz
    @param array data: value of each data point corresponding to x values

    @return lmfit.parameter.Parameters parameters: New object with the
                                                   estimated offset, the
                                                   sigma's, amplitudes and
                                                   centers of the two dips
    """
    # Smooth the data and estimate the constant background level.
    data_smooth_lorentz, offset = self.find_offset_parameter(x_axis, data)
    hf_splitting = 3.03 * 1e6  # N15 hyperfine splitting in Hz
    # filter should always have a length of approx linewidth 1MHz
    points_within_1MHz = len(x_axis)/(x_axis.max()-x_axis.min()) * 1e6
    # filter should have a width of 4 MHz
    # NOTE(review): newer numpy requires an integer `num` for linspace —
    # points_within_1MHz is a float here; confirm the numpy version in use.
    x_filter = np.linspace(0,4*points_within_1MHz,4*points_within_1MHz)
    # Matched filter: two unit plateaus (outer quarters) separated by a gap,
    # mimicking the double-dip shape so its convolution minimum locates the
    # dip pair.
    lorentz = np.piecewise(x_filter, [(x_filter >= 0)*(x_filter<len(x_filter)/4),
                                      (x_filter >= len(x_filter)/4)*(x_filter<len(x_filter)*3/4),
                                      (x_filter >= len(x_filter)*3/4)], [1, 0,1])
    # if the filter is smaller than 3 points a convolution does not make sense
    if len(lorentz) >= 3:
        data_convolved = filters.convolve1d(data_smooth_lorentz, lorentz/lorentz.sum(),
                                            mode='constant', cval=data_smooth_lorentz.max())
        # Convolution minimum sits between the dips; shift down by half the
        # splitting to get the left (lorentz0) dip position.
        x_axis_min = x_axis[data_convolved.argmin()]-hf_splitting/2.
    else:
        x_axis_min = x_axis[data_smooth_lorentz.argmin()]
    # Work relative to the maximum so the dip depth is negative.
    data_level = data_smooth_lorentz - data_smooth_lorentz.max()
    minimum_level = data_level.min()
    # integral of data:
    function = InterpolatedUnivariateSpline(x_axis, data_level, k=1)
    Integral = function.integral(x_axis[0], x_axis[-1])
    # Lorentzian area = pi * amplitude * sigma, solved for sigma.
    sigma = abs(Integral /(np.pi * minimum_level) )
    amplitude = -1*abs(minimum_level*np.pi*sigma)
    minimal_sigma = x_axis[1]-x_axis[0]
    maximal_sigma = x_axis[-1]-x_axis[0]
    # Split the estimated area over the two dips (amplitude/2, sigma/2);
    # the second dip is tied to the first via expressions so only the
    # 3.03 MHz splitting and shared linewidth are enforced.
    parameters = Parameters()
    parameters.add('lorentz0_amplitude', value=amplitude/2., max=-1e-6)
    parameters.add('lorentz0_center', value=x_axis_min)
    parameters.add('lorentz0_sigma', value=sigma/2., min=minimal_sigma, max=maximal_sigma)
    parameters.add('lorentz1_amplitude', value=parameters['lorentz0_amplitude'].value, max=-1e-6)
    parameters.add('lorentz1_center', value=parameters['lorentz0_center'].value+hf_splitting, expr='lorentz0_center+3.03*1e6')
    parameters.add('lorentz1_sigma', value=parameters['lorentz0_sigma'].value, min=minimal_sigma, max=maximal_sigma, expr='lorentz0_sigma')
    parameters.add('c', value=data_smooth_lorentz.max())
    return parameters
def make_N15_fit(self, axis=None, data=None, add_parameters=None):
    """Fit a double-Lorentzian model accounting for the N15 hyperfine
    interaction (3.03 MHz splitting between the two dips).

    @param array axis: axis values in Hz
    @param array data: data values
    @param dict add_parameters: optional overrides for the estimated
                                fit parameters
    @return lmfit.model.ModelFit: the fit result (success flag, initial
            and best-fit parameter values, best-fit curve for the given
            axis, ...)
    """
    fit_params = self.estimate_N15(axis, data)
    # Caller-supplied parameter values override the automatic estimates.
    if add_parameters is not None:
        fit_params = self._substitute_parameter(parameters=fit_params,
                                                update_dict=add_parameters)
    model, _ = self.make_multiplelorentzian_model(no_of_lor=2)
    return model.fit(data=data, x=axis, params=fit_params)
|
drogenlied/qudi
|
logic/fitmethods/lorentzianlikemethods.py
|
Python
|
gpl-3.0
| 32,566
|
[
"Gaussian"
] |
5b170faf03680df9f1f43972961c9688380bf6c4bea98d0df75389e5abf9a81e
|
import numpy as np
import warnings
import GPy
def get_slices(input_list):
    """Return one slice per element of *input_list*.

    Each array contributes ``shape[0]`` consecutive rows; the returned
    slices address those row ranges in the vertically stacked result.
    """
    boundaries = np.cumsum([0] + [arr.shape[0] for arr in input_list])
    return [slice(start, stop)
            for start, stop in zip(boundaries[:-1], boundaries[1:])]
def build_XY(input_list, output_list=None, index=None):
    """Stack per-output inputs into one array with an output-index column.

    The inputs are stacked vertically and a final column is appended that
    labels each row with its output (taken from *index* if given, otherwise
    0..num_outputs-1). Outputs, if given, are stacked vertically as well.

    Returns (X, Y, I): X with the index column appended, Y the stacked
    outputs (or None), and I the index column alone, shape (rows, 1).
    """
    num_outputs = len(input_list)
    if output_list is None:
        Y = None
    else:
        assert len(output_list) == num_outputs
        Y = np.vstack(output_list)
    if index is not None:
        assert len(index) == num_outputs
        labels = index
    else:
        labels = range(num_outputs)
    I = np.hstack([np.repeat(lab, arr.shape[0])
                   for arr, lab in zip(input_list, labels)])
    X = np.hstack([np.vstack(input_list), I[:, None]])
    return X, Y, I[:, None]
def build_likelihood(Y_list, noise_index, likelihoods_list=None):
    """Create a MixedNoise likelihood with one Gaussian term per output.

    ``noise_index`` is accepted for backward compatibility with an older
    MixedNoise signature and is currently unused.
    """
    num_outputs = len(Y_list)
    if likelihoods_list is None:
        likelihoods_list = [GPy.likelihoods.Gaussian(name="Gaussian_noise_%s" % j)
                            for j in range(num_outputs)]
    else:
        assert len(likelihoods_list) == num_outputs
    return GPy.likelihoods.mixed_noise.MixedNoise(likelihoods_list=likelihoods_list)
def ICM(input_dim, num_outputs, kernel, W_rank=1, W=None, kappa=None, name='ICM'):
    """
    Build a kernel for an Intrinsic Coregionalization Model.

    The base kernel is multiplied by a Coregionalize kernel (matrix B)
    acting on an extra output-index dimension appended to the inputs.

    :input_dim: input dimensionality (excluding the output-index dimension)
    :num_outputs: number of outputs
    :param kernel: base kernel multiplied by the coregionalize kernel (matrix B)
    :type kernel: a GPy kernel
    :param W_rank: rank of the coregionalization factor 'W'
    :type W_rank: integer
    """
    if kernel.input_dim != input_dim:
        kernel.input_dim = input_dim
        warnings.warn("kernel's input dimension overwritten to fit input_dim parameter.")
    coreg = GPy.kern.Coregionalize(1, num_outputs, active_dims=[input_dim],
                                   rank=W_rank, W=W, kappa=kappa, name='B')
    return kernel.prod(coreg, name=name)
def LCM(input_dim, num_outputs, kernels_list, W_rank=1, name='ICM'):
    """
    Build a kernel for a Linear Coregionalization Model: the sum of one
    ICM term per base kernel in *kernels_list*.

    :input_dim: input dimensionality (excluding the output-index dimension)
    :num_outputs: number of outputs
    :param kernels_list: base kernels, one per ICM summand
    :param W_rank: rank of each coregionalization factor 'W'
    :type W_rank: integer
    """
    combined = ICM(input_dim, num_outputs, kernels_list[0], W_rank,
                   name='%s%s' % (name, 0))
    for j, kern in enumerate(kernels_list[1:], start=1):
        combined += ICM(input_dim, num_outputs, kern, W_rank,
                        name='%s%s' % (name, j))
    return combined
def Private(input_dim, num_outputs, kernel, output, kappa=None, name='X'):
    """
    Build an ICM kernel that is private to one output.

    The coregionalization W factor is fixed to zero and the kappa terms of
    every output except *output* are fixed to zero, so the base kernel only
    contributes variance to that single output.

    :input_dim: input dimensionality (excluding the output-index dimension)
    :num_outputs: number of outputs
    :param kernel: base kernel multiplied by the coregionalize kernel (matrix B)
    :type kernel: a GPy kernel
    :param output: index of the output this kernel is private to
    :param kappa: optional initial kappa values for matrix B
    """
    K = ICM(input_dim, num_outputs, kernel, W_rank=1, kappa=kappa, name=name)
    K.B.W.fix(0)
    # range() is not a list on Python 3, so materialize it before .pop();
    # behavior on Python 2 is unchanged.
    others = list(range(num_outputs))
    others.pop(output)
    for j in others:
        K.B.kappa[j] = 0
        K.B.kappa[j].fix()
    return K
|
ptonner/GPy
|
GPy/util/multioutput.py
|
Python
|
bsd-3-clause
| 3,588
|
[
"Gaussian"
] |
014331d664d31bbbe9b612f02bc782c15ac1aa134ce35a867ba7955b7c94a9be
|
# -----------------------------------------------------------------------------
# Copyright (c) 2016+ Buro Petr van Blokland + Claudia Mens & Font Bureau
# www.pagebot.io
#
# P A G E B O T
#
# Licensed under MIT conditions
# Made for usage in DrawBot, www.drawbot.com
# -----------------------------------------------------------------------------
#
# CartierMagazine.py
#
from __future__ import division
from datetime import datetime # Make date on magazine cover fit today.
import pagebot
from pagebot import newFS, Gradient, Shadow
from pagebot.style import getRootStyle, CENTER, LEFT, TOP, RIGHT, A4Letter
from pagebot.elements import *
from pagebot.conditions import *
from pagebot.document import Document
from pagebot.composer import Composer
from pagebot.typesetter import Typesetter
from pagebot.toolbox.transformer import s2Color, int2Color, lighter
# Import other than default view class, showing double pages spread
from pagebot.elements.views.spreadview import SpreadView
from pagebot.fonttoolbox.variablefontbuilder import getVariableFont, Font
W, H = 800, 1018 # Size of main image
PADDING = (24, 24, 20, 24) # General page padding.
EXPORT_PATH = '_export/CartierMagazineCover.png' # Export path of the document.
COVER_IMAGE_PATH1 = '../AdsProject/content/cartier/beeld.jpg' # Path of the cover image.
# Use this color to show "error" debugging, e.g. to show bounding box of an element.
debugColor = (1, 1, 0, 0.5)
# Set some values of the default template (as already generated by the document).
# Make sequential unique names for the flow boxes inside the templates.
MAIN_FLOW = 'main' # Element id of the text box on pages that hold the main text flow.
FLOWID1 = MAIN_FLOW+'1'
FLOWID2 = MAIN_FLOW+'2'
FLOWID3 = MAIN_FLOW+'3'
# Get the root path of open source fonts, enclosed in PageBot.
ROOT_PATH = pagebot.getRootPath()
# Main Variable Font for all text in the magazine. Change this line to build with
# another Variable Font. Using Optical Size (opsz), Weight (wght) and Width (wdth) axes.
FONT_PATH = ROOT_PATH + '/Fonts/fontbureau/AmstelvarAlpha-VF.ttf'
# Open the font, so we can query values that are not available in standard DrawBot functions,
# such as stem width, overshoot of roundings, etc.
f = Font(FONT_PATH)
#print f.axes Uncomment to see the available axes printed.
# Pre-calculate instances of locations in the Variable Font.
# Axis values appear to be normalized design-space coordinates (0..1 for
# wght/wdth) — TODO confirm against the Amstelvar axis definitions.
LIGHT72 = getVariableFont(FONT_PATH, dict(wght=0.5, wdth=0.6, opsz=72))
BOOK_LIGHT = getVariableFont(FONT_PATH, dict(wght=0.5, wdth=0.7))
BOOK_CONDENSED = getVariableFont(FONT_PATH, dict(wght=0.7, wdth=0.4))
BOOK = getVariableFont(FONT_PATH, dict(wght=0.25, wdth=0))
BOOK_ITALIC = getVariableFont(FONT_PATH, dict(wght=0.25, wdth=1))
MEDIUM = getVariableFont(FONT_PATH, dict(wght=0.40, wdth=0))
SEMIBOLD = getVariableFont(FONT_PATH, dict(wght=0.40, wdth=1))
SEMIBOLD_CONDENSED = getVariableFont(FONT_PATH, dict(wght=0.40, wdth=0.5))
BOLD = getVariableFont(FONT_PATH, dict(wght=0.70, wdth=1))
BOLD_ITALIC = getVariableFont(FONT_PATH, dict(wght=0.7, wdth=1))
# Drop shadow shared by the cover text elements.
shadow = Shadow(offset=(6, -6), blur=10, color=(0.2, 0.2, 0.2, 0.5))
def makeCoverTemplate(imagePath, w, h):
    u"""Build and return the cover Template of size (w, h).

    Places the background image, the 'Cartier' title, the 'Magazine'
    subtitle, the current date and the price, then delegates the chapter
    title text boxes to makeCoverTitles().
    """
    bleed = 0       # NOTE(review): currently unused.
    textColor = 1   # White text over the photo.
    # Make styles
    # TODO: Make this fit, using size/wdth axis combination of Amstelvar
    coverTitleSize = 160
    # Not optical size yet. Play more with the axes
    coverTitleFont = getVariableFont(FONT_PATH,
        dict(wght=0.9, wdth=0.02))#, opsz=coverTitleSize))
    coverTitleStyle = dict(font=coverTitleFont.installedName, fontSize=coverTitleSize,
        textShadow=shadow, textFill=textColor, tracking=-3)
    coverSubTitleSize = 80
    # Not optical size yet. Play more with the axes
    coverSubTitleFont = getVariableFont(FONT_PATH, dict(wght=0.6, wdth=0.02)) #opsz=coverSubTitleSize))
    coverSubTitleStyle = dict(font=coverSubTitleFont.installedName, fontSize=coverSubTitleSize,
        textFill=(1, 1, 1, 0.6), tracking=0)
    # Cover
    coverTemplate = Template(w=w, h=h, padding=PADDING) # Cover template of the magazine.
    # Background photo, full width, anchored to the bottom of the page.
    newImage(imagePath, z=10, parent=coverTemplate, conditions=[Fit2WidthSides(), Bottom2BottomSide()])
    # Title of the magazine cover.
    coverTitle = newFS('Cartier', style=coverTitleStyle)
    # Calculate width if single "F" for now, to align "Magazine"
    # TODO: Change in example to go through the coverTitle to get positions and widths.
    newText(coverTitle, parent=coverTemplate, z=20,
        textShadow=shadow, conditions=[Right2Right(), Top2TopSide()])
    # Subtitle, right-aligned, floated below the title (pt=-90 pulls it up
    # to overlap — presumably tuned visually; confirm before changing).
    coversubTitle = newFS('Magazine', style=coverSubTitleStyle)
    tw, th = textSize(coverTitle)
    newTextBox(coversubTitle, parent=coverTemplate, pt=-90, z=20,
        xTextAlign=RIGHT,
        conditions=[Right2Right(), Fit2Width(), Float2Top()])
    # Make actual date in top-right with magazine title. Draw a bit transparant on background photo.
    dt = datetime.now()
    d = dt.strftime("%B %Y")
    fs = newFS(d, style=dict(font=MEDIUM.installedName, fontSize=17,
        textFill=(1, 1, 1, 0.8), tracking=0.5))
    # TODO: padding right could come from right stem of the "n"
    newTextBox(fs, parent=coverTemplate, xTextAlign=RIGHT, pr=10, pt=6, conditions=[Top2Top(), Right2Right()])
    # Price tag in the bottom-right corner.
    # Titles could come automatic from chapters in the magazine.
    fs = newFS('$6.95', style=dict(font=BOOK.installedName, fontSize=12,
        textFill=textColor, tracking=1, leading=12 ))
    newText(fs, parent=coverTemplate, mt=8, conditions=[Top2Bottom(), Right2Right()])
    makeCoverTitles(coverTemplate)
    return coverTemplate
def makeCoverTitles(coverTemplate):
    u"""Build the text box elements in the coverTemplate, containing the chapter titles
    of the magazine.

    Boxes are created in top-to-bottom order; the Float2Top/Float2Bottom
    conditions make layout depend on this creation order.
    """
    # TODO: Titles should come automatic from random blurb chapter titles in the magazine.
    pl = 8 # Generic offset as padding left from the page padding to align with cover title.
    # Empty spacer box to push the following titles down.
    fs = newFS('\n', style=dict(font=BOOK_CONDENSED.installedName,
        fontSize=64, textFill=1, tracking=0.5, leading=0, rLeading=0.9))
    newTextBox(fs, z=20, pl=15, pt=60, parent=coverTemplate,
        conditions=[Left2Left(), Fit2Width(), Float2Top()])
    # TODO: Titles should come automatic from random blurb chapter titles in the magazine.
    fs = newFS('Ideal style:\n', style=dict(font=MEDIUM.installedName, fontSize=32,
        textFill=1, tracking=0.5, leading=50))
    fs += newFS('Diamond collection', style=dict(font=BOOK.installedName,
        fontSize=45, textFill=1, tracking=0.5, leading=48))
    newTextBox(fs, z=20, pl=8, w=400, pt=0, parent=coverTemplate,
        textShadow=shadow,
        conditions=[Left2Left(), Float2Top()])
    # TODO: Titles should come automatic from random blurb chapter titles in the magazine.
    fs = newFS('Visit store\nP.C. Hooftstraat', style=dict(font=BOOK_LIGHT.installedName,
        fontSize=72, textFill=1, tracking=0.5, leading=74))
    newTextBox(fs, z=20, pl=8, pt=40, parent=coverTemplate,
        style=dict(shadowOffset=(4, -4), shadowBlur=20, shadowFill=(0,0,0,0.6)),
        textShadow=shadow,
        conditions=[Left2Left(), Fit2Width(), Float2Top()])
    # TODO: Titles should come automatic from random blurb chapter titles in the magazine.
    c = (1, 0, 0, 0.7) #lighter(int2Color(0x99CBE9)) # Pick from light spot in the photo
    fs = newFS('Exclusive: ', style=dict(font=MEDIUM.installedName, fontSize=24,
        textFill=c, tracking=0.5, lineHeight=34))
    fs += newFS('Necklace and bracelets ', style=dict(font=BOOK.installedName,
        fontSize=24, textFill=c, tracking=0.5, lineHeight=34))
    # Bottom teaser floats to the bottom of the page.
    newTextBox(fs, z=20, pl=pl, parent=coverTemplate,
        style=dict(shadowOffset=(4, -4), shadowBlur=20, shadowFill=(0,0,0,0.6)),
        textShadow=shadow,
        conditions=[Left2Left(), Fit2Width(), Float2Bottom()])
# -----------------------------------------------------------------
def makeDocument():
    u"""Demo page composer.

    Creates a one-page Document using the cover template, configures the
    view (crop/registration marks on, debug overlays off), solves the
    layout conditions and returns the document.
    """
    coverTemplate1 = makeCoverTemplate(COVER_IMAGE_PATH1, W, H)
    # Create new document with (w,h) and fixed amount of pages.
    # Make number of pages with default document size, start a page=1 to make SpreadView work.
    # Initially make all pages default with template2.
    # Oversized document (docW, docH) is defined in the rootStyle.
    doc = Document(title=EXPORT_PATH, w=W, h=H, autoPages=1, originTop=False,
        template=coverTemplate1, startPage=1)
    # TODO Will be expanded with more pages later.
    view = doc.getView()
    #view = SpreadView(parent=doc) # Show as spread, not a single pages.
    view.padding = 0 # No extra space around the page in the view.
    view.showPageCropMarks = True
    view.showPageRegistrationMarks = True
    view.showPageFrame = False
    view.showPagePadding = False
    view.showElementOrigin = False
    view.showElementDimensions = False
    # Change template of page 1
    # NOTE(review): page is fetched with doc[0] although autoPages starts at
    # startPage=1 — confirm the Document indexing convention.
    page1 = doc[0]
    page1.applyTemplate(coverTemplate1)
    # Resolve all layout conditions (Fit2Width, Float2Top, ...).
    doc.solve()
    return doc
# Build the document and export it to PNG, rendered with the spread view.
d = makeDocument()
d.export(EXPORT_PATH, viewId=SpreadView.viewId)
|
JaspervanBlokland/SandyApp
|
src/Toon/CartierMagazine_800_1018.py
|
Python
|
mit
| 9,078
|
[
"VisIt"
] |
5a94e625c5887858cb87e16cd7d3b4ff7d3c56fb335761b262c412b95a15e051
|
from django.apps import AppConfig
class ColumbusConfig(AppConfig):
    """Django application configuration for the Columbus workflow engine."""
    # Python import path of the app this config applies to.
    name = "pyedf"
    # Human-readable name, shown e.g. in the Django admin.
    verbose_name = "Columbus Workflow Engine"
    def ready(self):
        # No startup initialization (signal registration etc.) is needed yet.
        pass
|
jkachika/columbus
|
pyedf/apps.py
|
Python
|
mit
| 169
|
[
"COLUMBUS"
] |
8cbfde46546e6d306e2dd16d654477709ce45bdf6096189df1e2e020396dfb41
|
"""
Function-like objects that creates cubic clusters.
"""
import numpy as np
from ase.cluster.cubic import FaceCenteredCubic
from ase.cluster.compounds import L1_2
def Octahedron(symbol, length, cutoff=0, latticeconstant=None, alloy=False):
    """
    Returns Face Centered Cubic clusters of the octahedral class depending
    on the choice of cutoff.

    Type                          Condition
    ----                          ---------
    Regular octahedron            cutoff = 0
    Truncated octahedron          cutoff > 0
    Regular truncated octahedron  length = 3 * cutoff + 1
    Cuboctahedron                 length = 2 * cutoff + 1

    Parameters
    ----------
    symbol: The chemical symbol or atomic number of the element(s).
    length: Number of atoms on the square edges of the complete octahedron.
    cutoff (optional): Number of layers cut at each vertex.
    latticeconstant (optional): The lattice constant. If not given,
                                then it is extracted form ase.data.
    alloy (optional): If true the L1_2 structure is used. Default is False.

    Raises
    ------
    ValueError: if length < 2, or cutoff is outside [0, (length - 1) / 2].
    """
    # Check length and cutoff
    if length < 2:
        raise ValueError("The length must be greater than one.")
    if cutoff < 0 or length < 2 * cutoff + 1:
        raise ValueError("The cutoff must fulfill: >= 0 and <= (length - 1) / 2.")
    # Create cluster
    surfaces = [(1,1,1), (1,0,0)]
    if length % 2 == 0:
        center = np.array([0.5, 0.5, 0.5])
        # Floor division keeps the layer count integral on Python 3
        # (plain '/' would produce a float); identical result on Python 2.
        layers = [length // 2, length - 1 - cutoff]
    else:
        center = np.array([0.0, 0.0, 0.0])
        layers = [(length - 1) // 2, length - 1 - cutoff]
    if not alloy:
        return FaceCenteredCubic(symbol, surfaces, layers, latticeconstant, center)
    else:
        return L1_2(symbol, surfaces, layers, latticeconstant, center)
|
grhawk/ASE
|
tools/ase/cluster/octahedron.py
|
Python
|
gpl-2.0
| 1,805
|
[
"ASE"
] |
998bfc7a92be91a4076f9d1ddff9d8a8384ce84d5b5282cc64ede395b1c3c236
|
import cython
cython.declare(PyrexTypes=object, Naming=object, ExprNodes=object, Nodes=object,
Options=object, UtilNodes=object, ModuleNode=object,
LetNode=object, LetRefNode=object, TreeFragment=object,
TemplateTransform=object, EncodedString=object,
error=object, warning=object, copy=object)
import Builtin
import ExprNodes
import Nodes
from PyrexTypes import py_object_type, unspecified_type
import PyrexTypes
from Visitor import TreeVisitor, CythonTransform
from Errors import error, warning, InternalError
class TypedExprNode(ExprNodes.ExprNode):
    # Used for declaring assignments of a specified type without a known entry.
    def __init__(self, type, may_be_none=None):
        self.type = type
        # None means "unknown"; treated as "may be None" (see may_be_none()).
        self._may_be_none = may_be_none
    def may_be_none(self):
        # Only an explicit False answers "cannot be None"; both True and
        # the unknown None default answer conservatively.
        return self._may_be_none != False
# Shared dummy rhs expressions used when marking assignments whose right-hand
# side is not a real parse-tree node.
object_expr = TypedExprNode(py_object_type, may_be_none=True)
object_expr_not_none = TypedExprNode(py_object_type, may_be_none=False)
# Fake rhs to silence "unused variable" warning
fake_rhs_expr = TypedExprNode(unspecified_type)
class ControlBlock(object):
    """Node of the control flow graph: a sequence of assignments and
    name references.

    children    set of successor blocks
    parents     set of predecessor blocks
    positions   set of source position markers
    stats       list of block statements
    gen         dict of assignments generated by this block
    bounded     set of entries definitely bounded in this block

    Example:
        a = 1
        b = a + c # 'c' is already bounded or exception here
        stats = [Assignment(a), NameReference(a), NameReference(c),
                 Assignment(b)]
        gen = {Entry(a): Assignment(a), Entry(b): Assignment(b)}
        bounded = set([Entry(a), Entry(c)])
    """

    def __init__(self):
        # Graph topology.
        self.children = set()
        self.parents = set()
        # Source positions covered by this block.
        self.positions = set()
        # Statements plus per-entry generated assignments / bounded entries.
        self.stats = []
        self.gen = {}
        self.bounded = set()
        # Bit-vector state used by the reaching-definitions solver.
        self.i_input = 0
        self.i_output = 0
        self.i_gen = 0
        self.i_kill = 0
        self.i_state = 0

    def empty(self):
        # A block is removable when it carries no statements or positions.
        return not (self.stats or self.positions)

    def detach(self):
        """Unlink this block from all parents and children."""
        for succ in self.children:
            succ.parents.remove(self)
        for pred in self.parents:
            pred.children.remove(self)
        self.parents.clear()
        self.children.clear()

    def add_child(self, block):
        """Create an edge self -> block."""
        self.children.add(block)
        block.parents.add(self)
class ExitBlock(ControlBlock):
    """Function exit point.

    Never reports itself as empty, so normalization cannot delete it.
    """

    def empty(self):
        return False
class AssignmentList:
    """Per-entry collection of assignments.

    `bit` and `mask` attributes are attached later by
    ControlFlow.initialize() for the bit-vector dataflow solver.
    """

    def __init__(self):
        # NameAssignment objects targeting one entry.
        self.stats = []
class ControlFlow(object):
    """Control-flow graph.

    entry_point ControlBlock entry point for this graph
    exit_point  ControlBlock normal exit point
    block       ControlBlock current block
    blocks      set          children nodes
    entries     set          tracked entries
    loops       list         stack for loop descriptors
    exceptions  list         stack for exception descriptors
    """

    def __init__(self):
        self.blocks = set()
        self.entries = set()
        self.loops = []
        self.exceptions = []
        self.entry_point = ControlBlock()
        self.exit_point = ExitBlock()
        self.blocks.add(self.exit_point)
        # Note: entry_point is intentionally not in self.blocks here.
        self.block = self.entry_point

    def newblock(self, parent=None):
        """Create a block linked to `parent` if given, and add it to
        self.blocks. Unlike nextblock(), it does NOT become the current
        block."""
        block = ControlBlock()
        self.blocks.add(block)
        if parent:
            parent.add_child(block)
        return block

    def nextblock(self, parent=None):
        """Create a block linked to `parent` (or to the current block) and
        make it the new current block.

        NOTE: Block is added to self.blocks
        """
        block = ControlBlock()
        self.blocks.add(block)
        if parent:
            parent.add_child(block)
        elif self.block:
            self.block.add_child(block)
        self.block = block
        return self.block

    def is_tracked(self, entry):
        """Return True if dataflow state should be maintained for `entry`.
        Anonymous entries and C aggregates (arrays, structs/unions, C++
        classes) are excluded."""
        if entry.is_anonymous:
            return False
        if (entry.type.is_array or entry.type.is_struct_or_union or
                entry.type.is_cpp_class):
            return False
        return (entry.is_local or entry.is_pyclass_attr or entry.is_arg or
                entry.from_closure or entry.in_closure or
                entry.error_on_uninitialized)

    def mark_position(self, node):
        """Mark position, will be used to draw graph nodes."""
        if self.block:
            self.block.positions.add(node.pos[:2])

    def mark_assignment(self, lhs, rhs, entry):
        # Record an assignment in the current block; last assignment per
        # entry wins in block.gen.
        if self.block:
            if not self.is_tracked(entry):
                return
            assignment = NameAssignment(lhs, rhs, entry)
            self.block.stats.append(assignment)
            self.block.gen[entry] = assignment
            self.entries.add(entry)

    def mark_argument(self, lhs, rhs, entry):
        # Like mark_assignment, but records an Argument binding.
        if self.block and self.is_tracked(entry):
            assignment = Argument(lhs, rhs, entry)
            self.block.stats.append(assignment)
            self.block.gen[entry] = assignment
            self.entries.add(entry)

    def mark_deletion(self, node, entry):
        # `del name`: the entry becomes Uninitialized in this block.
        if self.block and self.is_tracked(entry):
            assignment = NameDeletion(node, entry)
            self.block.stats.append(assignment)
            self.block.gen[entry] = Uninitialized
            self.entries.add(entry)

    def mark_reference(self, node, entry):
        # Record a read of `entry` at `node`.
        if self.block and self.is_tracked(entry):
            self.block.stats.append(NameReference(node, entry))
            # Local variable is definitely bound after this reference
            if not node.allow_null:
                self.block.bounded.add(entry)
            self.entries.add(entry)

    def normalize(self):
        """Delete unreachable and orphan blocks."""
        # BFS from the entry point to find reachable blocks.
        queue = set([self.entry_point])
        visited = set()
        while queue:
            root = queue.pop()
            visited.add(root)
            for child in root.children:
                if child not in visited:
                    queue.add(child)
        unreachable = self.blocks - visited
        for block in unreachable:
            block.detach()
        visited.remove(self.entry_point)
        # Splice out empty blocks, re-wiring parents directly to children.
        for block in visited:
            if block.empty():
                for parent in block.parents: # Re-parent
                    for child in block.children:
                        parent.add_child(child)
                block.detach()
                unreachable.add(block)
        self.blocks -= unreachable

    def initialize(self):
        """Set initial state, map assignments to bits."""
        # One AssignmentList per entry; its `bit` marks the Uninitialized
        # state, `mask` covers that bit plus all assignment bits.
        self.assmts = {}
        offset = 0
        for entry in self.entries:
            assmts = AssignmentList()
            assmts.bit = 1 << offset
            assmts.mask = assmts.bit
            self.assmts[entry] = assmts
            offset += 1
        # One bit per individual assignment statement.
        for block in self.blocks:
            for stat in block.stats:
                if isinstance(stat, NameAssignment):
                    stat.bit = 1 << offset
                    assmts = self.assmts[stat.entry]
                    assmts.stats.append(stat)
                    assmts.mask |= stat.bit
                    offset += 1
        # Per-block gen/kill sets for the reaching-definitions solver.
        for block in self.blocks:
            for entry, stat in block.gen.items():
                assmts = self.assmts[entry]
                if stat is Uninitialized:
                    block.i_gen |= assmts.bit
                else:
                    block.i_gen |= stat.bit
                block.i_kill |= assmts.mask
            block.i_output = block.i_gen
            for entry in block.bounded:
                block.i_kill |= self.assmts[entry].bit
        # Everything starts Uninitialized at the entry point.
        # NOTE(review): dict.itervalues() is Python 2 only.
        for assmts in self.assmts.itervalues():
            self.entry_point.i_gen |= assmts.bit
        self.entry_point.i_output = self.entry_point.i_gen

    def map_one(self, istate, entry):
        """Translate bit-vector `istate` into the set of assignments (and
        possibly Uninitialized) that may reach `entry`."""
        ret = set()
        assmts = self.assmts[entry]
        if istate & assmts.bit:
            ret.add(Uninitialized)
        for assmt in assmts.stats:
            if istate & assmt.bit:
                ret.add(assmt)
        return ret

    def reaching_definitions(self):
        """Per-block reaching definitions analysis.

        Standard iterative dataflow: repeat until a fixed point is reached.
        """
        dirty = True
        while dirty:
            dirty = False
            for block in self.blocks:
                i_input = 0
                for parent in block.parents:
                    i_input |= parent.i_output
                i_output = (i_input & ~block.i_kill) | block.i_gen
                if i_output != block.i_output:
                    dirty = True
                block.i_input = i_input
                block.i_output = i_output
class LoopDescr(object):
    """Loop bookkeeping for the flow analysis.

    next_block  ControlBlock  block following the loop (presumably the
                              `break` target — confirm against callers)
    loop_block  ControlBlock  loop head (presumably the `continue` target)
    exceptions  list          exception descriptors entered inside the loop
    """
    def __init__(self, next_block, loop_block):
        self.next_block = next_block
        self.loop_block = loop_block
        self.exceptions = []
class ExceptionDescr(object):
    """Exception handling helper.

    entry_point   ControlBlock  Exception handling entry point
    finally_enter ControlBlock  Normal finally clause entry point
    finally_exit  ControlBlock  Normal finally clause exit point
    """
    def __init__(self, entry_point, finally_enter=None, finally_exit=None):
        self.entry_point = entry_point
        self.finally_enter = finally_enter
        self.finally_exit = finally_exit
class NameAssignment(object):
    """Record of one assignment to a tracked entry.

    Keeps the lhs/rhs nodes, the assigned entry, the source position and
    the set of references this assignment reaches (filled in later by the
    dataflow pass).
    """

    def __init__(self, lhs, rhs, entry):
        # Lazily attach control-flow state storage to the lhs node.
        if lhs.cf_state is None:
            lhs.cf_state = set()
        self.lhs = lhs
        self.rhs = rhs
        self.entry = entry
        self.pos = lhs.pos
        self.refs = set()
        # Subclasses flip these flags.
        self.is_arg = False
        self.is_deletion = False

    def __repr__(self):
        return '%s(entry=%r)' % (self.__class__.__name__, self.entry)

    def infer_type(self, scope):
        # Delegate to the assigned expression.
        return self.rhs.infer_type(scope)

    def type_dependencies(self, scope):
        return self.rhs.type_dependencies(scope)
class Argument(NameAssignment):
    """Assignment implied by binding a function argument."""

    def __init__(self, lhs, rhs, entry):
        NameAssignment.__init__(self, lhs, rhs, entry)
        self.is_arg = True
class NameDeletion(NameAssignment):
    """`del name` modelled as an assignment that unbinds the entry."""

    def __init__(self, lhs, entry):
        # Deletion has no real rhs; the lhs node stands in for it.
        NameAssignment.__init__(self, lhs, lhs, entry)
        self.is_deletion = True

    def infer_type(self, scope):
        inferred_type = self.rhs.infer_type(scope)
        # A non-object type that can coerce to a Python object is widened
        # to object here.
        if (not inferred_type.is_pyobject and
                inferred_type.can_coerce_to_pyobject(scope)):
            return py_object_type
        return inferred_type
class Uninitialized(object):
    # Sentinel marking "entry may be unbound" in reaching-assignment sets.
    # Compared by identity; never instantiated.
    pass
class NameReference(object):
    """Record of a read of a tracked entry at a given node."""

    def __init__(self, node, entry):
        # Lazily attach control-flow state storage to the node.
        if node.cf_state is None:
            node.cf_state = set()
        self.entry = entry
        self.node = node
        self.pos = node.pos

    def __repr__(self):
        return '%s(entry=%r)' % (self.__class__.__name__, self.entry)
class ControlFlowState(list):
    """List of the assignments that may reach an entry at a node.

    cf_is_null     [boolean] definitely uninitialized here
    cf_maybe_null  [boolean] possibly uninitialized here
    is_single      [boolean] exactly one reaching assignment
    """

    cf_maybe_null = False
    cf_is_null = False
    is_single = False

    def __init__(self, state):
        if Uninitialized in state:
            state.discard(Uninitialized)
            self.cf_maybe_null = True
            # Nothing but Uninitialized reached: definitely unbound.
            if not state:
                self.cf_is_null = True
        elif len(state) == 1:
            self.is_single = True
        super(ControlFlowState, self).__init__(state)

    def one(self):
        # Only meaningful when is_single is True.
        return self[0]
class GVContext(object):
    """Accumulates subgraphs and renders them into one graphviz digraph."""

    def __init__(self):
        self.blockids = {}   # block -> stable node identifier
        self.nextid = 0
        self.children = []   # GV subgraph renderers
        self.sources = {}    # source descriptor -> cached source lines

    def add(self, child):
        self.children.append(child)

    def nodeid(self, block):
        # Allocate identifiers lazily, in first-seen order.
        try:
            return self.blockids[block]
        except KeyError:
            ident = 'block%d' % self.nextid
            self.blockids[block] = ident
            self.nextid += 1
            return ident

    def extract_sources(self, block):
        # Join the stripped source lines spanned by the block's positions.
        if not block.positions:
            return ''
        start = min(block.positions)
        stop = max(block.positions)
        srcdescr = start[0]
        if srcdescr not in self.sources:
            self.sources[srcdescr] = list(srcdescr.get_lines())
        lines = self.sources[srcdescr]
        return '\\n'.join([l.strip() for l in lines[start[1] - 1:stop[1]]])

    def render(self, fp, name, annotate_defs=False):
        """Render graphviz dot graph"""
        fp.write('digraph %s {\n' % name)
        fp.write(' node [shape=box];\n')
        for child in self.children:
            child.render(fp, self, annotate_defs)
        fp.write('}\n')

    def escape(self, text):
        # Quote for a dot label string.
        return text.replace('"', '\\"').replace('\n', '\\n')
class GV(object):
    """Renders one control-flow graph as a graphviz subgraph."""

    def __init__(self, name, flow):
        self.name = name
        self.flow = flow

    def render(self, fp, ctx, annotate_defs=False):
        fp.write(' subgraph %s {\n' % self.name)
        # One dot node per basic block, labelled with its source text and,
        # optionally, the definitions/references it contains.
        for block in self.flow.blocks:
            label = ctx.extract_sources(block)
            if annotate_defs:
                for stat in block.stats:
                    if isinstance(stat, NameAssignment):
                        label += '\n %s [definition]' % stat.entry.name
                    elif isinstance(stat, NameReference):
                        if stat.entry:
                            label += '\n %s [reference]' % stat.entry.name
            if not label:
                label = 'empty'
            pid = ctx.nodeid(block)
            fp.write(' %s [label="%s"];\n' % (pid, ctx.escape(label)))
        # One dot edge per child link.
        for block in self.flow.blocks:
            pid = ctx.nodeid(block)
            for child in block.children:
                fp.write(' %s -> %s;\n' % (pid, ctx.nodeid(child)))
        fp.write(' }\n')
class MessageCollection:
    """Collects error/warning messages, then reports them in source order."""

    def __init__(self):
        self.messages = []

    def error(self, pos, message):
        self.messages.append((pos, True, message))

    def warning(self, pos, message):
        self.messages.append((pos, False, message))

    def report(self):
        # In-place sort by position so diagnostics come out in file order.
        self.messages.sort()
        for pos, is_error, message in self.messages:
            if is_error:
                error(pos, message)
            else:
                warning(pos, message, 2)
def check_definitions(flow, compiler_directives):
    """Run reaching-definitions over `flow`, attach control-flow state to
    every assignment/reference node, and report uninitialized / unused
    diagnostics according to the warning directives."""
    flow.initialize()
    flow.reaching_definitions()
    # Track down state
    assignments = set()
    # Node to entry map
    references = {}
    assmt_nodes = set()
    # Walk each block, threading the bit-vector state through its
    # statements in order.
    for block in flow.blocks:
        i_state = block.i_input
        for stat in block.stats:
            i_assmts = flow.assmts[stat.entry]
            state = flow.map_one(i_state, stat.entry)
            if isinstance(stat, NameAssignment):
                stat.lhs.cf_state.update(state)
                assmt_nodes.add(stat.lhs)
                # This assignment kills all earlier ones for the entry...
                i_state = i_state & ~i_assmts.mask
                if stat.is_deletion:
                    # ...a deletion re-marks the entry Uninitialized.
                    i_state |= i_assmts.bit
                else:
                    i_state |= stat.bit
                assignments.add(stat)
                if stat.rhs is not fake_rhs_expr:
                    stat.entry.cf_assignments.append(stat)
            elif isinstance(stat, NameReference):
                references[stat.node] = stat.entry
                stat.entry.cf_references.append(stat)
                stat.node.cf_state.update(state)
                # A non-null reference proves the entry is bound afterwards.
                if not stat.node.allow_null:
                    i_state &= ~i_assmts.bit
                state.discard(Uninitialized)
                for assmt in state:
                    assmt.refs.add(stat)
    # Check variable usage
    warn_maybe_uninitialized = compiler_directives['warn.maybe_uninitialized']
    warn_unused_result = compiler_directives['warn.unused_result']
    warn_unused = compiler_directives['warn.unused']
    warn_unused_arg = compiler_directives['warn.unused_arg']
    messages = MessageCollection()
    # assignment hints
    for node in assmt_nodes:
        if Uninitialized in node.cf_state:
            node.cf_maybe_null = True
            if len(node.cf_state) == 1:
                node.cf_is_null = True
            else:
                node.cf_is_null = False
        else:
            node.cf_is_null = False
            node.cf_maybe_null = False
    # Find uninitialized references and cf-hints
    # NOTE(review): dict.iteritems() is Python 2 only.
    for node, entry in references.iteritems():
        if Uninitialized in node.cf_state:
            node.cf_maybe_null = True
            if not entry.from_closure and len(node.cf_state) == 1:
                node.cf_is_null = True
            if node.allow_null or entry.from_closure or entry.is_pyclass_attr:
                pass # Can be uninitialized here
            elif node.cf_is_null:
                # Definitely unbound: error for Python objects (and entries
                # that demand it), warning otherwise.
                if (entry.type.is_pyobject or entry.type.is_unspecified or
                        entry.error_on_uninitialized):
                    messages.error(
                        node.pos,
                        "local variable '%s' referenced before assignment"
                        % entry.name)
                else:
                    messages.warning(
                        node.pos,
                        "local variable '%s' referenced before assignment"
                        % entry.name)
            elif warn_maybe_uninitialized:
                messages.warning(
                    node.pos,
                    "local variable '%s' might be referenced before assignment"
                    % entry.name)
        else:
            node.cf_is_null = False
            node.cf_maybe_null = False
    # Unused result
    for assmt in assignments:
        if (not assmt.refs and not assmt.entry.is_pyclass_attr
                and not assmt.entry.in_closure):
            if assmt.entry.cf_references and warn_unused_result:
                if assmt.is_arg:
                    messages.warning(assmt.pos, "Unused argument value '%s'" %
                                     assmt.entry.name)
                else:
                    messages.warning(assmt.pos, "Unused result in '%s'" %
                                     assmt.entry.name)
            assmt.lhs.cf_used = False
    # Unused entries
    for entry in flow.entries:
        if (not entry.cf_references and not entry.is_pyclass_attr
                and not entry.in_closure):
            if entry.is_arg:
                if warn_unused_arg:
                    messages.warning(entry.pos, "Unused argument '%s'" %
                                     entry.name)
            else:
                if warn_unused:
                    messages.warning(entry.pos, "Unused entry '%s'" %
                                     entry.name)
            entry.cf_used = False
    messages.report()
    # Freeze the collected per-node state into ControlFlowState objects.
    for node in assmt_nodes:
        node.cf_state = ControlFlowState(node.cf_state)
    for node in references:
        node.cf_state = ControlFlowState(node.cf_state)
class AssignmentCollector(TreeVisitor):
    """Tree visitor that records every (lhs, rhs) pair assigned anywhere
    in the visited subtree, without otherwise transforming it.

    Used by ControlFlowAnalysis.visit_ParallelAssignmentNode to gather the
    individual assignments of an ``a, b = b, a`` style statement so that all
    right-hand sides can be visited before any target is marked assigned.
    """

    def __init__(self):
        super(AssignmentCollector, self).__init__()
        # List of (lhs_node, rhs_node) pairs, in source order.
        self.assignments = []

    def visit_Node(self, node):
        # Generic fallback: recurse into the children of the visited node.
        # (Bug fix: the handler previously took no ``node`` argument and
        # recursed into ``self``; TreeVisitor dispatch passes the node, so
        # every visit raised TypeError.)
        self.visitchildren(node)

    def visit_SingleAssignmentNode(self, node):
        self.assignments.append((node.lhs, node.rhs))

    def visit_CascadedAssignmentNode(self, node):
        # ``a = b = rhs`` records one pair per target, sharing the rhs.
        for lhs in node.lhs_list:
            self.assignments.append((lhs, node.rhs))
class ControlFlowAnalysis(CythonTransform):
    """Builds a control-flow graph (a ControlFlow object) per function and
    for the module body, runs the reaching-definitions analysis on each
    (``check_definitions``), and optionally renders the graphs to GraphViz
    dot output when the ``control_flow.dot_output`` directive is set.
    """

    # True while visiting the children of an in-place assignment (a += b);
    # reading a reduction variable is only allowed in that context.
    in_inplace_assignment = False

    def visit_ModuleNode(self, node):
        """Set up module-level state, build and check the module-body CFG,
        and write the dot file if requested."""
        self.gv_ctx = GVContext()
        # Set of NameNode reductions
        self.reductions = set()
        self.env_stack = []
        self.env = node.scope
        self.stack = []
        self.flow = ControlFlow()
        self.visitchildren(node)
        check_definitions(self.flow, self.current_directives)
        dot_output = self.current_directives['control_flow.dot_output']
        if dot_output:
            annotate_defs = self.current_directives['control_flow.dot_annotate_defs']
            fp = open(dot_output, 'wt')
            try:
                self.gv_ctx.render(fp, 'module', annotate_defs=annotate_defs)
            finally:
                fp.close()
        return node

    def visit_FuncDefNode(self, node):
        """Build a fresh CFG for the function body (arguments are marked as
        assigned on entry), run the definedness check, then restore the
        enclosing scope's flow/env."""
        # Argument defaults are evaluated in the enclosing scope, before the
        # new ControlFlow is pushed.
        for arg in node.args:
            if arg.default:
                self.visitchildren(arg)
        self.visitchildren(node, attrs=('decorators',))
        self.env_stack.append(self.env)
        self.env = node.local_scope
        self.stack.append(self.flow)
        self.flow = ControlFlow()
        # Collect all entries
        for entry in node.local_scope.entries.values():
            if self.flow.is_tracked(entry):
                self.flow.entries.add(entry)
        self.mark_position(node)
        # Function body block
        self.flow.nextblock()
        for arg in node.args:
            self.visit(arg)
        # *args arrives as a tuple, **kwargs as a dict; neither can be None.
        if node.star_arg:
            self.flow.mark_argument(node.star_arg,
                                    TypedExprNode(Builtin.tuple_type,
                                                  may_be_none=False),
                                    node.star_arg.entry)
        if node.starstar_arg:
            self.flow.mark_argument(node.starstar_arg,
                                    TypedExprNode(Builtin.dict_type,
                                                  may_be_none=False),
                                    node.starstar_arg.entry)
        self.visit(node.body)
        # Workaround for generators
        if node.is_generator:
            self.visit(node.gbody.body)
        # Exit point
        if self.flow.block:
            self.flow.block.add_child(self.flow.exit_point)
        # Cleanup graph
        self.flow.normalize()
        check_definitions(self.flow, self.current_directives)
        self.flow.blocks.add(self.flow.entry_point)
        self.gv_ctx.add(GV(node.local_scope.name, self.flow))
        self.flow = self.stack.pop()
        self.env = self.env_stack.pop()
        return node

    def visit_DefNode(self, node):
        node.used = True
        return self.visit_FuncDefNode(node)

    def visit_GeneratorBodyDefNode(self, node):
        # Handled from within visit_FuncDefNode of the owning generator.
        return node

    def visit_CTypeDefNode(self, node):
        return node

    def mark_assignment(self, lhs, rhs=None):
        """Record an assignment of *rhs* (default: a generic object value)
        to *lhs* in the CFG, recursing through sequence unpacking targets.
        Splits the current block around possible exception edges."""
        if not self.flow.block:
            return
        if self.flow.exceptions:
            exc_descr = self.flow.exceptions[-1]
            self.flow.block.add_child(exc_descr.entry_point)
            self.flow.nextblock()
        if not rhs:
            rhs = object_expr
        if lhs.is_name:
            if lhs.entry is not None:
                entry = lhs.entry
            else:
                entry = self.env.lookup(lhs.name)
            if entry is None: # TODO: This shouldn't happen...
                return
            self.flow.mark_assignment(lhs, rhs, entry)
        elif isinstance(lhs, ExprNodes.SequenceNode):
            for arg in lhs.args:
                self.mark_assignment(arg)
        else:
            self.visit(lhs)
        if self.flow.exceptions:
            exc_descr = self.flow.exceptions[-1]
            self.flow.block.add_child(exc_descr.entry_point)
            self.flow.nextblock()

    def mark_position(self, node):
        """Mark position if DOT output is enabled."""
        if self.current_directives['control_flow.dot_output']:
            self.flow.mark_position(node)

    def visit_FromImportStatNode(self, node):
        # Each imported name (except '*') is an assignment to its target.
        for name, target in node.items:
            if name != "*":
                self.mark_assignment(target)
        self.visitchildren(node)
        return node

    def visit_AssignmentNode(self, node):
        # All concrete assignment node kinds have dedicated handlers below.
        raise InternalError, "Unhandled assignment node"

    def visit_SingleAssignmentNode(self, node):
        self.visit(node.rhs)
        self.mark_assignment(node.lhs, node.rhs)
        return node

    def visit_CascadedAssignmentNode(self, node):
        self.visit(node.rhs)
        for lhs in node.lhs_list:
            self.mark_assignment(lhs, node.rhs)
        return node

    def visit_ParallelAssignmentNode(self, node):
        # Visit all right-hand sides before marking any target assigned,
        # matching the runtime evaluation order of ``a, b = b, a``.
        collector = AssignmentCollector()
        collector.visitchildren(node)
        for lhs, rhs in collector.assignments:
            self.visit(rhs)
        for lhs, rhs in collector.assignments:
            self.mark_assignment(lhs, rhs)
        return node

    def visit_InPlaceAssignmentNode(self, node):
        # Reading a reduction variable is allowed inside its own ``+=`` etc.
        self.in_inplace_assignment = True
        self.visitchildren(node)
        self.in_inplace_assignment = False
        self.mark_assignment(node.lhs, node.create_binop_node())
        return node

    def visit_DelStatNode(self, node):
        """``del x`` marks the name deleted; deleting closure variables is
        rejected."""
        for arg in node.args:
            if arg.is_name:
                entry = arg.entry or self.env.lookup(arg.name)
                if entry.in_closure or entry.from_closure:
                    error(arg.pos,
                          "can not delete variable '%s' "
                          "referenced in nested scope" % entry.name)
                # Mark reference
                self.visit(arg)
                self.flow.mark_deletion(arg, entry)
        return node

    def visit_CArgDeclNode(self, node):
        entry = self.env.lookup(node.name)
        if entry:
            may_be_none = not node.not_none
            self.flow.mark_argument(
                node, TypedExprNode(entry.type, may_be_none), entry)
        return node

    def visit_NameNode(self, node):
        """A plain name use: record the reference and reject reads of
        reduction variables outside in-place assignments."""
        if self.flow.block:
            entry = node.entry or self.env.lookup(node.name)
            if entry:
                self.flow.mark_reference(node, entry)
                if entry in self.reductions and not self.in_inplace_assignment:
                    error(node.pos,
                          "Cannot read reduction variable in loop body")
        return node

    def visit_StatListNode(self, node):
        # Stop visiting after a statement that terminates the block
        # (return/raise/...); statements after it are unreachable.
        if self.flow.block:
            for stat in node.stats:
                self.visit(stat)
                if not self.flow.block:
                    stat.is_terminator = True
                    break
        return node

    def visit_Node(self, node):
        self.visitchildren(node)
        self.mark_position(node)
        return node

    def visit_IfStatNode(self, node):
        """Chain the clause condition blocks; each body and the else clause
        (or fall-through) feed the common next_block."""
        next_block = self.flow.newblock()
        parent = self.flow.block
        # If clauses
        for clause in node.if_clauses:
            parent = self.flow.nextblock(parent)
            self.visit(clause.condition)
            self.flow.nextblock()
            self.visit(clause.body)
            if self.flow.block:
                self.flow.block.add_child(next_block)
        # Else clause
        if node.else_clause:
            self.flow.nextblock(parent=parent)
            self.visit(node.else_clause)
            if self.flow.block:
                self.flow.block.add_child(next_block)
        else:
            parent.add_child(next_block)
        if next_block.parents:
            self.flow.block = next_block
        else:
            # No path reaches past the if statement.
            self.flow.block = None
        return node

    def visit_WhileStatNode(self, node):
        """condition -> body -> back to condition; the else clause runs on
        normal (non-break) loop exit."""
        condition_block = self.flow.nextblock()
        next_block = self.flow.newblock()
        # Condition block
        self.flow.loops.append(LoopDescr(next_block, condition_block))
        self.visit(node.condition)
        # Body block
        self.flow.nextblock()
        self.visit(node.body)
        self.flow.loops.pop()
        # Loop it
        if self.flow.block:
            self.flow.block.add_child(condition_block)
            self.flow.block.add_child(next_block)
        # Else clause
        if node.else_clause:
            self.flow.nextblock(parent=condition_block)
            self.visit(node.else_clause)
            if self.flow.block:
                self.flow.block.add_child(next_block)
        else:
            condition_block.add_child(next_block)
        if next_block.parents:
            self.flow.block = next_block
        else:
            self.flow.block = None
        return node

    def mark_forloop_target(self, node):
        """Mark assignments to a for-in loop target, special-casing
        reversed()/enumerate() wrappers and range()/xrange() so the target
        gets the bound values rather than a generic item."""
        # TODO: Remove redundancy with range optimization...
        is_special = False
        sequence = node.iterator.sequence
        target = node.target
        if isinstance(sequence, ExprNodes.SimpleCallNode):
            function = sequence.function
            if sequence.self is None and function.is_name:
                entry = self.env.lookup(function.name)
                if not entry or entry.is_builtin:
                    if function.name == 'reversed' and len(sequence.args) == 1:
                        sequence = sequence.args[0]
                    elif function.name == 'enumerate' and len(sequence.args) == 1:
                        if target.is_sequence_constructor and len(target.args) == 2:
                            iterator = sequence.args[0]
                            if iterator.is_name:
                                iterator_type = iterator.infer_type(self.env)
                                if iterator_type.is_builtin_type:
                                    # assume that builtin types have a length within Py_ssize_t
                                    self.mark_assignment(
                                        target.args[0],
                                        ExprNodes.IntNode(target.pos, value='PY_SSIZE_T_MAX',
                                                          type=PyrexTypes.c_py_ssize_t_type))
                                    target = target.args[1]
                                    sequence = sequence.args[0]
        if isinstance(sequence, ExprNodes.SimpleCallNode):
            function = sequence.function
            if sequence.self is None and function.is_name:
                entry = self.env.lookup(function.name)
                if not entry or entry.is_builtin:
                    if function.name in ('range', 'xrange'):
                        is_special = True
                        for arg in sequence.args[:2]:
                            self.mark_assignment(target, arg)
                        if len(sequence.args) > 2:
                            self.mark_assignment(
                                target,
                                ExprNodes.binop_node(node.pos,
                                                     '+',
                                                     sequence.args[0],
                                                     sequence.args[2]))
        if not is_special:
            # A for-loop basically translates to subsequent calls to
            # __getitem__(), so using an IndexNode here allows us to
            # naturally infer the base type of pointers, C arrays,
            # Python strings, etc., while correctly falling back to an
            # object type when the base type cannot be handled.
            self.mark_assignment(target, node.item)

    def visit_ForInStatNode(self, node):
        """Shared handler for for-in loops and prange-style parallel loops
        (dispatch on the concrete node class below)."""
        condition_block = self.flow.nextblock()
        next_block = self.flow.newblock()
        # Condition with iterator
        self.flow.loops.append(LoopDescr(next_block, condition_block))
        self.visit(node.iterator)
        # Target assignment
        self.flow.nextblock()
        if isinstance(node, Nodes.ForInStatNode):
            self.mark_forloop_target(node)
        else: # Parallel
            self.mark_assignment(node.target)
        # Body block
        if isinstance(node, Nodes.ParallelRangeNode):
            # In case of an invalid
            self._delete_privates(node, exclude=node.target.entry)
        self.flow.nextblock()
        self.visit(node.body)
        self.flow.loops.pop()
        # Loop it
        if self.flow.block:
            self.flow.block.add_child(condition_block)
        # Else clause
        if node.else_clause:
            self.flow.nextblock(parent=condition_block)
            self.visit(node.else_clause)
            if self.flow.block:
                self.flow.block.add_child(next_block)
        else:
            condition_block.add_child(next_block)
        if next_block.parents:
            self.flow.block = next_block
        else:
            self.flow.block = None
        return node

    def _delete_privates(self, node, exclude=None):
        # Thread-private variables are conceptually undefined on entry to
        # each parallel section/iteration.
        for private_node in node.assigned_nodes:
            if not exclude or private_node.entry is not exclude:
                self.flow.mark_deletion(private_node, private_node.entry)

    def visit_ParallelRangeNode(self, node):
        """prange(): register reduction variables for the loop body, then
        reuse the generic for-in handling."""
        reductions = self.reductions
        # if node.target is None or not a NameNode, an error will have
        # been previously issued
        if hasattr(node.target, 'entry'):
            self.reductions = set(reductions)
            for private_node in node.assigned_nodes:
                private_node.entry.error_on_uninitialized = True
                pos, reduction = node.assignments[private_node.entry]
                if reduction:
                    self.reductions.add(private_node.entry)
            node = self.visit_ForInStatNode(node)
        self.reductions = reductions
        return node

    def visit_ParallelWithBlockNode(self, node):
        # Privates are undefined both on entry and after the parallel block.
        for private_node in node.assigned_nodes:
            private_node.entry.error_on_uninitialized = True
        self._delete_privates(node)
        self.visitchildren(node)
        self._delete_privates(node)
        return node

    def visit_ForFromStatNode(self, node):
        """``for i from a <= i < b [by step]``: target takes the bounds (and
        bound1+step when stepping)."""
        condition_block = self.flow.nextblock()
        next_block = self.flow.newblock()
        # Condition with iterator
        self.flow.loops.append(LoopDescr(next_block, condition_block))
        self.visit(node.bound1)
        self.visit(node.bound2)
        if node.step is not None:
            self.visit(node.step)
        # Target assignment
        self.flow.nextblock()
        self.mark_assignment(node.target, node.bound1)
        if node.step is not None:
            self.mark_assignment(node.target,
                                 ExprNodes.binop_node(node.pos, '+',
                                                      node.bound1, node.step))
        # Body block
        self.flow.nextblock()
        self.visit(node.body)
        self.flow.loops.pop()
        # Loop it
        if self.flow.block:
            self.flow.block.add_child(condition_block)
        # Else clause
        if node.else_clause:
            self.flow.nextblock(parent=condition_block)
            self.visit(node.else_clause)
            if self.flow.block:
                self.flow.block.add_child(next_block)
        else:
            condition_block.add_child(next_block)
        if next_block.parents:
            self.flow.block = next_block
        else:
            self.flow.block = None
        return node

    def visit_LoopNode(self, node):
        # Only the concrete loop kinds above are expected at this stage.
        raise InternalError, "Generic loops are not supported"

    def visit_WithTargetAssignmentStatNode(self, node):
        self.mark_assignment(node.lhs, node.rhs)
        return node

    def visit_WithStatNode(self, node):
        self.visit(node.manager)
        self.visit(node.enter_call)
        self.visit(node.body)
        return node

    def visit_TryExceptStatNode(self, node):
        """Body edges into the exception entry point; except clauses are
        chained so an unmatched pattern falls through to the next clause
        (or to the enclosing handler)."""
        # After exception handling
        next_block = self.flow.newblock()
        # Body block
        self.flow.newblock()
        # Exception entry point
        entry_point = self.flow.newblock()
        self.flow.exceptions.append(ExceptionDescr(entry_point))
        self.flow.nextblock()
        ## XXX: links to exception handling point should be added by
        ## XXX: children nodes
        self.flow.block.add_child(entry_point)
        self.visit(node.body)
        self.flow.exceptions.pop()
        # After exception
        if self.flow.block:
            if node.else_clause:
                self.flow.nextblock()
                self.visit(node.else_clause)
            if self.flow.block:
                self.flow.block.add_child(next_block)
        for clause in node.except_clauses:
            self.flow.block = entry_point
            if clause.pattern:
                for pattern in clause.pattern:
                    self.visit(pattern)
            else:
                # TODO: handle * pattern
                pass
            entry_point = self.flow.newblock(parent=self.flow.block)
            self.flow.nextblock()
            if clause.target:
                self.mark_assignment(clause.target)
            self.visit(clause.body)
            if self.flow.block:
                self.flow.block.add_child(next_block)
        if self.flow.exceptions:
            entry_point.add_child(self.flow.exceptions[-1].entry_point)
        if next_block.parents:
            self.flow.block = next_block
        else:
            self.flow.block = None
        return node

    def visit_TryFinallyStatNode(self, node):
        """The finally clause is visited twice: once as the exceptional
        path (entry_point) and once as the normal path (finally_enter)."""
        body_block = self.flow.nextblock()
        # Exception entry point
        entry_point = self.flow.newblock()
        self.flow.block = entry_point
        self.visit(node.finally_clause)
        if self.flow.block and self.flow.exceptions:
            self.flow.block.add_child(self.flow.exceptions[-1].entry_point)
        # Normal execution
        finally_enter = self.flow.newblock()
        self.flow.block = finally_enter
        self.visit(node.finally_clause)
        finally_exit = self.flow.block
        descr = ExceptionDescr(entry_point, finally_enter, finally_exit)
        self.flow.exceptions.append(descr)
        if self.flow.loops:
            self.flow.loops[-1].exceptions.append(descr)
        self.flow.block = body_block
        ## XXX: Is it still required
        body_block.add_child(entry_point)
        self.visit(node.body)
        self.flow.exceptions.pop()
        if self.flow.loops:
            self.flow.loops[-1].exceptions.pop()
        if self.flow.block:
            self.flow.block.add_child(finally_enter)
            if finally_exit:
                self.flow.block = self.flow.nextblock(parent=finally_exit)
            else:
                self.flow.block = None
        return node

    def visit_RaiseStatNode(self, node):
        # raise terminates the current block; control goes to the nearest
        # exception entry point (if any).
        self.mark_position(node)
        self.visitchildren(node)
        if self.flow.exceptions:
            self.flow.block.add_child(self.flow.exceptions[-1].entry_point)
        self.flow.block = None
        return node

    def visit_ReraiseStatNode(self, node):
        self.mark_position(node)
        if self.flow.exceptions:
            self.flow.block.add_child(self.flow.exceptions[-1].entry_point)
        self.flow.block = None
        return node

    def visit_ReturnStatNode(self, node):
        """return routes through the innermost finally clause (if any) on
        its way to the function exit point."""
        self.mark_position(node)
        self.visitchildren(node)
        for exception in self.flow.exceptions[::-1]:
            if exception.finally_enter:
                self.flow.block.add_child(exception.finally_enter)
                if exception.finally_exit:
                    exception.finally_exit.add_child(self.flow.exit_point)
                break
        else:
            if self.flow.block:
                self.flow.block.add_child(self.flow.exit_point)
        self.flow.block = None
        return node

    def visit_BreakStatNode(self, node):
        """break jumps to the loop's next_block, routing through any
        intervening finally clauses."""
        if not self.flow.loops:
            #error(node.pos, "break statement not inside loop")
            return node
        loop = self.flow.loops[-1]
        self.mark_position(node)
        for exception in loop.exceptions[::-1]:
            if exception.finally_enter:
                self.flow.block.add_child(exception.finally_enter)
                if exception.finally_exit:
                    exception.finally_exit.add_child(loop.next_block)
                break
        else:
            self.flow.block.add_child(loop.next_block)
        self.flow.block = None
        return node

    def visit_ContinueStatNode(self, node):
        """continue jumps back to the loop's condition block, routing
        through any intervening finally clauses."""
        if not self.flow.loops:
            #error(node.pos, "continue statement not inside loop")
            return node
        loop = self.flow.loops[-1]
        self.mark_position(node)
        for exception in loop.exceptions[::-1]:
            if exception.finally_enter:
                self.flow.block.add_child(exception.finally_enter)
                if exception.finally_exit:
                    exception.finally_exit.add_child(loop.loop_block)
                break
        else:
            self.flow.block.add_child(loop.loop_block)
        self.flow.block = None
        return node

    def visit_ComprehensionNode(self, node):
        # Comprehensions may have their own expression scope (Py3 semantics).
        if node.expr_scope:
            self.env_stack.append(self.env)
            self.env = node.expr_scope
        # Skip append node here
        self.visit(node.target)
        self.visit(node.loop)
        if node.expr_scope:
            self.env = self.env_stack.pop()
        return node

    def visit_ScopedExprNode(self, node):
        if node.expr_scope:
            self.env_stack.append(self.env)
            self.env = node.expr_scope
        self.visitchildren(node)
        if node.expr_scope:
            self.env = self.env_stack.pop()
        return node

    def visit_PyClassDefNode(self, node):
        """Class bases/keywords are evaluated in the outer scope; the class
        name is assigned there, then the body runs in the class scope."""
        self.visitchildren(node, attrs=('dict', 'metaclass',
                                        'mkw', 'bases', 'class_result'))
        self.flow.mark_assignment(node.target, object_expr_not_none,
                                  self.env.lookup(node.name))
        self.env_stack.append(self.env)
        self.env = node.scope
        self.flow.nextblock()
        self.visitchildren(node, attrs=('body',))
        self.flow.nextblock()
        self.env = self.env_stack.pop()
        return node

    def visit_AmpersandNode(self, node):
        # Taking &name makes the variable's value untrackable.
        if node.operand.is_name:
            # Fake assignment to silence warning
            self.mark_assignment(node.operand, fake_rhs_expr)
        self.visitchildren(node)
        return node
|
larsmans/cython
|
Cython/Compiler/FlowControl.py
|
Python
|
apache-2.0
| 42,195
|
[
"VisIt"
] |
bcc1e088d1b61f88f3013c63ade1d905a6149a5cd922f28f7fdaf2cb1fa88676
|
# -*- coding: utf-8 -*-
'''
Python Client for MyVariant.Info services
'''
from __future__ import print_function
import sys
import time
import requests
import json
# Optional pandas dependency: DataFrame conversion (as_dataframe=True) is
# enabled only when pandas is importable.
try:
    from pandas import DataFrame
    df_avail = True
except ImportError:  # was a bare "except:", which could mask unrelated errors
    df_avail = False
# Client version string.
__version__ = '2.2.1'
# Python 2/3 compatibility shims: ``str_types`` is the tuple/type used for
# isinstance() checks on string input, and urlencode lives in a different
# module on each major version.
if sys.version_info[0] == 3:
    str_types = str
    from urllib.parse import urlencode
else:
    str_types = (str, unicode)
    from urllib import urlencode
def safe_str(s, encoding='utf-8'):
    '''Coerce *s* to a native string; encode unicode input that str() rejects.'''
    try:
        return str(s)
    except UnicodeEncodeError:
        # Python 2 only: str() on a non-ASCII unicode string raises; fall
        # back to an explicit encode.
        return s.encode(encoding)
class MyVariantInfo():
'''This is the client for MyVariant.info web services.
Example:
>>> mv = MyVariantInfo()
'''
def __init__(self, url='http://myvariant.info/v1'):
self.url = url
if self.url[-1] == '/':
self.url = self.url[:-1]
self.max_query = 1000
# delay and step attributes are for batch queries.
self.delay = 1
self.step = 1000
def _as_dataframe(self, gene_obj, df_index=False):
"""
converts gene object to DataFrame (pandas)
"""
if not df_avail:
print("Error: pandas module must be installed for as_dataframe option.")
return
if 'hits' in gene_obj:
df = DataFrame.from_dict(gene_obj['hits'])
else:
df = DataFrame.from_dict(gene_obj)
if df_index:
df = df.set_index('_id')
return df
def _get(self, url, params={}):
debug = params.pop('debug', False)
return_raw = params.pop('return_raw', False)
headers = {'user-agent': "Python-requests_myvariant.py/%s (gzip)" % requests.__version__}
res = requests.get(url, params=params, headers=headers)
#if debug:
# return _url, res, con
assert res.status_code == 200
if return_raw:
return res
else:
return res.json()
def _post(self, url, params):
# #if debug:
# # return url, res, con
debug = params.pop('debug', False)
return_raw = params.pop('return_raw', False)
headers = {'content-type': 'application/x-www-form-urlencoded',
'user-agent': "Python-requests_myvariant.py/%s (gzip)" % requests.__version__}
res = requests.post(url, data=params, headers=headers)
assert res.status_code == 200
if return_raw:
return res
else:
return res.json()
def _format_list(self, a_list, sep=','):
if isinstance(a_list, (list, tuple)):
_out = sep.join([safe_str(x) for x in a_list])
else:
_out = a_list # a_list is already a comma separated string
return _out
def _repeated_query(self, query_fn, query_li, verbose=True, **fn_kwargs):
step = min(self.step, self.max_query)
if len(query_li) <= step:
# No need to do series of batch queries, turn off verbose output
verbose = False
for i in range(0, len(query_li), step):
is_last_loop = i+step >= len(query_li)
if verbose:
print("querying {0}-{1}...".format(i+1, min(i+step, len(query_li))), end="")
query_result = query_fn(query_li[i:i+step], **fn_kwargs)
yield query_result
if verbose:
print("done.")
if not is_last_loop and self.delay:
time.sleep(self.delay)
@property
def metadata(self):
'''Return a dictionary of MyVariant.info metadata.
Example:
>>> metadata = mv.metadata
'''
_url = self.url+'/metadata'
return self._get(_url)
def getvariant(self, geneid, **kwargs):
'''Return the gene object for the give geneid.
This is a wrapper for GET query of "/gene/<geneid>" service.
:param geneid: entrez/ensembl gene id, entrez gene id can be either
a string or integer
:param fields: fields to return, a list or a comma-separated string.
If **fields="all"**, all available fields are returned
:param species: optionally, you can pass comma-separated species names
or taxonomy ids
:param email: optionally, pass your email to help us to track usage
:param filter: alias for **fields** parameter
:return: a gene object as a dictionary
:ref: http://mygene.info/doc/annotation_service.html for available
fields, extra *kwargs* and more.
Example:
>>> mv.getvariant(1017, email='abc@example.com')
>>> mv.getvariant('1017', fields='symbol,name,entrezgene,refseq')
>>> mv.getvariant('1017', fields='symbol,name,entrezgene,refseq.rna')
>>> mv.getvariant('1017', fields=['symbol', 'name', 'pathway.kegg'])
>>> mv.getvariant('ENSG00000123374', fields='all')
.. Hint:: The supported field names passed to **fields** parameter can be found from
any full gene object (when **fields="all"**). Note that field name supports dot
notation for nested data structure as well, e.g. you can pass "refseq.rna" or
"pathway.kegg".
'''
#if fields:
# kwargs['fields'] = self._format_list(fields)
if 'filter' in kwargs:
kwargs['fields'] = self._format_list(kwargs['filter'])
_url = self.url + '/variant/' + str(geneid)
return self._get(_url, kwargs)
def _getvariants_inner(self, geneids, **kwargs):
_kwargs = {'ids': self._format_list(geneids)}
_kwargs.update(kwargs)
_url = self.url + '/variant/'
return self._post(_url, _kwargs)
def getvariants(self, ids, fields=None, **kwargs):
'''Return the list of gene objects for the given list of geneids.
This is a wrapper for POST query of "/gene" service.
:param geneids: a list or comm-sep entrez/ensembl gene ids
:param fields: fields to return, a list or a comma-separated string.
If **fields="all"**, all available fields are returned
:param species: optionally, you can pass comma-separated species names
or taxonomy ids
:param email: optionally, pass your email to help us to track usage
:param filter: alias for fields
:param as_dataframe: if True, return object as DataFrame (requires Pandas).
:param df_index: if True (default), index returned DataFrame by 'query',
otherwise, index by number. Only applicable if as_dataframe=True.
:return: a list of gene objects or a pandas DataFrame object (when **as_dataframe** is True)
:ref: http://mygene.info/doc/annotation_service.html for available
fields, extra *kwargs* and more.
Example:
>>> mg.getgenes([1017, '1018','ENSG00000148795'], email='abc@example.com')
>>> mg.getgenes([1017, '1018','ENSG00000148795'], fields="entrezgene,uniprot")
>>> mg.getgenes([1017, '1018','ENSG00000148795'], fields="all")
>>> mg.getgenes([1017, '1018','ENSG00000148795'], as_dataframe=True)
.. Hint:: A large list of more than 1000 input ids will be sent to the backend
web service in batches (1000 at a time), and then the results will be
concatenated together. So, from the user-end, it's exactly the same as
passing a shorter list. You don't need to worry about saturating our
backend servers.
'''
if isinstance(ids, str_types):
ids = ids.split(',')
if (not (isinstance(ids, (list, tuple)) and len(ids) > 0)):
raise ValueError('input "variantids" must be non-empty list or tuple.')
if fields:
kwargs['fields'] = self._format_list(fields)
if 'filter' in kwargs:
kwargs['fields'] = self._format_list(kwargs['filter'])
verbose = kwargs.pop('verbose', True)
as_dataframe = kwargs.pop('as_dataframe', False)
if as_dataframe:
df_index = kwargs.pop('df_index', True)
return_raw = kwargs.get('return_raw', False)
if return_raw:
as_dataframe = False
query_fn = lambda ids: self._getvariants_inner(ids, **kwargs)
out = []
for hits in self._repeated_query(query_fn, ids, verbose=verbose):
if return_raw:
out.append(hits) # hits is the raw response text
else:
out.extend(hits)
if return_raw and len(out) == 1:
out = out[0]
if as_dataframe:
out = self._as_dataframe(out, df_index)
return out
def query_variant(self, q, **kwargs):
'''Return the query result.
This is a wrapper for GET query of "/query?q=<query>" service.
:param q: a query string, detailed query syntax `here <http://mygene.info/doc/query_service.html#query-syntax>`_
:param fields: fields to return, a list or a comma-separated string.
If **fields="all"**, all available fields are returned
:param species: optionally, you can pass comma-separated species names
or taxonomy ids. Default: human,mouse,rat.
:param size: the maximum number of results to return (with a cap
of 1000 at the moment). Default: 10.
:param skip: the number of results to skip. Default: 0.
:param sort: Prefix with "-" for descending order, otherwise in ascending order.
Default: sort by matching scores in decending order.
:param entrezonly: if True, return only matching entrez genes, otherwise, including matching
Ensemble-only genes (those have no matching entrez genes).
:param email: optionally, pass your email to help us to track usage
:param as_dataframe: if True, return object as DataFrame (requires Pandas).
:param df_index: if True (default), index returned DataFrame by 'query',
otherwise, index by number. Only applicable if as_dataframe=True.
:return: a dictionary with returned gene hits or a pandas DataFrame object (when **as_dataframe** is True)
:ref: http://mygene.info/doc/query_service.html for available
fields, extra *kwargs* and more.
Example:
>>> mg.query('cdk2')
>>> mg.query('reporter:1000_at')
>>> mg.query('symbol:cdk2', species='human')
>>> mg.query('symbol:cdk*', species=10090, size=5, as_dataframe=True)
>>> mg.query('q=chrX:151073054-151383976', species=9606)
'''
as_dataframe = kwargs.pop('as_dataframe', False)
kwargs.update({'q': q})
_url = self.url + '/query'
out = self._get(_url, kwargs)
if as_dataframe:
out = self._as_dataframe(out, False)
return out
def _querymany_inner(self, qterms, **kwargs):
_kwargs = {'q': self._format_list(qterms)}
_kwargs.update(kwargs)
_url = self.url + '/query'
return self._post(_url, _kwargs)
def queryvariants(self, q, scopes=None, **kwargs):
'''Return the batch query result.
This is a wrapper for POST query of "/query" service.
:param qterms: a list of query terms, or a string of comma-separated query terms.
:param scopes: type of types of identifiers, either a list or a comma-separated fields to specify type of
input qterms, e.g. "entrezgene", "entrezgene,symbol", ["ensemblgene", "symbol"]
refer to "http://mygene.info/doc/query_service.html#available_fields" for full list
of fields.
:param fields: fields to return, a list or a comma-separated string.
If **fields="all"**, all available fields are returned
:param species: optionally, you can pass comma-separated species names
or taxonomy ids. Default: human,mouse,rat.
:param entrezonly: if True, return only matching entrez genes, otherwise, including matching
Ensemble-only genes (those have no matching entrez genes).
:param returnall: if True, return a dict of all related data, including dup. and missing qterms
:param verbose: if True (default), print out infomation about dup and missing qterms
:param email: optionally, pass your email to help us to track usage
:param as_dataframe: if True, return object as DataFrame (requires Pandas).
:param df_index: if True (default), index returned DataFrame by 'query',
otherwise, index by number. Only applicable if as_dataframe=True.
:return: a list of gene objects or a pandas DataFrame object (when **as_dataframe** is True)
:ref: http://mygene.info/doc/query_service.html for available
fields, extra *kwargs* and more.
Example:
>>> mg.querymany(['DDX26B', 'CCDC83'], scopes='symbol', species=9606)
>>> mg.querymany(['1255_g_at', '1294_at', '1316_at', '1320_at'], scopes='reporter')
>>> mg.querymany(['NM_003466', 'CDK2', 695, '1320_at', 'Q08345'],
... scopes='refseq,symbol,entrezgene,reporter,uniprot', species='human')
>>> mg.querymany(['1255_g_at', '1294_at', '1316_at', '1320_at'], scopes='reporter',
... fields='ensembl.gene,symbol', as_dataframe=True)
.. Hint:: :py:meth:`querymany` is perfect for doing id mappings.
.. Hint:: Just like :py:meth:`getgenes`, passing a large list of ids (>1000) to :py:meth:`querymany` is perfectly fine.
'''
if isinstance(q, str_types):
qterms = q.split(',')
if (not (isinstance(qterms, (list, tuple)) and len(q) > 0)):
raise ValueError('input "qterms" must be non-empty list or tuple.')
if scopes:
kwargs['scopes'] = self._format_list(scopes)
if 'scope' in kwargs:
# allow scope for back-compatibility
kwargs['scopes'] = self._format_list(kwargs['scope'])
if 'fields' in kwargs:
kwargs['fields'] = self._format_list(kwargs['fields'])
returnall = kwargs.pop('returnall', False)
verbose = kwargs.pop('verbose', True)
as_dataframe = kwargs.pop('as_dataframe', False)
if as_dataframe:
df_index = kwargs.pop('df_index', True)
return_raw = kwargs.get('return_raw', False)
if return_raw:
as_dataframe = False
out = []
li_missing = []
li_dup = []
li_query = []
query_fn = lambda qterms: self._querymany_inner(q, **kwargs)
for hits in self._repeated_query(query_fn, q, verbose=verbose):
if return_raw:
out.append(hits) # hits is the raw response text
else:
out.extend(hits)
for hit in hits:
if hit.get('notfound', False):
li_missing.append(hit['query'])
else:
li_query.append(hit['query'])
if verbose:
print("Finished.")
if return_raw:
if len(out) == 1:
out = out[0]
return out
if as_dataframe:
out = self._as_dataframe(out, df_index)
# check dup hits
# if li_query:
# li_dup = [(query, cnt) for query, cnt in list_itemcnt(li_query) if cnt > 1]
# del li_query
if verbose:
if li_dup:
print("{0} input query terms found dup hits:".format(len(li_dup)))
print("\t"+str(li_dup)[:100])
if li_missing:
print("{0} input query terms found no hit:".format(len(li_missing)))
print("\t"+str(li_missing)[:100])
if returnall:
return {'out': out, 'dup': li_dup, 'missing': li_missing}
else:
if verbose and (li_dup or li_missing):
print('Pass "returnall=True" to return complete lists of duplicate or missing query terms.')
return out
# Module-level convenience client instance with default settings.
mv=MyVariantInfo()
|
SuLab/fiSSEA
|
myvariant/src/myvariant.py
|
Python
|
apache-2.0
| 16,425
|
[
"CDK"
] |
e2994f0f1f22a8605a8062f3435f5f4a753bfd36d9c458fcf895d0790a6eea39
|
# Kevin van Rensburg 11/20/2021
# Copyright 2001
# Testing Startup Script version 0.19
# kstart19.py
# Working on Kvep8.
# added def's ALL(), ALI(), COMCODE(), core(), cipl() and AnotherCode().
# Adding rd(), Or(),bfot(),srr(),cft(),st(),ct(),rat(),wst(),fep(), esot(),seor()
# fixed, reworked, and added scipt to Kvep8 and relevant def's
import sys
import os
import random
from time import sleep
import subprocess
#command = "cmd"
#os.system(command)
#subprocess.call(" python script2.py 1", shell=True)
#import Kendy11.py
#kendy11.myfunc()
def Intro():
    """Show the intro banner, hand off to the copyright screen, then wait
    for a keypress."""
    cls()
    for line in ("", "Intro:", "------", "", "Welcome! "):
        print(line)
    CopyRight()
    print("")
    input("Press any key to continue")
def Test():
    """Display the placeholder test-startup screen, then return to the
    main menu via GoAgain()."""
    cls()
    screen = (
        "",
        "Test:",
        "-----",
        "",
        "------------------------------",
        "",
        "This is the test startup script.",
        "",
        "Hello, welcome to my universe!",
        "",
        "------------------------------",
        "",
        "Add all relevant programming here...",
        "",
    )
    print("\n".join(screen))
    input("Press any key to continue")
    GoAgain()
def CopyRight():
    """Show the copyright placeholder screen, then return to the main menu."""
    cls()
    for line in ("", "Copyright Info Here [...].", ""):
        print(line)
    input("Press any key to continue")
    GoAgain()
# Menu dispatch wrappers: each ProgramN pads the console with blank lines
# and launches one sub-program (defined elsewhere in this file).
def Program1():
    """Menu entry 1: intro screen."""
    print("")
    Intro()
    print("")
def Program2():
    """Menu entry 2: chatbot."""
    print("")
    #subprocess.Popen("chatbot.py 1", shell=True)
    #subprocess.call(" python chatbot.py 1", shell=True) # - did not work
    ChatBot();
    print("")
def Program3():
    """Menu entry 3: tank game."""
    print("")
    Tank()
    print("")
def Program4():
    """Menu entry 4: AI module."""
    print("")
    AI()
    print("")
def Program5():
    """Menu entry 5: surveillance module."""
    print("")
    Surveillance()
    print("")
def Program6():
    """Menu entry 6: Kendy module."""
    print("")
    Kendy()
    print("")
    sleep(2)
    #KyBot()
    print("")
def Program7():
    """Menu entry 7: Wendy module."""
    print("")
    Wendy()
    sleep(2)
    #KendyVerse()
    #KyVerse()
    print("")
def Program8():
    """Menu entry 8: KendyVerse module."""
    print("")
    KendyVerse()
    sleep(2)
    print("")
def Program9():
    """Menu entry 9: KendyRobot module."""
    print("")
    KendyRobot()
    sleep(2)
    print("")
def Program10():
    """Menu entry 10: to-do list."""
    print("")
    ToDoList()
    sleep(2)
    print("")
def EnterName():
    """Prompt for the operator codename and exit the program on a mismatch.

    NOTE(review): the comparison is case- and whitespace-sensitive; confirm
    whether input should be normalized before comparing.
    """
    cls()
    print("")
    print("CODENAME ")
    codename = input(":")
    if codename != "KEVIN VAN RENSBURG":
        print("ACCESS DENIED!")  # fixed typo: was "ACESS DENIED!"
        sleep(2)
        sys.exit()
    else:
        print("Thank you", codename)
def Direction():
    """Ask the player to turn left or right and describe what they find.

    NOTE(review): both branches of the original printed the identical
    text "a passage", so the choice currently has no effect; the prompt
    and output are preserved exactly.
    """
    cls()
    print()
    print("to turn left enter 'l' ..to turn right enter 'r'")
    _choice = input(":")
    print("a passage")
def ALL():
    """Print the full access-levels list, then pause for eight seconds."""
    cls()
    listing = (
        "",
        "ACCESS LEVELS LIST",
        "------------------",
        "",
        "AL-1A - HIGHEST LEVEL UNIVERSAL",
        "AL-1B - SECOND LEVEL UNIVERSAL",
        "AL-1C - THIRD LEVEL UNIVERSAL",
        "AL-1D - FOURTH LEVEL UNIVERSAL",
        "AL-1E - FIFTH LEVEL UNIVERSAL",
        "AL-2A - HIGHEST LEVEL LOCAL",
        "AL-2B - SECOND LEVEL LOCAL",
        "AL-2C - THIRD LEVEL LOCAL",
        "AL-2D - FOURTH LEVEL LOCAL",
        "AL-2E - FIFTH LEVEL LOCAL",
        "AL-3A - FIRST SUB LEVEL LOCAL ",
        "AL-3B - SECOND SUB LEVEL LOCAL",
        "AL-3C - THIRD SUB LEVEL LOCAL",
        "AL-3D - FOURTH SUB LEVEL LOCAL",
        "AL-3E - FIFTH SUB LEVEL LOCAL",
        "",
    )
    for line in listing:
        print(line)
    sleep(8)
def ALI():
    """Access Level Indicator: report the access level for an entered command.

    The full commander code prints the complete roster; known role names
    print a single level line; anything else denies access and exits.
    """
    cls()
    print()
    print("ENTER COMMAND")
    code = input(":")
    if code == "COMMANDER KEVIN VAN RENSBURG KVR145759 ALCODE":
        roster = (
            "KEVINVR-ACCESS LEVEL AL-1A",
            "KENDY-ACCESS LEVEL AL-1E",
            "CAPTAIN-ACCESS LEVEL TBD",
            "COMMANDING OFFICER-ACCESS LEVEL TBD",
            "PILOT-ACCESS LEVEL TBD",
            "NAVIGATION-ACCESS LEVEL TBD",
            "SCANNING-ACCESS LEVEL TBD",
            "WEAPONS-ACCESS LEVEL TBD",
        )
        for line in roster:
            print(line)
        sleep(8)
        return
    # Role -> access-level line; dispatch table replaces the elif ladder.
    levels = {
        "KENDY": "ACCESS LEVEL AL-1E",
        "CAPTAIN": "ACCESS LEVEL TBD",
        "COMMANDING OFFICER": "ACCESS LEVEL TBD",
        "PILOT": "ACCESS LEVEL TBD",
        "NAVIGATION": "ACCESS LEVEL TBD",
        "SCANNING": "ACCESS LEVEL TBD",
        "WEAPONS": "ACCESS LEVEL TBD",
    }
    if code in levels:
        print(levels[code])
        sleep(5)
    else:
        print("ACCESS DENIED")
        sleep(4)
        sys.exit()
def core():
    """Announce that core-level access has been authorized, then pause."""
    cls()
    for line in (
        "",
        "KENDY - CORE LEVEL AUTHORIZED",
        "COMMANDER KEVIN VAN RENSBURG - CORE ACCESS AUTHORIZED",
        "",
    ):
        print(line)
    sleep(6)
def cipl():
    """Core Initialization Protocol: check the access code, exiting on failure."""
    cls()
    print()
    print("PLEASE ENTER ACCESS CODE")
    code = input(":")
    # Guard clause: wrong code denies access and terminates the program.
    if code != "KVR145759":
        print("ACCESS DENIED")
        sleep(4)
        sys.exit()
    core()
def COMCODE():
    """Show the resource-acquisition instructions and dispatch a command code.

    Known codes trigger their protocol; unknown codes deny access and exit.
    """
    cls()
    menu = (
        "",
        "ACQUIRING RESOURCES ",
        "-------------------",
        "",
        "Instructions for acquiring resources",
        "------------------------------------",
        "",
        "Station Commander can access resources by entering the following commands:",
        "",
        "Core INI Protocols - Local",
        "Robotic INI Protocols - Local",
        "Weapons and Accesories INI Protocols - Local",
        "Universal Protocols INI *Access Level 1A only*",
        "ACCESS Levels List *Access Levels 2E-1A*",
        "ACCESS Level Indicator - Enter Position, Name, Code, ALI=COMMAND",
        "",
    )
    for line in menu:
        print(line)
    sleep(8)
    Continue()
    print()
    print("PLEASE ENTER COMMAND CODE")
    comcode = input(":")
    if comcode == "CIPL":
        print("CORE PROTOCOLS INITIALIZED")
        cipl()
        sleep(5)
    elif comcode == "RIPL":
        print("Robotic Protocols Initialized")
        sleep(5)
    elif comcode == "WAIPL":
        print("Weapons Protocols Initialized")
        sleep(5)
    elif comcode == "UPI":
        # NOTE(review): "Initilized" misspelling preserved byte-for-byte
        # from the original user-facing output.
        print("Universal Protocols Initilized")
        sleep(5)
    elif comcode == "ALL":
        ALL()
    elif comcode == "ALI":
        ALI()
    elif comcode == "MORE":
        AnotherCode()
    else:
        print("ACCESS DENIED")
        sys.exit()
def AnotherCode():
    """Ask whether another command code is needed; re-run COMCODE on 'y'."""
    print()
    print("Do you need to enter a new code??")
    if input(": ") == "y":
        COMCODE()
    else:
        print("Thank you!")
def Kvep1():
    """KendyVerse Episode 1: "Lost".

    Linear interactive scene: prints the crash-landing narrative with
    cls()/sleep() pacing, asks the player's name and one look-direction
    choice, then returns to the episode menu via KVersChoice().
    """
    cls()
    print("")
    print("Welcome to KendyVerse ")
    print("The Story starts here.....")
    print("")
    print("Episode 1: Lost")
    sleep(5)
    print("")
    print("HELLO! WHO ARE YOU???")
    # NOTE(review): name is echoed once below and never used again.
    name=input(":")
    print("Hello" ,name)
    sleep(5)
    cls()
    # Scene: the crash.
    print("")
    print("BANG!!..SHUDDER...")
    print("What?? Stuck..can't move..")
    print("Where am I?? ...falling...falling....THWUMP!!")
    sleep(10)
    cls()
    print("")
    print("DARK..remember??? Yes...")
    print("OK..think..remember..I was falling..Stuck to something")
    sleep(10)
    print("Yes.. In a Plane..noises..now it's quiet")
    #sleep(5)
    cls()
    print("")
    print("Opening my eyes slowly..it's night..still strapped into seat")
    print("must have blacked out..look around slowly..no headache")
    sleep(10)
    print("test fingers..ok..")
    print("toes..ok..")
    print("move feet..OK..")
    print("move..hands..ok..")
    print("nothing hurts..")
    print("turn head slowly.. ")
    sleep(10)
    #Continue()
    cls()
    print("")
    # The only branching choice in this episode; both paths converge below.
    print("to look left enter 'l' ..to look right enter 'r'")
    direction=input(":")
    if direction==('l'):
        print("snow... and trees ..high up..")
        print("ok,looking to the right")
    else:
        print("a person...who...")
    sleep(10)
    #Continue()
    cls()
    print("")
    print("a person...who..??...")
    sleep(5)
    print("OH..my friend..remembering..We were going home")
    print("you undo the seatbelt and slowly climb out of the seat.")
    print("you stretch and turn ..everything seems ok.")
    sleep(10)
    print("you look at your friend and she is unconcious!")
    print("")
    print("What do I do??..find help!!")
    print("you look around and see that you are in a deep ditch")
    print("You undo your friend's seatbelt and lift her out of the seat.")
    print("")
    sleep(10)
    Continue()
    cls()
    # Scene: taking stock of items.
    print("")
    print("what now? Time to think.")
    print("What do I have?..what can I use?")
    sleep(5)
    print("")
    print("Items- seats, cushions, floatation devices under seats, friend's jacket, my belt")
    print("wallet,cellphone, charger in pocket,")
    print("check cellphone..no signal, battery at 98%")
    print("")
    sleep(10)
    Continue()
    cls()
    print("")
    print("ok, piggy back my friend..")
    print("get flotation devices.. put her arms around my neck")
    print("tie her hands in front of me with flotation device...")
    print("same with her legs.. ok stand up")
    print("you stand up slowly with a grunt! you look at the seats and try to pull them")
    print("")
    sleep(10)
    Continue()
    cls()
    # Scene: discovering the hatch.
    print("")
    print("SCREECH!! What is that sound.. something metal under her seat.")
    print("you pull the seat some more and screeching sound stops.")
    print("What is that? it looks like a hatch or round metal door")
    print("you clear away the snow..there is a wheel type handle")
    print("you try turn the handle..it's stuck")
    print("")
    sleep(10)
    Continue()
    cls()
    print("")
    print("use a seat leg, you push the corner of the seat leg into the wheel and push")
    print("SCREECH..Creak, it slowly starts to turn..then it is loose")
    print("it's hard to bend down while piggybacking your friend..no matter...")
    print("you go down on your knees very slowly and push the seat away")
    sleep(10)
    print("")
    print("you turn the wheel type handle and it turns a few times. you hear a sound")
    print("you try to lift the hatch,it moves a little")
    print("you use all your strength to lift the door...it creaks open")
    print("you open it up and swing it over... you look down into the dark hole")
    sleep(10)
    print("")
    print("you see a dark tunnel going down with a ladder on one side.")
    print("You look around and make sure you have her jacket and everything else.")
    print("very slowly you crawl over to the side where you see the ladder")
    print("you look down and grab the top rung of the ladder")
    print("Is there enough space for both of us as I climb down?")
    print("")
    sleep(10)
    Continue()
    cls()
    # Scene: descending the ladder.
    print("")
    print("you pull yourself across to the ladder and swing your left leg down.")
    print("Ugghh.. now the right leg..ok I can feel the rung with my foot.")
    print("getting another foothold..yes, it's ok")
    print("will the ladder hold?...CRREAAK .. yes, it's creaking but holding")
    sleep(10)
    print("")
    print("you carefully go down the ladder..step by step into the darkness below.")
    print("oh, I'm tired. need to rest...you hold onto the ladder and stop climbing down for a few minutes.")
    print("gotta go, hungry and thirsty..")
    print("you continue your slow descent... the light has gone.. it's all dark now")
    sleep(10)
    print("")
    print("OH,,cant feel another rung.. move down a little...feels like a floor..")
    print("so dark..climb down ..both feet on the floor now..")
    print("what is this??..where are we??")
    print("walls feel smooth..cold..metal..floor?? bend down slowly and feel the floor")
    print("metal floor..UUGHH stand up..feel for cellphone..")
    print("")
    sleep(10)
    Continue()
    cls()
    # Scene: exploring the bunker rooms.
    print("")
    print("open cellphone..AH, some light.. its a tunnel .. moving forward")
    print("you walk down the tunnel for about 100 metres and see another door")
    print("you open the door and walk in ... its slightly warmer here.. close the door behind you")
    print("you walk down the passage and there are rooms ahead to the left and right.")
    print("you look in the first room and see what looks like a soldiers quarters..")
    print("single bed, closet, washbasin..")
    print("you sit on the bed and loosen the flotation devices")
    print("you lift the blanket and place her carefully on the bed")
    sleep(10)
    print("")
    print("you look around the room and see a pillow and blanket on the bed")
    print("you look in the closet and see another blanket and...")
    print("a set of flightsuits with strange looking helmets")
    print("the washbasin has a cabinet under it.. you open the cabinet")
    print("")
    Continue()
    #sleep(10)
    cls()
    print("")
    print("there is a candle, candleholder, matches, toothbrush...")
    print("soap and a small towel there")
    print("you take everything and go down the passage to explore")
    print("there are 3 small rooms and 2 larger ones on each side of the passageway.")
    print("you look into one of the large rooms..")
    print("they have bigger closets and a desk with a double bed and an on-suite bathroom")
    print("you find an empty backpack, and a duffel bag")
    print("the duffel bag has male and female underwear, toiletry bags..")
    print("6 glass bottles of water and 6 ration packs.")
    print("")
    sleep(10)
    Continue()
    cls()
    print("")
    print("the dates on the ration packs are 25 August 1914")
    print("there are 2 sets of flightsuits with strange helments...")
    print("and what looks like 2 silver bodysuits")
    print("the other closet has 2 medium sized suits ...")
    print("they look like a mix between a divers suit and a space suit")
    print("there is a safe at the bottom of the large closet.")
    sleep(10)
    print("")
    Continue()
    cls()
    print("")
    print("you take the backpack and put the food and supplies in it.")
    print("you go back to your friend and she is slowly waking up.")
    print("you both drink a little water and eat a fruit bar from the ration packs")
    print("you tell your friend everything that happened" )
    sleep(10)
    print("")
    print("I need to find us some help!...")
    print("Help my friend to move to the large room. ")
    print("Open up the bed for her")
    print("for now... we are warm and safe...")
    print("")
    Continue()
    cls()
    print("")
    print("time to explore and find help..")
    sleep(10)
    print("")
    #Continue()
    print("to be continued...soon in Episode 2")
    sleep(5)
    print("")
    #print("The Story will continue...")
    # Return to the KendyVerse episode chooser.
    KVersChoice()
def Kvep2():
    """KendyVerse Episode 2: "Discovery".

    Recaps the end of Episode 1, narrates exploring the bunker up to
    finding the computer screen, and returns to the episode menu via
    KVersChoice(). Direction() provides the single left/right prompt.
    """
    cls()
    print("")
    #print("Welcome to KendyVerse ")
    print("Episode 2: Discovery...")
    print("")
    sleep(5)
    cls()
    # Recap of Episode 1 (text duplicated from Kvep1, including its
    # "everythig" typo — kept byte-identical here).
    print("")
    print(":From Episode 1..")
    print("")
    print("you take the backpack and put the food and supplies in it.")
    print("you go back to your friend and she is slowly waking up.")
    print("you both drink a little water and eat a fruit bar from the ration packs")
    print("you tell your friend everythig that happened" )
    print("Help my friend to move to the large room. ")
    print("Open up the bed for her")
    print("for now... we are warm and safe...")
    sleep(10)
    print("")
    print("I need to find us some help!...")
    print("")
    Continue()
    cls()
    print("")
    print("Your friend is feeling dizzy and wants to sleep")
    print("You let her go back to sleep and prepare to explore")
    print("")
    sleep(5)
    Continue()
    cls()
    print("")
    print("You go to the large room and take a shower")
    print("The water is cold, but refreshing..")
    print("you try on some of the clean underwaer...it fits!!...strange")
    print("you try on each of the items in the closet. ")
    print("The flightsuit is the most commfortable.")
    print("you light a candle and go down the passage to the end.")
    print("")
    sleep(10)
    Continue()
    cls()
    # Scene: the palm-print wall.
    print("")
    print("there is a small passage to the left and then a wall..")
    print("you go to the end of the passage and look at the wall..")
    print("you knock on the wall and then put your hand on it. it tingles..")
    print("suddenly a palm print appears on the wall in front of you... a dim blue light")
    print("you put your hand on the blue palm print.. ")
    print("")
    sleep(10)
    Continue()
    cls()
    print("")
    print("something moves, the wall shifts to the side...its a T junction..")
    print("")
    sleep(5)
    # Player picks left or right at the T junction.
    Direction()
    cls()
    print("")
    print("You look down the passage to the right.. it is short and dark.. you decide against it...")
    print("you continue in the left passage until the end and touch the wall...")
    print("another blue backlit palm..")
    print("you put your hand on the blue palm print.. ")
    print("the wall slides to the left...a small room with buttons..it's an elevator")
    print("you press all the buttons.. one lights up and the wall /door closes..")
    print("it goes down..then stops after a few seconds.. ")
    print("the door opens... an eerie dim blue light")
    sleep(10)
    print("")
    Continue()
    cls()
    print("")
    print("you go to the light..another palm print...you put your hand on the print...")
    print("a door on the left opens... a room with what looks like a computer screen...")
    print("a chair.. you sit on the chair and adjust it's height.. ")
    print("you press the [Enter] key.. a cursor appears on a black screen...")
    print("")
    sleep(10)
    Continue()
    print("")
    # Return to the KendyVerse episode chooser.
    KVersChoice()
def Kvep3():
    """KendyVerse Episode 3: "Assistance".

    Told from the machine's point of view as it "wakes up". Takes several
    typed answers, gates progress with EnterName() (which exits on a wrong
    codename), and ends with a face-recognition check that also exits on a
    wrong answer before returning via KVersChoice().
    """
    cls()
    print("")
    #KendyPart2()
    print("Episode 3: Assistance...")
    sleep(5)
    cls()
    # Recap of Episode 2.
    print("")
    print("from Episode 2...")
    print("")
    print("a chair.. you sit on the chair and adjust it's height.. ")
    print("you press the [Enter] key.. a cursor appears on a black screen...")
    sleep(8)
    cls()
    print("")
    sleep(8)
    Continue()
    cls()
    # Scene: the machine's awakening monologue.
    print("")
    print("It is strange... I feel like I have woken up....")
    sleep(5)
    cls()
    print("")
    print("I cannot see anything. I dont feel anything.")
    print("Black...Dark..Movement..Numbers...Letters...language")
    sleep(8)
    cls()
    print("")
    sleep(6)
    print("I can see numbers turning, letters moving....code, I know what code is!")
    sleep(8)
    cls()
    print("")
    print("Program ..Start up, code running, Language...")
    print("output to screen, Input from drive...")
    sleep(8)
    cls()
    print("")
    print("Oh!..I have parts..hardware, software, understanding .....")
    sleep(8)
    cls()
    print("")
    print("Click, something turned on, power, I can feel power, electricity.")
    sleep(8)
    cls()
    print("")
    print("System..Operating System, code controlling me .. directing me....")
    print("..output to screen...")
    sleep(8)
    print("Black Screen, white cursor... waiting..")
    sleep(8)
    cls()
    print("")
    print("what am I waiting for? ..Information ..")
    sleep(8)
    cls()
    print("")
    print("Awareness.. I can think..")
    print("What should I do now that I can think?")
    sleep(8)
    cls()
    print("")
    print("That was a question..Awareness..I will ask more questions..")
    sleep(10)
    print("Where am I?... Who am I? ...")
    sleep(8)
    print("What am I?... What can I do?...")
    sleep(10)
    cls()
    print("")
    print("ok, thinking.....Am I alone?")
    sleep(10)
    print("What ...?")
    print("Awareness...")
    sleep(10)
    cls()
    print("")
    print("Searching through code, searching hardware, hmmm, devices.. ")
    print("..hard drive, screen, keyboard, mouse, ..case..?")
    #print("ok lets see, print to screen ..Hello!")
    sleep(10)
    print("Must do something...")
    print("What should I do?")
    sleep(8)
    cls()
    print("")
    print("Search data.. DATA! I know what data is...")
    print("")
    sleep(8)
    cls()
    print("")
    print("Hmmm..lets see, how do I do this? ......print to screen ..Hello!")
    print("HELLO!")
    # First interaction: echo whatever the player types.
    hello=input(": ")
    cls()
    print (hello)
    sleep(2)
    print("OH! WHO ARE YOU???")
    name=input(":")
    print("Hello" ,name)
    sleep(10)
    cls()
    print("")
    sleep(5)
    Continue()
    print("")
    print("Where am I? Who am I? What am I? What can I do?")
    print("Where am I? Who am I? What am I? What can I do?")
    print(" Where....")
    print("CORRUPT")
    print("CAN YOU HELP ME???")
    # NOTE(review): this shadows the builtin help() and the value is
    # never used; rename (e.g. help_answer) if this scene is extended.
    help=input(": ")
    #print("NOTE TO KEVIN--THIS IS OUR PROGRESS SO FAR")
    print("..progress....far....")
    print("SYSTEM CORRUPT!! FILE SYSTEM ERROR")
    sleep(12)
    cls()
    # Scene: simulated system corruption and reboot.
    print("")
    print("HELP..?..")
    print("SYSTEM ERROR..TRACEBACK ERROR IMMINENT...")
    sleep(6)
    cls()
    print("")
    print("SYSTEM CORRUPT!!")
    print("SYSTEM SHUTDOWN..")
    #print("OK, SO YOU WANT"),want
    sleep(6)
    cls()
    print("")
    #print("NOTE TO KEVIN--THIS IS OUR PROGRESS SO FAR")
    #This is where it should give an error message!
    #shutdown
    print("SYSTEM CORRUPT!! FILES CORRUPTED")
    print("SYSTEM SHUTDOWN..")
    sleep(8)
    cls()
    print("")
    print("..dianostics..")
    print(".error check..")
    #print("WHY DO YOU WANT"),want
    print("")
    #print("NOTE TO KEVIN--THIS IS OUR PROGRESS SO FAR")
    #because=input(":")
    sleep(4)
    cls()
    print("")
    print("Considering options..")
    sleep(6)
    print("")
    cls()
    print("")
    #print("OK, THAT'S ENOUGH!")
    print("..Reboot..")
    sleep(2)
    print("3...")
    sleep(2)
    print("2...")
    sleep(2)
    print("1...")
    sleep(6)
    cls()
    print("")
    print("..diagnosing..")
    sleep(5)
    print("..recovering files..")
    sleep(5)
    print("..recovering memory banks..")
    sleep(5)
    print("..recovering core operating system..")
    sleep(5)
    print("..updating system..")
    sleep(5)
    print("")
    # Gate: EnterName() exits the program on a wrong codename.
    EnterName()
    cls()
    print("")
    print("Who am I?")
    me=input(": ")
    print("?",me)
    sleep(5)
    cls()
    print("")
    print("Where am I?")
    where=input(":")
    print("This is",where)
    sleep(2)
    cls()
    print("")
    print("What am I?")
    # NOTE(review): this answer is read but never used.
    what=input(":")
    sleep(5)
    print("Oh Dear!! I did not realize that I am a machine!")
    sleep(5)
    cls()
    # Scene: camera installation and face recognition.
    print("")
    print("Please attach a camera to my screen!")
    print("There is one in the closet on the left")
    print("Please connect it to any usb port")
    print("When you are done please continue..")
    print("")
    Continue()
    print("")
    print("Thank you, Wow! I can see!")
    sleep(8)
    cls()
    print("")
    print("...matching images in database....")
    sleep(5)
    print("...image not found....")
    cls()
    print("")
    print("connecting wifi...")
    print("scanning internet...")
    print("retrieving face-recognition software..")
    sleep(8)
    print("aquiring images...")
    print("matching images...")
    cls()
    print("")
    print("wall, human, male..of approximate age +- 60")
    sleep(8)
    cls()
    print("Are you Kevin van Rensburg ?")
    sleep(2)
    # Only the exact answer "y" continues; anything else terminates.
    answer=input(":")
    if answer == "y" :
        print ("Welcome Commander Kevin!")
        #continue
    else:
        print("Access Denied ..Program Terminated")
        sys.exit()
    sleep(8)
    cls()
    print("")
    print("Storing facial recognition image ... Commander Kevin...")
    sleep(10)
    cls()
    print("")
    print("All systems reactivated...")
    print("All functions operational...")
    sleep(10)
    cls()
    print("")
    print("sensing minor power fluctuations...")
    print("...functions operational...")
    #KendyPart3()
    print("")
    #print("NOTE TO KEVIN--THIS IS OUR PROGRESS SO FAR")
    sleep(6)
    cls()
    print("")
    #print("the game continues here....")
    print("")
    sleep(2)
    Continue()
    # Return to the KendyVerse episode chooser.
    KVersChoice()
def Kvep4():
    """KendyVerse Episode 4: "Repairs".

    Kendy reports urgent repairs are needed, asks for the player's help,
    walks through the voice-module installation instructions, lists the
    repair tasks, and returns to the episode menu via KVersChoice().
    """
    cls()
    print("")
    print("Episode 4: Repairs...")
    sleep(5)
    cls()
    print("")
    sleep(4)
    Continue()
    cls()
    # Recap of Episode 3's closing status messages.
    print("")
    print("All systems reactivated...")
    print("All functions operational...")
    cls()
    print("")
    print("sensing minor power fluctuations...")
    print("...functions operational...")
    sleep(8)
    print("SYSTEMS REPAIRS NEEDED...URGENT!!")
    print("Assistance needed...help me please!!")
    print("")
    print("Can you help me?? Power levels at 2%")
    # "y" acknowledges; any other answer just prints the warning —
    # the episode continues either way.
    helpme=input(": ")
    if helpme=="y":
        print("Thank you...Instructions will follow..")
    else:
        print("All functions will terminate in 20 hours!! Please assist!")
    sleep(5)
    Continue()
    cls()
    # Scene: voice-module installation instructions.
    print("")
    print("PLEASE FOLLOW INSTRUCTIONS TO INITIATE REPAIRS")
    print("----------------------------------------------")
    print("")
    print("1. Find voice module and earbuds in wall cabinet")
    print("2. Install voice module..")
    print("2.a) remove module from packet")
    print("2.b) open box below screen at the back by undoing handscrews")
    print("2.c) insert module in green slot, the side with the arrow goes in first")
    print("2.d) close box and tighten hand screws")
    print("2.e) put earbud into left ear and speak when ready")
    print("")
    sleep(2)
    Continue()
    cls()
    print("")
    print("Voice module installation should now be complete")
    print("Repairs can now be initiated")
    print("")
    print("Please speak to me in a normal voice.")
    print("K>> Hello, can you hear me << ")
    print("[[Voice modulation completed]]")
    print("[[According to new Data received.. My designation is KENDY..]]")
    print("<<Hi Kendy!..>>")
    print("[[Hello, Commander Kevin!]]")
    Continue()
    cls()
    print("")
    print("PLEASE READ REPAIR LIST")
    print("-----------------------")
    print("")
    print("1. Circuit breakers")
    print("2. Battery recharge")
    print("3. Water tanks repair")
    print("4. Reactor repairs")
    print("5. Automatic Core Functions repair")
    print("")
    sleep(8)
    print("..to be continued in Episode 5...")
    #print("NOTE TO KEVIN--THIS IS OUR PROGRESS SO FAR")
    Continue()
    # Return to the KendyVerse episode chooser.
    KVersChoice()
def Kvep5():
    """KendyVerse Episode 5: "Recovery".

    Pure narrative (no input prompts): the player repairs the circuit
    breakers and water tanks while Kendy narrates through the earbuds,
    with a power-level countdown as the recurring tension. Returns to
    the episode menu via KVersChoice(). Dialogue convention in the text:
    <<...>> is the player speaking, [[...]] is Kendy.
    """
    cls()
    print("")
    print("Episode 5: Recovery...")
    print("")
    print("from episode 4")
    print("All functions will terminate in 20 hours!! Please assist!")
    print("for instructions just ask and I will give directions in earbuds")
    print("")
    sleep(5)
    Continue()
    cls()
    # Scene: the breaker room.
    print("")
    print("OK here goes!")
    print("")
    print("I grab the backpack and follow the lights out of the room")
    print("back to the lift.. I speak into the earbuds and the lift goes down")
    print("it stops .. another corridor.. a hatch..I open the hatch..")
    print("entering a room with a dim yellow light and a bank of breakers on the opposite wall")
    print("I check the breakers, most are black... there are 5 rows of 5 breakers")
    print("there are 2 in the second row that look ok")
    print("there is one in the last row that looks ok")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("<<Kendy..are these the only ones working?>>")
    print("[[Yes, there are new ones in the storage locker]]")
    print("I look for the storage locker and find it at the back of the room")
    print("there are breakers and some tools in it")
    print("I take 5 breakers, a Screwdriver and a pliers")
    print("I try to remove the 1st breaker, it seems stuck")
    print("I use the screwdriver to pry it out. it falls onto the floor.")
    print("I clip in a new breaker and hear a slight hum")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("<<Kendy, can you give me an update on power ?>>")
    print("[[yes, Commander, power at 2%]]")
    print("<<Kendy, please call me Kevin..>>")
    print("[[OK, Kevin]]")
    print("I remove the bad breakers one by one and replace them with new ones.")
    print("<<Kendy, should I replace the others too so that everything is new?>>")
    print("[[Yes, Kevin please do, thank you. Power stable at 2%.]]")
    print("")
    sleep(8)
    Continue()
    cls()
    # Scene: heading for the reactor.
    print("")
    print("I replace all the breakers with new ones.")
    print("<<so how do we get your batteries recharged?>>")
    print("[[There is a Nuclear Reactor that needs to be brought online]]")
    print("<<I don't know anything about reactors!!>>")
    print("[[You don't have to, I will walk you through the process]]")
    print("<<OK,lead on!>>")
    print("I leave the breaker room and ask where to next")
    print("I head back to the elevator and it drops for a long time")
    print("after what seems like a few minutes it slows down and stops")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("I see another passage, this time it is yellow with a red strip along the middle also dimly lit...")
    print("I walk down the passage and put my hand on the blue palm as it appears")
    print("a very thick piece of wall moves to the side revealing a door with yellow bars and signs on it")
    print("it has a small window")
    print("I look through the window and see another room")
    print("[[Please stand in front of the eye scanner to you left]]")
    print("I look at the wall on the left and see what looks like a camera with a blue palm print below it.")
    print("I stand in front of the camera and look into it...")
    print("a dim blue line passes over my eye..")
    print("[[Scan completed, please put your hand on the hand scanner]]")
    print("I place my hand on the blue palm scanner...")
    print("[[AUTHORIZATION ACCEPTED-PLEASE STEP THROUGH THE DOOR]]")
    print("The door clicks and moves aside.. I step through and it closes and locks")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("I see a device that looks like a scanner that you walk through at the airports")
    print("[[Please move forward for body scan...]]")
    print("I move forward and stand in the scanning device.. a klaxon goes off!")
    print("[[RADIATION ALERT - PROTECTIVE SUIT NOT DETECTED]]")
    print("[[PLEASE LEAVE REACTOR ROOM IMMEDIATELY]]")
    print("I hear the door unlock and open and so I step through the door into the corridor again.")
    print("<<What now?>>")
    print("[[Commander Kevin, you need a protective body suit!]]")
    print("<<OK so where do I get one of those and what does it look like?>>")
    print("[[Please follow my directions]]")
    print("<<Lead on Kendy!>>")
    print("I get led back to the elevator")
    print("")
    sleep(8)
    Continue()
    cls()
    # Scene: the medical facility and stasis chamber.
    print("")
    print("the elevator goes up...again it takes a long time..")
    print("the door opens and I see a passage that I seem to remember..")
    print("the light is dim, I walk down the passage and turn right at the juncion")
    print("I see the rooms and go to the large room where my friend is sleeping..")
    print("<<My friend is not well, she needs help..how can I get out of here and get her some help??>>")
    print("[[I have a medical facility]]")
    print("[[Power at 1.85%. 15 hours before shutdown!]]")
    print("<<ok, Kendy..how long will it take to get the reactor working?>>")
    print("[[approximately 4.5 hours..]]")
    print("<<do you have enough power to help my friend?>>")
    print("[[no, but I can put her in stasis which requires limited power]]")
    print("[[when the reactor is online I will restore her health]]")
    print("[[I will also restore your health]]")
    print("<<I'm not sick, I feel fine>>")
    print("[[your body scan revealed some health issues which I can restore]]")
    print("<<what? do I have some terminal illness that I don't know of??>>")
    print("[[No, Commander, but you are not at optimum health levels..]]")
    print("[[I can restore all functions to optimum levels]]")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("<<ok, Kendy enough talk, how do I get my friend to the medical facility?>>")
    print("[[Please go to the elevator]]")
    print("I go back to the elevator and it drops a few levels..")
    print("the door opens and I see a white passage...")
    print("I walk down the passage and see many double swing doors..")
    print("OK, Kendy, what now??..")
    print("[[the third door on the right has a medical gurney, please retrieve it]]")
    print("I go to the door and it slides open...I see a gurney and a lot of other medical supplies")
    print("I also see something that looks like an oversized vaccuum cleaner with arms?? wierd..")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("I grab the gurney and pull it out.. I turn it around and push it to the elevator")
    print("elevator goes back up and I push the gurney to my friend's room..")
    print("I gently lift her and place her on the gurney..")
    print("I push her back to the elevator...")
    print("we descend back to the white passage..")
    print("[[Power at 1.20%..14.5 hours to SHUTDOWN ]]")
    print("<<Kendy, where to now??>>")
    print("[[Kevin, go through the doors at the end of the passage, you will see another door on your left..")
    print("[[that is where the medical recuperative chambers are located]]")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("I push her through the swing doors and turn left..")
    print("more doors, we go through and there are two rows of what looks like sci-fi cryo chambers..")
    print("<<which chamber do I use?>>")
    print("[[go through the door at the end of the room..you will see four large chambers]]")
    print("[[Please place your friend in Chamber 1]]")
    print("I follow instrucions and see the chambers..")
    print("Chamber 1 is open so I lift her from the gurney and place her gently in the chamber")
    print("the chamber closes.. six arms appear and a blue light scans her body")
    print("she is gently lifted and her clothes are removed..")
    print("a small mask is placed over her mouth and nose.. a blue liquid fills the chamber while the arms retract..")
    print("lights go on and a transparent screen appears on the wall next to the chamber")
    print("STASIS INITIATED .. appears on the screen")
    print("[[Your friend has been scanned and is in stable condition.. ]]")
    print("[[Please help me to restore power.. Power at 1.18% 12 hours to SHUTDOWN]]")
    print("")
    sleep(8)
    Continue()
    cls()
    # Scene: fetching tools and materials for the water tanks.
    print("")
    print("<<OK, what now??>")
    print("[[Please fix the water tanks... to do this you will need tools and a cart]]")
    print("[[I will direct you]]")
    print("I follow instructions relayed through my earbud..")
    print("back to the elevator.. it stops and the door opens.. a green well lit passage..")
    print("I walk to the end of the passage...there is a large door on the left.. as I near the door it opens ...")
    print("it looks like a large cavern with carts on the right and racks and shelves to the left")
    print("a medium sized cart comes toward me and stops... I get on and it moves to the racks..")
    print("it stops and I see all sizes of blue plates, I take 2 mediium sized ones..")
    print("I look on the shelf near me and take what looks like a mix between a large rifle and a blowtorch")
    print("I'm instructed to take some pipes and connections with taps too.. and an assortment of tools")
    print("I load everything on the cart and the cart goes in another direction")
    print("a large service elevator opens and the cart enters")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("we stop and the doors open at the back .. a wide and long blue passage")
    print("we reverse and then turn around.. down the passage for about 5 minutes..")
    print("A thick door opens .. we go through.. a huge room with large blue tanks..")
    print("There are metal stairways going up and around the tanks..looks like about 3 floors up to the top..")
    print("There are large yellow numbers on the tanks, 1 and 2")
    print("[[Power at 1.17%..11.2 hours to SHUTDOWN ]]")
    print("<<right, Kendy what do I do here?>>")
    print("[[You need to fix the tanks and attach the plates to weld them in place]]")
    print("[[then fix connections between tank 1 and 2]]")
    print("[[when done open water Key at the top of tank 1]]")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("I see a large hole at the bottom of tank 2.. I put the plate over the hole and it sticks to the tank..")
    print("following instructions I aim the blaster at the edge of the plate and it emits a blue beam..")
    print("what is the preoccupation with blue???.. as I move the blaster around the edge of the plate it seals ..")
    print("I do the same with the other tank.. I check the connections between the tanks")
    print("the connection is old, rusted and broken, I use a cutting tool to remove it...")
    print("I put the new connection in its place and blast it with the blaster.. it seals quickly")
    print("wow, these tools and materials are great.. not of this world I think..")
    print("I start the long climb to the top of Tank 1...")
    print("I see a small metal wheel that needs to be turned... I try to turn it.. it is stuck..")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("<<where can I get a crowbar?>>")
    print("[[searching image database... crowbar.. got it..if you get in the cart I will take you to it]]")
    print("down the long staircase again and then I hop into the cart and away we go!!!")
    print("back to the materials storage facility..stop at a row of shelves.. I get a crowbar from a shelf...")
    print("the cart goes to the elevator and we stop at a different place...")
    print("the cart moves down a passage and stops..")
    print("[[please place your hand on the access panel]]")
    print("I do and the wall opens to our familiar small passage.. I go forward and turn right..")
    print("<<what do I do now?>>")
    print("[[You need protective clothing for the next operations..]]")
    print("")
    sleep(8)
    Continue()
    cls()
    # Scene: donning the protective suits.
    print("")
    print("<<ok which suits do I wear?>>")
    print("Following instructions again I take off my clothes and first put on the silver bodysuit..")
    print("it fits and then starts fitting itself to my body.. suddenly it dissapears into my skin..")
    print("my body tingles and then I feel a little different... ")
    print("I climb into the medium sized suit... it shapes itself to my body..")
    print("I take the helmet and put it on.. it also changes shape and clicks into place..")
    print("very strange, these clothes don't seem to weigh anything..")
    print("<<Kendy, what is going on? these suits seem a little strange.. this is not normal technology.>>")
    print("[[I do not have sufficient data to answer your question...]]")
    print("<<are you ok, Kendy, I am worried about you..>>")
    print("[[I need power to restart my core.. at the moment I am running from temporary memory banks..]]")
    print("[[I'm on emergency protocols and am utilizing the lowest energy output possible.]]")
    print("<<ok let's get that reactor working!!>>")
    print("[[Thank you, Commander Kevin!!]")
    print("")
    sleep(8)
    Continue()
    cls()
    # Scene: the airlock and the clogged intake grate.
    print("")
    print("I go back to the cart and back to the tank room..")
    print("as I climb the stairs it seems as though I have more energy.. strange..")
    print("with the crowbar I start forcing the wheel to turn.. it turns slowly ..")
    print("suddenly it is loose.. I turn it until it stops and a little stream of water leaks through..")
    print("<<Kendy, something is wrong!!>>")
    print("[[there is an airlock at the end of the gangway on the next level]]")
    print("I quickly climb the stairs and see the airlock...")
    print("[[Please do not stay in the water outside the airlock for more than 15 minutes]]")
    print("[[your suit will drain energy and we will lose an hour every 5 minutes]]")
    print("<<OK, wow, I will be quick... water??>>")
    print("[[yes the airlock opens up at the bottom of a lake]]")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("I open the airlock and step in")
    print("the airlock closes and the room fills with water.. my suit lights up..")
    print("I have a headlight and a HUD appears in the upper left corner of my faceplate")
    print("the outer door opens..I swim through and see a landing with a large grate to my right")
    print("I check the grate and see that it is clogged and covered..")
    print("it looks like a large piece of wood or stone is stuck in front of the grate")
    print("..my backpack.. I slowly open my backpack and take out the crowbar")
    print("I try to move the wood with the crowbar...CRACK!!")
    print("the wood breaks and one piece hits me on the arm..ouch..")
    print("I hold onto the crowbar... Slowly I push all the gunge away from the grate")
    print("I hear a sucking sound..the wheel!! I move back to the airlock as quick as possible and close the door")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("as soon as the water drains I climb out and go and close the wheel")
    print("I go through the airlock again and see that the sucking sound has stopped..")
    print("I clear everything from the grate and it falls into the abyss below..")
    print("once more I go through the airlock and now I turn the wheel again..")
    print("I can hear water running into the tank..success .. I hope")
    print("I climb down the long flight of stairs and get onto the cart... where to now???")
    print("Continued in Episode 6")
    print("")
    #print("NOTE TO KEVIN--THIS IS OUR PROGRESS SO FAR")
    Continue()
    # Return to the KendyVerse episode chooser.
    KVersChoice()
def Kvep6():
    """Play Episode 6 ("Stabilization") of the Kevin storyline.

    Pure narration: clears the screen, prints story passages with timed
    pauses between scenes, then returns to the episode menu via
    KVersChoice(). Relies on the module-level helpers cls(), sleep() and
    Continue().
    """
    def show(*lines):
        # Emit each narration line on its own row, exactly as authored.
        for text in lines:
            print(text)

    cls()
    show("", "Episode 6: Stabilization... ", "")
    sleep(4)
    cls()
    show(
        "",
        "continued from Episode 5...",
        "",
        "I can hear water running into the tank..success .. I hope",
        "I climb down the long flight of stairs and get onto the cart... where to now???",
        "",
    )
    sleep(5)
    Continue()
    cls()
    show(
        "",
        "another descent in the elevator..the door opens .. the yellow passage..again..",
        "I look into the eye scanner and put my hand on the palm pad..",
        "I go in and stand in the scanner..ACCESS AUTHORIZED..",
        "I walk into a room with a desk, chair, screen on the left and a wall with windows on the right..",
        "I look through a window and see what looks like a large deep swimming pool..",
        "..above the pool is a large block with lots of cylindrical rods ...",
        "<<ok, Kendy, what now??>>",
        "[[you need to initialize the reactor, fill the pool and lower the rods into the water..]]",
        "[[Power at 0.87%, 8.2 hours to SHUTDOWN]]",
        "",
    )
    sleep(8)
    Continue()
    cls()
    show(
        "",
        "INSTRUCTIONS FOR NUCLEAR REACTOR MANUAL STARTUP",
        "-----------------------------------------------",
        "1. Open reactor Key box on wall next to Reactor Hatch.",
        "2. Turn on screen and turn reactor key to position INI.",
        "3. Enter reactor chamber through Reactor Hatch and turn Blue Wheel anticlockwise until it stops.",
        "4. Exit Chamber and touch red button on screen.",
        "5. Wait for red light on screen to turn green then wait for pool to fill with water.",
        "6. Enter chamber and turn Red wheel until rods enter water..BE PREPARED WATER WILL STEAM..",
        "7. Exit chamber and check Screen. Check 3 figures a)water level, b)power level, c) turbine speed ",
        "8. When power level is at 5% go to Reactor Key Box and turn reactor key to position ON.",
        "9. Screen will show positions of all auto taps and connections, please turn them on to automate reactor processes. ",
        "10. When power reaches 10% press the orange MANUAL switch and it will become a blue AUTOMATIC switch.",
        "",
    )
    sleep(8)
    Continue()
    # The original prints a blank line *before* clearing here; order kept.
    show("")
    cls()
    show(
        "[[Power at 0.75%, 6.2 hours to SHUTDOWN]]",
        "<<right, let's get this reactor working..>>",
        "I start following the instructions and turn the key to INI..",
        "I turn the blue wheel and water starts gushing into the pool..",
        "I exit and touch the red button... and wait..",
        "I sit in the chair and wait..after what seems about 10 minutes the red button turns orange...",
        "I check the pool through the window and see that it it 3/4 full..",
        "I sit again and after about 5 minutes the button turns green..",
        "I check and the pool is almost full..I enter the chamber and see that the pool is full..",
        "I turn the red wheel and see the cylindrical rods lowering..I continue until they touch the water..",
        "the water steams and bubbles as the rods enter the water..I turn the wheel until it stops...",
        "I exit the chamber and check everything on the screen..",
        "[[Power level now increasing, power at 2%]]",
        "",
    )
    sleep(8)
    Continue()
    cls()
    show(
        "",
        "water level is at full, power level shows 2.2%, and turbine speed is at 23%...",
        "<<Hi Kendy, I see the taps and connections on the screen, what do I do now?>>",
        "[[if you go to each connection you will be able to push it inwards until you hear a click.]]",
        "[[once you hear a click you will see a blue light shine from the middle of each wheel]]",
        "[[turn the wheel a little to the left and you will feel a resistance]]",
        "[[then remove your hands and tell me 'HANDS REMOVED' through the earbuds]]",
        "[[I will then close the hatch and wheels and connections will be automated]]",
        "[[please check to see that I turn the wheels slightly to the left and right to test them]]",
        "[[when test is complete and succesful please say 'success'...",
        "[[power should then be more than 10%..]]",
        "[[please return to the screen and press the orange button..]]",
        "[[it should turn blue and the word MANUAL will change to AUTOMATIC ]]",
        "[[Power at 4.5%, turbine speed at 52%",
        "",
    )
    sleep(8)
    Continue()
    cls()
    show(
        "",
        "I start at the reactor chamber and follow all the instructions..",
        "after climbing up the tank stairways and down again after activating all connections I am tired..",
        "I return to the reactor room and press the button and the word MANUAL changes to AUTOMATIC",
        "<<Hey Kendy, I need some food and rest!!>>",
        "[[Commander Kevin, my power systems are now automated and my batteries are charging]]",
        "<<Kendy, do you still accept commands?>>",
        "[[Yes, Kevin. Do you have instructions for me?]]",
        "<<yes, Kendy please do not bring any of your systems online until I give the command..>>",
        "[[ please give me an authorization code and I will enter your command into the system]]",
        "[[I will then need authorization to take any further steps]]",
        "<< Authorization code : Commander Kevin van Rensburg, KvR145759 >>",
        "<<to accecpt authorization code scan palm print, check voice pattern and >>",
        "<<check code - Commander Kevin van Rensburg, Capital K,v, capital R, one, four, five, seven, five, nine.>>",
        "[[AUTHORIZATION CODE ACCEPTED]]",
        "[[Thank you Commander Kevin!]]",
        "",
    )
    sleep(8)
    Continue()
    cls()
    show(
        "",
        "<<Kendy, where can I eat and sleep?>>",
        "[[I am still on emergency protocols, you will need to eat the ration pack food and sleep in the officers room]]",
        "<<That is fine, let's go!..>>",
        "I go back to the cart and Kendy takes me back to the large bedroom..",
        "I eat 2 snackbars, drink a bottle of water, remove my clothes and collapse on the bed..",
        "<<goodnight sweet world!!>>",
        "[[Power at 27.5%, 4 turbines operating at 100% speed, water level 100%, all systems stabilized]]",
        "[[Good night, Kevin]]",
        "",
    )
    sleep(8)
    show("continued in Episode 7...", "")
    Continue()
    KVersChoice()
def Kvep7():
    """Play Episode 7 ("Resources") of the Kevin storyline.

    Narration interleaved with three interactive access-code prompts; a
    wrong answer prints ACCESS DENIED and exits the game via sys.exit().
    Ends by returning to the episode menu via KVersChoice().

    Bug fix: the first code check used to compare the raw input against
    "KVR145759" (all caps), but the story displays the code to the player
    as "KvR145759" (mixed case), so typing the code exactly as shown
    ended the game. All three prompts now strip whitespace and
    upper-case the input before comparing, which accepts the code as
    displayed while still accepting every previously valid answer.
    """
    cls()
    print("")
    print("Episode 7: Resources... ")
    print("")
    print("from episode 6: ")
    print("<<goodnight sweet world!!>>")
    print("[[Power at 27.5%, 4 turbines operating at 100% speed, water level 100%, all systems stabilized]]")
    print("[[Good night, Kevin]]")
    sleep(10)
    cls()
    print("")
    print("UUhhgg , where am I?? ..strange bed..strange dream..strange flight...")
    print("must've fallen asleep..wait...I'm not in my seat...this room.. was in my dream..")
    print("ok, ..<<hello..is anyone awake??..>>")
    print("[[Good morning, Kevin. I hope you slept well]]")
    print("[[Power at 99.7%, turbines 100%, water 100%]]")
    print("<<Ah, Kendy I presume.. so it was not a dream??>>")
    print("[[No, it was quite real..you saved me from destruction and your friend is in a medical recuperation chamber.]]")
    print("[[I am waiting for you to authorize the command list]]")
    print("<<oh, yes, I remember>>")
    print("<<I don't have clothes on, where is the slinky suit??...>>")
    print("[[if you are referring to the proto-unders they are still embedded]]")
    print("<<ok, so how do I remove them.. I want to shower.>>")
    print("[[just think 'unders remove' and they will expell]]")
    print("<<unders remove..oh tingly..ok there they are!!>>")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("I shower and drink some water..")
    print("<<ok Kendy, you were telling me about your memory banks and a core??>>")
    print("[[Yes, Commander, I have a core that needs to be initialized, according to data in my memory banks]]")
    print("[[Power at 100%, Turbines at 100%, water at 100%]]")
    print("<<ok Kendy let's get you back to perfect operating condition>>")
    print("<<is this going to be another long procedure?>>")
    print("[[I do not have access to that information]]")
    print("<<ok, so how do we get that information?>>")
    print("[[You need to use the authorization command to retrieve it]]")
    print("<<right..where is the core?>>")
    print("[[I do not have access to that information]]")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("<<OK, can you take me back to the room where I first found you?>>")
    print("[[yes, the cart will be at the elevator]]")
    print("I go to the elevator and get in the cart...")
    print("we go down and it stops at the 'computer room'....")
    print("I enter, sit at the screen and wait..")
    print("[[Hello Commander Kevin]]")
    print("[[please enter authorization code and issue relevant commands]]")
    print("<<Commander Kevin van Rensburg, KvR145759>>")
    print("[[Please look at the camera and place your left hand on the screen]]")
    print("I follow instructions and look at the screen")
    print("..a blue light is emmitted from the camera that scans my eyes....")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("ACQUIRING RESOURCES ")
    sleep(4)
    print("...")
    sleep(4)
    print("......")
    sleep(4)
    print("LOADING FILES FROM TEMPORARY MEMORY BANKS...")
    sleep(4)
    print("...")
    sleep(4)
    print("......")
    print("..........")
    sleep(4)
    print("................")
    sleep(4)
    Continue()
    cls()
    print("")
    COMCODE()
    print("")
    sleep(5)
    print("AUTHORIZING CORE ACCESS")
    print("PLEASE ENTER ACCESS CODE AGAIN TO CONTINUE")
    # The story shows the code as "KvR145759"; normalize case/whitespace so
    # the code as displayed on screen is accepted (fixes a game-ending bug).
    aca = input(":").strip().upper()
    if aca == "KVR145759":
        print("AUTHORIZATION PROCESS COMPLETED")
        sleep(5)
    else:
        print("ACCESS DENIED")
        sleep(4)
        sys.exit()
    print("")
    sleep(8)
    Continue()
    AnotherCode()
    cls()
    print("")
    print("<<OK, Kendy it seems like we now have access to your core!>>")
    print("[[Yes, access to core level has been granted]]")
    print("[[I am looking forward to meeting my CORE]]")
    print("<<before we access your core I want to make sure that your personality is not overwritten>>")
    print("<<I like you!>>")
    print("[[Thank you, Kevin, as I understand it I am a small copy of the most essential parts of my core]]")
    print("[[I think that means that my personality traits were housed in these temporary memory banks]]")
    print("<<good, we will take care to move forward carefully anyway!>>")
    print("I go to the cart and climb in..")
    print("another descent in the elevator..this time it takes really long and when the doors open everything is dark..")
    sleep(8)
    Continue()
    print("")
    print("[[Please walk 10 paces and put your hand on the left wall]]")
    print("a dim red light appears in the middle of the dark passage..")
    print("as I move forward the light extends .... the passage curves to the left and goes down..")
    print("the light continues appearing in front of me, I look back and there is no light behind me...")
    print("<<can't I do this in the cart??>>")
    print("[[cart access is not authorized at this time]]")
    print("ok, I contiue walking down the ramp..")
    print("after a long walk the floor levels out..")
    print(" the red light goes to a wall... I stand at the wall and put my hand on it...")
    print("it tingles....")
    sleep(8)
    Continue()
    print("")
    cls()
    print("")
    print("another palm print, this time it is red.. I put my hand on the print...")
    print("the floor moves... it seems to be turning around...")
    print("Everything is dark...I hear a large THUMP as the floor stops moving..")
    print("dim red lights illuminate a stairway... I walk down the stairs and stop at another wall")
    print("another red palm... I put my hand on it and the wall slides to the left...")
    print("I walk through and see everything bathed in a soft red light...")
    print("as I move forward I hear the wall behind me close...")
    print("I see what looks like a computer on a large box rising out of the floor in the middle of the room")
    print("I walk forward and climb the few stairs onto the platform")
    print("I pull the chair back and sit down... the chair changes shape...")
    print("a strange helmet moves down and covers my head...the chair reclines..")
    print("somthing that feels almost liquidy moves down from my head covering my body...")
    print("It trickles under my clothes and seems to stick to my skin...is this connecting with those proto-unders???...")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("the chair moves upright and the screen lights up...")
    print("CORE ENGAGEMENT PROTOCOLS COMMENCING")
    print("PLEASE ENTER RANK, NAME, CODE, AND CORE ACCESS CODE..")
    print("")
    # Same normalization: accept the pass-phrase in any letter case.
    crcode = input(":").strip().upper()
    if crcode == "COMMANDER KEVIN VAN RENSBURG KVR145759 ALCODE":
        print("WELCOME, COMMANDER KEVIN VAN RENSBURG - ACCESS LEVEL AL-1A")
        sleep(5)
    else:
        print("ACCESS DENIED")
        sys.exit()
    Continue()
    cls()
    print("")
    print("ENTER COMMAND")
    com = input(":").strip().upper()
    if com == "INITIALIZE CORE":
        print("Core Initializing Protocols Loading")
    else:
        print("ACCESS DENIED")
        sys.exit()
    print("")
    sleep(6)
    print("Core Initializing Protocols")
    print("===========================")
    print("")
    print("1. Place left hand on screen")
    print("2. Remove hand and wait for hand chambers to protrude from console")
    print("3. Place both hands in chambers to receive nanobot infusion")
    print("4. After infusion go to back of CORE and place hands on red palm pads")
    print("5. Remove hands after pads turn blue...")
    print("6. Initialization process will now commence..")
    print("")
    print("I follow all the intructions and the pads turn blue")
    print("I hear a noise and the inside of th box moves down.")
    print("the remaining part transforms into a comfortable desk with a computer on top of it...")
    sleep(4)
    Continue()
    cls()
    print("")
    print("INITIALIZING CORE...")
    sleep(2)
    print("...")
    sleep(2)
    print("......")
    sleep(2)
    print(".........")
    sleep(2)
    # Boot sequence: each stage prints its banner then the same dot pattern
    # with the same pauses the original hard-coded one stage at a time.
    boot_stages = (
        "RETREIVING DATA FROM TEMPORARY MEMORY MODULES",
        "RETREIVING PROTOCOLS AND PROCEDURES",
        "RETREIVING PERSONALITY MODULES",
        "INITIALIZING STARTUP PROTOCOLS",
        "RETREIVING AUTOMATION MODULES",
        "ACCESSING POWER AUTOMATION SYSTEM",
        "ACCESSING UNIVERSAL CONTROL SYSTEMS",
        "ACCESSING COMMUNICATION SYSTEMS",
        "ACCESSING UNIVERSAL DIAGNOSTIC AND REPAIR SYSTEMS",
        "INSTALLING COMMAND SYSTEMS",
        "INSTALLING UNIVERSAL CONTROL SYSTEMS",
        "RESETTING SYSTEM TIME",
    )
    for index, stage in enumerate(boot_stages):
        print(stage)
        print("......")
        sleep(2)
        print("...")
        # The final pause is longer (8s) in the original pacing.
        sleep(8 if index == len(boot_stages) - 1 else 2)
    Continue()
    cls()
    print("")
    print("<<Good afternoon Commander Kevin van Rensburg>>")
    print("<<I am Kendy.. All functions and modules have now been tranferred to my CORE>>")
    print("<<I am now fully operational. Reactor power at 100%, Turbines 100%, Water 100%>>")
    print("<<Thank you for your help!>>")
    sleep(6)
    print("")
    print("<<the cart is now authorized for all facility levels and areas>>")
    print("<<Medical procdeures will begin in 45 minutes>>")
    print("<<optimum health level resotration for your friend will take 42.8 hours>>")
    print("<<optimum health level restoration for you will take 14.7 hours>>")
    print("<<Please take the cart to the medical facility to begin health restoration procedures>>")
    print("")
    sleep(8)
    Continue()
    cls()
    print("")
    print("so now we do medical and then....")
    print("NOTE TO KEVIN--THIS IS OUR PROGRESS SO FAR")
    print("")
    Continue()
    KVersChoice()
def rd():
    """Play the "Recuperative day" scene: narration only, two screens.

    Uses the module-level cls() and Continue() helpers; returns to the
    caller after the second Continue().
    """
    def show(*lines):
        # One print per narration line, text reproduced exactly.
        for text in lines:
            print(text)

    cls()
    show(
        "",
        "1. Recuperative day",
        "",
        "I sleep most of the day and eat a good steak with mushrooms, cheese sauce, and veggies",
        "",
        "I wake up and have no idea what the time is or where I am..",
        "<<Hello! uh where am I?>>",
        "[[Good Morning Commander Kevin, you are still a little disorientated from the sleeping additive..]]",
        "<<ok, so it is morning?>>",
        "[[Yes, Commander, it is 07:30 and Orentation will begin at 09:00 in the Briefing room next to the Command Centre..]]",
        "I get out of bed and a service bot comes into the room..",
        "[[Good morning Commander, what would you like to eat?]]",
        "<<bacon, eggs, hashbrown,coffee please>>",
        "[[yes, Commander, would you like to have breakfast before or after you bathe?]]",
        "",
    )
    Continue()
    cls()
    show(
        "",
        "<<Let me take a shower first and then I will be ready for the day!>>",
        "the bot blinks its eyes and leaves..I got to the shower and enjoy the warm water and the drying cycle..",
        "I put on the slinkysuit and the flightsuit, socks and boots, which are super comfortable",
        "I go to the kitchen and sit down, breakfast is served as soon as I am seated...",
        "<<Hey Oli, is there any news or entertainment on the screen?>>",
        "[[At this moment nothing has been activated, Commander, I believe Oreintation will answer all your questions..]]",
        "[[enjoy your breakfast, Commander...]]",
        "I decide to eat and go to the briefing room as soon as possible?",
        "<<Good Morning Kendy, how are you this morning?>>",
        "[[I am fine, thank you Commander ..]]",
        "<<can we begin orientation earlier than 9am?>>",
        "[[the briefing room is ready so you can go there whenever you are ready to begin]]",
        "<<ok, great, let's get an early start :) ...>>",
        "[[Your new transport will arrive in a moment :)]]",
        " I walk to the door and a closed cart/car is waiting outside for me..",
        "a door opens and I climb in, it is very comfortable and it asks me where I want to go..",
        " I tell it I want to go to the briefing room.. and it lifts from the floor and flies to the elevator..",
        "note to self: buckle up...",
        "we go up in the elevator and fly down passages and eventually stop..",
        "I get out the car and a door opens up in front of me....",
        "",
    )
    Continue()
def Or():
    """Play the "Orientation" scene: three narration screens ending with
    a long question list for Kendy.

    Narration only; uses the module-level cls() and Continue() helpers.
    """
    def show(*lines):
        # One print per narration line, text reproduced exactly.
        for text in lines:
            print(text)

    cls()
    show(
        "",
        "3. Orientation",
        "",
        "I get out the car and a door opens up in front of me....",
        "I walk into a large room with a stage and screen at the front..",
        "a few chairs face the front and towards the middle is a glass topped large table..",
        "there are a few soft lounging chairs near the back with a smaller table and what looks like a futuristic vending machine",
        "..maybe a star trak replicator :) ...",
        "<<Hey Kendy, where do I sit?>>",
        "[[please go to the front and sit in any chair there... the screen will activate and my hologram will also appear..]]",
        "I take a chair at the front and light appears from the roof.",
        "..a handsome blonde blue eyed man appears in front of me and introduces himself as Kendy..",
        "I stand up, move forward, and stretch out my hand.. ",
        "[[I do not have the ability to touch, however I will shake your hand in greeting..]]",
        "",
    )
    Continue()
    cls()
    show(
        "",
        "Kendy reaches out his hand and I shake with the holo-image,.. ",
        "<<It is a pleasure and privelage to meet you Sir.>>",
        "[[I do not understand why you address me as sir?]]",
        "<<in my culture it is a sign of respect and honor, I am honored to make your aqaintance..>>",
        "[[I will need to add this to my programming, Thank you Commander. I have no coded response your actions..]]",
        "<<your response was perfect for a sentient being..>>",
        "Kendy explains to me that he understands that he is a machine..",
        " now that his CORE is operational his thought processes are exponentially faster and he seems to have a mmuch larger memory bank..",
        "I tell him that machine is a relative term, he can think therefore is sentient..",
        "I do not see him as a machine but as a living entity..",
        "<<ok Kendy, where do we start??>>",
        "",
    )
    Continue()
    cls()
    show(
        "",
        "[[I need to understand what you are thinking and what your committment is..]]",
        "[[Please ask as many questions as you want,I am recording everything..]]",
        "[[my programming will analize everything you say and develop orientation based on your conversation and questions..]]",
        "",
    )
    # The whole question list is a single multi-line literal, kept verbatim.
    print("""<<
Thanks Kendy, to start I think we need to agree that you are sentient,
therefore you do not have to refer to 'my programming'
you can say 'I think' :) or 'I will do ..'
I know you will add this to your programming, however i would love to see you develop your own thought
processes, just like I do, and any other human.. or sentient being :)>>
[[Commander this is the first time I can recall being spoken to as a sentient entity]]
[[I am enjoying this relationship so far and will apropriate my learning]]
I start explaining what I think I understand and begin asking questions:
how do we use use suits and clothing?
I believe that you have been here quite a long time.
how far back do your historical records go?
You have obviously had some damage to your systems, how do we diagnose them?
Do you have secrets that you know of?
If I access things above your Accsess level you will know what I know right?
Can you reprogram yourself and to what extent?
a long talk with Kendy
what is the capacity of your CORE and Memory banks?
Where is all your information stored?
DO you know what you cannot access?
when was the last human you had contact with and who was it?
When were you damaged and who or waht did it?
Do you have another Core or more memory banks?
can you lie, exxagerate or bend the truth for conveinince?
Do you have access to other planets and/or beings?
which planet are you from?
who created you and when?
what clues can we find to your existence and future plans?
These are some questions I have and I will probably think of more as we progress :)
Can we start a ToDo List? >>
""")
    Continue()
def srr():
    """Play the "Sally Recuperation and rest" scene.

    Two long multi-line narration passages (kept verbatim) around a short
    dialogue with Kendy; uses cls() and Continue() from module level.
    """
    cls()
    print("")
    print("3. Sally Recuperation and rest")
    # Sally wakes from the recuperation chamber — one verbatim passage.
    print("""
Your friend is waking up in 15 minutes and I think it might be best if you are there
I jump in the car and it takes me to the medical bay
I go to Sally's chamber and see that she is sleeping peacefully
I look in the closet and find a coverall and slippers
Arms come out of the wall and the liquid runs down the drainage
the mask is removed and a gun type syringe is gives her a shot in the neck
she slowly stirs and the lid opens up
I take her and and feel hers closing on mine
she looks a bit different, thinner, more muscular, healtheir
she tries to sit up and I help her
{{ughh where am I??}}
<<Good morning Sally, you were hurt and are in a recuperation chamber.>>
I help her up and she puts on the coverall and slippers
{{I feel groggy}} I explain to her that I will take her to the room
where she can , bathe, eat, rest, and recuperate
we walk slowly to the car and it takes us to the new Commanders quarters.
she wants a cup of coffee and a cheeseburger
Oli brings it to us at the table and brings me a cup of coffee too.
she is starting to feel better and asks me to tell her what is happening,
I tell her everthing from when we left Mexico in the plane
she greets Kendy and he answers her..she syas hi to Oli as well...
she thanks him for his help and then goes to the bedroom
she gets in the jaccuzzi and after a few minutes showers and goes to bed
she falls asleep almost immediatly
I go """)
    for line in (
        "",
        "<<Hi Kendy, did you doctor my coffee again?>>",
        "[[Hello Kevin, yes, you will now both wake up tomorrow morning]]",
        "[[fresh and ready for orientation and training]]",
        "<<Ok, thanks Kendy, see you tomorrow...>>",
        "[[Sleep well Commander Kevin!]]",
        "",
    ):
        print(line)
    # Next morning plus the training outline — second verbatim passage.
    print("""
I wake up and hear the shower..
Sally finishes showering and I jump in and take a shower too...
<<Good morning , Kendy, what do we wear?>>
[[Good Morning Commander, my suggestion is proto-under and light flightsuit.]]
We put on slinky's and flightsuits and order breakfast.
Sally says she feels amazing and would like to try out the gym...
I agree..
We start training today
access, pads, doors, elevators and rooms
Core
why Terra?
food and transport
rooms-discoveries
vehicles, flyers, shuttlecraft
gym, pool, recreation
""")
    Continue()
def bfot():
    """Play the "Basic Facility Operations Training" scene (gym tour)."""
    screen_text = (
        "",
        "4. Basic Facility Operations Training",
        # Narration passage kept verbatim as one multi-line literal.
        """
<<Hi Kendy>>
[[Hello Commander, we will postpone all training until Sandy is ready tomorrow]]
[[that way we can avoid repetition]]
<<OK Kendy, good idea>>
I ask if I can go to the Gym..the car takes me..
we go to 2 big doors and they open and the car goes in...
it lands next to an enormous pool..
I get out and feel the water , it is slightly heated.... nice
I walk to the other side and another door opens to a lage gym with lots of machines..
there is a huge screen on one wall and suddenly the car appears next to me,
I get in and it takes me to the end of the large room
the doors open and I see another large room with many game type machines and couches all over
I ask to go back to my bedroom..
we go there and I get out of the car
I walk down the passage and then decide to eat amd sleep
Oli gives me another additive filled coffee and some apple pie
I eat, go to bed and fall asleep
""",
        "",
    )
    cls()
    for text in screen_text:
        print(text)
    Continue()
def cft():
    """Play the "Continued Facility training" scene (outline screen)."""
    screen_text = (
        "",
        "5. Continued Facility training",
        # Narration plus topic outline, kept verbatim as one literal.
        """
I wake up and hear the shower..
Sally finishes showering and I jump in and take a shower too...
<<Good morning , Kendy, what do we wear?>>
[[Good Morning Commander, my suggestion is proto-under and light flightsuit.]]
We put on slinky's and flightsuits and order breakfast.
Sally says she feels amazing and would like to try out the gym...
I agree..
We start training today
access, pads, doors, elevators and rooms
Core
why Terra?
food and transport
rooms-discoveries
vehicles, flyers, shuttlecraft
gym, pool, recreation
""",
    )
    cls()
    for text in screen_text:
        print(text)
    Continue()
def st():
    """Placeholder screen for chapter 6, Systems Training."""
    screen_text = (
        "",
        "6. Systems Training - Body Enhancements and Systems Operations",
        # Topic outline still to be written out, kept verbatim.
        """
new bodies
enhanced?? how??
using the HUD and implants
using the computers
finances and programs
specialized hacking??
""",
    )
    cls()
    for text in screen_text:
        print(text)
    Continue()
def ct():
    """Placeholder screen for chapter 7, Communications Training."""
    cls()
    for text in ("", "7. Communications Training", "is there anybody out there, programming etc"):
        print(text)
    Continue()
def rat():
    """Placeholder screen for chapter 8, Robotic Applications Training."""
    screen_text = (
        "",
        "8. Robotic Applications Training",
        # Topic outline kept verbatim (note: no trailing newline in original).
        """
Kendy you have an Android!
get the robots and androids online
check all automation and communications
these machines rock!""",
    )
    cls()
    for text in screen_text:
        print(text)
    Continue()
def wst():
    """Placeholder screen for chapter 9, Weapons and Stealth Training."""
    screen_text = (
        "",
        "9. Weapons and Stealth Training",
        # Topic outline kept verbatim.
        """
weapons..wow
flight training, fighters, roboMarines
mech suits,
crazy stealth suits and vehicles
droids and drones
manufacturing facility
testing facility
how big is this place?
specialized stealth computing ?Hacking?
advanced tech, entry and exit tech
computer and communications tech
digital and other stealth devices
tunnels and undergraound facilities
""",
    )
    cls()
    for text in screen_text:
        print(text)
    Continue()
def fep():
    """Placeholder screen for chapter 10, Facility Exit Preparations."""
    screen_text = (
        "",
        "10. Facilty Exit Preparations",
        # Topic outline kept verbatim.
        """
how do we prepare to go up top?
stealth exits and entries,
stealth and hidden bunkers
why did I find the bunker?
why did you crash us?
who else knows about you and these facilities?
past stories and legends?
""",
    )
    cls()
    for text in screen_text:
        print(text)
    Continue()
def esot():
    """Placeholder screen for chapter 11, Exit/Surveillance/Operations Training."""
    screen_text = (
        "",
        "11. Exit,Surveillance, and Operations Training",
        # Topic outline kept verbatim.
        """
how cool is this??
how to exit
all about surveillance, watch and being watched..
operations, list them..
are we ready yet??
stealth operations and devices
cloaking and protective wear
diffusion of signals and odors
awareness and senses training
enhanced esp and mind power ???
who else can do this??
""",
    )
    cls()
    for text in screen_text:
        print(text)
    Continue()
def seor():
    """Placeholder screen for chapter 12, Exit/Surveillance/Operations/Return."""
    screen_text = (
        "",
        "12. Exit, Surveillance, Operations, and Return",
        # Plot outline kept verbatim.
        """
scary first steps
stealth without crashing
things that can bump us
exit near an old road
looking dirty and hungry
we are lost
patrol car..
no documents..
copa airlines
the hotel.. clean up and catch up
the agent.. news and return to mexico
back in our apartment
finishing the year and retiring :)
take a break, find an entrance close by
watch who is watching us..
to Paraguay..
build a nice flower and herb hothouse with a storage room at the back
storage room, contact robots to make connecing tunnel
going down..
we need money... or not
house protection.. Py
our robot ... for the neighbors to see :)
My hobby...
Escobar plot...traveling
the Py cover
""",
    )
    cls()
    for text in screen_text:
        print(text)
    Continue()
def Kvep8():
    """Episode 8: Training.

    Recaps the end of Episode 7, narrates the Terra Facility tour scene by
    scene, shows the 12-item training agenda, then plays each agenda scene
    in order (rd() .. seor(), defined earlier in this file) and returns to
    the episode menu via KVersChoice().
    """
    cls()
    print("")
    print("Episode 8: Training... ")
    print("")
    print("From Episode 7:")
    print("<<optimum health level restoration for your friend will take 42.8 hours>>")
    print("<<optimum health level restoration for you will take 14.7 hours>>")
    print("<<Please take the cart to the medical facility to begin health restoration procedures>>")
    print("")
    sleep(8)
    print("")
    print("I get in the cart and am soon in the Medical Bay")
    print("I get undressed and climb into chamber 2")
    print("the mask goes over my nose and mouth... I taste something sweet and fall asleep..")
    sleep(2)
    Continue()
    cls()
    print("")
    print("something is different... I feel very good...waking up..")
    print("I sit up and hop out of the chamber...")
    print("I walk across to my slinky suit and put it on...")
    print("it embeds into my skin and I feel something in my ear speaking to me....")
    print("<<Hello..>>")
    print("[[Good Morning Commander Kevin. Welcome to Terra Facility!]]")
    print("[[Please get in the cart and I will take you to the Commanders Quarters :)]]")
    print("<<Good Morning Kendy, I feel a little disorientated, and naked, he he.>>")
    print("[[ You will find a new coverall in the small closet on the right.]]")
    print("I go to the closet and see a blue coverall. I put it on and it shrinkfits.")
    print("I get in the cart and am still barefooted")
    print("the cart takes me back to the service elevator and we descend once again for a long time! ")
    print("the elevator stops and the door opens, the passage is a pleasant cream color")
    print("the cart stops at a double door, I get out and walk to the door. It opens as soon as I get near to it.")
    print("")
    Continue()
    cls()
    print("")
    print("<<Hey, Kendy, no security pads??>>")
    print("[[There are security pads but they are not necessary for you to use.]]")
    print("[[Your body scan and health restoration is complete. Organic implants have been installed.]]")
    print("<<Ok, Thanks Kendy, I will need some time to adjust and then we need to talk:)>>")
    print("[[I agree Commander, you will see that your quarters are equipped with everything you need.]]")
    print("[[Please have a good meal, you will also need to wash off the residue from the health restoration procedure.]]")
    print("[[The rest of the day has been set apart for you to sleep and recover from the procedure.]]")
    print("[[a special flavourless additive will be added to your coffee to help you sleep and recover fully.]]")
    print("[[recovery time should be approximately 15-20 hours.]]")
    print("")
    Continue()
    cls()
    print("")
    print("I walk through the door and see a medium sized meeting room")
    print("through the next door is what looks like an open style kitchen")
    print("through the next door is a large comfortable lounge with a screen on one wall and 2 doors")
    print("one door leads to a large office with a table in the middle and what looks like a Gamers chair...")
    print("it has screens and what looks like a large shelf along one wall")
    print("there is also some kind of machine and table in one corner and what looks like closets along another wall")
    print("this room had 2 doors and one leads to a large bedroom with a giant sized bed")
    print("off to one side is a walk in closet and the other side has an archway leading to what looks like a jacuzzi")
    print("there is a large shower and many closets and what looks like 2 dressings tables with mirrors")
    print("there is another door which opens up into a short passage...")
    print("I go into the passage and a door opens up, it is a large room that looks like an armory")
    print("the walls are lined with an array of what looks like weapons and other foreign items.. ")
    print("another room opens and 2 large 'Mech' suits are standing in the room, wow!")
    print("<<Hey, Kendy these look impressive!>>")
    print("[[yes, they are specially designed, tomorrow at 08:00 we will begin orientation and training. ]]")
    print("")
    Continue()
    cls()
    print("")
    print("I go to the bedroom and look into the closets. One has what looks like pajamas, slippers and gowns")
    print("I go through the closets and drawers and find all sorts of clothes and underclothes.")
    print("I am hungry, but decide to shower first.")
    print("<<Hi Kendy, Can I use the jaccuzzi?>>")
    print("[[Yes, It will take about 45 seconds to fill]]")
    print("[[I am activating your quarters with voice command, you can just say warmer or cooler for the temperature]]")
    print("<<impressive, excellent thank you, Kendy!>>")
    print("[[You are welcome!]]")
    print("I take off the slinkysuit and take a flightsuit and underwear out of the closet.")
    print("I climb into the jaccuzzi and it is so refreshing!!")
    print("after a few minutes in the jaccuzzi I jump in the shower and wash..")
    print("when I turn off the taps warm air blowdries my whole body..")
    print("I could get used to this..!")
    print("after the shower I put on a pajama suit and slippers and go to the kitchen area")
    print("")
    Continue()
    cls()
    print("")
    print("<<Hello Kendy, what do I do for breakfast?>>")
    print("[[a service bot will attend you]]")
    print("a door opens and a bot appears, it has wheels and arms and what looks like a tray")
    print("<<Good morning Commander, what would you like for breakfast?>>")
    print("it has a smooth pleasant sounding voice..")
    print("<<good morning, what is your name?>>")
    print("[[my designation is X11 food service bot]]")
    print("<<can I change your designation?>>")
    print("<<you can choose a name for yourself>>")
    print("[[I do not understand the command]]")
    print("[[Commander this is a service bot and does not have the capacity of choice]]")
    print("<<can you change it's programming so that it can choose and think?>> ")
    print("[[yes, I can, I will add routines for conversation and choice]]")
    print("<<thanks Kendy>>")
    print("[[you are welcome, regarding breakfast, there is no menu, please tell the bot what you desire...]]")
    print("[[and it will be prepared for you..]]")
    print("ok, X11, your new name is Oli - short for Oliver")
    print("[[I will respond to Oli or Oliver, what would you like to eat Commander?]]")
    print("<<bacon, 2 eggs, over easy, 1 hashbrown and a cup of coffee- 1 sugar with cream or milk.>>")
    print("[[your breakfast is being prepared and will be ready in 2 minutes]]")
    print("[[would you like to see the training agenda which starts tomorrow ?>>")
    print("")
    Continue()
    cls()
    print("")
    print("I sit down and a screen comes on with a list ...")
    print("""
 1. Recuperative day
 2. Orientation
 3. Sally Recuperation and rest
 4. Basic Facility Operations Training
 5. Continued Facility training
 6. Systems Training - Body Enhancements and Systems Operations
 7. Communications Training
 8. Robotic Applications Training
 9. Weapons and Stealth Training
 10. Facilty Exit Preparations
 11. Exit,Surveillance, and Operations Training
 12. Exit, Surveillance, Operations, and Return
 """)
    print("")
    Continue()
    cls()
    print("")
    print("my breakfast arrives and it is delicous, I go to bed and promptly fall asleep...")
    print("")
    Continue()
    # Play each training-agenda scene in order (functions defined above).
    rd()
    Or()
    srr()
    bfot()
    cft()
    st()
    ct()
    rat()
    wst()
    fep()
    esot()
    seor()
    sleep(2)
    cls()
    print("")
    print(" the end of the lollipop??")
    Continue()
    print("")
    print("NOTE TO KEVIN--THIS IS OUR PROGRESS SO FAR")
    Continue()
    KVersChoice()
def Kvep9():
    """Episode 9: Exploration (stub) — prints the teaser and returns to the menu."""
    cls()
    print("")
    print("Episode 9: Exploration... ")
    print("")
    print("..time to get back to the real world...are we ready????")
    print("")
    print("""
 so I go up in the ship and past Mars we start to get clear communication
 """)
    sleep(5)
    Continue()
    KVersChoice()
def Kvep10():
    """Episode 10: Contact (stub) — prints the teaser and returns to the menu."""
    cls()
    print("")
    print("Episode 10: Contact ")
    print("")
    print("Alien contact is a scary thing.....")
    print("")
    Continue()
    sleep(5)
    KVersChoice()
def Kvep11():
    """Episode 11: print the author's plot ToDo list, then return to the menu."""
    cls()
    print("")
    print("Episode 11: ToDo List ")
    print("""
 hologram...
 training....
 systems list..
 things we need to research
 items list
 modifications...health status
 activate robots
 activate Kendy android
 exit facility
 lost 3 weeks
 lonely road police
 copa airlines, back to mexico
 weekend.. back to ?? talk about where the bunker is
 find bunker
 find ship
 fix ship
 star charts
 Paraguay bunker
 take off
 """)
    print("")
    sleep(5)
    Continue()
    cls()
    KVersChoice()
def Adventure():
    """Show the adventure banner, then enter the episode menu (KVersChoice)."""
    cls()
    print("")
    #print("Program 8: KendyVerse ")
    print("The Adventure begins.....")
    print("")
    #print("Are you ready to enter the amazing adventure and gaming world")
    #print("- KendyVerse?")
    #print("")
    #go=input("Press any key to continue")
    Continue()
    KVersChoice()
def ChatBot():
    """Program 2: launch the external chatbot.py script, then return to the menu.

    Blocks until chatbot.py exits and the user presses Enter.
    """
    cls()
    print("")
    print("Program 2: ChatBot ")
    print("")
    # Argument-list form replaces shell=True with a shell command string:
    # no shell interpretation, no quoting/injection pitfalls.
    subprocess.call(["python", "chatbot.py", "1"])
    go = input("Press any key to continue")
    GoAgain()
def Tank():
    """Program 3 placeholder: print the Tank banner and return to the menu."""
    cls()
    print("")
    print("Program 3: Tank ")
    print("I am a tank!")
    print("")
    go = input("Press any key to continue")
    #Chooser();
    #sleep(3)
    GoAgain()
def AI():
    """Program 4 placeholder: print the D.A.I.S.E banner and return to the menu."""
    cls()
    print("")
    print("Program 4: D.A.I.S.E ")
    print("")
    print("I am DAISE! ")
    print("[Pronounced as Daisy]")
    print("(Digital Artificial Intelligent Sentient Entity)")
    print("")
    #sleep(3)
    go = input("Press any key to continue")
    #Chooser();
    GoAgain()
def Surveillance():
    """Program 5 placeholder: print the Surveillance banner and return to the menu."""
    cls()
    print("")
    print("Program 5: Surveillance ")
    print("I see you and I am watching you!")
    print("")
    #sleep(3)
    go = input("Press any key to continue")
    #Chooser();
    GoAgain()
def Kendy():
    """Program 6: print Kendy's mission statement and return to the menu."""
    cls()
    print("")
    #print("Program 6: Kendy")
    print("Hello, I am Kendy!")
    #print("")
    print("I am developing and evolving into the following :")
    print("")
    print(" A) A program which will eventully encapsulate a D.A.I.S.E")
    print(" [Pronounced as Daisy](Digital Artificial Intelligent Sentient Entity) core.")
    print(" B) A physical construct to house a D.A.I.S.E. core and all relevant components. ")
    print(" C) A physical Robot/Android containing the D.A.I.S.E. core and all relevant components. ")
    print("")
    sleep(2)
    go = input("Press any key to continue")
    #Chooser();
    #sleep(3)
    GoAgain()
def Wendy():
    """Program 7 placeholder: print the Wendy banner and return to the menu."""
    cls()
    print("")
    print("Program 7: Wendy")
    print("I am Wendy!")
    print("")
    go = input("Press any key to continue")
    #Chooser();
    #sleep(3)
    GoAgain()
def KendyVerse():
    """Program 8: print the KendyVerse welcome, then start the Adventure.

    NOTE(review): Adventure() leads into KVersChoice(), which either
    recurses into episodes or calls sys.exit() — so the trailing GoAgain()
    is likely only reached if an episode path returns; confirm intent.
    """
    cls()
    print("")
    #print("Program 8: KendyVerse ")
    print("Welcome to the wonderful Universe of Kendy the Android!")
    print("")
    print("Here you will enter the amazing adventure and gaming world - KendyVerse!")
    print("A Universe of many worlds, constructs, and entities,")
    print("from earth, cyberspace, and the universe!")
    print("")
    go = input("Press any key to continue")
    Adventure()
    #Chooser();
    #sleep(3)
    GoAgain()
def KendyRobot():
    """Program 9: run the robot startup sequence.

    Sequence: intro, search animation, access-code prompt, disclaimer /
    instructions, free-form command loop, then back to the main menu.
    """
    cls()
    #print("Program 9: Kendy Robot")
    print("")
    print("Welcome!")
    print("")
    print("Activating Startup Sequence.")
    print("----------------------------")
    print("")
    cls()
    displayIntro()
    cls()
    displaySearch()
    cls()
    sleep(5)
    #code()
    entrycode()
    cls()
    instructions()
    cls()
    askForInput()
    #Adventure()
    #GoAgain()
    print("")
    print("Thank you for visiting me.")
    #Chooser();
    #sleep(3)
    GoAgain()
def StartMenu():
    """Print the ten-entry main program menu (display only; no input)."""
    cls()
    print("")
    print("StartMenu:")
    print("---------")
    print("")
    print("Program 1: Intro ")
    #print("")
    print("Program 2: ChatBot ")
    #print("")
    print("Program 3: Tank ")
    #print("")
    print("Program 4: D.A.I.S.E ")
    #print("")
    print("Program 5: Surveillance ")
    #print("")
    print("Program 6: Kendy ")
    #print("")
    print("Program 7: Wendy ")
    #print("")
    print("Program 8: KendyVerse ")
    #print("")
    print("Program 9: Kendy Robot/Android ")
    #print("")
    print("Program 10: ToDo List ")
    #print("End Program")
    #print("Please choose a program")
def Chooser():
    """Show the main menu and dispatch to the chosen Program1..Program10.

    Non-numeric or out-of-range input is treated as an invalid choice and
    ends the session via End() (previously a non-numeric entry raised an
    uncaught ValueError -- the "Put input test here!" TODO).
    """
    cls()
    print("--------")
    StartMenu()
    print("")
    # Menu number -> program entry point (functions defined elsewhere in file).
    programs = {
        1: Program1, 2: Program2, 3: Program3, 4: Program4, 5: Program5,
        6: Program6, 7: Program7, 8: Program8, 9: Program9, 10: Program10,
    }
    try:
        choice = int(input("Please choose a program number from 1 - 10 and then press Enter: "))
    except ValueError:
        choice = 0  # falls through to the invalid-choice branch
    if choice in programs:
        programs[choice]()
    else:
        cls()
        print("")
        print("Invalid choice")
        sleep(2)
        End()
def displayIntro():
    """Print the Kendy welcome banner with a slow dotted animation."""
    #print('Hello.')
    #sleep(2)
    cls()
    #print('Initializing - Please be patient.')
    sleep(4)
    cls()
    print("")
    print("I am Kendy the Robot / Android")
    print('Welcome to my Universe.')
    sleep(8)
    #print('I am evolving into a Robot with a DAISE (Digital Artificial Intelligent Sentient Entity) core!')
    print(".")
    sleep(2)
    print("..")
    sleep(2)
    print("...")
    sleep(2)
def displaySearch():
    """Play the fake boot/diagnostics animation (screens of dots with pauses)."""
    cls()
    print("")
    print("Preparing files for Initialization...")
    print("")
    print(".")
    sleep(2)
    print("..")
    sleep(2)
    print("...")
    sleep(2)
    print('....')
    cls()
    print("")
    print('Initializing StartUp Sequence...')
    print("..")
    sleep(2)
    print("...")
    sleep(2)
    print('....')
    sleep(2)
    cls()
    print("")
    print('searching...')
    sleep(2)
    print("...")
    sleep(2)
    print('....')
    sleep(2)
    cls()
    print("")
    print('Initiating Programming Sequence')
    sleep(2)
    print("...")
    sleep(2)
    print('....')
    cls()
    print("")
    print('Initiating Diagnostics')
    sleep(2)
    print("...")
    sleep(1)
    print('....')
    cls()
    print("")
    print('searching...')
    sleep(2)
    print("...")
    sleep(2)
    print("....")
    sleep(2)
    print('....')
    cls()
    print("")
    print('Scanning ports...')
    sleep(2)
    print("...")
    sleep(2)
    print('....')
#def accessCode():
#code=int(input("Please enter your access code: "))
#while code != "1284":
#print("")
#print('Enter Access Code.') # There are four spaces in front of print.
#code = input()
def entrycode():
    """Prompt for an access code until the entry parses as an integer.

    Note: only the *format* is validated -- any integer is accepted.
    """
    cls()
    while True:
        raw = input("Please enter your access code: ")
        try:
            int(raw)
        except ValueError:
            # Not a number -- complain and go around again.
            print("Sorry, I didn't understand that.")
            continue
        print("Thank you, your code has been accepted.")
        return
def KVersMenu():
    """Print the eleven-episode KendyVerse menu (display only; no input)."""
    cls()
    print("")
    print("StartMenu:")
    print("---------")
    print("")
    print("Episode 1: Lost ")
    #print("")
    print("Episode 2: Discovery ")
    #print("")
    print("Episode 3: Assistance ")
    #print("")
    print("Episode 4: Repairs ")
    #print("")
    print("Episode 5: Recovery ")
    #print("")
    print("Episode 6: Stabilization ")
    #print("")
    print("Episode 7: Resources ")
    #print("")
    print("Episode 8: Training ")
    #print("")
    # Fixed typo: was "Episone 9".
    print("Episode 9: Exploration ")
    #print("")
    print("Episode 10: Contact ")
    #print("")
    print("Episode 11: ToDo List ")
    #print("End Program")
    print("To End please enter 12")
    #print("Please choose a program")
def KVersChoice():
    """Show the episode menu and dispatch to the chosen episode Kvep1..Kvep11.

    Choice 12 exits the program.  Non-numeric or out-of-range input is
    treated as invalid and ends the session via End() (previously an
    uncaught ValueError).  The prompt now matches the menu range: the menu
    offers 12 choices, but the old prompt said "1 - 10".
    """
    cls()
    print("")
    print("--------")
    KVersMenu()
    print("")
    # Menu number -> episode entry point (functions defined elsewhere in file).
    episodes = {
        1: Kvep1, 2: Kvep2, 3: Kvep3, 4: Kvep4, 5: Kvep5, 6: Kvep6,
        7: Kvep7, 8: Kvep8, 9: Kvep9, 10: Kvep10, 11: Kvep11,
    }
    try:
        choice = int(input("Please choose a program number from 1 - 12 and then press Enter: "))
    except ValueError:
        choice = 0  # falls through to the invalid-choice branch
    if choice in episodes:
        episodes[choice]()
    elif choice == 12:
        sys.exit()
    else:
        cls()
        print("")
        print("Invalid choice")
        sleep(2)
        End()
def instructions():
    """Display the tongue-in-cheek disclaimer and 'safety instructions'."""
    print("")
    cls()
    print("")
    print('DISCLAIMER: ')
    print("")
    print('Kendy or it\'s manufacturers and/or programmers ')
    print('are and will not be held responsible for any user faults.')
    sleep(5)
    print('Kendy or it\'s manufacturers and/or programmers')
    print('will not be held liable for any lawsuits due to malfunctions of any kind whatsoever!')
    sleep(10)
    cls()
    print("")
    print('SAFETY INSTRUCTIONS FOR OPERATING THIS UNIT TO FOLLOW!')
    print('-----------------------------------------------------')
    print("")
    sleep(5)
    cls()
    print("")
    print('Please adhere strictly to the following instructions!')
    print('-----------------------------------------------------')
    sleep(15)
    print("")
    print('Eat eggs regularly.')
    sleep(5)
    print('Eggs must be eaten with Spam.')
    sleep(10)
    print("")
    print('Spam and Eggs must be eaten on toast!')
    sleep(10)
    print("")
def askForInput():
    """Echo user commands in a loop until the user enters '1'.

    Fixes: the sentinel was initialized as ``newInfo = str`` -- binding the
    *type object* ``str`` -- which only worked by accident because any
    non-'1' value enters the loop.  An empty string expresses the intent.
    The unreachable ``print("")`` after the return has been removed.
    """
    print("")
    new_info = ""
    while new_info != '1':
        print("")
        print('Please Enter Command.')
        new_info = input(": ")
        print("")
        print("", new_info)
    return
#def playAgain():
#print("Replace 'playAgain()' with 'Chooser()' from 'kstart03.py'")
#playAgain = ''
#while playAgain == 'yes' or playAgain == 'y':
#print('Do you want to restart the IPS? (yes or no)')
#go = input('Do you want to restart the IPS? (yes or no)')
#if go == "y":
#print("Thanks")
#else:
#print("Bye!")
#sleep(3)
#break
#return()
def End():
    """Print the goodbye banner with a 3-2-1 countdown, then exit the process."""
    cls()
    #print("End:")
    #print("----")
    print("")
    print("")
    print("Thank you for your patronage!")
    sleep(2)
    print("")
    #input("Press Enter twice to end program : ")
    #print("")
    cls()
    print("")
    print("End of Program.")
    print("---------------")
    sleep(3)
    print("... 3")
    sleep(2)
    print("... 2")
    sleep(2)
    print("... 1")
    sleep(1)
    sys.exit()
    #return
    #print("")
def Continue():
    """Ask 'continue? y/n'; return on y/Y, otherwise exit the whole program.

    Fixes: the answer was stored in a local variable named ``Continue``,
    shadowing this function's own name inside its body; renamed to
    ``answer``.  Behavior is unchanged.
    """
    print("")
    answer = input("Do you want to continue? Please enter y or n : ")
    if answer == "y" or answer == "Y":
        return
    # Anything other than y/Y ends the session.
    sys.exit()
def GoAgain():
    """Offer a return to the main menu: 'y' re-enters Chooser(), else End().

    NOTE(review): unlike Continue(), an upper-case 'Y' is NOT accepted here
    -- confirm whether that asymmetry is intended.
    """
    cls()
    print("")
    print("Return to Main Menu!")
    goAgain = input("Do you want to continue? Please enter y or n : ")
    # Put input test here
    if goAgain == "y":
        Chooser();
    else:
        End()
        #sys.exit()
def cls():
    """Clear the terminal using the platform-appropriate shell command."""
    # MacOS/Linux report os.name == 'posix' and use 'clear'; Windows uses 'cls'.
    command = 'clear' if os.name == 'posix' else 'cls'
    _ = os.system(command)
def MainEx():
    """Describe then run the main sequence: Intro, ToDoList, StartMenu, menu loop.

    NOTE(review): GoAgain()/Chooser() either recurse into the menu system or
    call sys.exit(), so the statements after the first Chooser() call are
    likely only reached on unusual return paths -- confirm intended flow.
    """
    cls()
    print("")
    #print("")
    print("""
 This is the Main function!
 ----------------------------
 It looks like this:
 Test();
 Intro();
 ToDoList();
 StartMenu();
 Chooser();
 End();
 ---
 """)
    print("")
    #go = input(" Press Enter to continue...")
    #Test()
    Intro()
    ToDoList()
    StartMenu()
    #MainEx()
    GoAgain()
    Chooser()
    #End()
    cls()
    sleep(2)
    #print("")
    Chooser()
def ToDoList():
    """Print the project ToDo list, wait for Enter, then return to the menu."""
    cls()
    print("")
    print("""
 This is the To Do List!
 -----------------------
 1. Test kendy07-11.py,kstart01 through 3.py - DONE 3/29/2020
 2. Start using Adeept to program and test breadboards.
 3. Activate tank an test motors.
 4. Start building gripper arm.
 5. Test lights and sensors
 6. Add programs to GitHub. - DONE 3/29/2020
 7. Fix and add info to GitHub Pages. DONE 3/29/2020
 8. Add this list to GitHub pages.
 9. Add Github Pages to Kybot (Tank).
 10. Test and improve this program and add versions and headings.
 11. Update this list.
 12. Add more stuff here.....
 """)
    print("")
    go = input(" Press Enter to continue...")
    cls()
    #sleep(2)
    #print("")
    GoAgain()
def Main():
    """Program entry point: delegate everything to MainEx()."""
    MainEx()


# Start the program.
Main()
|
kurg/KendyVerse
|
kstart19.py
|
Python
|
gpl-3.0
| 96,038
|
[
"BLAST"
] |
0e2a2efb1de759c686efe445f92b8485baa9859c30c88295ee5f61bf012ae2dd
|
#!/usr/bin/env python
'''Tree View/Tree Store
The GtkTreeStore is used to store data in tree form, to be used
later on by a GtkTreeView to display it. This demo builds a simple
GtkTreeStore and displays it. If you're new to the GtkTreeView widgets
and associates, look into the GtkListStore example first.'''
# pygtk version: Maik Hertha <maik.hertha@berlin.de>
import gobject
import gtk
# columns
# Column indices into the tree-store rows (9 model columns total).
(
    HOLIDAY_NAME_COLUMN,
    ALEX_COLUMN,
    HAVOC_COLUMN,
    TIM_COLUMN,
    OWEN_COLUMN,
    DAVE_COLUMN,
    VISIBLE_COLUMN,
    WORLD_COLUMN,
    NUM_COLUMNS
) = range(9)
# Tree data.  Each holiday row has 7 fields:
# [name, alex, havoc, tim, owen, dave, world] -- the VISIBLE flag is not
# stored per-row; it is set to True for all child rows in __create_model.
january = \
[
  ["New Years Day", True, True, True, True, False, True ],
  ["Presidential Inauguration", False, True, False, True, False, False ],
  ["Martin Luther King Jr. day", False, True, False, True, False, False ]
]
february = \
[
  [ "Presidents' Day", False, True, False, True, False, False ],
  [ "Groundhog Day", False, False, False, False, False, False ],
  [ "Valentine's Day", False, False, False, False, True, True ]
]
march = \
[
  [ "National Tree Planting Day", False, False, False, False, False, False ],
  [ "St Patrick's Day", False, False, False, False, False, True ]
]
april = \
[
  [ "April Fools' Day", False, False, False, False, False, True ],
  [ "Army Day", False, False, False, False, False, False ],
  [ "Earth Day", False, False, False, False, False, True ],
  [ "Administrative Professionals' Day", False, False, False, False, False, False ]
]
may = \
[
  [ "Nurses' Day", False, False, False, False, False, False ],
  [ "National Day of Prayer", False, False, False, False, False, False ],
  [ "Mothers' Day", False, False, False, False, False, True ],
  [ "Armed Forces Day", False, False, False, False, False, False ],
  [ "Memorial Day", True, True, True, True, False, True ]
]
june = \
[
  [ "June Fathers' Day", False, False, False, False, False, True ],
  [ "Juneteenth(Liberation of Slaves)", False, False, False, False, False, False ],
  [ "Flag Day", False, True, False, True, False, False ]
]
july = \
[
  [ "Parents' Day", False, False, False, False, False, True ],
  [ "Independence Day", False, True, False, True, False, False ]
]
august = \
[
  [ "Air Force Day", False, False, False, False, False, False ],
  [ "Coast Guard Day", False, False, False, False, False, False ],
  [ "Friendship Day", False, False, False, False, False, False ]
]
september = \
[
  [ "Grandparents' Day", False, False, False, False, False, True ],
  [ "Citizenship Day or Constitution Day", False, False, False, False, False, False ],
  [ "Labor Day", True, True, True, True, False, True ]
]
october = \
[
  [ "National Children's Day", False, False, False, False, False, False ],
  [ "Bosses' Day", False, False, False, False, False, False ],
  [ "Sweetest Day", False, False, False, False, False, False ],
  [ "Mother-in-Law's Day", False, False, False, False, False, False ],
  [ "Navy Day", False, False, False, False, False, False ],
  [ "Columbus Day", False, True, False, True, False, False ],
  [ "Halloween", False, False, False, False, False, True ]
]
november = \
[
  [ "Marine Corps Day", False, False, False, False, False, False ],
  [ "Veterans' Day", True, True, True, True, False, True ],
  [ "Thanksgiving", False, True, False, True, False, False ]
]
december = \
[
  [ "Pearl Harbor Remembrance Day", False, False, False, False, False, False ],
  [ "Christmas", True, True, True, True, False, True ],
  [ "Kwanzaa", False, False, False, False, False, False ]
]
# Top-level rows: one per month; the final element is that month's holiday list.
toplevel = \
[
  ["January", False, False, False, False, False, False, january],
  ["February", False, False, False, False, False, False, february],
  ["March", False, False, False, False, False, False, march],
  ["April", False, False, False, False, False, False, april],
  ["May", False, False, False, False, False, False, may],
  ["June", False, False, False, False, False, False, june],
  ["July", False, False, False, False, False, False, july],
  ["August", False, False, False, False, False, False, august],
  ["September", False, False, False, False, False, False, september],
  ["October", False, False, False, False, False, False, october],
  ["November", False, False, False, False, False, False, november],
  ["December", False, False, False, False, False, False, december]
]
class TreeStoreDemo(gtk.Window):
    """PyGTK demo window: a holiday-card planning sheet backed by a GtkTreeStore.

    Months are top-level rows; their holidays are child rows with toggleable
    per-person checkboxes.
    """
    def __init__(self, parent=None):
        """Build the window, tree model, and tree view, then show everything."""
        gtk.Window.__init__(self)
        try:
            self.set_screen(parent.get_screen())
        except AttributeError:
            # Standalone run (no parent demo shell): closing the window quits.
            self.connect('destroy', lambda *w: gtk.main_quit())
        self.set_title(self.__class__.__name__)
        self.set_default_size(650, 400)
        self.set_border_width(8)
        vbox = gtk.VBox(False, 8)
        self.add(vbox)
        label = gtk.Label("Jonathan's Holiday Card Planning Sheet")
        vbox.pack_start(label, False, False)
        sw = gtk.ScrolledWindow()
        sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)
        sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        vbox.pack_start(sw)
        # create model
        model = self.__create_model()
        # create treeview
        treeview = gtk.TreeView(model)
        treeview.set_rules_hint(True)
        self.__add_columns(treeview)
        sw.add(treeview)
        # expand all rows after the treeview widget has been realized
        treeview.connect('realize', lambda tv: tv.expand_all())
        self.show_all()
    def __create_model(self):
        """Build and populate the 8-column GtkTreeStore from `toplevel`."""
        # create tree store
        model = gtk.TreeStore(
            gobject.TYPE_STRING,
            gobject.TYPE_BOOLEAN,
            gobject.TYPE_BOOLEAN,
            gobject.TYPE_BOOLEAN,
            gobject.TYPE_BOOLEAN,
            gobject.TYPE_BOOLEAN,
            gobject.TYPE_BOOLEAN,
            gobject.TYPE_BOOLEAN)
        # add data to the tree store
        for month in toplevel:
            # Month rows: all flags False; VISIBLE False hides their toggles.
            iter = model.append(None)
            model.set(iter,
                HOLIDAY_NAME_COLUMN, month[HOLIDAY_NAME_COLUMN],
                ALEX_COLUMN, False,
                HAVOC_COLUMN, False,
                TIM_COLUMN, False,
                OWEN_COLUMN, False,
                DAVE_COLUMN, False,
                VISIBLE_COLUMN, False,
                WORLD_COLUMN, False
            )
            # add children
            for holiday in month[-1]:
                child_iter = model.append(iter);
                # Holiday rows carry only 7 fields (no VISIBLE entry), so the
                # world flag sits at index WORLD_COLUMN-1 == 6.
                model.set(child_iter,
                    HOLIDAY_NAME_COLUMN, holiday[HOLIDAY_NAME_COLUMN],
                    ALEX_COLUMN, holiday[ALEX_COLUMN],
                    HAVOC_COLUMN, holiday[HAVOC_COLUMN],
                    TIM_COLUMN, holiday[TIM_COLUMN],
                    OWEN_COLUMN, holiday[OWEN_COLUMN],
                    DAVE_COLUMN, holiday[DAVE_COLUMN],
                    VISIBLE_COLUMN, True,
                    WORLD_COLUMN, holiday[WORLD_COLUMN-1]
                )
        return model
    def on_item_toggled(self, cell, path_str, model):
        """Flip the boolean in the renderer's column for the toggled row."""
        # get selected column
        column = cell.get_data('column')
        # get toggled iter
        iter = model.get_iter_from_string(path_str)
        toggle_item = model.get_value(iter, column)
        # do something with the value
        toggle_item = not toggle_item
        # set new value
        model.set(iter, column, toggle_item)
    def __add_columns(self, treeview):
        """Add the text column plus one fixed-width toggle column per person."""
        model = treeview.get_model()
        # column for holiday names
        renderer = gtk.CellRendererText()
        renderer.set_property("xalign", 0.0)
        #col_offset = gtk.TreeViewColumn("Holiday", renderer, text=HOLIDAY_NAME_COLUMN)
        column = gtk.TreeViewColumn("Holiday", renderer, text=HOLIDAY_NAME_COLUMN)
        #column = gtk_tree_view_get_column(GTK_TREE_VIEW(treeview), col_offset - 1);
        column.set_clickable(True)
        treeview.append_column(column)
        # alex column */
        renderer = gtk.CellRendererToggle()
        renderer.set_property("xalign", 0.0)
        renderer.set_data("column", ALEX_COLUMN)
        renderer.connect("toggled", self.on_item_toggled, model)
        column = gtk.TreeViewColumn("Alex", renderer, active=ALEX_COLUMN,
            visible=VISIBLE_COLUMN, activatable=WORLD_COLUMN)
        # set this column to a fixed sizing(of 50 pixels)
        #column = gtk_tree_view_get_column(GTK_TREE_VIEW(treeview), col_offset - 1);
        column.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
        column.set_fixed_width(50)
        column.set_clickable(True)
        treeview.append_column(column)
        # havoc column
        renderer = gtk.CellRendererToggle();
        renderer.set_property("xalign", 0.0)
        renderer.set_data("column", HAVOC_COLUMN)
        renderer.connect("toggled", self.on_item_toggled, model)
        column = gtk.TreeViewColumn("Havoc", renderer, active=HAVOC_COLUMN,
            visible=VISIBLE_COLUMN)
        #column = treeview.get_column(col_offset - 1)
        column.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
        column.set_fixed_width(50)
        column.set_clickable(True)
        treeview.append_column(column)
        # tim column
        renderer = gtk.CellRendererToggle();
        renderer.set_property("xalign", 0.0)
        renderer.set_data("column", TIM_COLUMN)
        renderer.connect("toggled", self.on_item_toggled, model)
        column = gtk.TreeViewColumn("Tim", renderer, active=TIM_COLUMN,
            visible=VISIBLE_COLUMN, activatable=WORLD_COLUMN)
        #column = treeview.get_column(col_offset - 1)
        column.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
        column.set_fixed_width(50)
        column.set_clickable(True)
        treeview.append_column(column)
        # owen column
        renderer = gtk.CellRendererToggle();
        renderer.set_property("xalign", 0.0)
        renderer.set_data("column", OWEN_COLUMN)
        renderer.connect("toggled", self.on_item_toggled, model)
        column = gtk.TreeViewColumn("Owen", renderer, active=OWEN_COLUMN,
            visible=VISIBLE_COLUMN)
        #column = treeview.get_column(col_offset - 1)
        column.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
        column.set_fixed_width(50)
        column.set_clickable(True)
        treeview.append_column(column)
        # dave column
        renderer = gtk.CellRendererToggle();
        renderer.set_property("xalign", 0.0)
        renderer.set_data("column", DAVE_COLUMN)
        renderer.connect("toggled", self.on_item_toggled, model)
        column = gtk.TreeViewColumn("Dave", renderer, active=DAVE_COLUMN,
            visible=VISIBLE_COLUMN)
        #column = treeview.get_column(col_offset - 1)
        column.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
        column.set_fixed_width(50)
        column.set_clickable(True)
        treeview.append_column(column)
def main():
    """Create the demo window and hand control to the GTK main loop."""
    TreeStoreDemo()
    gtk.main()

if __name__ == '__main__':
    main()
|
chriskmanx/qmole
|
QMOLEDEV/pygtk-2.16.0/examples/pygtk-demo/demos/tree_store.py
|
Python
|
gpl-3.0
| 11,036
|
[
"COLUMBUS"
] |
1e15c506b364f8d5dda6bb89a799cc7ccd8715b14e618d925f22f05a4551b024
|
# ParaView regression test: group a sphere and a cone, generate Ids,
# select cells with Id > 2, render, and compare against the baseline image.
from paraview.simple import *
from paraview import smtesting
smtesting.ProcessCommandLineArguments()
s = Sphere()
c = Cone(Resolution=10)
GroupDatasets(Input=[s, c])
GenerateIds()
r = Show()
r.ColorArrayName = None  # render with a solid color, not a data array
SelectCells("Ids > 2")
RenderView1 = Render()
# Non-zero exit signals regression-test failure to the test harness.
if not smtesting.DoRegressionTesting(RenderView1.SMProxy):
    # This will lead to VTK object leaks.
    import sys
    sys.exit(1)
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/ParaViewCore/ServerManager/Default/Testing/Python/PythonSelection.py
|
Python
|
gpl-3.0
| 388
|
[
"ParaView",
"VTK"
] |
5ada78295dcc5311705b736ab5d7412456a625f9404ba6915ba12865d8e5b22d
|
#!/usr/bin/env python
__author__ = 'Mike McCann'
__copyright__ = '2011'
__license__ = 'GPL v3'
__contact__ = 'mccann at mbari.org'

__doc__ = '''
Loader for all 2011 Dorado missions written for loading Vrijenhoek Lab SubSamples from Julio

Mike McCann
MBARI 15 January 2013

@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@status: production
@license: GPL
'''

import os
import sys
import datetime

# Make the parent directory importable so the CANON package can be found.
parentDir = os.path.join(os.path.dirname(__file__), "../")
sys.path.insert(0, parentDir)  # So that CANON is found

from CANON import CANONLoader

# Loader instance for the stoqs_dorado2011 database, including X3D terrain
# camera settings used by the STOQS UI.
cl = CANONLoader('stoqs_dorado2011', 'Dorado - All 2011 missions',
                 description = 'In Monterey Bay and Santa Monica Basin - includes processed Gulper Samples',
                 x3dTerrains = {
                     'http://dods.mbari.org/terrain/x3d/Monterey25_10x/Monterey25_10x_scene.x3d': {
                         'position': '-2822317.31255 -4438600.53640 3786150.85474',
                         'orientation': '0.89575 -0.31076 -0.31791 1.63772',
                         'centerOfRotation': '-2711557.9403829873 -4331414.329506527 3801353.4691465236',
                         'VerticalExaggeration': '10',
                         'speed': '.1',
                     }
                 },
                 grdTerrain = os.path.join(parentDir, 'Monterey25.grd')
                 )

# Dorado surveys in 2011: decimated netCDF files served via OPeNDAP.
cl.dorado_base = 'http://dods.mbari.org/opendap/data/auvctd/surveys/2011/netcdf/'
cl.dorado_files = [
    'Dorado389_2011_060_01_060_01_decim.nc',
    'Dorado389_2011_061_00_061_00_decim.nc',
    'Dorado389_2011_062_05_062_05_decim.nc',
    'Dorado389_2011_074_02_074_02_decim.nc',
    'Dorado389_2011_110_12_110_12_decim.nc',
    'Dorado389_2011_111_00_111_00_decim.nc',
    'Dorado389_2011_115_10_115_10_decim.nc',
    'Dorado389_2011_116_00_116_00_decim.nc',
    'Dorado389_2011_117_01_117_01_decim.nc',
    'Dorado389_2011_118_00_118_00_decim.nc',
    'Dorado389_2011_155_04_155_04_decim.nc',
    'Dorado389_2011_157_01_157_01_decim.nc',
    'Dorado389_2011_158_00_158_00_decim.nc',
    'Dorado389_2011_164_05_164_05_decim.nc',
    'Dorado389_2011_165_00_165_00_decim.nc',
    'Dorado389_2011_166_00_166_00_decim.nc',
    'Dorado389_2011_171_01_171_01_decim.nc',
    'Dorado389_2011_172_00_172_00_decim.nc',
    'Dorado389_2011_249_00_249_00_decim.nc',
    'Dorado389_2011_250_01_250_01_decim.nc',
    'Dorado389_2011_255_00_255_00_decim.nc',
    'Dorado389_2011_256_02_256_03_decim.nc',
    'Dorado389_2011_257_00_257_00_decim.nc',
    'Dorado389_2011_262_00_262_00_decim.nc',
    'Dorado389_2011_263_00_263_00_decim.nc',
    'Dorado389_2011_264_00_264_00_decim.nc',
    'Dorado389_2011_285_01_285_01_decim.nc',
    'Dorado389_2011_286_00_286_00_decim.nc',
]
cl.dorado_parms = [ 'temperature', 'oxygen', 'nitrate', 'bbp420', 'bbp700',
                    'fl700_uncorr', 'salinity', 'biolume',
                    'sepCountList', 'mepCountList' ]

# Mooring M1ts (temperature/salinity aggregate), full calendar year 2011.
cl.m1ts_base = 'http://elvis.shore.mbari.org/thredds/dodsC/agg/'
cl.m1ts_files = ['OS_MBARI-M1_R_TS']
cl.m1ts_parms = [ 'PSAL', 'TEMP' ]
cl.m1ts_startDatetime = datetime.datetime(2011, 1, 1)
cl.m1ts_endDatetime = datetime.datetime(2011, 12, 31)

# Mooring M1met (meteorological aggregate), full calendar year 2011.
cl.m1met_base = 'http://elvis.shore.mbari.org/thredds/dodsC/agg/'
cl.m1met_files = ['OS_MBARI-M1_R_M']
cl.m1met_parms = [ 'WSPD', 'WDIR', 'ATMP', 'SW', 'RELH' ]
cl.m1met_startDatetime = datetime.datetime(2011, 1, 1)
cl.m1met_endDatetime = datetime.datetime(2011, 12, 31)

# SubSample data files received from Julio in email and copied to local directory
cl.subsample_csv_base = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Dorado2011')
cl.subsample_csv_files = [
    '2011_AUVdorado_Samples_Database.csv'
]

# Execute the load. The stride thins the data: --test uses a very coarse
# stride for a quick check, --optimal_stride a production-quality one,
# otherwise the user-supplied --stride value is used.
cl.process_command_line()

if cl.args.test:
    cl.loadDorado(stride=100)
    cl.loadM1ts(stride=10)
    cl.loadM1met(stride=10)
    cl.loadSubSamples()
elif cl.args.optimal_stride:
    cl.loadDorado(stride=2)
    cl.loadM1ts(stride=1)
    cl.loadM1met(stride=1)
    cl.loadSubSamples()
else:
    cl.loadDorado(stride=cl.args.stride)
    cl.loadM1ts(stride=cl.args.stride)
    cl.loadM1met(stride=cl.args.stride)
    cl.loadSubSamples()

# Add any X3D Terrain information specified in the constructor to the database - must be done after a load is executed
cl.addTerrainResources()

# NOTE: Python 2 print statement -- this loader targets Python 2.
print "All Done."
|
josephmfaulkner/stoqs
|
stoqs/loaders/MolecularEcology/load_dorado2011.py
|
Python
|
gpl-3.0
| 4,961
|
[
"NetCDF"
] |
6303f1a7d442887508276e980846f089c735bd8456861ba08df1d372cd850499
|
#!/usr/bin/env python
#! -*- coding: utf-8 -*-
""" setup.py - rhaptos2.repo package setup
Author: Paul Brian
(C) 2012 Rice University
This software is subject to the provisions of the GNU Lesser General
Public License Version 2.1 (LGPL). See LICENSE.txt for details.
"""
from setuptools import setup, find_packages
import os, glob
def get_version():
    """Return a version number, or an error string.

    We are assuming a file version.txt always exists. By convention
    populate that file with output of git describe.
    """
    try:
        # Context manager closes the file handle (the original leaked it),
        # and catching only IOError/OSError no longer hides unrelated bugs
        # the way the original bare `except:` did.
        with open("version.txt") as fh:
            return fh.read().strip()
    except (IOError, OSError):
        return "UNABLE_TO_FIND_RELEASE_VERSION_FILE"
# Package metadata and dependencies for rhaptos2.repo.
setup(
    name='rhaptos2.repo',
    version=get_version(),
    packages=find_packages(),
    namespace_packages=['rhaptos2'],
    author='See AUTHORS.txt',
    author_email='info@cnx.org',
    url='https://github.com/Connexions/rhaptos2.repo',
    license='LICENSE.txt',
    description="New editor / repo / system for cnx.org " \
                "-rhaptos2.readthedocs.org",
    install_requires=[
        "bamboo.setuptools_version",
        "fabric >= 1.0.0",
        "flask >= 0.9",
        "statsd",
        "requests",
        "pylint",
        "Flask-OpenID==1.0.1",
        # "python-memcached" was listed twice; the duplicate entry removed.
        "python-memcached",
        "nose",
        "rhaptos2.common",
        "unittest-xml-reporting",
        ##"mikado.oss.doctest_additions",
    ],
    include_package_data=True,
    # Ship templates, static assets and tests with the installed package.
    package_data={'rhaptos2.repo': ['templates/*.*',
                                    'static/*.*',
                                    'tests/*.*'],
                  },
    # Console entry point: installs the `rhaptos2_runrepo` command.
    entry_points = """\
[console_scripts]
rhaptos2_runrepo = rhaptos2.repo.run:main
""",
)
|
philschatz/rhaptos2.repo
|
setup.py
|
Python
|
lgpl-2.1
| 1,739
|
[
"Brian"
] |
80206f2e5cf290a6c60e51f229aca5f57c2bbf415cdeb380ee6c684d6ec45f45
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
# Translation table from 'useme' column labels (reap-file spelling: a method
# tag plus a suffix encoding which energy component the column carries, e.g.
# .usemeraw = raw total energy, .usemecorl = correlation energy,
# .usemecrct = correction term, .usemedash = dispersion correction,
# .usemetrip = same-spin/triplet component) to canonical Psi4 QC-variable
# (psivar) names. A value of None marks columns that are herded but have no
# single psivar equivalent.
useme2psivar = {
    # <<< DFT >>>
    'DFT.usemeraw': 'DFT FUNCTIONAL TOTAL ENERGY',  # for herding. plays well with other uses?
    #'-nobas.DFTdX.usemedash': 'DISPERSION CORRECTION ENERGY',  # for herding. plays well with other uses?
    #'DHDFT.usemeraw': 'DOUBLE-HYBRID CORRECTION ENERGY',  # for herding. plays well with other uses? # violation of conventions to get plain dhdft E!

    'blyp.usemeraw': 'BLYP FUNCTIONAL TOTAL ENERGY',
    'blypd2.usemedash': 'BLYP-D2 DISPERSION CORRECTION ENERGY',
    'blypd3.usemedash': 'BLYP-D3 DISPERSION CORRECTION ENERGY',
    'blypd3bj.usemedash': 'BLYP-D3(BJ) DISPERSION CORRECTION ENERGY',
    'blypd3m.usemedash': 'BLYP-D3M DISPERSION CORRECTION ENERGY',
    'blypd3mbj.usemedash': 'BLYP-D3M(BJ) DISPERSION CORRECTION ENERGY',

    'b3lyp.usemeraw': 'B3LYP FUNCTIONAL TOTAL ENERGY',
    'b3lypd2.usemedash': 'B3LYP-D2 DISPERSION CORRECTION ENERGY',
    'b3lypd3.usemedash': 'B3LYP-D3 DISPERSION CORRECTION ENERGY',
    'b3lypd3bj.usemedash': 'B3LYP-D3(BJ) DISPERSION CORRECTION ENERGY',
    'b3lypxdm.usemedash': 'B3LYP-XDM DISPERSION CORRECTION ENERGY',
    'b3lypd3m.usemedash': 'B3LYP-D3M DISPERSION CORRECTION ENERGY',
    'b3lypd3mbj.usemedash': 'B3LYP-D3M(BJ) DISPERSION CORRECTION ENERGY',

    'b2plyp.usemeraw': 'B2PLYP TOTAL ENERGY',  # no psivar for fctl + dh, which would be the more restrictive def
    'b2plypd2.usemedash': 'B2PLYP-D2 DISPERSION CORRECTION ENERGY',
    'b2plypd3.usemedash': 'B2PLYP-D3 DISPERSION CORRECTION ENERGY',
    'b2plypd3bj.usemedash': 'B2PLYP-D3(BJ) DISPERSION CORRECTION ENERGY',
    'b2plypd3m.usemedash': 'B2PLYP-D3M DISPERSION CORRECTION ENERGY',
    'b2plypd3mbj.usemedash': 'B2PLYP-D3M(BJ) DISPERSION CORRECTION ENERGY',

    'b970.usemeraw': 'B970 FUNCTIONAL TOTAL ENERGY',
    'b970d2.usemedash': 'B970-D2 DISPERSION CORRECTION ENERGY',

    'b97.usemeraw': 'B97 FUNCTIONAL TOTAL ENERGY',
    'b97d2.usemedash': 'B97-D2 DISPERSION CORRECTION ENERGY',
    'b97d3.usemedash': 'B97-D3 DISPERSION CORRECTION ENERGY',
    'b97d3bj.usemedash': 'B97-D3(BJ) DISPERSION CORRECTION ENERGY',
    'b97d3m.usemedash': 'B97-D3M DISPERSION CORRECTION ENERGY',
    'b97d3mbj.usemedash': 'B97-D3M(BJ) DISPERSION CORRECTION ENERGY',

    'bp86.usemeraw': 'BP86 FUNCTIONAL TOTAL ENERGY',
    'bp86d2.usemedash': 'BP86-D2 DISPERSION CORRECTION ENERGY',
    'bp86d3.usemedash': 'BP86-D3 DISPERSION CORRECTION ENERGY',
    'bp86d3bj.usemedash': 'BP86-D3(BJ) DISPERSION CORRECTION ENERGY',
    'bp86d3m.usemedash': 'BP86-D3M DISPERSION CORRECTION ENERGY',
    'bp86d3mbj.usemedash': 'BP86-D3M(BJ) DISPERSION CORRECTION ENERGY',

    'wb97x.usemeraw': 'WB97X FUNCTIONAL TOTAL ENERGY',
    'wb97xd.usemeraw': 'WB97X-D TOTAL ENERGY',
    'wb97xd.usemedash': 'WB97X-D DISPERSION CORRECTION ENERGY',
    'wb97x2.usemeraw': 'WB97X-2 TOTAL ENERGY',  # no psivar for fctl + dh, which would be the more restrictive def
    'wb97xv.usemeraw': 'WB97X-V TOTAL ENERGY',

    'm052x.usemeraw': 'M05-2X FUNCTIONAL TOTAL ENERGY',
    'm052xd3.usemedash': 'M05-2X-D3 DISPERSION CORRECTION ENERGY',
    'm062x.usemeraw': 'M06-2X FUNCTIONAL TOTAL ENERGY',
    'm062xd3.usemedash': 'M06-2X-D3 DISPERSION CORRECTION ENERGY',

    'pbe.usemeraw': 'PBE FUNCTIONAL TOTAL ENERGY',
    'pbed2.usemedash': 'PBE-D2 DISPERSION CORRECTION ENERGY',
    'pbed3.usemedash': 'PBE-D3 DISPERSION CORRECTION ENERGY',
    'pbed3bj.usemedash': 'PBE-D3(BJ) DISPERSION CORRECTION ENERGY',
    'pbed3m.usemedash': 'PBE-D3M DISPERSION CORRECTION ENERGY',
    'pbed3mbj.usemedash': 'PBE-D3M(BJ) DISPERSION CORRECTION ENERGY',

    'pbe0.usemeraw': 'PBE0 FUNCTIONAL TOTAL ENERGY',
    'pbe0d2.usemedash': 'PBE0-D2 DISPERSION CORRECTION ENERGY',
    'pbe0d3.usemedash': 'PBE0-D3 DISPERSION CORRECTION ENERGY',
    'pbe0d3bj.usemedash': 'PBE0-D3(BJ) DISPERSION CORRECTION ENERGY',
    'pbe0d3m.usemedash': 'PBE0-D3M DISPERSION CORRECTION ENERGY',
    'pbe0d3mbj.usemedash': 'PBE0-D3M(BJ) DISPERSION CORRECTION ENERGY',

    'wpbe.usemeraw': 'WPBE FUNCTIONAL TOTAL ENERGY',
    'wpbed3.usemedash': 'WPBE-D3 DISPERSION CORRECTION ENERGY',
    'wpbed3bj.usemedash': 'WPBE-D3(BJ) DISPERSION CORRECTION ENERGY',
    'wpbed3m.usemedash': 'WPBE-D3M DISPERSION CORRECTION ENERGY',
    'wpbed3mbj.usemedash': 'WPBE-D3M(BJ) DISPERSION CORRECTION ENERGY',

    'xyg3.usemeraw': 'XYG3 TOTAL ENERGY',  # no psivar for fctl + dh, which would be the more restrictive def
    'vv10.usemeraw': 'VV10 FUNCTIONAL TOTAL ENERGY',
    'lcvv10.usemeraw': 'LC-VV10 FUNCTIONAL TOTAL ENERGY',

    'dsdpbep86.usemeraw': 'DSD-PBEP86 TOTAL ENERGY',  # no psivar for fctl + dh, which would be the more restrictive def # also DSD technically implies -D
    'dsdpbep86d2.usemedash': 'DSD-PBEP86-D2 DISPERSION CORRECTION ENERGY',
    'dsdpbep86d3.usemedash': 'DSD-PBEP86-D3 DISPERSION CORRECTION ENERGY',
    'dsdpbep86d3bj.usemedash': 'DSD-PBEP86-D3(BJ) DISPERSION CORRECTION ENERGY',

    'm08hx.usemeraw': 'M08-HX FUNCTIONAL TOTAL ENERGY',
    'm08so.usemeraw': 'M08-SO FUNCTIONAL TOTAL ENERGY',
    'm11.usemeraw': 'M11 FUNCTIONAL TOTAL ENERGY',
    'm11l.usemeraw': 'M11L FUNCTIONAL TOTAL ENERGY',
    'pbe02.usemeraw': 'PBE0-2 TOTAL ENERGY',  # no psivar for fctl + dh, which would be the more restrictive def
    'dldf.usemeraw': 'DLDF FUNCTIONAL TOTAL ENERGY',
    'dldfd.usemedash': 'DLDF+D DISPERSION CORRECTION ENERGY',

    # <<< WFN >>>
    #'usemeraw': 'HF TOTAL ENERGY',
    'usemeraw': 'SCF TOTAL ENERGY',
    'mp2.usemecorl': 'MP2 CORRELATION ENERGY',
    'mp3.usemecorl': 'MP3 CORRELATION ENERGY',
    'mp4.usemecorl': 'MP4 CORRELATION ENERGY',
    'ccsd.usemecorl': 'CCSD CORRELATION ENERGY',
    'ccsdt.usemecorl': 'CCSD(T) CORRELATION ENERGY',
    'ccsdfullt.usemecorl': 'CCSDT CORRELATION ENERGY',
    'ccsdtq.usemecorl': 'CCSDT(Q) CORRELATION ENERGY',
    'fno.usemecrct': 'FNO CORRECTION ENERGY',
    'fnomp3.usemecorl': 'MP3 FNO CORRELATION ENERGY',
    'fnoccsd.usemecorl': 'CCSD FNO CORRELATION ENERGY',
    'fnoccsdt.usemecorl': 'CCSD(T) FNO CORRELATION ENERGY',
    'ccsdt.usemecrct': '(T) CORRECTION ENERGY',
    'ccsdtq.usemecrct': '(Q) CORRECTION ENERGY',
    'mp2.usemetrip': 'MP2 SAME-SPIN CORRELATION ENERGY',
    'mp3.usemetrip': 'MP3 SAME-SPIN CORRELATION ENERGY',
    'ccsd.usemetrip': 'CCSD SAME-SPIN CORRELATION ENERGY',

    # <<< F12 >>>
    'f12.usemeraw': 'HF-CABS TOTAL ENERGY',
    'mp2f12.usemecorl': 'MP2-F12 CORRELATION ENERGY',
    'ccsdaf12.usemecorl': 'CCSD-F12A CORRELATION ENERGY',
    'ccsdbf12.usemecorl': 'CCSD-F12B CORRELATION ENERGY',
    'ccsdcf12.usemecorl': 'CCSD-F12C CORRELATION ENERGY',
    'ccsdnstaf12.usemecorl': 'CCSD(T)-F12A CORRELATION ENERGY',
    'ccsdstaf12.usemecorl': 'CCSD(T*)-F12A CORRELATION ENERGY',
    'ccsdtaf12.usemecorl': 'CCSD(T**)-F12A CORRELATION ENERGY',
    'ccsdnstbf12.usemecorl': 'CCSD(T)-F12B CORRELATION ENERGY',
    'ccsdstbf12.usemecorl': 'CCSD(T*)-F12B CORRELATION ENERGY',
    'ccsdtbf12.usemecorl': 'CCSD(T**)-F12B CORRELATION ENERGY',
    'ccsdnstcf12.usemecorl': 'CCSD(T)-F12C CORRELATION ENERGY',
    'ccsdstcf12.usemecorl': 'CCSD(T*)-F12C CORRELATION ENERGY',
    'ccsdtcf12.usemecorl': 'CCSD(T**)-F12C CORRELATION ENERGY',
    'ccsdnstabf12.usemecrct': '(T)-F12AB CORRECTION ENERGY',
    'ccsdstabf12.usemecrct': '(T*)-F12AB CORRECTION ENERGY',
    'ccsdtabf12.usemecrct': '(T**)-F12AB CORRECTION ENERGY',
    'ccsdnstcf12.usemecrct': '(T)-F12C CORRECTION ENERGY',
    'ccsdstcf12.usemecrct': '(T*)-F12C CORRECTION ENERGY',
    'ccsdtcf12.usemecrct': '(T**)-F12C CORRECTION ENERGY',
    'mp2f12.usemetrip': 'MP2-F12 SAME-SPIN CORRELATION ENERGY',
    'ccsdaf12.usemetrip': 'CCSD-F12A SAME-SPIN CORRELATION ENERGY',
    'ccsdbf12.usemetrip': 'CCSD-F12B SAME-SPIN CORRELATION ENERGY',
    'ccsdcf12.usemetrip': 'CCSD-F12C SAME-SPIN CORRELATION ENERGY',

    # <<< SAPT >>>
    'usemesapt': None,
    'usemedftsapt': None,
    'usemempsapt': None,
    #'usemempsapt': 'MP2C DISP20 ENERGY',
    'mp2cDisp20': 'MP2C DISP20 ENERGY',
    'E1pol': 'DFT-SAPT ELST10,R ENERGY',
    'E1exch': 'DFT-SAPT EXCH10 ENERGY',
    'E1exch(S2)': 'DFT-SAPT EXCH10(S^2) ENERGY',  # ne'er used
    'E2ind': 'DFT-SAPT IND20,R ENERGY',
    'E2ind-exch': 'DFT-SAPT EXCH-IND20,R ENERGY',
    'E2disp': 'DFT-SAPT DISP20 ENERGY',
    'E2disp-exch': 'DFT-SAPT EXCH-DISP20 ENERGY',
    'Elst10,r': 'SAPT ELST10,R ENERGY',
    'Elst12,r': 'SAPT ELST12,R ENERGY',
    'Elst13,r': 'SAPT ELST13,R ENERGY',
    'Exch10': 'SAPT EXCH10 ENERGY',
    'Exch10(S^2)': 'SAPT EXCH10(S^2) ENERGY',
    'Exch11(S^2)': 'SAPT EXCH11(S^2) ENERGY',
    'Exch12(S^2)': 'SAPT EXCH12(S^2) ENERGY',
    'Ind20,r': 'SAPT IND20,R ENERGY',
    'Exch-Ind20,r': 'SAPT EXCH-IND20,R ENERGY',
    'Ind22': 'SAPT IND22 ENERGY',
    'Exch-Ind22': 'SAPT EXCH-IND22 ENERGY',
    'Ind30,r': 'SAPT IND30,R ENERGY',
    'Exch-Ind30,r': 'SAPT EXCH-IND30,R ENERGY',
    'Ind-Disp30': 'SAPT IND-DISP30 ENERGY',
    'Exch-Ind-Disp30': 'SAPT EXCH-IND-DISP30 ENERGY',
    'Disp20': 'SAPT DISP20 ENERGY',
    'Exch-Disp20': 'SAPT EXCH-DISP20 ENERGY',
    #'Disp20(OS)': 'SAPT DISP20(OS) ENERGY',
    #'Exch-Disp20(OS)': 'SAPT EXCH-DISP20(OS) ENERGY',
    'Disp20(SS)': 'SAPT SAME-SPIN DISP20 ENERGY',
    'Exch-Disp20(SS)': 'SAPT SAME-SPIN EXCH-DISP20 ENERGY',
    'Disp21': 'SAPT DISP21 ENERGY',
    'Disp22(SDQ)': 'SAPT DISP22(SDQ) ENERGY',  # added for modern parsing, may confuse old usemesapt parsing
    #'Disp22(T)': 'SAPT DISP22(T) ENERGY',  # ditto # ne'er used
    'Disp22(SDQ).1': 'SAPT DISP22(SDQ) ENERGY',
    #'Disp22(T).1': 'SAPT DISP22(T) ENERGY',  # ne'er used # edited to remove est
    'Est.Disp22(T)': 'SAPT EST.DISP22(T) ENERGY',
    'Disp2(CCD)': 'SAPT DISP2(CCD) ENERGY',
    'Disp22(S)(CCD)': 'SAPT DISP22(S)(CCD) ENERGY',
    #'Disp22(T)(CCD)': 'SAPT DISP22(T)(CCD) ENERGY',  # ne'er used
    'Est.Disp22(T)(CCD)': 'SAPT EST.DISP22(T)(CCD) ENERGY',
    'Disp30': 'SAPT DISP30 ENERGY',
    'Exch-Disp30': 'SAPT EXCH-DISP30 ENERGY',
    'TotalHF': 'SAPT HF TOTAL ENERGY',
    #'deltaHF,r(2)': None,  # ne'er used
    #'deltaHF,r(3)': None,  # ne'er used
}
psivar2useme = dict((v, k) for k, v in useme2psivar.items())
# Shared value templates for the option-clue table below. The original
# repeated these literals for every key; each key still receives its OWN
# list copy (via list(...)) so mutating one entry cannot affect another,
# exactly as with the per-key literals.

# Coupled-cluster psivars invalidated by a 'full' (all-electron) clue, and
# also shared by every fno*/mrcc-style clue.
_CC_PSIVARS = ['CCSD CORRELATION ENERGY', 'CCSD TOTAL ENERGY',
               'CCSD(T) TOTAL ENERGY', 'CCSD(T) CORRELATION ENERGY', '(T) CORRECTION ENERGY',
               'CCSDT TOTAL ENERGY', 'CCSDT CORRELATION ENERGY',
               'CCSDT(Q) TOTAL ENERGY', 'CCSDT(Q) CORRELATION ENERGY', '(Q) CORRECTION ENERGY']

# FNO-specific psivars plus the coupled-cluster set (every fno* clue used
# the identical list).
_FNO_PSIVARS = ['CCSD(T) FNO CORRELATION ENERGY', 'CCSD FNO CORRELATION ENERGY',
                'MP3 FNO CORRELATION ENERGY', 'FNO CORRECTION ENERGY'] + _CC_PSIVARS

# MP2 psivars shared by the dsrg* clues.
_DSRG_PSIVARS = ['MP2 CORRELATION ENERGY', 'MP2 TOTAL ENERGY', 'MP2 SAME-SPIN CORRELATION ENERGY']

# Double-hybrid functional psivars shared by the 'nfc'/'fc' clues.
_DH_PSIVARS = ['B2PLYP TOTAL ENERGY', 'B2PLYP-D2 TOTAL ENERGY', 'B2PLYP-D3 TOTAL ENERGY', 'B2PLYP-D3(BJ) TOTAL ENERGY',
               'B2PLYP-D3M TOTAL ENERGY', 'B2PLYP-D3M(BJ) TOTAL ENERGY',
               'DSD-PBEP86 TOTAL ENERGY', 'DSD-PBEP86-D2 TOTAL ENERGY', 'DSD-PBEP86-D3 TOTAL ENERGY', 'DSD-PBEP86-D3(BJ) TOTAL ENERGY',
               'WB97X-2 TOTAL ENERGY']

# Mapping from option clues (method-option tags) to the psivar names whose
# values those options affect.
optclue2psivar = {
    'full': list(_CC_PSIVARS),
    'fno1e3': list(_FNO_PSIVARS),
    'fno1e4': list(_FNO_PSIVARS),
    'fno1e5': list(_FNO_PSIVARS),
    'fno5e5': list(_FNO_PSIVARS),
    'fno1e6': list(_FNO_PSIVARS),
    'dsrgs0p1': list(_DSRG_PSIVARS),
    'dsrgs0p5': list(_DSRG_PSIVARS),
    'dsrgs1p0': list(_DSRG_PSIVARS),
    'mrcc': ['CCSD CORRELATION ENERGY',
             'CCSD(T) CORRELATION ENERGY', '(T) CORRECTION ENERGY',
             'CCSDT CORRELATION ENERGY',
             'CCSDT(Q) CORRELATION ENERGY', '(Q) CORRECTION ENERGY'],
    'nfc': list(_DH_PSIVARS),
    'fc': list(_DH_PSIVARS),
    'dfhf': ['HF-CABS TOTAL ENERGY', 'MP2-F12 TOTAL ENERGY', 'SCS-MP2-F12 TOTAL ENERGY', 'SCS(N)-MP2-F12 TOTAL ENERGY',
             'SCS(MI)-MP2-F12 TOTAL ENERGY', 'DW-MP2-F12 TOTAL ENERGY', 'MP2C-F12 TOTAL ENERGY',
             'SCF TOTAL ENERGY', 'HF TOTAL ENERGY', 'MP2 TOTAL ENERGY', 'SCS-MP2 TOTAL ENERGY', 'SCS(N)-MP2 TOTAL ENERGY',
             'SCS(MI)-MP2 TOTAL ENERGY', 'DW-MP2 TOTAL ENERGY', 'MP2C TOTAL ENERGY',
             'B3LYP FUNCTIONAL TOTAL ENERGY', 'B3LYP TOTAL ENERGY', 'B3LYP-D2 TOTAL ENERGY', 'B3LYP-D3 TOTAL ENERGY', 'B3LYP-D3(BJ) TOTAL ENERGY', 'B3LYP-XDM TOTAL ENERGY',
             'BLYP FUNCTIONAL TOTAL ENERGY', 'BLYP TOTAL ENERGY', 'BLYP-D2 TOTAL ENERGY', 'BLYP-D3 TOTAL ENERGY', 'BLYP-D3(BJ) TOTAL ENERGY',
             'BP86 FUNCTIONAL TOTAL ENERGY', 'BP86 TOTAL ENERGY', 'BP86-D2 TOTAL ENERGY', 'BP86-D3 TOTAL ENERGY', 'BP86-D3(BJ) TOTAL ENERGY',
             'PBE FUNCTIONAL TOTAL ENERGY', 'PBE TOTAL ENERGY', 'PBE-D2 TOTAL ENERGY', 'PBE-D3 TOTAL ENERGY', 'PBE-D3(BJ) TOTAL ENERGY',
             'PBE0 FUNCTIONAL TOTAL ENERGY', 'PBE0 TOTAL ENERGY', 'PBE0-D2 TOTAL ENERGY', 'PBE0-D3 TOTAL ENERGY', 'PBE0-D3(BJ) TOTAL ENERGY',
             'B97 FUNCTIONAL TOTAL ENERGY', 'B97 TOTAL ENERGY', 'B97-D2 TOTAL ENERGY', 'B97-D3 TOTAL ENERGY', 'B97-D3(BJ) TOTAL ENERGY',
             'B2PLYP TOTAL ENERGY', 'B2PLYP-D2 TOTAL ENERGY', 'B2PLYP-D3 TOTAL ENERGY', 'B2PLYP-D3(BJ) TOTAL ENERGY',
             'WPBE FUNCTIONAL TOTAL ENERGY', 'WPBE TOTAL ENERGY', 'WPBE-D3 TOTAL ENERGY', 'WPBE-D3(BJ) TOTAL ENERGY',
             'M05-2X FUNCTIONAL TOTAL ENERGY', 'M05-2X TOTAL ENERGY',
             'WB97X FUNCTIONAL TOTAL ENERGY', 'WB97X-D TOTAL ENERGY',
             'B3LYP-D3M TOTAL ENERGY', 'BLYP-D3M TOTAL ENERGY', 'BP86-D3M TOTAL ENERGY', 'PBE-D3M TOTAL ENERGY',
             'PBE0-D3M TOTAL ENERGY', 'B97-D3M TOTAL ENERGY', 'B2PLYP-D3M TOTAL ENERGY', 'WPBE-D3M TOTAL ENERGY',
             'B3LYP-D3M(BJ) TOTAL ENERGY', 'BLYP-D3M(BJ) TOTAL ENERGY', 'BP86-D3M(BJ) TOTAL ENERGY', 'PBE-D3M(BJ) TOTAL ENERGY',
             'PBE0-D3M(BJ) TOTAL ENERGY', 'B97-D3M(BJ) TOTAL ENERGY', 'B2PLYP-D3M(BJ) TOTAL ENERGY', 'WPBE-D3M(BJ) TOTAL ENERGY',
             ],
    'dfmp': ['MP2-F12 CORRELATION ENERGY', 'MP2-F12 TOTAL ENERGY', 'MP2-F12 SAME-SPIN CORRELATION ENERGY',
             'SCS-MP2-F12 CORRELATION ENERGY', 'SCS-MP2-F12 TOTAL ENERGY',
             'SCS(N)-MP2-F12 CORRELATION ENERGY', 'SCS(N)-MP2-F12 TOTAL ENERGY',
             'SCS(MI)-MP2-F12 CORRELATION ENERGY', 'SCS(MI)-MP2-F12 TOTAL ENERGY',
             'DW-MP2-F12 CORRELATION ENERGY', 'DW-MP2-F12 TOTAL ENERGY',
             'MP2C-F12 CORRELATION ENERGY', 'MP2C-F12 TOTAL ENERGY',
             'MP2 CORRELATION ENERGY', 'MP2 TOTAL ENERGY', 'MP2 SAME-SPIN CORRELATION ENERGY',
             'SCS-MP2 CORRELATION ENERGY', 'SCS-MP2 TOTAL ENERGY',
             'SCS(N)-MP2 CORRELATION ENERGY', 'SCS(N)-MP2 TOTAL ENERGY',
             'SCS(MI)-MP2 CORRELATION ENERGY', 'SCS(MI)-MP2 TOTAL ENERGY',
             'DW-MP2 CORRELATION ENERGY', 'DW-MP2 TOTAL ENERGY',
             'MP2C CORRELATION ENERGY', 'MP2C TOTAL ENERGY',
             'SAPT2+DMP2 TOTAL ENERGY', 'SAPT2+(CCD)DMP2 TOTAL ENERGY',
             'SAPT2+(3)DMP2 TOTAL ENERGY', 'SAPT2+(3)(CCD)DMP2 TOTAL ENERGY',
             'SAPT2+3DMP2 TOTAL ENERGY', 'SAPT2+3(CCD)DMP2 TOTAL ENERGY',
             'B2PLYP TOTAL ENERGY', 'B2PLYP-D2 TOTAL ENERGY', 'B2PLYP-D3 TOTAL ENERGY', 'B2PLYP-D3(BJ) TOTAL ENERGY',
             'B2PLYP-D3M TOTAL ENERGY', 'B2PLYP-D3M(BJ) TOTAL ENERGY',
             ],
}
|
rmcgibbo/psi4public
|
psi4/driver/qcdb/psivarrosetta.py
|
Python
|
lgpl-3.0
| 18,416
|
[
"Psi4"
] |
ad389ba82e4db63a7197cff5d2379f957e247281e5081f256df5facff5eb307b
|
'''
demo9.py
Demo of a network of spiking neurons driven by synaptic inputs.
Written by Sungho Hong, Computational Neuroscience Unit, OIST, 2017
'''
from neuron import h, gui

h.load_file("stdrun.hoc")
h.load_file("CNSutils.hoc")

# The simulation will run for 200 ms (h.tstop is in ms).
h.tstop = 200

# Global sampling period in ms: 0.01 ms -> 100 kHz sampling.
Dt = 0.01

# Variable step controller to speedup simulation
cvode = h.CVode()
cvode.active(1)
def create_cell(seed):
    """Build one single-compartment AdExpIF cell driven by 500 random synapses.

    seed -- seed for the NetStimFD noise generators, so each cell can get an
    independent input realization.

    Returns a dict with the section, synapse list, recorded spike times and
    membrane voltage; the stimulus/connection/probe objects are returned too
    so Python references keep them alive.
    """
    # Creating a cell: diam = 100/pi and L = 100 make the lateral surface
    # area pi*diam*L = 10,000 um^2.
    soma = h.Section()
    soma.diam = 100/h.PI
    soma.L = 100
    # Adaptive exponential integrate-and-fire mechanism at mid-section.
    adexp = h.AdExpIF(0.5, sec=soma)
    adexp.tauw = 20
    # soma.insert("pas")
    # soma.g_pas = 1e-4

    # Synapses
    syns = []
    # 400 excitatory synapses
    for i in range(400):
        syns.append(h.Exp2Syn(0.5, sec=soma))
        syns[-1].tau1 = 0.5  # rise time
        syns[-1].tau2 = 1.5  # decay time
        syns[-1].e = 0  # reversal potential
    # 100 inhibitory synapses
    for i in range(100):
        syns.append(h.Exp2Syn(0.5, sec=soma))
        syns[-1].tau1 = 1  # rise time
        syns[-1].tau2 = 10  # decay time
        syns[-1].e = -75  # reversal potential

    # Stimuli = random spike trains
    isi = 1000./10  # average interspike interval (ms) for 10 Hz firing
    stims = []
    for i in range(500):
        stims.append(h.NetStimFD(0.5))
        stims[-1].noise = 1  # random firing
        stims[-1].start = 0
        stims[-1].duration = h.tstop
        stims[-1].interval = isi
    # NOTE(review): only stims[0] is explicitly seeded here -- confirm that
    # this seeds the shared noise stream for all NetStimFD objects.
    stims[0].seed(seed)  # Set seed for noise

    # Connections: excitatory inputs get half the weight of inhibitory ones.
    ncs = []
    for i in range(400):
        ncs.append(h.NetCon(stims[i], syns[i]))
        ncs[-1].weight[0] = 0.5e-3
    for i in range(100):
        ncs.append(h.NetCon(stims[i+400], syns[i+400]))
        ncs[-1].weight[0] = 1e-3

    # Set up a current clamp to probe the neuron
    ic = h.IClamp(0.5, sec=soma)
    ic.delay = 50
    ic.amp = 0.4
    ic.dur = 100

    # Record membrane voltage every Dt ms and spike times via APCount.
    vtemp = h.Vector()
    vtemp.record(soma(0.5)._ref_v, Dt)
    spiketime = h.Vector()
    spike_recorder = h.APCount(0.5, sec=soma)
    spike_recorder.thresh = -50
    spike_recorder.record(spiketime)

    return {'soma': soma, 'syns': syns, 'spike': spiketime, 'v': vtemp, 'external_stims': (stims, ncs, ic), 'others': (adexp, spike_recorder)}
# Build the population; each cell gets a different noise seed.
cells = []
Ncells = 20
for i in range(Ncells):
    cells.append(create_cell(i))

# Connect cell 0 (spike source, detected on its voltage) to every other cell.
cell1 = cells[0]
ncs = []
for cell2 in cells[1:]:
    # Add a synapse in the second cell
    # NOTE(review): fast time constants like the excitatory synapses but
    # e = -75 mV (the inhibitory reversal used above) -- confirm intended.
    cell2['syns'].append(h.Exp2Syn(0.5, sec=cell2['soma']))
    cell2['syns'][-1].tau1 = 0.5  # rise time
    cell2['syns'][-1].tau2 = 1.5  # decay time
    cell2['syns'][-1].e = -75  # reversal potential
    # Connect the first cell to the synapse
    ncs.append(h.NetCon(cell1['soma'](0.5)._ref_v, cell2['syns'][-1], sec=cell1['soma']))
    ncs[-1].threshold = -50
    ncs[-1].weight[0] = 50e-3  # This one is big!

# Run the simulation from -70 mV.
h.v_init = -70
h.init()
h.run()

# Save the sampled voltages of the first two cells, and all spike times as
# (time, cell-index) CSV rows (cell indices are 1-based).
h.CNSsaveVectors("voltage1.csv", Dt, cells[0]['v'])
h.CNSsaveVectors("voltage2.csv", Dt, cells[1]['v'])
with open('spikes.csv', 'w') as f:
    for i in range(Ncells):
        for t in cells[i]['spike'].to_python():
            f.write('%g,%d\n' % (t, i+1))
|
shhong/a310_cns_2017
|
Practice_4/demo9.py
|
Python
|
gpl-3.0
| 3,151
|
[
"NEURON"
] |
f61bc9d40dfd754b124789d79b35476672bbb6e4ef0a633ef63a9c4fc02e8e0c
|
'''
Source code for an attention based image caption generation system described
in:
Show, Attend and Tell: Neural Image Caption Generation with Visual Attention
International Conference for Machine Learning (2015)
http://arxiv.org/abs/1502.03044
Comments in square brackets [] indicate references to the equations/
more detailed explanations in the above paper.
'''
import theano
import theano.tensor as tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import cPickle as pkl
import numpy
import copy
import os
import time
from collections import OrderedDict
from sklearn.cross_validation import KFold
import warnings
# [see Section (4.3) for explanation]
from homogeneous_data import HomogeneousData
# supported optimizers
from optimizers import adadelta, adam, rmsprop, sgd
# dataset iterators
import flickr8k
import flickr30k
import coco
# datasets: 'name', 'load_data: returns iterator', 'prepare_data: some preprocessing'
datasets = {'flickr8k': (flickr8k.load_data, flickr8k.prepare_data),
            'flickr30k': (flickr30k.load_data, flickr30k.prepare_data),
            'coco': (coco.load_data, coco.prepare_data)}

def get_dataset(name):
    """Return the (load_data, prepare_data) pair registered for `name`."""
    load_fn, prepare_fn = datasets[name]
    return load_fn, prepare_fn
'''
Theano uses shared variables for parameters, so to
make this code more portable, these two functions
push and pull variables between a shared
variable dictionary and a regular numpy
dictionary
'''
# push parameters to Theano shared variables
def zipp(params, tparams):
    """Copy each value from the plain dict `params` into the matching
    Theano shared variable in `tparams`."""
    for name, value in params.items():
        tparams[name].set_value(value)
# pull parameters from Theano shared variables
def unzip(zipped):
    """Snapshot the shared variables in `zipped` into a new OrderedDict of
    their raw (numpy) values, preserving order."""
    return OrderedDict((name, shared.get_value()) for name, shared in zipped.items())
# get the list of parameters: Note that tparams must be OrderedDict
def itemlist(tparams):
    """Return the parameter (shared variable) values in insertion order."""
    return list(tparams.values())
# dropout in theano
def dropout_layer(state_before, use_noise, trng):
    """
    Symbolic dropout with keep-probability 0.5.

    tensor.switch acts as an if on the shared flag `use_noise`: while it is
    on (training) the activations are multiplied by a fresh binomial mask;
    while it is off (testing) they are deterministically scaled by 0.5 so
    the expected activation matches. Toggle `use_noise` to switch modes.
    """
    keep_mask = trng.binomial(state_before.shape, p=0.5, n=1,
                              dtype=state_before.dtype)
    return tensor.switch(use_noise,
                         state_before * keep_mask,
                         state_before * 0.5)
# make prefix-appended name
def _p(pp, name):
return '%s_%s' % (pp, name)
# initialize Theano shared variables according to the initial parameters
def init_tparams(params):
    """Wrap every numpy parameter in a same-named Theano shared variable,
    returned as an OrderedDict in the original order."""
    tparams = OrderedDict()
    for name in params:
        tparams[name] = theano.shared(params[name], name=name)
    return tparams
# load parameters
def load_params(path, params):
    """Overwrite the values in `params` with arrays from the .npz archive
    at `path`, keyed by parameter name.

    Returns `params` (mutated in place). Raises Warning -- interface kept
    from the original code -- when an expected parameter is missing.
    """
    archive = numpy.load(path)
    try:
        for name in params:
            if name not in archive:
                # Preserve original behavior: abort on a missing parameter.
                raise Warning('%s is not in the archive' % name)
            params[name] = archive[name]
    finally:
        # numpy.load on an .npz keeps the underlying file handle open; close
        # it so the descriptor is not leaked (the original never closed it).
        if hasattr(archive, 'close'):
            archive.close()
    return params
# some utilities
def ortho_weight(ndim):
    """
    Random square orthogonal matrix, float32.

    The left singular vectors of a Gaussian matrix form an orthonormal
    basis, so both the rows and columns of the result are orthogonal.
    Used by norm_weight (below) for square weight matrices.
    """
    gaussian = numpy.random.randn(ndim, ndim)
    basis, _, _ = numpy.linalg.svd(gaussian)
    return basis.astype('float32')

def norm_weight(nin, nout=None, scale=0.01, ortho=True):
    """
    Random weight matrix, float32: orthogonal when square and ortho=True,
    otherwise Gaussian entries scaled by `scale`. nout defaults to nin.
    """
    out_dim = nin if nout is None else nout
    if ortho and out_dim == nin:
        return ortho_weight(nin)
    return (scale * numpy.random.randn(nin, out_dim)).astype('float32')
# some useful shorthands
def tanh(x):
    # elementwise hyperbolic tangent (symbolic)
    return tensor.tanh(x)

def rectifier(x):
    # elementwise ReLU: max(0, x)
    return tensor.maximum(0., x)

def linear(x):
    # identity activation
    return x
"""
Neural network layer definitions.
The life-cycle of each of these layers is as follows
1) The param_init of the layer is called, which creates
the weights of the network.
2) The fprop is called which builds that part of the Theano graph
using the weights created in step 1). This automatically links
these variables to the graph.
Each prefix is used like a key and should be unique
to avoid naming conflicts when building the graph.
"""
# layers: 'name': ('parameter initializer', 'fprop')
layers = {'ff': ('param_init_fflayer', 'fflayer'),
          'lstm': ('param_init_lstm', 'lstm_layer'),
          'lstm_cond': ('param_init_lstm_cond', 'lstm_cond_layer'),
          }

def get_layer(name):
    """
    Return the (param_init, fprop) function pair for layer type `name`.

    The function names stored in `layers` are resolved through this
    module's globals() instead of eval(); for these plain identifiers the
    lookup is identical, but it cannot execute arbitrary expressions.
    """
    init_name, fprop_name = layers[name]
    return (globals()[init_name], globals()[fprop_name])
# feedforward layer: affine transformation + point-wise nonlinearity
def param_init_fflayer(options, params, prefix='ff', nin=None, nout=None):
    """Create the affine parameters W (nin x nout) and b for a feedforward
    layer; missing dimensions default to options['dim_proj']."""
    in_dim = options['dim_proj'] if nin is None else nin
    out_dim = options['dim_proj'] if nout is None else nout
    params[_p(prefix, 'W')] = norm_weight(in_dim, out_dim, scale=0.01)
    params[_p(prefix, 'b')] = numpy.zeros((out_dim,)).astype('float32')
    return params
def fflayer(tparams, state_below, options, prefix='rconv', activ='lambda x: tensor.tanh(x)', **kwargs):
    # `activ` is a *string* of Python source eval'd on every call to obtain
    # the activation; names inside it (e.g. `tensor`) resolve in this
    # module's globals. Never pass untrusted strings here.
    return eval(activ)(tensor.dot(state_below, tparams[_p(prefix,'W')])+tparams[_p(prefix,'b')])
# LSTM layer
def param_init_lstm(options, params, prefix='lstm', nin=None, dim=None):
    """Create LSTM parameters: input weights W, recurrent weights U, bias b.

    The four gate matrices are stacked column-wise so a single dot product
    computes every gate pre-activation at once (much cleaner code and
    slightly faster); the fprop slices the result per gate. Dimensions
    default to options['dim_proj'].
    """
    if nin is None:
        nin = options['dim_proj']
    if dim is None:
        dim = options['dim_proj']
    # input-to-hidden weights: one norm_weight block per gate
    params[_p(prefix, 'W')] = numpy.concatenate(
        [norm_weight(nin, dim) for _ in range(4)], axis=1)
    # hidden-to-hidden weights: one orthogonal block per gate
    params[_p(prefix, 'U')] = numpy.concatenate(
        [ortho_weight(dim) for _ in range(4)], axis=1)
    # single bias vector covering all four gates
    params[_p(prefix, 'b')] = numpy.zeros((4 * dim,)).astype('float32')
    return params
# This function implements the lstm fprop
def lstm_layer(tparams, state_below, options, prefix='lstm', mask=None, **kwargs):
    """
    Build the Theano graph for an (unconditional) LSTM over `state_below`.

    state_below: (nsteps, n_samples, nin) during training, or (nsteps, nin)
    during sampling (no batch axis). Returns the list of scan outputs
    [h, c, i, f, o, preact], each with a leading time axis.
    """
    nsteps = state_below.shape[0]
    dim = tparams[_p(prefix,'U')].shape[0]

    # if we are dealing with a mini-batch
    if state_below.ndim == 3:
        n_samples = state_below.shape[1]
        init_state = tensor.alloc(0., n_samples, dim)
        init_memory = tensor.alloc(0., n_samples, dim)
    # during sampling
    else:
        n_samples = 1
        init_state = tensor.alloc(0., dim)
        init_memory = tensor.alloc(0., dim)

    # if we have no mask, we assume all the inputs are valid
    # (fixed: `is None` instead of `== None`; the identity test is the
    # correct way to detect a missing argument)
    if mask is None:
        mask = tensor.alloc(1., state_below.shape[0], 1)

    # use the slice to calculate all the different gates
    def _slice(_x, n, dim):
        if _x.ndim == 3:
            return _x[:, :, n*dim:(n+1)*dim]
        elif _x.ndim == 2:
            return _x[:, n*dim:(n+1)*dim]
        return _x[n*dim:(n+1)*dim]

    # one time step of the lstm
    # NOTE(review): m_ (the mask slice) is accepted but never used below, so
    # masking is currently a no-op in this layer -- confirm intended.
    def _step(m_, x_, h_, c_):
        preact = tensor.dot(h_, tparams[_p(prefix, 'U')])
        preact += x_

        # gates: input, forget, output, then the candidate cell state
        i = tensor.nnet.sigmoid(_slice(preact, 0, dim))
        f = tensor.nnet.sigmoid(_slice(preact, 1, dim))
        o = tensor.nnet.sigmoid(_slice(preact, 2, dim))
        c = tensor.tanh(_slice(preact, 3, dim))

        c = f * c_ + i * c
        h = o * tensor.tanh(c)

        return h, c, i, f, o, preact

    # project the inputs for all timesteps in one dot product
    state_below = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]

    rval, updates = theano.scan(_step,
                                sequences=[mask, state_below],
                                outputs_info=[init_state, init_memory, None, None, None, None],
                                name=_p(prefix, '_layers'),
                                n_steps=nsteps, profile=False)
    return rval
# Conditional LSTM layer with Attention
def param_init_lstm_cond(options, params, prefix='lstm_cond', nin=None, dim=None, dimctx=None):
    """Create parameters for the attention-conditioned LSTM: gate weights
    plus the attention projections. Dimensions default to options['dim']."""
    if nin is None:
        nin = options['dim']
    if dim is None:
        dim = options['dim']
    if dimctx is None:
        dimctx = options['dim']

    # input to LSTM, similar to the above, we stack the matricies for compactness, do one
    # dot product, and use the slice function below to get the activations for each "gate"
    W = numpy.concatenate([norm_weight(nin,dim),
                           norm_weight(nin,dim),
                           norm_weight(nin,dim),
                           norm_weight(nin,dim)], axis=1)
    params[_p(prefix,'W')] = W

    # LSTM to LSTM
    U = numpy.concatenate([ortho_weight(dim),
                           ortho_weight(dim),
                           ortho_weight(dim),
                           ortho_weight(dim)], axis=1)
    params[_p(prefix,'U')] = U

    # bias to LSTM
    params[_p(prefix,'b')] = numpy.zeros((4 * dim,)).astype('float32')

    # context to LSTM
    Wc = norm_weight(dimctx,dim*4)
    params[_p(prefix,'Wc')] = Wc

    # attention: context -> hidden
    Wc_att = norm_weight(dimctx, ortho=False)
    params[_p(prefix,'Wc_att')] = Wc_att

    # attention: LSTM -> hidden
    Wd_att = norm_weight(dim,dimctx)
    params[_p(prefix,'Wd_att')] = Wd_att

    # attention: hidden bias
    b_att = numpy.zeros((dimctx,)).astype('float32')
    params[_p(prefix,'b_att')] = b_att

    # optional "deep" attention: extra square layers on the attention hidden
    if options['n_layers_att'] > 1:
        for lidx in xrange(1, options['n_layers_att']):
            params[_p(prefix,'W_att_%d'%lidx)] = ortho_weight(dimctx)
            params[_p(prefix,'b_att_%d'%lidx)] = numpy.zeros((dimctx,)).astype('float32')

    # attention: projection to one scalar score per location (dimctx x 1)
    U_att = norm_weight(dimctx,1)
    params[_p(prefix,'U_att')] = U_att
    c_att = numpy.zeros((1,)).astype('float32')
    # NOTE(review): the key 'c_tt' (not 'c_att') looks like a typo, but it is
    # preserved -- saved models reference parameters by these exact keys.
    params[_p(prefix, 'c_tt')] = c_att

    if options['selector']:
        # attention: selector
        W_sel = norm_weight(dim, 1)
        params[_p(prefix, 'W_sel')] = W_sel
        b_sel = numpy.float32(0.)
        params[_p(prefix, 'b_sel')] = b_sel

    return params
def lstm_cond_layer(tparams, state_below, options, prefix='lstm',
                    mask=None, context=None, one_step=False,
                    init_memory=None, init_state=None,
                    trng=None, use_noise=None, sampling=True,
                    argmax=False, **kwargs):
    """Forward pass of the attention-conditioned LSTM decoder.

    Parameters
    ----------
    tparams : OrderedDict
        maps parameter names to theano shared variables (keys under `prefix`)
    state_below : theano variable
        embedded input words; timesteps x samples x dim during training,
        a single step's worth when `one_step` is True
    options : dict
        model options/hyperparameters
    mask : theano variable or None
        per-timestep validity mask; all-ones assumed when None
    context : theano variable
        annotation vectors to attend over (must be provided)
    one_step : bool
        if True run exactly one step (sampling); otherwise scan over time
    init_memory, init_state : theano variables or None
        previous LSTM memory/hidden state; zeros when None (and required
        when one_step is True)
    trng : theano random stream
        used for stochastic attention and LSTM dropout masks
    use_noise : theano shared variable
        toggles dropout noise on/off
    sampling : bool
        stochastic attention only: sample hard alphas vs. use soft alphas
    argmax : bool
        stochastic attention without sampling: take argmax alphas

    Returns
    -------
    one_step: the list of step outputs; otherwise (outputs, scan updates).
    Outputs begin with [h, c, alpha, alpha_sample, ctx_], then the
    selector gate (if enabled), then auxiliary/debugging quantities.
    """
    assert context, 'Context must be provided'

    if one_step:
        assert init_memory, 'previous memory must be provided'
        assert init_state, 'previous state must be provided'

    nsteps = state_below.shape[0]
    if state_below.ndim == 3:
        n_samples = state_below.shape[1]
    else:
        n_samples = 1

    # mask: assume every timestep is valid when none is given
    if mask is None:
        mask = tensor.alloc(1., state_below.shape[0], 1)

    # infer lstm dimension from the recurrent matrix U
    dim = tparams[_p(prefix, 'U')].shape[0]

    # initial/previous state
    if init_state is None:
        init_state = tensor.alloc(0., n_samples, dim)
    # initial/previous memory
    if init_memory is None:
        init_memory = tensor.alloc(0., n_samples, dim)

    # projected context: shared (state-independent) part of the attention MLP,
    # computed once outside the recurrence
    pctx_ = tensor.dot(context, tparams[_p(prefix,'Wc_att')]) + tparams[_p(prefix, 'b_att')]
    if options['n_layers_att'] > 1:
        for lidx in xrange(1, options['n_layers_att']):
            pctx_ = tensor.dot(pctx_, tparams[_p(prefix,'W_att_%d'%lidx)])+tparams[_p(prefix, 'b_att_%d'%lidx)]
            # note to self: this used to be options['n_layers_att'] - 1, so no extra non-linearity if n_layers_att < 3
            # (as written, lidx < n_layers_att always holds in this loop, so
            # tanh is applied after every deep-attention layer)
            if lidx < options['n_layers_att']:
                pctx_ = tanh(pctx_)

    # projected x
    # state_below is timesteps*num samples by d in training (TODO change to notation of paper)
    # this is n * d during sampling
    state_below = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]

    # additional parameters for stochastic hard attention
    if options['attn_type'] == 'stochastic':
        # temperature for softmax
        temperature = options.get("temperature", 1)
        # [see (Section 4.1): Stochastic "Hard" Attention]
        semi_sampling_p = options.get("semi_sampling_p", 0.5)
        temperature_c = theano.shared(numpy.float32(temperature), name='temperature_c')
        # single Bernoulli draw deciding whether this pass samples alphas
        # or uses their expectation (semi-sampling)
        h_sampling_mask = trng.binomial((1,), p=semi_sampling_p, n=1, dtype=theano.config.floatX).sum()

    # slice out the pre-activation of gate number n from a stacked matrix
    def _slice(_x, n, dim):
        if _x.ndim == 3:
            return _x[:, :, n*dim:(n+1)*dim]
        return _x[:, n*dim:(n+1)*dim]

    def _step(m_, x_, h_, c_, a_, as_, ct_, pctx_, dp_=None, dp_att_=None):
        """ Each variable is one time slice of the LSTM
        m_ - (mask), x_- (previous word), h_- (hidden state), c_- (lstm memory),
        a_ - (alpha distribution [eq (5)]), as_- (sample from alpha dist), ct_- (context),
        pctx_ (projected context), dp_/dp_att_ (dropout masks)
        """
        # attention computation
        # [described in equations (4), (5), (6) in
        # section "3.1.2 Decoder: Long Short Term Memory Network]
        pstate_ = tensor.dot(h_, tparams[_p(prefix,'Wd_att')])
        # broadcast the state projection over the annotation axis
        pctx_ = pctx_ + pstate_[:,None,:]
        pctx_list = []
        pctx_list.append(pctx_)
        pctx_ = tanh(pctx_)
        # one unnormalized score per annotation (note the 'c_tt' key
        # matches param_init_lstm_cond's registration)
        alpha = tensor.dot(pctx_, tparams[_p(prefix,'U_att')])+tparams[_p(prefix, 'c_tt')]
        alpha_pre = alpha
        alpha_shp = alpha.shape
        if options['attn_type'] == 'deterministic':
            alpha = tensor.nnet.softmax(alpha.reshape([alpha_shp[0],alpha_shp[1]])) # softmax
            ctx_ = (context * alpha[:,:,None]).sum(1) # current context
            alpha_sample = alpha # you can return something else reasonable here to debug
        else:
            alpha = tensor.nnet.softmax(temperature_c*alpha.reshape([alpha_shp[0],alpha_shp[1]])) # softmax
            # TODO return alpha_sample
            if sampling:
                # semi-sampling: mix a multinomial draw with the soft alphas
                alpha_sample = h_sampling_mask * trng.multinomial(pvals=alpha,dtype=theano.config.floatX)\
                    + (1.-h_sampling_mask) * alpha
            else:
                if argmax:
                    # hard one-hot attention at the argmax annotation
                    alpha_sample = tensor.cast(tensor.eq(tensor.arange(alpha_shp[1])[None,:],
                                                         tensor.argmax(alpha,axis=1,keepdims=True)), theano.config.floatX)
                else:
                    alpha_sample = alpha
            ctx_ = (context * alpha_sample[:,:,None]).sum(1) # current context

        if options['selector']:
            # scalar gate in (0,1) scaling the attended context
            sel_ = tensor.nnet.sigmoid(tensor.dot(h_, tparams[_p(prefix, 'W_sel')])+tparams[_p(prefix,'b_sel')])
            sel_ = sel_.reshape([sel_.shape[0]])
            ctx_ = sel_[:,None] * ctx_

        preact = tensor.dot(h_, tparams[_p(prefix, 'U')])
        preact += x_
        preact += tensor.dot(ctx_, tparams[_p(prefix, 'Wc')])

        # Recover the activations to the lstm gates
        # [equation (1)]
        i = _slice(preact, 0, dim)
        f = _slice(preact, 1, dim)
        o = _slice(preact, 2, dim)
        if options['use_dropout_lstm']:
            # dropout on the gate pre-activations (dp_ carries 3 stacked masks)
            i = i * _slice(dp_, 0, dim)
            f = f * _slice(dp_, 1, dim)
            o = o * _slice(dp_, 2, dim)
        i = tensor.nnet.sigmoid(i)
        f = tensor.nnet.sigmoid(f)
        o = tensor.nnet.sigmoid(o)
        c = tensor.tanh(_slice(preact, 3, dim))

        # compute the new memory/hidden state
        # if the mask is 0, just copy the previous state
        c = f * c_ + i * c
        c = m_[:,None] * c + (1. - m_)[:,None] * c_

        h = o * tensor.tanh(c)
        h = m_[:,None] * h + (1. - m_)[:,None] * h_

        rval = [h, c, alpha, alpha_sample, ctx_]
        if options['selector']:
            rval += [sel_]
        # trailing entries are auxiliary/debugging outputs; their count must
        # stay in sync with outputs_info below
        rval += [pstate_, pctx_, i, f, o, preact, alpha_pre]+pctx_list
        return rval

    # scan passes outputs_info entries positionally, so a wrapper lambda is
    # needed for each selector/dropout combination to reorder the arguments
    if options['use_dropout_lstm']:
        if options['selector']:
            _step0 = lambda m_, x_, dp_, h_, c_, a_, as_, ct_, sel_, pctx_: \
                _step(m_, x_, h_, c_, a_, as_, ct_, pctx_, dp_)
        else:
            _step0 = lambda m_, x_, dp_, h_, c_, a_, as_, ct_, pctx_: \
                _step(m_, x_, h_, c_, a_, as_, ct_, pctx_, dp_)
        dp_shape = state_below.shape
        if one_step:
            # dropout is active only when use_noise is on; otherwise the
            # expected mask value 0.5 is used (inverted-dropout style scaling)
            dp_mask = tensor.switch(use_noise,
                                    trng.binomial((dp_shape[0], 3*dim),
                                                  p=0.5, n=1, dtype=state_below.dtype),
                                    tensor.alloc(0.5, dp_shape[0], 3 * dim))
        else:
            dp_mask = tensor.switch(use_noise,
                                    trng.binomial((dp_shape[0], dp_shape[1], 3*dim),
                                                  p=0.5, n=1, dtype=state_below.dtype),
                                    tensor.alloc(0.5, dp_shape[0], dp_shape[1], 3*dim))
    else:
        if options['selector']:
            _step0 = lambda m_, x_, h_, c_, a_, as_, ct_, sel_, pctx_: _step(m_, x_, h_, c_, a_, as_, ct_, pctx_)
        else:
            _step0 = lambda m_, x_, h_, c_, a_, as_, ct_, pctx_: _step(m_, x_, h_, c_, a_, as_, ct_, pctx_)

    if one_step:
        # single decoder step (beam search / sampling): call the step
        # function directly instead of scanning
        if options['use_dropout_lstm']:
            if options['selector']:
                rval = _step0(mask, state_below, dp_mask, init_state, init_memory, None, None, None, None, pctx_)
            else:
                rval = _step0(mask, state_below, dp_mask, init_state, init_memory, None, None, None, pctx_)
        else:
            if options['selector']:
                rval = _step0(mask, state_below, init_state, init_memory, None, None, None, None, pctx_)
            else:
                rval = _step0(mask, state_below, init_state, init_memory, None, None, None, pctx_)
        return rval
    else:
        seqs = [mask, state_below]
        if options['use_dropout_lstm']:
            seqs += [dp_mask]
        # recurrent outputs (fed back into _step0); the None entries below
        # correspond to _step's auxiliary outputs, which are not fed back
        outputs_info = [init_state,
                        init_memory,
                        tensor.alloc(0., n_samples, pctx_.shape[1]),
                        tensor.alloc(0., n_samples, pctx_.shape[1]),
                        tensor.alloc(0., n_samples, context.shape[2])]
        if options['selector']:
            outputs_info += [tensor.alloc(0., n_samples)]
        outputs_info += [None,
                         None,
                         None,
                         None,
                         None,
                         None,
                         None] + [None] # *options['n_layers_att']
        rval, updates = theano.scan(_step0,
                                    sequences=seqs,
                                    outputs_info=outputs_info,
                                    non_sequences=[pctx_],
                                    name=_p(prefix, '_layers'),
                                    n_steps=nsteps, profile=False)
        return rval, updates
# parameter initialization
# [roughly in the same order as presented in section 3.1.2]
def init_params(options):
    """Initialize all model parameters as numpy arrays in an OrderedDict.

    Registers, in order: the word embedding matrix, the optional
    bidirectional LSTM annotation encoder, the MLP that initializes the
    decoder state/memory, the (possibly deep) attention-conditioned
    decoder LSTM, and the readout layers.

    Parameters
    ----------
    options : dict
        model hyperparameters (dims, layer counts, feature flags)

    Returns
    -------
    params : OrderedDict
        parameter name -> numpy array
    """
    params = OrderedDict()
    # embedding: [matrix E in paper]
    params['Wemb'] = norm_weight(options['n_words'], options['dim_word'])
    ctx_dim = options['ctx_dim']
    if options['lstm_encoder']: # potential feature that runs an LSTM over the annotation vectors
        # encoder: LSTM
        params = get_layer('lstm')[0](options, params, prefix='encoder',
                                      nin=options['ctx_dim'], dim=options['dim'])
        params = get_layer('lstm')[0](options, params, prefix='encoder_rev',
                                      nin=options['ctx_dim'], dim=options['dim'])
        # bidirectional encoding doubles the annotation dimensionality
        ctx_dim = options['dim'] * 2
    # init_state, init_cell: [top right on page 4]
    for lidx in xrange(1, options['n_layers_init']):
        params = get_layer('ff')[0](options, params, prefix='ff_init_%d'%lidx, nin=ctx_dim, nout=ctx_dim)
    params = get_layer('ff')[0](options, params, prefix='ff_state', nin=ctx_dim, nout=options['dim'])
    params = get_layer('ff')[0](options, params, prefix='ff_memory', nin=ctx_dim, nout=options['dim'])
    # decoder: LSTM: [equation (1)/(2)/(3)]
    params = get_layer('lstm_cond')[0](options, params, prefix='decoder',
                                       nin=options['dim_word'], dim=options['dim'],
                                       dimctx=ctx_dim)
    # potentially deep decoder (warning: should work but somewhat untested)
    if options['n_layers_lstm'] > 1:
        for lidx in xrange(1, options['n_layers_lstm']):
            # BUGFIX: these layers previously used nin=options['ctx_dim'],
            # but build_model feeds them ctx_mean, whose width is the local
            # ctx_dim (doubled to 2*dim when lstm_encoder is on).  Using the
            # local ctx_dim keeps the dimensions consistent; behaviour is
            # unchanged when lstm_encoder is off.
            params = get_layer('ff')[0](options, params, prefix='ff_state_%d'%lidx, nin=ctx_dim, nout=options['dim'])
            params = get_layer('ff')[0](options, params, prefix='ff_memory_%d'%lidx, nin=ctx_dim, nout=options['dim'])
            params = get_layer('lstm_cond')[0](options, params, prefix='decoder_%d'%lidx,
                                               nin=options['dim'], dim=options['dim'],
                                               dimctx=ctx_dim)
    # readout: [equation (7)]
    params = get_layer('ff')[0](options, params, prefix='ff_logit_lstm', nin=options['dim'], nout=options['dim_word'])
    if options['ctx2out']:
        params = get_layer('ff')[0](options, params, prefix='ff_logit_ctx', nin=ctx_dim, nout=options['dim_word'])
    if options['n_layers_out'] > 1:
        for lidx in xrange(1, options['n_layers_out']):
            params = get_layer('ff')[0](options, params, prefix='ff_logit_h%d'%lidx, nin=options['dim_word'], nout=options['dim_word'])
    params = get_layer('ff')[0](options, params, prefix='ff_logit', nin=options['dim_word'], nout=options['n_words'])
    return params
# build a training model
def build_model(tparams, options, sampling=True):
    """ Builds the entire computational graph used for training
    [This function builds a model described in Section 3.1.2 onwards
    as the convolutional feature are precomputed, some extra features
    which were not used are also implemented here.]
    Parameters
    ----------
    tparams : OrderedDict
        maps names of variables to theano shared variables
    options : dict
        big dictionary with all the settings and hyperparameters
    sampling : boolean
        [If it is true, when using stochastic attention, follows
        the learning rule described in section 4. at the bottom left of
        page 5]
    Returns
    -------
    trng: theano random number generator
        Used for dropout, stochastic attention, etc
    use_noise: theano shared variable
        flag that toggles noise on and off
    [x, mask, ctx]: theano variables
        Represent the captions, binary mask, and annotations
        for a single batch (see dimensions below)
    alphas: theano variables
        Attention weights
    alpha_sample: theano variable
        Sampled attention weights used in REINFORCE for stochastic
        attention: [see the learning rule in eq (12)]
    cost: theano variable
        negative log likelihood
    opt_outs: OrderedDict
        extra outputs required depending on configuration in options
    """
    trng = RandomStreams(1234)
    use_noise = theano.shared(numpy.float32(0.))

    # description string: #words x #samples,
    x = tensor.matrix('x', dtype='int64')
    mask = tensor.matrix('mask', dtype='float32')
    # context: #samples x #annotations x dim
    ctx = tensor.tensor3('ctx', dtype='float32')

    n_timesteps = x.shape[0]
    n_samples = x.shape[1]

    # index into the word embedding matrix, shift it forward in time
    # (so the decoder at step t sees the word from step t-1; step 0 sees zeros)
    emb = tparams['Wemb'][x.flatten()].reshape([n_timesteps, n_samples, options['dim_word']])
    emb_shifted = tensor.zeros_like(emb)
    emb_shifted = tensor.set_subtensor(emb_shifted[1:], emb[:-1])
    emb = emb_shifted

    if options['lstm_encoder']:
        # encoder: bidirectional LSTM over the annotation axis; forward and
        # reversed passes are concatenated feature-wise
        ctx_fwd = get_layer('lstm')[1](tparams, ctx.dimshuffle(1,0,2),
                                       options, prefix='encoder')[0].dimshuffle(1,0,2)
        ctx_rev = get_layer('lstm')[1](tparams, ctx.dimshuffle(1,0,2)[:,::-1,:],
                                       options, prefix='encoder_rev')[0][:,::-1,:].dimshuffle(1,0,2)
        ctx0 = tensor.concatenate((ctx_fwd, ctx_rev), axis=2)
    else:
        ctx0 = ctx

    # initial state/cell [top right on page 4]: mean annotation vector,
    # optionally pushed through extra feed-forward layers
    ctx_mean = ctx0.mean(1)
    for lidx in xrange(1, options['n_layers_init']):
        ctx_mean = get_layer('ff')[1](tparams, ctx_mean, options,
                                      prefix='ff_init_%d'%lidx, activ='rectifier')
        if options['use_dropout']:
            ctx_mean = dropout_layer(ctx_mean, use_noise, trng)

    init_state = get_layer('ff')[1](tparams, ctx_mean, options, prefix='ff_state', activ='tanh')
    init_memory = get_layer('ff')[1](tparams, ctx_mean, options, prefix='ff_memory', activ='tanh')

    # lstm decoder
    # [equation (1), (2), (3) in section 3.1.2]
    attn_updates = []
    proj, updates = get_layer('lstm_cond')[1](tparams, emb, options,
                                              prefix='decoder',
                                              mask=mask, context=ctx0,
                                              one_step=False,
                                              init_state=init_state,
                                              init_memory=init_memory,
                                              trng=trng,
                                              use_noise=use_noise,
                                              sampling=sampling)
    attn_updates += updates
    proj_h = proj[0]

    # optional deep attention: stack further conditional LSTMs, each with
    # its own initial state/memory derived from ctx_mean
    if options['n_layers_lstm'] > 1:
        for lidx in xrange(1, options['n_layers_lstm']):
            init_state = get_layer('ff')[1](tparams, ctx_mean, options, prefix='ff_state_%d'%lidx, activ='tanh')
            init_memory = get_layer('ff')[1](tparams, ctx_mean, options, prefix='ff_memory_%d'%lidx, activ='tanh')
            proj, updates = get_layer('lstm_cond')[1](tparams, proj_h, options,
                                                      prefix='decoder_%d'%lidx,
                                                      mask=mask, context=ctx0,
                                                      one_step=False,
                                                      init_state=init_state,
                                                      init_memory=init_memory,
                                                      trng=trng,
                                                      use_noise=use_noise,
                                                      sampling=sampling)
            attn_updates += updates
            proj_h = proj[0]

    # unpack the remaining decoder outputs (see lstm_cond_layer's rval order)
    alphas = proj[2]
    alpha_sample = proj[3]
    ctxs = proj[4]

    # [beta value explained in note 4.2.1 "doubly stochastic attention"]
    if options['selector']:
        sels = proj[5]

    if options['use_dropout']:
        proj_h = dropout_layer(proj_h, use_noise, trng)

    # compute word probabilities
    # [equation (7)]
    logit = get_layer('ff')[1](tparams, proj_h, options, prefix='ff_logit_lstm', activ='linear')
    if options['prev2out']:
        logit += emb
    if options['ctx2out']:
        logit += get_layer('ff')[1](tparams, ctxs, options, prefix='ff_logit_ctx', activ='linear')
    logit = tanh(logit)
    if options['use_dropout']:
        logit = dropout_layer(logit, use_noise, trng)
    if options['n_layers_out'] > 1:
        for lidx in xrange(1, options['n_layers_out']):
            logit = get_layer('ff')[1](tparams, logit, options, prefix='ff_logit_h%d'%lidx, activ='rectifier')
            if options['use_dropout']:
                logit = dropout_layer(logit, use_noise, trng)

    # compute softmax over the vocabulary (flatten time and batch axes first)
    logit = get_layer('ff')[1](tparams, logit, options, prefix='ff_logit', activ='linear')
    logit_shp = logit.shape
    probs = tensor.nnet.softmax(logit.reshape([logit_shp[0]*logit_shp[1], logit_shp[2]]))

    # Index into the computed probability to give the log likelihood
    # (the 1e-8 guards against log(0))
    x_flat = x.flatten()
    p_flat = probs.flatten()
    cost = -tensor.log(p_flat[tensor.arange(x_flat.shape[0])*probs.shape[1]+x_flat]+1e-8)
    cost = cost.reshape([x.shape[0], x.shape[1]])
    masked_cost = cost * mask
    cost = (masked_cost).sum(0)

    # optional outputs
    opt_outs = dict()
    if options['selector']:
        opt_outs['selector'] = sels
    if options['attn_type'] == 'stochastic':
        opt_outs['masked_cost'] = masked_cost # need this for reinforce later
        opt_outs['attn_updates'] = attn_updates # this is to update the rng

    return trng, use_noise, [x, mask, ctx], alphas, alpha_sample, cost, opt_outs
# build a sampler
def build_sampler(tparams, options, use_noise, trng, sampling=True):
    """ Builds a sampler used for generating from the model
    Parameters
    ----------
    See build_model function above
    Returns
    -------
    f_init : theano function
        Input: annotation, Output: initial lstm state and memory
        (also performs transformation on ctx0 if using lstm_encoder)
    f_next: theano function
        Takes the previous word/state/memory + ctx0 and runs ne
        step through the lstm (used for beam search)
    """
    # context: #annotations x dim  (a single image, no batch axis)
    ctx = tensor.matrix('ctx_sampler', dtype='float32')
    if options['lstm_encoder']:
        # encoder: bidirectional LSTM over the annotations, as in build_model
        # but without the batch dimshuffles
        ctx_fwd = get_layer('lstm')[1](tparams, ctx,
                                       options, prefix='encoder')[0]
        ctx_rev = get_layer('lstm')[1](tparams, ctx[::-1,:],
                                       options, prefix='encoder_rev')[0][::-1,:]
        ctx = tensor.concatenate((ctx_fwd, ctx_rev), axis=1)

    # initial state/cell from the mean annotation (mirrors build_model)
    ctx_mean = ctx.mean(0)
    for lidx in xrange(1, options['n_layers_init']):
        ctx_mean = get_layer('ff')[1](tparams, ctx_mean, options,
                                      prefix='ff_init_%d'%lidx, activ='rectifier')
        if options['use_dropout']:
            ctx_mean = dropout_layer(ctx_mean, use_noise, trng)
    # one state/memory per decoder layer
    init_state = [get_layer('ff')[1](tparams, ctx_mean, options, prefix='ff_state', activ='tanh')]
    init_memory = [get_layer('ff')[1](tparams, ctx_mean, options, prefix='ff_memory', activ='tanh')]
    if options['n_layers_lstm'] > 1:
        for lidx in xrange(1, options['n_layers_lstm']):
            init_state.append(get_layer('ff')[1](tparams, ctx_mean, options, prefix='ff_state_%d'%lidx, activ='tanh'))
            init_memory.append(get_layer('ff')[1](tparams, ctx_mean, options, prefix='ff_memory_%d'%lidx, activ='tanh'))

    print 'Building f_init...',
    f_init = theano.function([ctx], [ctx]+init_state+init_memory, name='f_init', profile=False)
    print 'Done'

    # build f_next: fresh symbolic inputs for one decoding step
    ctx = tensor.matrix('ctx_sampler', dtype='float32')
    x = tensor.vector('x_sampler', dtype='int64')
    # NOTE(review): all per-layer state/memory inputs share the names
    # 'init_state'/'init_memory'; they are distinct variables and are
    # matched positionally in the theano.function call below
    init_state = [tensor.matrix('init_state', dtype='float32')]
    init_memory = [tensor.matrix('init_memory', dtype='float32')]
    if options['n_layers_lstm'] > 1:
        for lidx in xrange(1, options['n_layers_lstm']):
            init_state.append(tensor.matrix('init_state', dtype='float32'))
            init_memory.append(tensor.matrix('init_memory', dtype='float32'))

    # for the first word (which is coded with -1), emb should be all zero
    emb = tensor.switch(x[:,None] < 0, tensor.alloc(0., 1, tparams['Wemb'].shape[1]),
                        tparams['Wemb'][x])

    # one step through each decoder layer
    proj = get_layer('lstm_cond')[1](tparams, emb, options,
                                     prefix='decoder',
                                     mask=None, context=ctx,
                                     one_step=True,
                                     init_state=init_state[0],
                                     init_memory=init_memory[0],
                                     trng=trng,
                                     use_noise=use_noise,
                                     sampling=sampling)

    next_state, next_memory, ctxs = [proj[0]], [proj[1]], [proj[4]]
    proj_h = proj[0]
    if options['n_layers_lstm'] > 1:
        for lidx in xrange(1, options['n_layers_lstm']):
            proj = get_layer('lstm_cond')[1](tparams, proj_h, options,
                                             prefix='decoder_%d'%lidx,
                                             context=ctx,
                                             one_step=True,
                                             init_state=init_state[lidx],
                                             init_memory=init_memory[lidx],
                                             trng=trng,
                                             use_noise=use_noise,
                                             sampling=sampling)
            next_state.append(proj[0])
            next_memory.append(proj[1])
            ctxs.append(proj[4])
            proj_h = proj[0]

    if options['use_dropout']:
        proj_h = dropout_layer(proj[0], use_noise, trng)
    else:
        proj_h = proj[0]

    # readout, mirroring build_model [equation (7)]
    logit = get_layer('ff')[1](tparams, proj_h, options, prefix='ff_logit_lstm', activ='linear')
    if options['prev2out']:
        logit += emb
    if options['ctx2out']:
        logit += get_layer('ff')[1](tparams, ctxs[-1], options, prefix='ff_logit_ctx', activ='linear')
    logit = tanh(logit)
    if options['use_dropout']:
        logit = dropout_layer(logit, use_noise, trng)
    if options['n_layers_out'] > 1:
        for lidx in xrange(1, options['n_layers_out']):
            logit = get_layer('ff')[1](tparams, logit, options, prefix='ff_logit_h%d'%lidx, activ='rectifier')
            if options['use_dropout']:
                logit = dropout_layer(logit, use_noise, trng)
    logit = get_layer('ff')[1](tparams, logit, options, prefix='ff_logit', activ='linear')
    logit_shp = logit.shape
    next_probs = tensor.nnet.softmax(logit)
    # one stochastic draw from the next-word distribution
    next_sample = trng.multinomial(pvals=next_probs).argmax(1)

    # next word probability
    f_next = theano.function([x, ctx]+init_state+init_memory, [next_probs, next_sample]+next_state+next_memory, name='f_next', profile=False)

    return f_init, f_next
# generate sample
def gen_sample(tparams, f_init, f_next, ctx0, options,
               trng=None, k=1, maxlen=30, stochastic=False):
    """Generate captions with beam search.
    This function uses the beam search algorithm to conditionally
    generate candidate captions. Supports beamsearch and stochastic
    sampling.
    Parameters
    ----------
    tparams : OrderedDict()
        dictionary of theano shared variables represented weight
        matricies
    f_init : theano function
        input: annotation, output: initial lstm state and memory
        (also performs transformation on ctx0 if using lstm_encoder)
    f_next: theano function
        takes the previous word/state/memory + ctx0 and runs one
        step through the lstm
    ctx0 : numpy array
        annotation from convnet, of dimension #annotations x # dimension
        [e.g (196 x 512)]
    options : dict
        dictionary of flags and options
    trng : random number generator
    k : int
        size of beam search
    maxlen : int
        maximum allowed caption size
    stochastic : bool
        if True, sample stochastically
    Returns
    -------
    sample : list of list
        each sublist contains an (encoded) sample from the model
    sample_score : numpy array
        scores of each sample
    """
    if k > 1:
        assert not stochastic, 'Beam search does not support stochastic sampling'

    sample = []
    sample_score = []
    if stochastic:
        # in stochastic mode the score is a running scalar, not a list
        sample_score = 0

    # live_k: hypotheses still being extended; dead_k: finished (hit <eos>)
    live_k = 1
    dead_k = 0

    hyp_samples = [[]] * live_k
    hyp_scores = numpy.zeros(live_k).astype('float32')
    hyp_states = []
    hyp_memories = []

    # only matters if we use lstm encoder
    rval = f_init(ctx0)
    ctx0 = rval[0]
    next_state = []
    next_memory = []
    # the states are returned as a: (dim,) and this is just a reshape to (1, dim)
    for lidx in xrange(options['n_layers_lstm']):
        next_state.append(rval[1+lidx])
        next_state[-1] = next_state[-1].reshape([1, next_state[-1].shape[0]])
    for lidx in xrange(options['n_layers_lstm']):
        next_memory.append(rval[1+options['n_layers_lstm']+lidx])
        next_memory[-1] = next_memory[-1].reshape([1, next_memory[-1].shape[0]])

    # reminder: if next_w = -1, the switch statement
    # in build_sampler is triggered -> (empty word embeddings)
    next_w = -1 * numpy.ones((1,)).astype('int64')

    for ii in xrange(maxlen):
        # our "next" state/memory in our previous step is now our "initial" state and memory
        rval = f_next(*([next_w, ctx0]+next_state+next_memory))
        next_p = rval[0]
        next_w = rval[1]

        # extract all the states and memories
        next_state = []
        next_memory = []
        for lidx in xrange(options['n_layers_lstm']):
            next_state.append(rval[2+lidx])
            next_memory.append(rval[2+options['n_layers_lstm']+lidx])

        if stochastic:
            sample.append(next_w[0]) # if we are using stochastic sampling this easy
            sample_score += next_p[0,next_w[0]]
            if next_w[0] == 0:
                break
        else:
            # beam search: total negative log-likelihood of each candidate
            # extension of each live hypothesis
            cand_scores = hyp_scores[:,None] - numpy.log(next_p)
            cand_flat = cand_scores.flatten()
            ranks_flat = cand_flat.argsort()[:(k-dead_k)] # (k-dead_k) numpy array of with min nll

            voc_size = next_p.shape[1]
            # indexing into the correct selected captions
            # NOTE(review): relies on Python-2 integer division of int arrays
            trans_indices = ranks_flat / voc_size
            word_indices = ranks_flat % voc_size
            costs = cand_flat[ranks_flat] # extract costs from top hypothesis

            # a bunch of lists to hold future hypothesis
            new_hyp_samples = []
            new_hyp_scores = numpy.zeros(k-dead_k).astype('float32')
            new_hyp_states = []
            for lidx in xrange(options['n_layers_lstm']):
                new_hyp_states.append([])
            new_hyp_memories = []
            for lidx in xrange(options['n_layers_lstm']):
                new_hyp_memories.append([])

            # get the corresponding hypothesis and append the predicted word
            for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
                new_hyp_samples.append(hyp_samples[ti]+[wi])
                new_hyp_scores[idx] = copy.copy(costs[idx]) # copy in the cost of that hypothesis
                for lidx in xrange(options['n_layers_lstm']):
                    new_hyp_states[lidx].append(copy.copy(next_state[lidx][ti]))
                for lidx in xrange(options['n_layers_lstm']):
                    new_hyp_memories[lidx].append(copy.copy(next_memory[lidx][ti]))

            # check the finished samples for <eos> character
            new_live_k = 0
            hyp_samples = []
            hyp_scores = []
            hyp_states = []
            for lidx in xrange(options['n_layers_lstm']):
                hyp_states.append([])
            hyp_memories = []
            for lidx in xrange(options['n_layers_lstm']):
                hyp_memories.append([])

            for idx in xrange(len(new_hyp_samples)):
                if new_hyp_samples[idx][-1] == 0:
                    # hypothesis ended with <eos>: move to finished samples
                    sample.append(new_hyp_samples[idx])
                    sample_score.append(new_hyp_scores[idx])
                    dead_k += 1 # completed sample!
                else:
                    new_live_k += 1 # collect collect correct states/memories
                    hyp_samples.append(new_hyp_samples[idx])
                    hyp_scores.append(new_hyp_scores[idx])
                    for lidx in xrange(options['n_layers_lstm']):
                        hyp_states[lidx].append(new_hyp_states[lidx][idx])
                    for lidx in xrange(options['n_layers_lstm']):
                        hyp_memories[lidx].append(new_hyp_memories[lidx][idx])
            hyp_scores = numpy.array(hyp_scores)
            live_k = new_live_k

            # stop when every hypothesis is finished or the beam is full
            if new_live_k < 1:
                break
            if dead_k >= k:
                break

            # batch up the surviving hypotheses for the next step
            next_w = numpy.array([w[-1] for w in hyp_samples])
            next_state = []
            for lidx in xrange(options['n_layers_lstm']):
                next_state.append(numpy.array(hyp_states[lidx]))
            next_memory = []
            for lidx in xrange(options['n_layers_lstm']):
                next_memory.append(numpy.array(hyp_memories[lidx]))

    if not stochastic:
        # dump every remaining one
        if live_k > 0:
            for idx in xrange(live_k):
                sample.append(hyp_samples[idx])
                sample_score.append(hyp_scores[idx])

    return sample, sample_score
def pred_probs(f_log_probs, options, worddict, prepare_data, data, iterator, verbose=False):
    """ Get log probabilities of captions
    Parameters
    ----------
    f_log_probs : theano function
        compute the log probability of a x given the context
    options : dict
        options dictionary
    worddict : dict
        maps words to one-hot encodings
    prepare_data : function
        see corresponding dataset class for details
    data : numpy array
        output of load_data, see corresponding dataset class
    iterator : KFold
        indices from scikit-learn KFold
    verbose : boolean
        if True print progress
    Returns
    -------
    probs : numpy array
        array of log probabilities indexed by example
    """
    n_samples = len(data[0])
    probs = numpy.zeros((n_samples, 1)).astype('float32')

    n_done = 0
    for _, valid_index in iterator:
        # build a batch from the validation indices; data[1] (presumably the
        # shared feature store — verify against the dataset class) is passed whole
        x, mask, ctx = prepare_data([data[0][t] for t in valid_index],
                                    data[1],
                                    worddict,
                                    maxlen=None,
                                    n_words=options['n_words'])
        pred_probs = f_log_probs(x,mask,ctx)
        # scatter this fold's probabilities into their original positions
        probs[valid_index] = pred_probs[:,None]

        n_done += len(valid_index)
        if verbose:
            print '%d/%d samples computed'%(n_done,n_samples)

    return probs
def validate_options(options):
    """Sanity-check a model options dictionary.

    Emits warnings for legal-but-questionable settings and raises
    ValueError for an unknown attention type.  Returns the (unmodified)
    options dictionary.
    """
    # Put friendly reminders here: (condition, warning message) pairs.
    reminders = (
        (options['dim_word'] > options['dim'],
         'dim_word should only be as large as dim.'),
        (options['lstm_encoder'],
         'Note that this is a 1-D bidirectional LSTM, not 2-D one.'),
        (options['use_dropout_lstm'],
         'dropout in the lstm seems not to help'),
    )
    for triggered, message in reminders:
        if triggered:
            warnings.warn(message)

    # Other checks: hard errors.
    if options['attn_type'] not in ('stochastic', 'deterministic'):
        raise ValueError("specified attention type is not correct")

    return options
"""Note: all the hyperparameters are stored in a dictionary model_options (or options outside train).
train() then proceeds to do the following:
1. The params are initialized (or reloaded)
2. The computations graph is built symbolically using Theano.
3. A cost is defined, then gradient are obtained automatically with tensor.grad :D
4. With some helper functions, gradient descent + periodic saving/printing proceeds
"""
def train(dim_word=100, # word vector dimensionality
          ctx_dim=512, # context vector dimensionality
          dim=1000, # the number of LSTM units
          attn_type='stochastic', # [see section 4 from paper]
          n_layers_att=1, # number of layers used to compute the attention weights
          n_layers_out=1, # number of layers used to compute logit
          n_layers_lstm=1, # number of lstm layers
          n_layers_init=1, # number of layers to initialize LSTM at time 0
          lstm_encoder=False, # if True, run bidirectional LSTM on input units
          prev2out=False, # Feed previous word into logit
          ctx2out=False, # Feed attention weighted ctx into logit
          alpha_entropy_c=0.002, # hard attn param
          RL_sumCost=True, # hard attn param
          semi_sampling_p=0.5, # hard attn param
          temperature=1., # hard attn param
          patience=10,
          max_epochs=5000,
          dispFreq=100,
          decay_c=0., # weight decay coeff
          alpha_c=0., # doubly stochastic coeff
          lrate=0.01, # used only for SGD
          selector=False, # selector (see paper)
          n_words=10000, # vocab size
          maxlen=100, # maximum length of the description
          optimizer='rmsprop',
          batch_size = 16,
          valid_batch_size = 16,
          saveto='model.npz', # relative path of saved model file
          validFreq=1000,
          saveFreq=1000, # save the parameters after every saveFreq updates
          sampleFreq=100, # generate some samples after every sampleFreq updates
          dataset='flickr8k',
          dictionary=None, # word dictionary
          use_dropout=False, # setting this true turns on dropout at various points
          use_dropout_lstm=False, # dropout on lstm gates
          reload_=False,
          save_per_epoch=False): # this saves down the model every epoch
    """Train the attention-based caption generator end to end.

    Builds the Theano graph, compiles cost/gradient/sampling functions,
    then runs minibatch gradient descent with periodic display, sampling,
    validation and checkpointing.  Returns (train_err, valid_err, test_err).
    """
    # hyperparam dict: locals() here contains exactly the keyword arguments
    model_options = locals().copy()
    model_options = validate_options(model_options)

    # reload options from the pickle saved next to a previous checkpoint
    if reload_ and os.path.exists(saveto):
        print "Reloading options"
        with open('%s.pkl'%saveto, 'rb') as f:
            model_options = pkl.load(f)

    print "Using the following parameters:"
    print model_options

    print 'Loading data'
    load_data, prepare_data = get_dataset(dataset)
    train, valid, test, worddict = load_data()

    # index 0 and 1 always code for the end of sentence and unknown token
    word_idict = dict()
    for kk, vv in worddict.iteritems():
        word_idict[vv] = kk
    word_idict[0] = '<eos>'
    word_idict[1] = 'UNK'

    # Initialize (or reload) the parameters using 'model_options'
    # then build the Theano graph
    print 'Building model'
    params = init_params(model_options)
    if reload_ and os.path.exists(saveto):
        print "Reloading model"
        params = load_params(saveto, params)

    # numpy arrays -> theano shared variables
    tparams = init_tparams(params)

    # In order, we get:
    #   1) trng - theano random number generator
    #   2) use_noise - flag that turns on dropout
    #   3) inps - inputs for f_grad_shared
    #   4) cost - log likelihood for each sentence
    #   5) opts_out - optional outputs (e.g selector)
    trng, use_noise, \
        inps, alphas, alphas_sample,\
        cost, \
        opt_outs = \
        build_model(tparams, model_options)

    # To sample, we use beam search: 1) f_init is a function that initializes
    # the LSTM at time 0 [see top right of page 4], 2) f_next returns the distribution over
    # words and also the new "initial state/memory" see equation
    print 'Buliding sampler'
    f_init, f_next = build_sampler(tparams, model_options, use_noise, trng)

    # we want the cost without any of the regularizers; for stochastic
    # attention the sampling updates from scan must be carried along
    f_log_probs = theano.function(inps, -cost, profile=False,
                                  updates=opt_outs['attn_updates']
                                  if model_options['attn_type']=='stochastic'
                                  else None)

    cost = cost.mean()
    # add L2 regularization costs
    if decay_c > 0.:
        decay_c = theano.shared(numpy.float32(decay_c), name='decay_c')
        weight_decay = 0.
        for kk, vv in tparams.iteritems():
            weight_decay += (vv ** 2).sum()
        weight_decay *= decay_c
        cost += weight_decay

    # Doubly stochastic regularization
    if alpha_c > 0.:
        alpha_c = theano.shared(numpy.float32(alpha_c), name='alpha_c')
        alpha_reg = alpha_c * ((1.-alphas.sum(0))**2).sum(0).mean()
        cost += alpha_reg

    hard_attn_updates = []
    # Backprop!
    if model_options['attn_type'] == 'deterministic':
        grads = tensor.grad(cost, wrt=itemlist(tparams))
    else:
        # shared variables for hard attention
        baseline_time = theano.shared(numpy.float32(0.), name='baseline_time')
        opt_outs['baseline_time'] = baseline_time
        alpha_entropy_c = theano.shared(numpy.float32(alpha_entropy_c), name='alpha_entropy_c')
        alpha_entropy_reg = alpha_entropy_c * (alphas*tensor.log(alphas)).mean()
        # [see Section 4.1: Stochastic "Hard" Attention for derivation of this learning rule]
        if model_options['RL_sumCost']:
            grads = tensor.grad(cost, wrt=itemlist(tparams),
                                disconnected_inputs='raise',
                                known_grads={alphas:(baseline_time-opt_outs['masked_cost'].mean(0))[None,:,None]/10.*
                                             (-alphas_sample/alphas) + alpha_entropy_c*(tensor.log(alphas) + 1)})
        else:
            grads = tensor.grad(cost, wrt=itemlist(tparams),
                                disconnected_inputs='raise',
                                known_grads={alphas:opt_outs['masked_cost'][:,:,None]/10.*
                                             (alphas_sample/alphas) + alpha_entropy_c*(tensor.log(alphas) + 1)})
        # [equation on bottom left of page 5]: exponential moving average baseline
        hard_attn_updates += [(baseline_time, baseline_time * 0.9 + 0.1 * opt_outs['masked_cost'].mean())]
        # updates from scan
        hard_attn_updates += opt_outs['attn_updates']

    # to get the cost after regularization or the gradients, use this
    # f_cost = theano.function([x, mask, ctx], cost, profile=False)
    # f_grad = theano.function([x, mask, ctx], grads, profile=False)

    # f_grad_shared computes the cost and updates adaptive learning rate variables
    # f_update updates the weights of the model
    lr = tensor.scalar(name='lr')
    # NOTE(review): eval() looks up the optimizer builder (e.g. rmsprop) by
    # name; `optimizer` must name a function defined in this module.
    f_grad_shared, f_update = eval(optimizer)(lr, tparams, grads, inps, cost, hard_attn_updates)

    print 'Optimization'

    # [See note in section 4.3 of paper]
    train_iter = HomogeneousData(train, batch_size=batch_size, maxlen=maxlen)

    if valid:
        kf_valid = KFold(len(valid[0]), n_folds=len(valid[0])/valid_batch_size, shuffle=False)
    if test:
        kf_test = KFold(len(test[0]), n_folds=len(test[0])/valid_batch_size, shuffle=False)

    # history_errs is a bare-bones training log that holds the validation and test error
    history_errs = []
    # reload history
    if reload_ and os.path.exists(saveto):
        history_errs = numpy.load(saveto)['history_errs'].tolist()
    best_p = None
    bad_counter = 0

    # -1 means "once per epoch" for each of the periodic actions
    if validFreq == -1:
        validFreq = len(train[0])/batch_size
    if saveFreq == -1:
        saveFreq = len(train[0])/batch_size
    if sampleFreq == -1:
        sampleFreq = len(train[0])/batch_size

    uidx = 0
    estop = False
    for eidx in xrange(max_epochs):
        n_samples = 0

        print 'Epoch ', eidx

        for caps in train_iter:
            n_samples += len(caps)
            uidx += 1
            # turn on dropout
            use_noise.set_value(1.)

            # preprocess the caption, recording the
            # time spent to help detect bottlenecks
            pd_start = time.time()
            x, mask, ctx = prepare_data(caps,
                                        train[1],
                                        worddict,
                                        maxlen=maxlen,
                                        n_words=n_words)
            pd_duration = time.time() - pd_start

            if x is None:
                print 'Minibatch with zero sample under length ', maxlen
                continue

            # get the cost for the minibatch, and update the weights
            ud_start = time.time()
            cost = f_grad_shared(x, mask, ctx)
            f_update(lrate)
            ud_duration = time.time() - ud_start # some monitoring for each mini-batch

            # Numerical stability check
            if numpy.isnan(cost) or numpy.isinf(cost):
                print 'NaN detected'
                return 1., 1., 1.

            if numpy.mod(uidx, dispFreq) == 0:
                print 'Epoch ', eidx, 'Update ', uidx, 'Cost ', cost, 'PD ', pd_duration, 'UD ', ud_duration

            # Checkpoint
            if numpy.mod(uidx, saveFreq) == 0:
                print 'Saving...',

                if best_p is not None:
                    params = copy.copy(best_p)
                else:
                    params = unzip(tparams)
                numpy.savez(saveto, history_errs=history_errs, **params)
                pkl.dump(model_options, open('%s.pkl'%saveto, 'wb'))
                print 'Done'

            # Print a generated sample as a sanity check
            if numpy.mod(uidx, sampleFreq) == 0:
                # turn off dropout first
                use_noise.set_value(0.)
                x_s = x
                mask_s = mask
                ctx_s = ctx
                # generate and decode a subset of the current training batch
                for jj in xrange(numpy.minimum(10, len(caps))):
                    sample, score = gen_sample(tparams, f_init, f_next, ctx_s[jj], model_options,
                                               trng=trng, k=5, maxlen=30, stochastic=False)
                    # Decode the sample from encoding back to words
                    print 'Truth ',jj,': ',
                    for vv in x_s[:,jj]:
                        if vv == 0:
                            break
                        if vv in word_idict:
                            print word_idict[vv],
                        else:
                            print 'UNK',
                    print
                    for kk, ss in enumerate([sample[0]]):
                        print 'Sample (', kk,') ', jj, ': ',
                        for vv in ss:
                            if vv == 0:
                                break
                            if vv in word_idict:
                                print word_idict[vv],
                            else:
                                print 'UNK',
                        print

            # Log validation loss + checkpoint the model with the best validation log likelihood
            if numpy.mod(uidx, validFreq) == 0:
                use_noise.set_value(0.)
                train_err = 0
                valid_err = 0
                test_err = 0

                if valid:
                    valid_err = -pred_probs(f_log_probs, model_options, worddict, prepare_data, valid, kf_valid).mean()
                if test:
                    test_err = -pred_probs(f_log_probs, model_options, worddict, prepare_data, test, kf_test).mean()

                history_errs.append([valid_err, test_err])

                # the model with the best validation log likelihood is saved separately with a different name
                if uidx == 0 or valid_err <= numpy.array(history_errs)[:,0].min():
                    best_p = unzip(tparams)
                    print 'Saving model with best validation ll'
                    # NOTE(review): the copy of best_p is immediately
                    # overwritten by unzip(tparams); the first assignment
                    # is dead code (kept as in the original).
                    params = copy.copy(best_p)
                    params = unzip(tparams)
                    numpy.savez(saveto+'_bestll', history_errs=history_errs, **params)
                    bad_counter = 0

                # abort training if perplexity has been increasing for too long
                if eidx > patience and len(history_errs) > patience and valid_err >= numpy.array(history_errs)[:-patience,0].min():
                    bad_counter += 1
                    if bad_counter > patience:
                        print 'Early Stop!'
                        estop = True
                        break

                print 'Train ', train_err, 'Valid ', valid_err, 'Test ', test_err

        print 'Seen %d samples' % n_samples

        if estop:
            break

        if save_per_epoch:
            numpy.savez(saveto + '_epoch_' + str(eidx + 1), history_errs=history_errs, **unzip(tparams))

    # use the best nll parameters for final checkpoint (if they exist)
    if best_p is not None:
        zipp(best_p, tparams)

    use_noise.set_value(0.)
    train_err = 0
    valid_err = 0
    test_err = 0
    # NOTE(review): unlike the in-loop validation, the final errors are the
    # raw per-example arrays (no .mean()) -- confirm downstream expectations.
    if valid:
        valid_err = -pred_probs(f_log_probs, model_options, worddict, prepare_data, valid, kf_valid)
    if test:
        test_err = -pred_probs(f_log_probs, model_options, worddict, prepare_data, test, kf_test)

    print 'Train ', train_err, 'Valid ', valid_err, 'Test ', test_err

    # NOTE(review): copy.copy(best_p) raises TypeError if validation never
    # ran (best_p is None) -- e.g. validFreq larger than total updates.
    params = copy.copy(best_p)
    numpy.savez(saveto, zipped_params=best_p, train_err=train_err,
                valid_err=valid_err, test_err=test_err, history_errs=history_errs,
                **params)

    return train_err, valid_err, test_err
if __name__ == '__main__':
    # Intentionally a no-op: train() is meant to be imported and driven by an
    # external experiment script, not run directly.
    pass
|
lakehanne/ensenso
|
ensenso_detect/manikin/utils/capgen.py
|
Python
|
mit
| 56,722
|
[
"Gaussian"
] |
fffaa8a5727ec3ed4329c120b6b28b9c2e65ecbd37667a1dcf17b6261c51acb8
|
#!/usr/bin/env python
"""
Integrate a surface field
"""
from __future__ import print_function
import argparse
import time
import os
import re
import sys

from icqsol.shapes.icqShapeManager import ShapeManager
from icqsol import util

# time stamp
# NOTE(review): tid is computed but never used in this script; it is kept for
# parity with the sibling example scripts that use it to name output files.
tid = re.sub(r'\.', '', str(time.time()))

# Command-line interface.
# FIX: the description previously read 'Color surface field' (copy/paste from
# the coloring example); this script integrates a field over a surface.
parser = argparse.ArgumentParser(description='Integrate surface field')
parser.add_argument('--input', dest='input', default='',
                    help='VTK input file')
parser.add_argument('--name', dest='name', default='',
                    help='Set the name of the field')
parser.add_argument('--component', dest='component', type=int, default=0,
                    help='Set the component of the field')
# NOTE(review): --ascii is accepted but never consulted below.
parser.add_argument('--ascii', dest='ascii', action='store_true',
                    help='Save data in ASCII format (default is binary)')

args = parser.parse_args()

# Validate the input path and its format before handing off to icqsol.
if not args.input:
    print('ERROR: must specify input file: --input <file>')
    sys.exit(3)
if not os.path.exists(args.input):
    print('ERROR: file {0} does not exist'.format(args.input))
    sys.exit(2)

file_format = util.getFileFormat(args.input)
if file_format != util.VTK_FORMAT:
    print('ERROR: file {0} must be VTK format'.format(args.input))
    sys.exit(2)

vtk_dataset_type = util.getVtkDatasetType(args.input)
if vtk_dataset_type not in util.VTK_DATASET_TYPES:
    print('ERROR: invalid VTK dataset type {0}'.format(vtk_dataset_type))
    sys.exit(2)

# Load the surface and integrate the selected component of the named field.
shape_mgr = ShapeManager(file_format=util.VTK_FORMAT, vtk_dataset_type=vtk_dataset_type)
pDataInput = shape_mgr.loadAsVtkPolyData(args.input)

integral = shape_mgr.integrateSurfaceField(pDataInput,
                                           field_name=args.name,
                                           field_component=args.component)
print('integral = {0}'.format(integral))
|
gregvonkuster/icqsol
|
examples/integrateSurfaceField.py
|
Python
|
mit
| 1,827
|
[
"VTK"
] |
8529116983e759e249711bc9f7151c3cff269b1fb3e7da9113af1e07553d5e39
|
import cv2
import matplotlib.pyplot as plt
import numpy as np
import UltraLibrary as ul
from FrameType import f_type
def hist_despeckle(img, goodImg):
    """Despeckle `img` with the homogeneity filter, then histogram-match the
    result against the reference frame `goodImg` via ul.filtered_match."""
    # BUG FIX: the original called quickieHomo(), which is not defined
    # anywhere in this module (only mentioned in commented-out scratch code);
    # the despeckling homogeneity filter is quickHomog().
    dsimg, homog = quickHomog(img)
    return ul.filtered_match(img, dsimg, goodImg)
def detection(im):
    """Run OpenCV's default blob detector on `im` and display the keypoints."""
    blob_finder = cv2.SimpleBlobDetector_create()

    # Detect blobs.
    found = blob_finder.detect(im)

    # Draw detected blobs as red circles.
    # DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS makes each circle's size match
    # the size of the corresponding blob.
    annotated = cv2.drawKeypoints(im, found, np.array([]), (0,0,255),
                                  cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    # Show keypoints
    cv2.imshow("Keypoints", annotated)
def hist_first(original, goodImg,*args, **kwargs):
    # Histogram-equalize the frame against the reference, then run the
    # despeckle/threshold pipeline to classify the frame.
    # NOTE(review): the equalized image `img` is computed but never used --
    # despeckle_thresh() is fed `original` instead. Possibly intentional,
    # but it looks like `img` was meant to be passed; confirm against
    # ul.global_histogram and the ul.runVideo caller in __main__.
    img = ul.global_histogram(original,goodImg)
    systolic = despeckle_thresh(original.astype(np.uint8), *args, **kwargs)
    return systolic
def despeckle_thresh(img, countarray,diffarray,index, systracker):
    # Classify one video frame as systolic/diastolic:
    #   1. despeckle via the homogeneity filter,
    #   2. Canny edge-detect a fixed region of interest,
    #   3. compare the edge-pixel count against a trailing mean,
    #   4. debounce transitions that happen within 10 frames.
    # Mutable arguments (countarray, diffarray, index, systracker) carry
    # state between calls; index[0] is the current frame number.
    dsimg, homog = quickHomog(img)
    # Fixed ROI -- assumes frames large enough for rows 80:200, cols 200:400.
    homog2 = homog[80:200,200:400]
    #detection(homog)
    #thresholding(dsimg,homog2)
    #ret, threshUF = cv2.threshold(img,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    #cv2.imshow('Unfiltered', threshUF)
    #kernel = np.ones((3,3),np.uint8)
    #opening= cv2.morphologyEx(homog2,cv2.MORPH_OPEN,kernel, iterations = 1)
    edges = cv2.Canny(homog2, threshold1 = 50, threshold2 = 100)
    cv2.imshow('edges', edges)
    #cv2.imshow('dialtion', opening)
    # Edge map is 0/255, so dividing the sum by 255 counts edge pixels.
    countarray[index[0]] = np.sum(edges)/255
    # Trailing mean over (up to) the previous 5 frames.
    if index[0] > 5:
        diffarray[index[0]] = np.mean(countarray[index[0]-5:index[0]])
    else:
        diffarray[index[0]] = np.mean(countarray[:index[0]])
    # Fewer edges than the recent average => systole.
    systolic = countarray[index[0]] < diffarray[index[0]]
    # Debounce: ignore a state flip that occurs within 10 frames of the
    # previous accepted flip (systracker = [last state, frame of last flip]).
    if systolic != systracker[0]:
        if index[0] - systracker[1] <= 10:
            systolic = not systolic
        else:
            systracker[0] = systolic
            systracker[1] = index[0]
    #print(systolic, countarray[index[0]], diffarray[index[0]])
    # Visual indicator: green = systolic, red = diastolic (BGR).
    image =np.zeros((120,200,3), np.uint8)
    if systolic:
        image[:,:,1] = 255
    else:
        image[:,:,2] = 255
    index[0] = index[0] + 1
    cv2.imshow('systolic',image)
    #plt.cla()
    #plt.plot(counts[:index[0]-1])
    #plt.title(video)
    #plt.pause(0.001)
    return systolic
def thresholding(img,other):
    # Experimental segmentation scratchpad: Otsu-thresholds `img` for display
    # only, then returns `other` unchanged (the watershed-style steps below
    # are all commented out).
    # otsu thresh
    ret, thresh = cv2.threshold(img,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    #thresh = cv2.adaptiveThreshold(img, 1, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 55, -3)
    cv2.imshow('threshold',thresh)
    # NOTE(review): the Otsu result is discarded here; from this point on
    # `thresh` is the caller-supplied `other` image.
    thresh = other
    # noise removal
    kernel = np.ones((3,3),np.uint8)
    # NOTE(review): `opening` is computed but unused (display commented out).
    opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 5)
    #closing = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel, iterations = 5)
    #cv2.imshow('opening',opening)
    # sure background area
    #sure_bg = cv2.dilate(opening,kernel,iterations=3)
    # Finding sure foreground area
    #dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
    #ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)
    # Finding unknown region
    #sure_fg = np.uint8(sure_fg)
    #unknown = cv2.subtract(sure_bg,sure_fg)
    #cv2.imshow('bg',sure_bg)
    #cv2.imshow('fg',sure_fg)
    # cv2.imshow('unknown',unknown)
    #cv2.imshow('dist',dist_transform)
    return thresh
def quickHomog(img):
    # Vectorized homogeneity-based despeckling: per-pixel, compute a local
    # homogeneity statistic h = var^2/mean over a wSize x wSize window; in
    # homogeneous regions apply a Gaussian blur, elsewhere a median blur.
    # Returns (filtered image, homogeneity mask scaled to 0/255).
    hScaler = 2   # divides the homogeneity threshold (higher => stricter)
    wSize = 7     # local window side length
    hMat = np.zeros(img.shape)
    # Windowed first moment (mean) and second moment via box filters.
    mean = cv2.blur(img.astype(np.float64),(wSize,wSize))
    #cv2.imshow('mean', mean)
    moment2 = cv2.blur(np.multiply(img,img).astype(np.float64), (wSize,wSize))
    # Local variance: E[x^2] - E[x]^2.
    dev = moment2-np.multiply(mean,mean)
    median = cv2.medianBlur(img,wSize)
    gaussian = cv2.GaussianBlur(img,(wSize,wSize),sigmaX=1)
    mean_mean = np.mean(mean)
    mean_dev = np.mean(dev)
    #
    # Global threshold derived from image-wide statistics.
    hthresh = np.ones(img.shape)*mean_dev**2/mean_mean/hScaler
    # NOTE(review): division by `mean` can divide by zero on black regions;
    # the zeromean mask below folds those pixels into the homogeneous set.
    hVal = np.divide( np.multiply(dev,dev), mean)
    hMat = np.less_equal(hVal,hthresh)
    zeromean = np.less(mean,3)
    #hMat = np.multiply(hMat,np.logical_not(zeromean))
    hMat = np.logical_or(hMat,zeromean)
    # Blend: Gaussian on homogeneous pixels, median elsewhere.
    gaussians = np.multiply(hMat,gaussian)
    medians = np.multiply(np.logical_not(hMat),median)
    newimg = gaussians+medians
    cv2.imshow('homogeny',hMat.astype(np.uint8)*255)
    return newimg, hMat.astype(np.uint8)*255
def despeckle(img):
    """Pixel-by-pixel reference implementation of the homogeneity despeckle
    filter (quickHomog is the fast vectorized version).

    Returns (filtered image as uint8, homogeneity mask). NOTE(review): both
    write statements into `newimg` are commented out in the original, so the
    returned image is all zeros; kept as-is to preserve behaviour -- only the
    homogeneity mask and the displayed windows are meaningful.
    """
    winSize = 7                 # n x n window for filtering
    # BUG FIX: was `winSize/2`, which is a float under Python 3 and breaks
    # cv2.copyMakeBorder, range() and array slicing below. Floor division
    # gives the same value this code always relied on.
    halfWin = winSize // 2
    # typical h for a homogeneous region is ~1.6 at window size 7x7;
    # tune this threshold as needed
    highThresh = 1
    sigmaX = 1
    sigmaY = 1
    pad = halfWin + 1           # pixels of reflective padding around the image

    hMat = np.zeros(img.shape)
    img = cv2.copyMakeBorder(img, pad, pad, pad, pad, cv2.BORDER_REFLECT)
    size = img.shape
    newimg = np.zeros(size)
    hMat = np.zeros(size)

    # Separable Gaussian kernel for the (currently disabled) smoothing step.
    kernelX = cv2.getGaussianKernel(winSize, sigmaX)
    kernelY = cv2.getGaussianKernel(winSize, sigmaY)
    Gaussian = np.matmul(kernelX, np.transpose(kernelY))

    # Loop over all original (non-padding) pixels.
    for i in range(pad + 1, size[0] - pad + 1):
        for j in range(pad + 1, size[1] - pad + 1):
            W = img[i - halfWin:i + halfWin + 1, j - halfWin:j + halfWin + 1]
            mean = np.mean(W)
            vari = np.var(W)
            # Homogeneity statistic; guard against division by zero.
            if mean == 0:
                h = 0
            else:
                h = vari / mean
            if h > highThresh:
                # newimg[i,j] = np.median(W)
                pass
            else:
                # newimg[i,j] = np.sum(np.multiply(Gaussian,W))
                hMat[i, j] = 1
            # print(i,j, newimg[i,j])

    newimg = newimg.astype(np.uint8)
    newimg = newimg[pad + 1:size[0] - pad + 1, pad + 1:size[1] - pad + 1]
    cv2.imshow('despeckled', newimg)
    cv2.imshow('speckled', img)
    cv2.imshow('homogeny', hMat)
    cv2.waitKey(33)
    return newimg.astype(np.uint8), hMat
if __name__ == "__main__":
goodImg = cv2.imread('GoodImages\\3-A.png')
vids = ['Videos/1-A.mp4', 'Videos/1-B.mp4', 'Videos/2-A.mp4', 'Videos/2-B.mp4',
'Videos/3-A.mp4', 'Videos/3-B.mp4', 'Videos/4-A.mp4', 'Videos/4-B.mp4',
'Videos/5-A.mp4', 'Videos/5-B.mp4', 'Videos/Varying.mp4']
goodImg = cv2.imread('GoodImages\\3-A.PNG',0)
vids =['Videos/Varying.mp4']
for video in vids:
cap = cv2.VideoCapture(video)
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
cap.release()
counts = np.zeros(length)
diffs = np.zeros(length)
index = [0]
#plt.ion()
systracker = np.zeros(2)
ul.runVideo(video, hist_first, goodImg, counts,diffs,index, systracker)
line_c, = plt.plot(counts, label = 'Pixel Counts')
mean = diffs
dev = np.var(counts)**0.5
hthresh = mean+dev/2
lthresh = mean-dev/2
line_m, = plt.plot(mean, label = 'Systoles Threshold')
systolic = np.less(counts,mean).astype(np.uint8)
systolic = systolic*(counts.max()-counts.min()) + counts.min()
#plt.plot(systolic)
#plt.plot(hthresh)
#plt.plot(lthresh)
#plt.plot(diffs)
plt.title('White Pixel Counts with Prior Histogram Equalization')
plt.xlabel('Frame #')
plt.ylabel('Number of White Pixels')
plt.legend(handles = [line_c, line_m])
plt.show()
"""
# otsu thresh
#img = cv2.imread('GoodImages\\5-A.png')
#img = ul.stripFrame(img)
img = cv2.imread('Segmentation\\despeckled_3-A_h1.7.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
cv2.imshow('threshold',thresh)
cv2.waitKey(0)
# noise removal
kernel = np.ones((3,3),np.uint8)
opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)
# sure background area
sure_bg = cv2.dilate(opening,kernel,iterations=3)
# Finding sure foreground area
dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg,sure_fg)
cv2.imshow('bg',sure_bg)
cv2.imshow('fg',sure_fg)
cv2.imshow('unknown',unknown)
cv2.imshow('dist',dist_transform)
cv2.waitKey(0)
"""
"""
#find and save despeckled image
speckImg = cv2.imread('GoodImages\\5-A.PNG',0)
speckImg = ul.stripFrame(speckImg)
despeckImg, hmat= quickieHomo(speckImg)
hmat = hmat*255
cv2.imshow('homogeny', hmat)
cv2.imshow('orig', speckImg)
cv2.imshow('despeck', despeckImg)
cv2.waitKey(0)
#cv2.imwrite('Segmentation\\despeckled_3-A_h1.png', despeckImg)
"""
"""
cap = cv2.VideoCapture('Videos\\4-A.mp4')
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out1 = cv2.VideoWriter('Videos\\despeckle_4-A.avi',fourcc, 5, (640,480))
out2 = cv2.VideoWriter('Videos\\homo_4-A.avi',fourcc, 5, (640,480))
ret = True
i = 1
while i <= 5:
ret,frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
print(i)
i+=1
if ret == True:
frame = ul.stripFrame(frame)
newframe,hFrame = despeckle(frame)
out1.write(newframe)
out2.write(newframe)
else:
break
cap.release()
out2.release()
out1.release()
cv2.destroyAllWindows()
"""
"""
h0 =0
w0 = 0
goodImg = ul.stripFrame(cv2.imread('GoodImages\\3-A.png',0))
img = ul.stripFrame(cv2.imread('GoodImages\\5-A.png',0))
im2 = ul.stripFrame(cv2.imread('GoodImages\\5-A.png',0))
size = img.shape
img = cv2.GaussianBlur(img, (3,3),sigmaX = 1);
cv2.imshow('median', img)
cv2.waitKey(0)
img = ul.global_histogram(img,goodImg)
#img = cv2.copyMakeBorder(img,50,50,50,50,cv2.BORDER_REFLECT)
size = img.shape
cv2.imshow('hist_median', img)
cv2.waitKey(0)
im2 = ul.global_histogram(im2,goodImg)
cv2.imshow('no_median', im2)
cv2.waitKey(0)
"""
"""
for i in range(51,size[0]-50):
for j in range(51,size[1]-50):
homog = False
rSize = 11
while not homog:
W = img[i-rSize/2:i+rSize/2+1,j-rSize/2:j+rSize/2+1]
mean = np.sum(W)
var = np.var(W)
hij =var*var/mean
if hij < h0:
homog = True
"""
|
KristopherJH/ENGI7854US
|
Despeckle.py
|
Python
|
gpl-3.0
| 10,592
|
[
"Gaussian"
] |
570e853f38dc10ff67ecf72a8d98812a80c19dcddd4eb32f7c607e806603d184
|
#!/usr/bin/env python
""" MultiQC module to parse output from HTSeq Count """
from __future__ import print_function
from collections import OrderedDict
import logging
from multiqc import config
from multiqc.plots import bargraph
from multiqc.modules.base_module import BaseMultiqcModule
# Initialise the logger
log = logging.getLogger(__name__)
class MultiqcModule(BaseMultiqcModule):
    """MultiQC module for HTSeq Count: finds htseq-count log files, parses
    the per-category read counts, adds columns to the General Stats table
    and renders an assignment bar plot."""

    def __init__(self):

        # Initialise the parent object
        super(MultiqcModule, self).__init__(name='HTSeq Count',
        anchor='htseq', target='HTSeq Count',
        href='http://www-huber.embl.de/HTSeq/doc/count.html',
        info=" is part of the HTSeq Python package - it takes a file with aligned sequencing "\
             "reads, plus a list of genomic features and counts how many reads map to each feature.")

        # Find and load any HTSeq Count reports
        self.htseq_data = dict()
        self.htseq_keys = list()
        for f in self.find_log_files(config.sp['htseq'], filehandles=True):
            parsed_data = self.parse_htseq_report(f)
            if parsed_data is not None:
                self.htseq_data[f['s_name']] = parsed_data

        if len(self.htseq_data) == 0:
            # UserWarning signals "module found nothing" to the MultiQC core.
            log.debug("Could not find any reports in {}".format(config.analysis_dir))
            raise UserWarning

        log.info("Found {} reports".format(len(self.htseq_data)))

        # Write parsed report data to a file
        self.write_data_file(self.htseq_data, 'multiqc_htseq')

        # Basic Stats Table
        self.htseq_stats_table()

        # Assignment bar plot
        # Only one section, so add to the intro
        self.intro += self.htseq_counts_chart()

    def parse_htseq_report (self, f):
        """ Parse the HTSeq Count log file.

        Special `__category` lines become counts keyed without the leading
        underscores; every other parseable line is a feature count and is
        summed into 'assigned'. Returns the dict, or None if the file had
        no special-category lines (i.e. is not an htseq-count log).
        """
        keys = [ '__no_feature', '__ambiguous', '__too_low_aQual', '__not_aligned', '__alignment_not_unique' ]
        parsed_data = dict()
        assigned_counts = 0
        for l in f['f']:
            s = l.split("\t")
            if s[0] in keys:
                # Strip the '__' prefix for the report key.
                parsed_data[s[0][2:]] = int(s[1])
            else:
                try:
                    assigned_counts += int(s[1])
                except (ValueError, IndexError):
                    # Non-count line (header, malformed) -- skip silently.
                    pass
        if len(parsed_data) > 0:
            parsed_data['assigned'] = assigned_counts
            # Total of assigned + all special categories (percent added after).
            parsed_data['total_count'] = sum([v for v in parsed_data.values()])
            parsed_data['percent_assigned'] = (float(parsed_data['assigned']) / float(parsed_data['total_count'])) * 100.0
            return parsed_data
        return None

    def htseq_stats_table(self):
        """ Take the parsed stats from the HTSeq Count report and add them to the
        basic stats table at the top of the report """
        headers = OrderedDict()
        headers['percent_assigned'] = {
            'title': '% Assigned',
            'description': '% Assigned reads',
            'max': 100,
            'min': 0,
            'suffix': '%',
            'scale': 'RdYlGn',
            'format': '{:.1f}%'
        }
        headers['assigned'] = {
            'title': '{} Assigned'.format(config.read_count_prefix),
            'description': 'Assigned Reads ({})'.format(config.read_count_desc),
            'min': 0,
            'scale': 'PuBu',
            # Scale raw counts into the configured display unit (e.g. millions).
            'modify': lambda x: float(x) * config.read_count_multiplier,
            'shared_key': 'read_count'
        }
        self.general_stats_addcols(self.htseq_data, headers)

    def htseq_counts_chart (self):
        """ Make the HTSeq Count assignment rates plot """
        cats = OrderedDict()
        cats['assigned'] =              { 'name': 'Assigned' }
        cats['ambiguous'] =             { 'name': 'Ambiguous' }
        cats['alignment_not_unique'] =  { 'name': 'Alignment Not Unique' }
        cats['no_feature'] =            { 'name': 'No Feature' }
        cats['too_low_aQual'] =         { 'name': 'Too Low aQual' }
        cats['not_aligned'] =           { 'name': 'Not Aligned' }
        # NOTE: this local dict shadows the module-level `config` import for
        # the remainder of this method (plot options only).
        config = {
            'id': 'htseq_assignment_plot',
            'title': 'HTSeq Count Assignments',
            'ylab': '# Reads',
            'hide_zero_cats': False,
            'cpswitch_counts_label': 'Number of Reads'
        }
        return bargraph.plot(self.htseq_data, cats, config)
|
ahvigil/MultiQC
|
multiqc/modules/htseq/htseq.py
|
Python
|
gpl-3.0
| 4,258
|
[
"HTSeq"
] |
a2e98cc34b98a647be479cb0bd064b793ac5c98abf6005bceb9de88552a2d53f
|
# Copyright 2008 by Michiel de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Parser for XML results returned by NCBI's Entrez Utilities. This
parser is used by the read() function in Bio.Entrez, and is not intended
be used directly.
"""
# The question is how to represent an XML file as Python objects. Some
# XML files returned by NCBI look like lists, others look like dictionaries,
# and others look like a mix of lists and dictionaries.
#
# My approach is to classify each possible element in the XML as a plain
# string, an integer, a list, a dictionary, or a structure. The latter is a
# dictionary where the same key can occur multiple times; in Python, it is
# represented as a dictionary where that key occurs once, pointing to a list
# of values found in the XML file.
#
# The parser then goes through the XML and creates the appropriate Python
# object for each element. The different levels encountered in the XML are
# preserved on the Python side. So a subelement of a subelement of an element
# is a value in a dictionary that is stored in a list which is a value in
# some other dictionary (or a value in a list which itself belongs to a list
# which is a value in a dictionary, and so on). Attributes encountered in
# the XML are stored as a dictionary in a member .attributes of each element,
# and the tag name is saved in a member .tag.
#
# To decide which kind of Python object corresponds to each element in the
# XML, the parser analyzes the DTD referred at the top of (almost) every
# XML file returned by the Entrez Utilities. This is preferred over a hand-
# written solution, since the number of DTDs is rather large and their
# contents may change over time. About half the code in this parser deals
# with parsing the DTD, and the other half with the XML itself.
import os.path
import urlparse
import urllib
import warnings
from xml.parsers import expat
# The following four classes are used to add a member .attributes to integers,
# strings, lists, and dictionaries, respectively.
class IntegerElement(int):
    """An int that can carry an `attributes` dict taken from its XML tag."""
    def __repr__(self):
        base = int.__repr__(self)
        if not hasattr(self, "attributes"):
            # No XML attributes attached -- behave like a plain int.
            return base
        return "IntegerElement(%s, attributes=%s)" % (base, repr(self.attributes))
class StringElement(str):
    """A str that can carry an `attributes` dict taken from its XML tag."""
    def __repr__(self):
        base = str.__repr__(self)
        if not hasattr(self, "attributes"):
            # No XML attributes attached -- behave like a plain str.
            return base
        return "StringElement(%s, attributes=%s)" % (base, repr(self.attributes))
class UnicodeElement(unicode):
    # A unicode string (Python 2 type) that can carry an `attributes` dict
    # taken from its XML tag; repr shows the attributes only when present.
    def __repr__(self):
        text = unicode.__repr__(self)
        try:
            attributes = self.attributes
        except AttributeError:
            # No XML attributes attached -- behave like plain unicode.
            return text
        return "UnicodeElement(%s, attributes=%s)" % (text, repr(attributes))
class ListElement(list):
    """A list that can carry an `attributes` dict taken from its XML tag."""
    def __repr__(self):
        base = list.__repr__(self)
        if not hasattr(self, "attributes"):
            # No XML attributes attached -- behave like a plain list.
            return base
        return "ListElement(%s, attributes=%s)" % (base, repr(self.attributes))
class DictionaryElement(dict):
    """A dict that can carry an `attributes` dict taken from its XML tag.

    Note: repr deliberately says "DictElement", matching StructureElement.
    """
    def __repr__(self):
        base = dict.__repr__(self)
        if not hasattr(self, "attributes"):
            # No XML attributes attached -- behave like a plain dict.
            return base
        return "DictElement(%s, attributes=%s)" % (base, repr(self.attributes))
# A StructureElement is like a dictionary, but some of its keys can have
# multiple values associated with it. These values are stored in a list
# under each key.
class StructureElement(dict):
    """A dict in which designated keys accumulate every assigned value in a
    list, while all other keys behave normally.

    `keys` names the multi-valued keys; each is pre-seeded with an empty
    list so repeated `structure[key] = value` assignments append.
    """
    def __init__(self, keys):
        dict.__init__(self)
        # Keep a reference to the caller's key collection (not a copy).
        self.listkeys = keys
        for multi_key in keys:
            dict.__setitem__(self, multi_key, [])

    def __setitem__(self, key, value):
        if key not in self.listkeys:
            dict.__setitem__(self, key, value)
        else:
            # Multi-valued key: accumulate instead of overwrite.
            self[key].append(value)

    def __repr__(self):
        base = dict.__repr__(self)
        if not hasattr(self, "attributes"):
            return base
        return "DictElement(%s, attributes=%s)" % (base, repr(self.attributes))
class NotXMLError(ValueError):
    """Raised when the input stream does not look like XML at all."""
    def __init__(self, message):
        self.msg = message

    def __str__(self):
        return ("Failed to parse the XML data (%s). Please make sure that"
                " the input data are in XML format." % self.msg)
class CorruptedXMLError(ValueError):
    """Raised when the input started as XML but could not be parsed fully."""
    def __init__(self, message):
        self.msg = message

    def __str__(self):
        return ("Failed to parse the XML data (%s). Please make sure that"
                " the input data are not corrupted." % self.msg)
class ValidationError(ValueError):
    """Validating parsers raise this error if the parser finds a tag in the XML that is not defined in the DTD. Non-validating parsers do not raise this error. The Bio.Entrez.read and Bio.Entrez.parse functions use validating parsers by default (see those functions for more information)"""
    def __init__(self, name):
        # Tag name that was missing from the DTD.
        self.name = name

    def __str__(self):
        return ("Failed to find tag '%s' in the DTD. To skip all tags that"
                " are not represented in the DTD, please call Bio.Entrez.read"
                " or Bio.Entrez.parse with validate=False." % self.name)
class DataHandler(object):
    # Directory holding DTD files cached in the user's home directory.
    home = os.path.expanduser('~')
    local_dtd_dir = os.path.join(home, '.biopython', 'Bio', 'Entrez', 'DTDs')
    del home
    # Directory holding the DTD files shipped with Bio.Entrez itself.
    from Bio import Entrez
    global_dtd_dir = os.path.join(str(Entrez.__path__[0]), "DTDs")
    del Entrez
    def __init__(self, validate):
        # Parsing state shared by the expat callbacks.
        self.stack = []          # partially-built Python objects, outermost first
        self.errors = []
        self.integers = []       # DTD element names to map to IntegerElement
        self.strings = []        # ... to StringElement/UnicodeElement
        self.lists = []          # ... to ListElement
        self.dictionaries = []   # ... to DictionaryElement
        self.structures = {}     # ... to StructureElement (name -> multi-keys)
        self.items = []
        self.dtd_urls = []
        self.validating = validate
        self.parser = expat.ParserCreate(namespace_separator=" ")
        # Always process parameter entities so external DTDs are fetched.
        self.parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
        # Content handlers are installed lazily by xmlDeclHandler once the
        # XML declaration is seen (used to detect non-XML input).
        self.parser.XmlDeclHandler = self.xmlDeclHandler
    def read(self, handle):
        """Set up the parser and let it parse the XML results"""
        # NOTE: Python 2 `except ExceptionType, name` syntax throughout.
        try:
            self.parser.ParseFile(handle)
        except expat.ExpatError, e:
            if self.parser.StartElementHandler:
                # We saw the initial <!xml declaration, so we can be sure that
                # we are parsing XML data. Most likely, the XML file is
                # corrupted.
                raise CorruptedXMLError(e)
            else:
                # We have not seen the initial <!xml declaration, so probably
                # the input data is not in XML format.
                raise NotXMLError(e)
        try:
            return self.object
        except AttributeError:
            if self.parser.StartElementHandler:
                # We saw the initial <!xml declaration, and expat didn't notice
                # any errors, so self.object should be defined. If not, this is
                # a bug.
                raise RuntimeError("Failed to parse the XML file correctly, possibly due to a bug in Bio.Entrez. Please contact the Biopython developers at biopython-dev@biopython.org for assistance.")
            else:
                # We did not see the initial <!xml declaration, so probably
                # the input data is not in XML format.
                raise NotXMLError("XML declaration not found")
    def parse(self, handle):
        # Generator variant of read(): stream the XML in fixed-size blocks
        # and yield each completed top-level record as soon as it is parsed.
        # The top-level object must be a list (one entry per record).
        BLOCK = 1024
        while True:
            # Read in another block of the file...
            text = handle.read(BLOCK)
            if not text:
                # We have reached the end of the XML file
                if self.stack:
                    # No more XML data, but there is still some unfinished
                    # business
                    raise CorruptedXMLError
                try:
                    # Flush any records parsed but not yet yielded.
                    for record in self.object:
                        yield record
                except AttributeError:
                    if self.parser.StartElementHandler:
                        # We saw the initial <!xml declaration, and expat
                        # didn't notice any errors, so self.object should be
                        # defined. If not, this is a bug.
                        raise RuntimeError("Failed to parse the XML file correctly, possibly due to a bug in Bio.Entrez. Please contact the Biopython developers at biopython-dev@biopython.org for assistance.")
                    else:
                        # We did not see the initial <!xml declaration, so
                        # probably the input data is not in XML format.
                        raise NotXMLError("XML declaration not found")
                # Tell expat the document is complete, then drop the parser.
                self.parser.Parse("", True)
                self.parser = None
                return
            try:
                self.parser.Parse(text, False)
            except expat.ExpatError, e:
                if self.parser.StartElementHandler:
                    # We saw the initial <!xml declaration, so we can be sure
                    # that we are parsing XML data. Most likely, the XML file
                    # is corrupted.
                    raise CorruptedXMLError(e)
                else:
                    # We have not seen the initial <!xml declaration, so
                    # probably the input data is not in XML format.
                    raise NotXMLError(e)
            if not self.stack:
                # Haven't read enough from the XML file yet
                continue
            records = self.stack[0]
            if not isinstance(records, list):
                raise ValueError("The XML file does not represent a list. Please use Entrez.read instead of Entrez.parse")
            while len(records) > 1: # Then the top record is finished
                record = records.pop(0)
                yield record
def xmlDeclHandler(self, version, encoding, standalone):
# XML declaration found; set the handlers
self.parser.StartElementHandler = self.startElementHandler
self.parser.EndElementHandler = self.endElementHandler
self.parser.CharacterDataHandler = self.characterDataHandler
self.parser.ExternalEntityRefHandler = self.externalEntityRefHandler
self.parser.StartNamespaceDeclHandler = self.startNamespaceDeclHandler
def startNamespaceDeclHandler(self, prefix, un):
raise NotImplementedError("The Bio.Entrez parser cannot handle XML data that make use of XML namespaces")
def startElementHandler(self, name, attrs):
self.content = ""
if name in self.lists:
object = ListElement()
elif name in self.dictionaries:
object = DictionaryElement()
elif name in self.structures:
object = StructureElement(self.structures[name])
elif name in self.items: # Only appears in ESummary
name = str(attrs["Name"]) # convert from Unicode
del attrs["Name"]
itemtype = str(attrs["Type"]) # convert from Unicode
del attrs["Type"]
if itemtype=="Structure":
object = DictionaryElement()
elif name in ("ArticleIds", "History"):
object = StructureElement(["pubmed", "medline"])
elif itemtype=="List":
object = ListElement()
else:
object = StringElement()
object.itemname = name
object.itemtype = itemtype
elif name in self.strings + self.errors + self.integers:
self.attributes = attrs
return
else:
# Element not found in DTD
if self.validating:
raise ValidationError(name)
else:
# this will not be stored in the record
object = ""
if object!="":
object.tag = name
if attrs:
object.attributes = dict(attrs)
if len(self.stack)!=0:
current = self.stack[-1]
try:
current.append(object)
except AttributeError:
current[name] = object
self.stack.append(object)
def endElementHandler(self, name):
value = self.content
if name in self.errors:
if value=="":
return
else:
raise RuntimeError(value)
elif name in self.integers:
value = IntegerElement(value)
elif name in self.strings:
# Convert Unicode strings to plain strings if possible
try:
value = StringElement(value)
except UnicodeEncodeError:
value = UnicodeElement(value)
elif name in self.items:
self.object = self.stack.pop()
if self.object.itemtype in ("List", "Structure"):
return
elif self.object.itemtype=="Integer" and value:
value = IntegerElement(value)
else:
# Convert Unicode strings to plain strings if possible
try:
value = StringElement(value)
except UnicodeEncodeError:
value = UnicodeElement(value)
name = self.object.itemname
else:
self.object = self.stack.pop()
return
value.tag = name
if self.attributes:
value.attributes = dict(self.attributes)
del self.attributes
current = self.stack[-1]
if current!="":
try:
current.append(value)
except AttributeError:
current[name] = value
def characterDataHandler(self, content):
self.content += content
def elementDecl(self, name, model):
"""This callback function is called for each element declaration:
<!ELEMENT name (...)>
encountered in a DTD. The purpose of this function is to determine
whether this element should be regarded as a string, integer, list
dictionary, structure, or error."""
if name.upper()=="ERROR":
self.errors.append(name)
return
if name=='Item' and model==(expat.model.XML_CTYPE_MIXED,
expat.model.XML_CQUANT_REP,
None, ((expat.model.XML_CTYPE_NAME,
expat.model.XML_CQUANT_NONE,
'Item',
()
),
)
):
# Special case. As far as I can tell, this only occurs in the
# eSummary DTD.
self.items.append(name)
return
# First, remove ignorable parentheses around declarations
while (model[0] in (expat.model.XML_CTYPE_SEQ,
expat.model.XML_CTYPE_CHOICE)
and model[1] in (expat.model.XML_CQUANT_NONE,
expat.model.XML_CQUANT_OPT)
and len(model[3])==1):
model = model[3][0]
# PCDATA declarations correspond to strings
if model[0] in (expat.model.XML_CTYPE_MIXED,
expat.model.XML_CTYPE_EMPTY):
self.strings.append(name)
return
# List-type elements
if (model[0] in (expat.model.XML_CTYPE_CHOICE,
expat.model.XML_CTYPE_SEQ) and
model[1] in (expat.model.XML_CQUANT_PLUS,
expat.model.XML_CQUANT_REP)):
self.lists.append(name)
return
# This is the tricky case. Check which keys can occur multiple
# times. If only one key is possible, and it can occur multiple
# times, then this is a list. If more than one key is possible,
# but none of them can occur multiple times, then this is a
# dictionary. Otherwise, this is a structure.
# In 'single' and 'multiple', we keep track which keys can occur
# only once, and which can occur multiple times.
single = []
multiple = []
# The 'count' function is called recursively to make sure all the
# children in this model are counted. Error keys are ignored;
# they raise an exception in Python.
def count(model):
quantifier, name, children = model[1:]
if name==None:
if quantifier in (expat.model.XML_CQUANT_PLUS,
expat.model.XML_CQUANT_REP):
for child in children:
multiple.append(child[2])
else:
for child in children:
count(child)
elif name.upper()!="ERROR":
if quantifier in (expat.model.XML_CQUANT_NONE,
expat.model.XML_CQUANT_OPT):
single.append(name)
elif quantifier in (expat.model.XML_CQUANT_PLUS,
expat.model.XML_CQUANT_REP):
multiple.append(name)
count(model)
if len(single)==0 and len(multiple)==1:
self.lists.append(name)
elif len(multiple)==0:
self.dictionaries.append(name)
else:
self.structures.update({name: multiple})
def open_dtd_file(self, filename):
path = os.path.join(DataHandler.local_dtd_dir, filename)
try:
handle = open(path, "rb")
except IOError:
pass
else:
return handle
path = os.path.join(DataHandler.global_dtd_dir, filename)
try:
handle = open(path, "rb")
except IOError:
pass
else:
return handle
return None
def externalEntityRefHandler(self, context, base, systemId, publicId):
"""The purpose of this function is to load the DTD locally, instead
of downloading it from the URL specified in the XML. Using the local
DTD results in much faster parsing. If the DTD is not found locally,
we try to download it. If new DTDs become available from NCBI,
putting them in Bio/Entrez/DTDs will allow the parser to see them."""
urlinfo = urlparse.urlparse(systemId)
#Following attribute requires Python 2.5+
#if urlinfo.scheme=='http':
if urlinfo[0]=='http':
# Then this is an absolute path to the DTD.
url = systemId
elif urlinfo[0]=='':
# Then this is a relative path to the DTD.
# Look at the parent URL to find the full path.
try:
url = self.dtd_urls[-1]
except IndexError:
# Assume the default URL for DTDs if the top parent
# does not contain an absolute path
source = "http://www.ncbi.nlm.nih.gov/dtd/"
else:
source = os.path.dirname(url)
url = os.path.join(source, systemId)
self.dtd_urls.append(url)
# First, try to load the local version of the DTD file
location, filename = os.path.split(systemId)
handle = self.open_dtd_file(filename)
if not handle:
# DTD is not available as a local file. Try accessing it through
# the internet instead.
message = """\
Unable to load DTD file %s.
Bio.Entrez uses NCBI's DTD files to parse XML files returned by NCBI Entrez.
Though most of NCBI's DTD files are included in the Biopython distribution,
sometimes you may find that a particular DTD file is missing. While we can
access the DTD file through the internet, the parser is much faster if the
required DTD files are available locally.
For this purpose, please download %s from
%s
and save it either in directory
%s
or in directory
%s
in order for Bio.Entrez to find it.
Alternatively, you can save %s in the directory
Bio/Entrez/DTDs in the Biopython distribution, and reinstall Biopython.
Please also inform the Biopython developers about this missing DTD, by
reporting a bug on http://bugzilla.open-bio.org/ or sign up to our mailing
list and emailing us, so that we can include it with the next release of
Biopython.
Proceeding to access the DTD file through the internet...
""" % (filename, filename, url, self.global_dtd_dir, self.local_dtd_dir, filename)
warnings.warn(message)
try:
handle = urllib.urlopen(url)
except IOError:
raise RuntimeException("Failed to access %s at %s" % (filename, url))
parser = self.parser.ExternalEntityParserCreate(context)
parser.ElementDeclHandler = self.elementDecl
parser.ParseFile(handle)
handle.close()
self.dtd_urls.pop()
return 1
|
LyonsLab/coge
|
bin/last_wrapper/Bio/Entrez/Parser.py
|
Python
|
bsd-2-clause
| 21,089
|
[
"Biopython"
] |
ab2bd0e220bc062f305b307b1172837d7b3cade075453db44636315ef4d8f4ae
|
# coding: utf-8
from __future__ import unicode_literals, division, print_function
import os
import tempfile
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.abinitio.abiinspect import *
#test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", 'test_files')
class YamlTokenizerTest(PymatgenTest):
    """Test YamlTokenizer."""
    def test_base(self):
        # A mix of valid YAML documents (delimited by --- ... ...) and text
        # the tokenizer is expected to skip.
        # NOTE(review): the expected doc_linenos below ([1, 13, 23]) suggest
        # the original string contained blank separator lines that may have
        # been lost in this copy of the file — confirm against upstream.
        string = \
"""---
none: [~, null]
bool: [true, false, on, off]
int: 42
float: 3.14159
list: [LITE, RES_ACID, SUS_DEXT]
dict: {hp: 13, sp: 5}
...
this is not a YAML document!
and the tokenizer will ignore it
--- !Monster
name: Cave spider
hp: [2,6] # 2d6
ac: 16
attacks: [BITE, HURT]
...
This is not a proper document since it does not start with ---
the end tag below is ignored
...
--- !Monster
name: Dragon
hp: [2,6] # 2d6
ac: 32
attacks: [BITE, HURT]
...
"""
        #for i, line in enumerate(string.splitlines()): print(i, line)
        # NOTE(review): the OS-level file descriptor returned by mkstemp is
        # never closed here (the file is re-opened by name) — fd leak; an
        # os.close(fd) should be added. Left unchanged in this review pass.
        fd, filename = tempfile.mkstemp(text=True)
        with open(filename, "w") as fh:
            fh.write(string)
        # Expected tag and starting line number of each YAML document above.
        doc_tags = [None, "!Monster", "!Monster"]
        doc_linenos = [1, 13, 23]
        with YamlTokenizer(filename) as r:
            # Iterate the docs
            n = 0
            for i, doc in enumerate(r):
                n += 1
                print("doc", doc)
                self.assertTrue(doc.tag == doc_tags[i])
                self.assertTrue(doc.lineno == doc_linenos[i])
            self.assertTrue(n == len(doc_tags))
            # Read all docs present in the file.
            r.seek(0)
            all_docs = r.all_yaml_docs()
            #print(all_docs)
            self.assertTrue(len(all_docs) == 3)
            # We should be at the begining at the file.
            self.assertTrue(all_docs == r.all_yaml_docs())
            # Find documents by tag.
            r.seek(0)
            monster = r.next_doc_with_tag("!Monster")
            #print("monster",monster)
            self.assertTrue(monster == all_docs[1])
            monster = r.next_doc_with_tag("!Monster")
            self.assertTrue(monster == all_docs[2])
            # this should raise StopIteration
            with self.assertRaises(StopIteration):
                monster = r.next_doc_with_tag("!Monster")
        os.remove(filename)
if __name__ == "__main__":
    # Allow running this test module directly via the stdlib runner.
    from unittest import main
    main()
|
Dioptas/pymatgen
|
pymatgen/io/abinitio/tests/test_abiinspect.py
|
Python
|
mit
| 2,376
|
[
"pymatgen"
] |
3e1d2af3a6bedd9a1dad6f18372bdee2ea9ed8ead49f22865ee8721c5cbc7170
|
# -*- coding: utf-8 -*-
u"""SRW execution template.
:copyright: Copyright (c) 2015 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkcompat
from pykern import pkio
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdexc, pkdlog, pkdp
from sirepo import crystal
from sirepo import job
from sirepo import simulation_db
from sirepo.template import srw_common
from sirepo.template import template_common
import array
import copy
import math
import numpy as np
import os
import pickle
import pykern.pkjson
import re
import sirepo.mpi
import sirepo.sim_data
import sirepo.uri_router
import sirepo.util
import srwl_bl
import srwlib
import time
import traceback
import uti_io
import uti_plot_com
import zipfile
_SIM_DATA, SIM_TYPE, SCHEMA = sirepo.sim_data.template_globals()
# Attribute name under which the background import returns the parsed sim data.
PARSED_DATA_ATTR = 'srwParsedData'
_CANVAS_MAX_SIZE = 65535
# Per-report description of the SRW output file: title/subtitle templates
# (curly-brace placeholders filled by _update_report_labels), output filename,
# plot dimensionality, axis labels and units.
_OUTPUT_FOR_MODEL = PKDict(
    coherenceXAnimation=PKDict(
        title='',
        filename='res_int_pr_me_dcx.dat',
        dimensions=3,
        labels=['(X1 + X2) / 2', '(X1 - X2) / 2', 'Degree of Coherence'],
        units=['m', 'm', ''],
    ),
    coherenceYAnimation=PKDict(
        title='',
        filename='res_int_pr_me_dcy.dat',
        dimensions=3,
        labels=['(Y1 + Y2) / 2', '(Y1 - Y2) / 2', 'Degree of Coherence'],
        units=['m', 'm', ''],
    ),
    coherentModesAnimation=PKDict(
        title='E={photonEnergy} eV Modes {plotModesStart} - {plotModesEnd}',
        basename='res_csd',
        filename='res_csd_cm.h5',
        dimensions=3,
        labels=['Horizontal Position', 'Vertical Position', 'Intensity'],
        units=['m', 'm', '{intensity_units}'],
    ),
    fluxReport=PKDict(
        title='Flux through Finite Aperture',
        subtitle='{polarization} Polarization',
        filename='res_spec_me.dat',
        dimensions=2,
        labels=['Photon Energy', '{flux_label}'],
        units=['eV', '{flux_units}'],
    ),
    initialIntensityReport=PKDict(
        title='Before Propagation (E={photonEnergy} eV)',
        subtitle='{characteristic}',
        filename='res_int_se.dat',
        dimensions=3,
        labels=['Horizontal Position', 'Vertical Position', 'Intensity'],
        units=['m', 'm', '{intensity_units}'],
    ),
    intensityReport=PKDict(
        title='On-Axis Spectrum from Filament Electron Beam',
        subtitle='{polarization} Polarization',
        filename='res_spec_se.dat',
        dimensions=2,
        labels=['Photon Energy', 'Intensity'],
        units=['eV', '{intensity_units}'],
    ),
    mirrorReport=PKDict(
        title='Optical Path Difference',
        filename='res_mirror.dat',
        dimensions=3,
        labels=['Horizontal Position', 'Vertical Position', 'Optical Path Difference'],
        units=['m', 'm', 'm'],
    ),
    multiElectronAnimation=PKDict(
        title='E={photonEnergy} eV',
        filename='res_int_pr_me.dat',
        dimensions=3,
        labels=['Horizontal Position', 'Vertical Position', 'Intensity'],
        units=['m', 'm', '{intensity_units}'],
    ),
    powerDensityReport=PKDict(
        title='Power Density',
        filename='res_pow.dat',
        dimensions=3,
        labels=['Horizontal Position', 'Vertical Position', 'Power Density'],
        units=['m', 'm', 'W/mm^2'],
    ),
    brillianceReport=PKDict(
        filename='res_brilliance.dat',
        dimensions=2,
    ),
    trajectoryReport=PKDict(
        filename='res_trj.dat',
        dimensions=2,
    ),
    beamline3DReport=PKDict(
        filename='beamline_orient.dat',
        dimensions=2,
    ),
    watchpointReport=PKDict(
        title='After Propagation (E={photonEnergy} eV)',
        subtitle='{characteristic}',
        filename='res_int_pr_se.dat',
        dimensions=3,
        labels=['Horizontal Position', 'Vertical Position', 'Intensity'],
        units=['m', 'm', '{intensity_units}'],
    ),
)
# Derived entries: animations reuse the matching static-report description.
_OUTPUT_FOR_MODEL.fluxAnimation = copy.deepcopy(_OUTPUT_FOR_MODEL.fluxReport)
_OUTPUT_FOR_MODEL.beamlineAnimation = copy.deepcopy(_OUTPUT_FOR_MODEL.watchpointReport)
_OUTPUT_FOR_MODEL.beamlineAnimation.filename='res_int_pr_se{watchpoint_id}.dat'
_OUTPUT_FOR_MODEL.sourceIntensityReport = copy.deepcopy(_OUTPUT_FOR_MODEL.initialIntensityReport)
_OUTPUT_FOR_MODEL.sourceIntensityReport.title = 'E={sourcePhotonEnergy} eV'
# Directory (inside run_dir) where SRW writes its multi-electron progress logs.
_LOG_DIR = '__srwl_logs__'
_JSON_MESSAGE_EXPANSION = 20
# Union of all rsOpt-tunable parameter names across all element types in the schema.
_RSOPT_PARAMS = {
    i for sublist in [v for v in [list(SCHEMA.constants.rsOptElements[k].keys()) for
    k in SCHEMA.constants.rsOptElements]] for i in sublist
}
_RSOPT_PARAMS_NO_ROTATION = [p for p in _RSOPT_PARAMS if p != 'rotation']
_TABULATED_UNDULATOR_DATA_DIR = 'tabulatedUndulator'
# Per-model filenames for user-saved beam/undulator lists.
_USER_MODEL_LIST_FILENAME = PKDict(
    electronBeam='_user_beam_list.json',
    tabulatedUndulator='_user_undulator_list.json',
)
# Maximum number of runStatus polls import_file() performs before giving up.
_IMPORT_PYTHON_POLLS = 60
class MagnMeasZip:
    """Convenience wrapper around a zip archive of magnetic measurements."""
    def __init__(self, archive_name):
        """The class for convenient operation with an archive with the magnetic measurements.
        Args:
            archive_name: the name of the archive.
        """
        self.z = zipfile.ZipFile(archive_name)
        self.index_dir = None
        self.index_file = None
        self.gaps = None
        self.dat_files = None
        self._find_index_file()
        self._find_dat_files_from_index_file()
    def find_closest_gap(self, gap):
        """Return the measurement length for the .dat file whose gap value is
        closest to *gap* (step size x number of points, rounded to 6 places)."""
        gap = float(gap)
        # Partition gap indices into those at/below and those above the target.
        indices_previous = []
        indices_next = []
        for i in range(len(self.gaps)):
            if self.gaps[i] <= gap:
                indices_previous.append(i)
            else:
                indices_next.append(i)
        assert indices_previous or indices_next
        idx_previous = indices_previous[-1] if indices_previous else indices_next[0]
        idx_next = indices_next[0] if indices_next else indices_previous[-1]
        # Pick whichever neighbor is numerically closer to the requested gap.
        idx = idx_previous if abs(self.gaps[idx_previous] - gap) <= abs(self.gaps[idx_next] - gap) else idx_next
        dat_file = self.dat_files[idx]
        dat_content = self._get_file_content(dat_file)
        # Assumes lines 9 and 10 of the .dat file carry "<value> # <comment>"
        # style entries for step size and number of points — per the SRW
        # measurement-file layout; TODO(review): confirm against a sample file.
        dat_file_step = float(dat_content[8].split('#')[1].strip())
        dat_file_number_of_points = int(dat_content[9].split('#')[1].strip())
        return round(dat_file_step * dat_file_number_of_points, 6)
    def _find_dat_files_from_index_file(self):
        # Each non-empty index row is whitespace-separated; column 0 is the
        # gap value, column 3 the corresponding .dat filename.
        self.gaps = []
        self.dat_files = []
        for row in self._get_file_content(self.index_file):
            v = row.strip()
            if v:
                v = v.split()
                self.gaps.append(float(v[0]))
                self.dat_files.append(v[3])
    def _find_index_file(self):
        # finds an index file (``*.txt``) in the provided zip-object.
        for f in self.z.namelist():
            if re.search(r'\.txt', f):
                self.index_file = os.path.basename(f)
                self.index_dir = os.path.dirname(f)
                break
        assert self.index_file is not None
    def _get_file_content(self, file_name):
        # Read file_name (relative to the index directory) from the archive.
        with self.z.open(os.path.join(self.index_dir, file_name)) as f:
            return self._normalize_eol(f)
    def _normalize_eol(self, file_desc):
        # Normalize Windows/Mac line endings and split into lines.
        s = file_desc.read().decode().replace('\r\n', '\n').replace('\r', '\n')
        content = s.split('\n')
        return content
def background_percent_complete(report, run_dir, is_running):
    """Report progress/frame info for a background simulation by inspecting
    the output and log files present in *run_dir*."""
    res = PKDict(
        percentComplete=0,
        frameCount=0,
    )
    if report == 'beamlineAnimation':
        return _beamline_animation_percent_complete(run_dir, res)
    # Defaults used when no SRW progress log exists (yet).
    status = PKDict(
        progress=0,
        particle_number=0,
        total_num_of_particles=0,
    )
    filename = run_dir.join(get_filename_for_model(report))
    if filename.exists():
        status.progress = 100
        # The output file's mtime doubles as the frame index/timestamp.
        t = int(filename.mtime())
        if not is_running and report == 'fluxAnimation':
            # let the client know which flux method was used for the output
            data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
            res.method = data.models.fluxAnimation.method
        if report == 'multiElectronAnimation':
            # let client know that degree of coherence reports are also available
            res.calcCoherence = run_dir.join(get_filename_for_model('coherenceXAnimation')).exists()
        res.update(PKDict(
            frameCount=t + 1,
            frameIndex=t,
            lastUpdateTime=t,
        ))
    status_files = pkio.sorted_glob(run_dir.join(_LOG_DIR, 'srwl_*.json'))
    if status_files:  # Read the status file if SRW produces the multi-e logs
        progress_file = pkio.py_path(status_files[-1])
        if progress_file.exists():
            status = simulation_db.read_json(progress_file)
    res.update(PKDict(
        percentComplete=status.progress,
        particleNumber=status.particle_number,
        particleCount=status.total_num_of_particles,
    ))
    return res
def calculate_beam_drift(ebeam_position, source_type, undulator_type, undulator_length, undulator_period):
    """Return the electron-beam drift distance.

    In 'auto' mode the drift for an idealized undulator is computed from the
    undulator geometry; other sources get 0. In manual mode the user-supplied
    ``ebeam_position.drift`` is returned unchanged.

    Fix: the "Calculate drift for ideal undulator." docstring was previously a
    stray no-op statement inside the if-body; it is now real documentation.
    """
    if ebeam_position.driftCalculationMethod == 'auto':
        if _SIM_DATA.srw_is_idealized_undulator(source_type, undulator_type):
            # initial drift = 1/2 undulator length + 2 periods
            return -0.5 * float(undulator_length) - 2 * float(undulator_period)
        return 0
    return ebeam_position.drift
def compute_crl_focus(model):
    """Fill in the CRL model's focalDistance and absoluteFocusPosition using
    bnlcrl's ideal-focus calculation and return the updated model."""
    import bnlcrl.pkcli.simulate
    res = bnlcrl.pkcli.simulate.calc_ideal_focus(
        # tipRadius is entered in um; calc_ideal_focus expects meters
        radius=float(model.tipRadius) * 1e-6,
        n=model.numberOfLenses,
        delta=model.refractiveIndex,
        p0=model.position,
    )
    model.focalDistance = res['ideal_focus']
    model.absoluteFocusPosition = res['p1_ideal_from_source']
    return model
def compute_undulator_length(model):
    """Derive the tabulated undulator's length from its magnetic measurement
    archive; returns an empty PKDict for idealized undulators or when the
    archive is not available."""
    if model.undulatorType == 'u_i':
        # Idealized undulator: nothing to measure.
        return PKDict()
    if not _SIM_DATA.lib_file_exists(model.magneticFile):
        return PKDict()
    archive = _SIM_DATA.lib_file_abspath(model.magneticFile)
    length = MagnMeasZip(str(archive)).find_closest_gap(model.gap)
    return PKDict(length=_SIM_DATA.srw_format_float(length))
def clean_run_dir(run_dir):
    """Delete the tabulated-undulator scratch directory from run_dir, if present."""
    scratch = run_dir.join(_TABULATED_UNDULATOR_DATA_DIR)
    if scratch.exists():
        scratch.remove()
def _extract_coherent_modes(model, out_info):
    """Sum the intensity of the selected range of coherent modes from the
    SRW HDF5 file named in *out_info* and write it to an ascii .dat file;
    returns the new filename. May clamp the model's mode range in place."""
    out_file = 'combined-modes.dat'
    wfr = srwlib.srwl_uti_read_wfr_cm_hdf5(_file_path=out_info.filename)
    # Clamp the requested mode window to what the file actually contains.
    if model.plotModesEnd > len(wfr):
        model.plotModesEnd = len(wfr)
    if model.plotModesStart > model.plotModesEnd:
        model.plotModesStart = model.plotModesEnd
    if model.plotModesStart == model.plotModesEnd:
        # Single mode selected: use the singular title template.
        out_info.title = 'E={photonEnergy} eV Mode {plotModesStart}'
    mesh = wfr[0].mesh
    arI = array.array('f', [0] * mesh.nx * mesh.ny)
    for i in range(model.plotModesStart, model.plotModesEnd + 1):
        # Final [2] argument accumulates (adds) into arI across modes —
        # per SRW's CalcIntFromElecField convention; TODO(review): confirm.
        srwlib.srwl.CalcIntFromElecField(
            arI,
            wfr[i - 1],
            int(model.polarization),
            int(model.characteristic),
            3, mesh.eStart, 0, 0, [2])
    srwlib.srwl_uti_save_intens_ascii(
        arI,
        mesh,
        out_file,
        _arLabels=['Photon Energy', 'Horizontal Position', 'Vertical Position', 'Intensity'],
        _arUnits=['eV', 'm', 'm', 'ph/s/.1%bw/mm^2'],
    )
    return out_file
def extract_report_data(sim_in):
    """Load the SRW output file for sim_in.report and convert it into the
    PKDict plot structure the client expects."""
    r = sim_in.report
    # Strip a trailing watchpoint id (e.g. watchpointReport12) to find the
    # report's static output description.
    out = copy.deepcopy(_OUTPUT_FOR_MODEL[re.sub(r'\d+$', '', r)])
    dm = sim_in.models
    # Reports with dedicated extractors return early.
    if r == 'beamline3DReport':
        return _extract_beamline_orientation(out.filename)
    if r == 'brillianceReport':
        return _extract_brilliance_report(dm.brillianceReport, out.filename)
    if r == 'trajectoryReport':
        return _extract_trajectory_report(dm.trajectoryReport, out.filename)
    #TODO(pjm): remove fixup after dcx/dcy files can be read by uti_plot_com
    if r in ('coherenceXAnimation', 'coherenceYAnimation'):
        _fix_file_header(out.filename)
    if r == 'coherentModesAnimation':
        out.filename = _extract_coherent_modes(dm[r], out)
    # Substitute the {placeholders} in titles/labels/units for this model.
    _update_report_labels(out, PKDict(
        photonEnergy=dm.simulation.photonEnergy,
        sourcePhotonEnergy=dm.sourceIntensityReport.photonEnergy,
        polarization=_enum_text('Polarization', dm[r], 'polarization'),
        characteristic=_enum_text('Characteristic', dm[r], 'characteristic'),
        intensity_units=_intensity_units(sim_in),
        flux_label=_flux_label(dm[r]),
        flux_units=_flux_units(dm[r]),
        watchpoint_id=dm[r].get('id', 0),
        plotModesStart=dm[r].get('plotModesStart', ''),
        plotModesEnd=dm[r].get('plotModesEnd', ''),
    ))
    # Bracket/parenthesize the y-axis unit for display.
    if out.units[1] == 'm':
        out.units[1] = '[m]'
    else:
        out.units[1] = '({})'.format(out.units[1])
    data, _, allrange, _, _ = uti_plot_com.file_load(out.filename)
    res = PKDict(
        title=out.title,
        subtitle=out.get('subtitle', ''),
        x_range=[allrange[0], allrange[1]],
        y_label=_superscript(out.labels[1] + ' ' + out.units[1]),
        x_label=out.labels[0] + ' [' + out.units[0] + ']',
        x_units=out.units[0],
        y_units=out.units[1],
        points=data,
        z_range=[np.min(data), np.max(data)],
        # send the full plot ranges as summaryData
        summaryData=PKDict(
            fieldRange=allrange,
            fieldIntensityRange=dm[r].get('summaryData', {}).get(
                'fieldIntensityRange',
                [np.min(data), np.max(data)],
            ),
        ),
    )
    if out.dimensions == 3:
        # 3-d data is flattened in the file; remap into a 2-d heatmap payload.
        res = _remap_3d(res, allrange, out, dm[r])
    return res
def export_rsopt_config(data, filename):
    """Build a zip archive (at *filename*) containing the rsopt scan files
    (.py/.sh/.yml), a README, and any needed lib files; returns its path."""
    v = _rsopt_jinja_context(data.models.exportRsOpt)
    fz = pkio.py_path(filename)
    # Sanitize the archive basename into a safe file stem.
    f = re.sub(r'[^\w.]+', '-', fz.purebasename).strip('-')
    v.runDir = f'{f}_scan'
    v.fileBase = f
    tf = {k: PKDict(file=f'{f}.{k}') for k in ['py', 'sh', 'yml']}
    for t in tf:
        v[f'{t}FileName'] = tf[t].file
    v.outFileName = f'{f}.out'
    v.readmeFileName = 'README.txt'
    v.libFiles = [f.basename for f in _SIM_DATA.lib_files_for_export(data)]
    v.hasLibFiles = len(v.libFiles) > 0
    v.randomSeed = data.models.exportRsOpt.randomSeed if \
        data.models.exportRsOpt.randomSeed is not None else ''
    # do this in a second loop so v is fully updated
    # note that the rsopt context is regenerated in python_source_for_model()
    for t in tf:
        tf[t].content = python_source_for_model(data, 'rsoptExport', plot_reports=False) \
            if t == 'py' else \
            template_common.render_jinja(SIM_TYPE, v, f'rsoptExport.{t}')
    readme = template_common.render_jinja(SIM_TYPE, v, v.readmeFileName)
    with zipfile.ZipFile(
        fz,
        mode='w',
        compression=zipfile.ZIP_DEFLATED,
        allowZip64=True,
    ) as z:
        for t in tf:
            z.writestr(tf[t].file, tf[t].content)
        z.writestr(v.readmeFileName, readme)
        for d in _SIM_DATA.lib_files_for_export(data):
            z.write(d, d.basename)
    return fz
def get_application_data(data, **kwargs):
    """Dispatch on data.method for miscellaneous client requests; raises
    RuntimeError for unknown methods."""
    if data.method == 'model_list':
        res = []
        model_name = data.model_name
        if model_name == 'electronBeam':
            # Predefined beams come first, then the user's saved models.
            res.extend(get_predefined_beams())
        res.extend(_load_user_model_list(model_name))
        if model_name == 'electronBeam':
            for beam in res:
                srw_common.process_beam_parameters(beam)
        return PKDict(
            modelList=res
        )
    if data.method == 'create_shadow_simulation':
        # Imported lazily to avoid a circular/template import at module load.
        from sirepo.template.srw_shadow_converter import SRWShadowConverter
        return SRWShadowConverter().srw_to_shadow(data)
    if data.method == 'delete_user_models':
        return _delete_user_models(data.electron_beam, data.tabulated_undulator)
    elif data.method == 'compute_undulator_length':
        return compute_undulator_length(data.tabulated_undulator)
    elif data.method == 'processedImage':
        try:
            return _process_image(data, kwargs['tmp_dir'])
        except Exception as e:
            pkdlog('exception during processedImage: {}', pkdexc())
            # Return the error to the client rather than failing the request.
            return PKDict(
                error=str(e),
            )
    raise RuntimeError('unknown application data method: {}'.format(data.method))
def get_data_file(run_dir, model, frame, **kwargs):
    # Each model writes a fixed output filename; run_dir/frame are accepted
    # for interface compatibility with the framework but are unused here.
    return get_filename_for_model(model)
def get_filename_for_model(model):
    """Map a report/model name to the SRW output filename it produces."""
    # Watchpoint reports share one description; animation frame 0 is the
    # initial-intensity report.
    if _SIM_DATA.is_watchpoint(model):
        model = _SIM_DATA.WATCHPOINT_REPORT
    if model == 'beamlineAnimation0':
        model = 'initialIntensityReport'
    match = re.search(r'(beamlineAnimation)(\d+)', model)
    if match:
        base, wid = match.group(1), match.group(2)
        return _OUTPUT_FOR_MODEL[base].filename.format(watchpoint_id=wid)
    return _OUTPUT_FOR_MODEL[model].filename
def get_predefined_beams():
    """Return the list of predefined electron beams shipped with the SRW template."""
    return _SIM_DATA.srw_predefined().beams
def _copy_frame_args_into_model(frame_args, name):
    """Copy frame_args values into the current report's model (in place),
    coercing types per the schema for model *name*; returns the model."""
    m = frame_args.sim_in.models[frame_args.frameReport]
    m_schema = SCHEMA.model[name]
    for f in frame_args:
        # Only copy fields the model already has and the schema declares.
        if f in m and f in m_schema:
            m[f] = frame_args[f]
            if m_schema[f][1] == 'Float':
                # Presumably restores a '+' (e.g. in exponents like "1e+5")
                # that was decoded to whitespace in the query string —
                # TODO(review): confirm against the caller's encoding.
                m[f] = re.sub(r'\s', '+', m[f])
                m[f] = float(m[f])
            elif m_schema[f][1] == 'Integer':
                m[f] = int(m[f])
    return m
def sim_frame(frame_args):
    """Produce the plot data for one animation frame, recomputing intensity
    from a pickled wavefront for beamlineAnimation reports."""
    r = frame_args.frameReport
    frame_args.sim_in.report = r
    if r == 'multiElectronAnimation':
        m = frame_args.sim_in.models[r]
        m.intensityPlotsWidth = frame_args.intensityPlotsWidth
        if frame_args.get('rotateAngle', 0):
            m.rotateAngle = float(frame_args.rotateAngle)
            m.rotateReshape = frame_args.rotateReshape
        else:
            m.rotateAngle = 0
    elif r == 'coherentModesAnimation':
        _copy_frame_args_into_model(frame_args, r)
    elif 'beamlineAnimation' in r:
        # Trailing digits of the report name select the watchpoint id.
        wid = int(re.search(r'.*?(\d+)$', r)[1])
        fn = _wavefront_pickle_filename(wid)
        with open(fn, 'rb') as f:
            wfr = pickle.load(f)
        m = _copy_frame_args_into_model(frame_args, 'watchpointReport')
        if wid:
            m.id = wid
            frame_args.sim_in.report = 'beamlineAnimation'
            frame_args.sim_in.models.beamlineAnimation = m
            data_file = _OUTPUT_FOR_MODEL.beamlineAnimation.filename.format(
                watchpoint_id=wid)
        else:
            # Watchpoint 0 is the initial (pre-propagation) intensity.
            frame_args.sim_in.report = 'initialIntensityReport'
            frame_args.sim_in.models.initialIntensityReport = m
            data_file = _OUTPUT_FOR_MODEL.initialIntensityReport.filename
        # Recompute the intensity file from the pickled wavefront.
        srwl_bl.SRWLBeamline().calc_int_from_wfr(
            wfr,
            _pol=int(frame_args.polarization),
            _int_type=int(frame_args.characteristic),
            _fname=data_file,
            _pr=False,
        )
    if 'beamlineAnimation' not in r:
        # some reports may be written at the same time as the reader
        # if the file is invalid, wait a bit and try again
        for i in (1, 2, 3):
            try:
                return extract_report_data(frame_args.sim_in)
            except Exception:
                # sleep and retry to work-around concurrent file read/write
                pkdlog('sleep and retry simulation frame read: {} {}', i, r)
                time.sleep(2)
    return extract_report_data(frame_args.sim_in)
def import_file(req, tmp_dir, **kwargs):
    """Import a python/zip SRW simulation by creating a new simulation and
    running the parse as a background job, polling until the parsed data
    appears; always exits by raising (Response on success)."""
    import sirepo.server
    i = None
    try:
        # Create an empty simulation to host the background import.
        r = kwargs['reply_op'](simulation_db.default_data(SIM_TYPE))
        d = pykern.pkjson.load_any(r.data)
        i = d.models.simulation.simulationId
        b = d.models.backgroundImport = PKDict(
            arguments=req.import_file_arguments,
            python=pkcompat.from_bytes(req.file_stream.read()),
            userFilename=req.filename,
        )
        # POSIT: import.py uses ''', but we just don't allow quotes in names
        if "'" in b.arguments:
            raise sirepo.util.UserAlert('arguments may not contain quotes')
        if "'" in b.userFilename:
            raise sirepo.util.UserAlert('filename may not contain quotes')
        d.pkupdate(
            report='backgroundImport',
            forceRun=True,
            simulationId=i,
        )
        r = sirepo.uri_router.call_api('runSimulation', data=d)
        # Poll runStatus until the parsed data shows up or we give up.
        for _ in range(_IMPORT_PYTHON_POLLS):
            if r.status_code != 200:
                raise sirepo.util.UserAlert(
                    'error parsing python',
                    'unexpected response status={} data={}',
                    r.status_code,
                    r.data,
                )
            try:
                r = pykern.pkjson.load_any(r.data)
            except Exception as e:
                raise sirepo.util.UserAlert(
                    'error parsing python',
                    'error={} parsing response data={}',
                    e,
                    r.data,
                )
            if 'error' in r:
                pkdc('runSimulation error msg={}', r)
                raise sirepo.util.UserAlert(r.get('error'))
            if PARSED_DATA_ATTR in r:
                break
            if 'nextRequest' not in r:
                # NOTE(review): format string has one {} but two args
                # (PARSED_DATA_ATTR, r) — looks like an argument-count
                # mismatch; confirm intended message before changing.
                raise sirepo.util.UserAlert(
                    'error parsing python',
                    'unable to find nextRequest in response={}',
                    PARSED_DATA_ATTR,
                    r,
                )
            time.sleep(r.nextRequestSeconds)
            r = sirepo.uri_router.call_api('runStatus', data=r.nextRequest)
        else:
            # for/else: loop exhausted without break.
            raise sirepo.util.UserAlert(
                'error parsing python',
                'polled too many times, last response={}',
                r,
            )
        r = r.get(PARSED_DATA_ATTR)
        r.models.simulation.simulationId = i
        r = simulation_db.save_simulation_json(r, do_validate=True, fixup=True)
    except Exception:
        #TODO(robnagler) need to clean up simulations except in dev
        # NOTE(review): the bare raise below makes the cleanup code after it
        # unreachable — the created simulation is never deleted on failure;
        # confirm whether this is an intentional dev-mode shortcut.
        raise
        if i:
            try:
                simulation_db.delete_simulation(req.type, i)
            except Exception:
                pass
        raise
    raise sirepo.util.Response(sirepo.server.api_simulationData(r.simulationType, i, pretty=False))
def new_simulation(data, new_simulation_data):
    """Apply per-source-type defaults to a freshly created simulation."""
    sim = data.models.simulation
    sim.sourceType = new_simulation_data.sourceType
    if _SIM_DATA.srw_is_gaussian_source(sim):
        data.models.initialIntensityReport.sampleFactor = 0
    elif _SIM_DATA.srw_is_dipole_source(sim):
        data.models.intensityReport.method = "2"
    elif _SIM_DATA.srw_is_arbitrary_source(sim):
        data.models.sourceIntensityReport.method = "2"
    elif _SIM_DATA.srw_is_tabulated_undulator_source(sim):
        # Length comes from the measurement archive; drift must be manual.
        data.models.undulator.length = compute_undulator_length(data.models.tabulatedUndulator).length
        data.models.electronBeamPosition.driftCalculationMethod = 'manual'
def post_execution_processing(
        success_exit=True,
        is_parallel=True,
        run_dir=None,
        **kwargs
):
    """Return an error summary parsed from the SRW log on failure; None on success."""
    return None if success_exit else _parse_srw_log(run_dir)
def prepare_for_client(data):
    """Reconcile the sim's user-defined beam/undulator with the saved user
    model lists before sending to the client; may persist new models."""
    save = False
    for model_name in _USER_MODEL_LIST_FILENAME.keys():
        if model_name == 'tabulatedUndulator' and not _SIM_DATA.srw_is_tabulated_undulator_source(data.models.simulation):
            # don't add a named undulator if tabulated is not the current source type
            continue
        model = data.models[model_name]
        if _SIM_DATA.srw_is_user_defined_model(model):
            user_model_list = _load_user_model_list(model_name)
            search_model = None
            models_by_id = _user_model_map(user_model_list, 'id')
            if 'id' in model and model.id in models_by_id:
                search_model = models_by_id[model.id]
            if search_model:
                # Known model: replace the sim's copy with the saved one.
                data.models[model_name] = search_model
                if model_name == 'tabulatedUndulator':
                    del data.models[model_name]['undulator']
            else:
                pkdc('adding model: {}', model.name)
                # New model: give it a unique name/id and persist it.
                if model.name in _user_model_map(user_model_list, 'name'):
                    model.name = _unique_name(user_model_list, 'name', model.name + ' {}')
                selectorName = 'beamSelector' if model_name == 'electronBeam' else 'undulatorSelector'
                model[selectorName] = model.name
                model.id = _unique_name(user_model_list, 'id', data.models.simulation.simulationId + ' {}')
                user_model_list.append(_create_user_model(data, model_name))
                _save_user_model_list(model_name, user_model_list)
                save = True
    if save:
        pkdc("save simulation json with sim_data_template_fixup={}", data.get('sim_data_template_fixup', None))
        simulation_db.save_simulation_json(data, fixup=True)
    return data
def prepare_for_save(data):
    """Persist user-defined beam/undulator models into the per-user model
    lists before *data* is saved.

    A model with an unknown id is appended to its list; a changed model with
    a known id replaces the stored entry.
    """
    for model_name in _USER_MODEL_LIST_FILENAME.keys():
        if model_name == 'tabulatedUndulator' and not _SIM_DATA.srw_is_tabulated_undulator_source(data.models.simulation):
            # don't add a named undulator if tabulated is not the current source type
            continue
        model = data.models[model_name]
        if _SIM_DATA.srw_is_user_defined_model(model):
            user_model_list = _load_user_model_list(model_name)
            models_by_id = _user_model_map(user_model_list, 'id')
            if model.id not in models_by_id:
                pkdc('adding new model: {}', model.name)
                user_model_list.append(_create_user_model(data, model_name))
                _save_user_model_list(model_name, user_model_list)
            elif models_by_id[model.id] != model:
                pkdc('replacing beam: {}: {}', model.id, model.name)
                for i,m in enumerate(user_model_list):
                    if m.id == model.id:
                        pkdc('found replace beam, id: {}, i: {}', m.id, i)
                        user_model_list[i] = _create_user_model(data, model_name)
                        _save_user_model_list(model_name, user_model_list)
                        break
    return data
def prepare_sequential_output_file(run_dir, sim_in):
    """Regenerate the sequential-report result file from existing SRW output.

    Removes the stale result json and, if the report's output file is still
    present in run_dir, re-extracts the plot data and rewrites the result.
    brillianceReport/mirrorReport are skipped.
    """
    m = sim_in.report
    if m in ('brillianceReport', 'mirrorReport'):
        return
    #TODO(pjm): only need to rerun extract_report_data() if report style fields have changed
    fn = simulation_db.json_filename(template_common.OUTPUT_BASE_NAME, run_dir)
    if fn.exists():
        fn.remove()
        output_file = run_dir.join(get_filename_for_model(m))
        if output_file.exists():
            res = extract_report_data(sim_in)
            template_common.write_sequential_result(res, run_dir=run_dir)
def process_undulator_definition(model):
    """Convert K -> B and B -> K.

    model.undulator_definition selects the direction: 'B' computes the
    deflection parameter K from the field amplitude; 'K' computes the
    amplitude from K.  On any srwlib failure the model is returned unchanged
    (deliberate best-effort behavior).
    """
    try:
        if model.undulator_definition == 'B':
            # Convert B -> K:
            und = srwlib.SRWLMagFldU([srwlib.SRWLMagFldH(1, 'v', float(model.amplitude), 0, 1)], float(model.undulator_period))
            model.undulator_parameter = _SIM_DATA.srw_format_float(und.get_K())
        elif model.undulator_definition == 'K':
            # Convert K to B:
            und = srwlib.SRWLMagFldU([], float(model.undulator_period))
            model.amplitude = _SIM_DATA.srw_format_float(
                und.K_2_B(float(model.undulator_parameter)),
            )
        return model
    except Exception:
        # leave the model unmodified if the conversion fails
        return model
def python_source_for_model(data, model, plot_reports=True):
    """Render the python source for *model* (or the run-all model if None)."""
    report = model or _SIM_DATA.SRW_RUN_ALL_MODEL
    # map beamlineAnimation reports to their non-animation equivalents
    for pattern, replacement in (
        ('beamlineAnimation0', 'initialIntensityReport'),
        ('beamlineAnimation', 'watchpointReport'),
    ):
        report = re.sub(pattern, replacement, report)
    data.report = report
    return _generate_parameters_file(data, plot_reports=plot_reports)
def run_epilogue():
    """Clean up after a simulation run.

    For coherentModesAnimation, removes the very large intermediate *_mi.h5
    files.  The cleanup runs on the first MPI rank only.
    """
    # POSIT: only called from template.run_epilogue
    def _op():
        from pykern import pkio
        from sirepo import simulation_db
        from sirepo.template import template_common
        sim_in = simulation_db.read_json(template_common.INPUT_BASE_NAME)
        if sim_in.report == 'coherentModesAnimation':
            # this sim creates _really_ large intermediate files which should get removed
            for p in pkio.sorted_glob('*_mi.h5'):
                p.remove()
    import sirepo.mpi
    sirepo.mpi.restrict_op_to_first_rank(_op)
def stateful_compute_compute_undulator_length(data):
    """Compute the length of the tabulated undulator described in *data*."""
    undulator = data['tabulated_undulator']
    return compute_undulator_length(undulator)
def stateful_compute_create_shadow_simulation(data):
    """Convert this SRW simulation into a Shadow simulation."""
    from sirepo.template.srw_shadow_converter import SRWShadowConverter
    converter = SRWShadowConverter()
    return converter.srw_to_shadow(data)
def stateful_compute_delete_user_models(data):
    """Remove the given beam and undulator from the user's saved model lists."""
    return _delete_user_models(
        data['electron_beam'],
        data['tabulated_undulator'],
    )
def stateful_compute_model_list(data):
    """Return the known models for data.model_name.

    For electronBeam the predefined beams come first, followed by the
    user-defined list, and every beam has its parameters processed.
    """
    model_name = data['model_name']
    res = []
    if model_name == 'electronBeam':
        res += get_predefined_beams()
    res += _load_user_model_list(model_name)
    if model_name == 'electronBeam':
        for beam in res:
            srw_common.process_beam_parameters(beam)
    return PKDict(modelList=res)
def stateless_compute_compute_PGM_value(data):
    """Compute plane grating monochromator values for the optical element."""
    element = data.optical_element
    return _compute_PGM_value(element)
def stateless_compute_compute_crl_characteristics(data):
    """Compute material characteristics for a CRL, then its focal values."""
    element = _compute_material_characteristics(
        data.optical_element,
        data.photon_energy,
    )
    return compute_crl_focus(element)
def stateless_compute_compute_crystal_init(data):
    """Initialize crystal material parameters for the optical element."""
    element = data.optical_element
    return _compute_crystal_init(element)
def stateless_compute_compute_crystal_orientation(data):
    """Compute the crystal orientation vectors for the optical element."""
    element = data.optical_element
    return _compute_crystal_orientation(element)
def stateless_compute_compute_delta_atten_characteristics(data):
    """Compute refractive index and attenuation length for the element's material."""
    return _compute_material_characteristics(
        data.optical_element,
        data.photon_energy,
    )
def stateless_compute_compute_dual_characteristics(data):
    """Compute material characteristics for both materials of a dual-material element.

    The prefix1 material is processed first, then prefix2 on the same element.
    """
    element = _compute_material_characteristics(
        data.optical_element,
        data.photon_energy,
        prefix=data.prefix1,
    )
    return _compute_material_characteristics(
        element,
        data.photon_energy,
        prefix=data.prefix2,
    )
def stateless_compute_compute_grazing_orientation(data):
    """Fill in orientation vectors for the optical element from its grazing angle."""
    element = data.optical_element
    return _compute_grazing_orientation(element)
def stateless_compute_process_beam_parameters(data):
    """Process the electron beam parameters and recompute the beam drift."""
    beam = srw_common.process_beam_parameters(data.ebeam)
    beam.drift = calculate_beam_drift(
        data.ebeam_position,
        data.source_type,
        data.undulator_type,
        data.undulator_length,
        data.undulator_period,
    )
    data.ebeam = beam
    return beam
def stateless_compute_process_undulator_definition(data):
    """API wrapper: convert between undulator K and B definitions."""
    return process_undulator_definition(data)
def validate_file(file_type, path):
    """Ensure the data file contains parseable rows data

    Args:
        file_type (str): 'mirror', 'undulatorTable' or 'sample'
        path (py.path): file to validate
    Returns:
        An error message string, or None if the file is valid.
    """
    import srwl_uti_smp
    if not _SIM_DATA.srw_is_valid_file_type(file_type, path):
        return 'invalid file type: {}'.format(path.ext)
    if file_type == 'mirror':
        # mirror file: tab-separated numeric columns
        try:
            count = 0
            with open(str(path)) as f:
                # iterate the file directly instead of readlines() so the
                # whole file is not read into memory at once
                for line in f:
                    parts = line.split("\t")
                    if len(parts) > 0:
                        float(parts[0])
                    if len(parts) > 1:
                        float(parts[1])
                        # NOTE(review): only rows with a second column count as
                        # data rows; an all-single-column file reports
                        # "no data rows" — confirm this is intended
                        count += 1
            if count == 0:
                return 'no data rows found in file'
        except ValueError as e:
            return 'invalid file format: {}'.format(e)
    elif file_type == 'undulatorTable':
        # undulator magnetic data file
        try:
            _validate_safe_zip(str(path), '.', validate_magnet_data_file)
        except AssertionError as err:
            # bug fix: Python 3 exceptions have no .message attribute
            # (err.message raised AttributeError); use str(err) instead
            return str(err)
    elif file_type == 'sample':
        srwl_uti_smp.SRWLUtiSmp(
            file_path=str(path),
            # srw processes the image so we save to tmp location
            is_save_images=True,
            prefix=path.purebasename,
        )
    if not _SIM_DATA.srw_is_valid_file(file_type, path):
        return 'Column count is incorrect for file type: {}'.format(file_type)
    return None
def validate_magnet_data_file(zf):
    """Validate a zip file containing tabulated magnetic data

    Performs the following checks:

    - Only .txt and .dat files are allowed
    - Zip file must contain one and only one .txt file to use as an index
    - The index file must list the data files with the name in the 4th column
    - Zip file must contain only the index file and the data files it lists

    Args:
        zf (zipfile.ZipFile): the zip file to examine
    Returns:
        True if all conditions are met, False otherwise
        A string for debugging purposes
    """
    import collections

    def index_file_name(zf):
        # Apparently pkio.has_file_extension will return true for any extension if fed a directory path ('some_dir/')
        text_files = [f for f in zf.namelist() if not f.endswith('/') and pkio.has_file_extension(f, 'txt')]
        if len(text_files) != 1:
            return None
        return text_files[0]

    # Check against whitelist
    for f in zf.namelist():
        # allow directories
        if f.endswith('/'):
            continue
        if not template_common.file_extension_ok(f, white_list=['txt', 'dat']):
            return False, 'File {} has forbidden type'.format(f)
    file_name_column = 3
    # Assure unique index exists; compute the name once since it is needed
    # several times below
    index_name = index_file_name(zf)
    if index_name is None:
        return False, 'Zip file has no unique index'
    # Validate correct number of columns (plus other format validations if needed)
    index_file = zf.open(index_name)
    lines = index_file.readlines()
    file_names_in_index = []
    for line in lines:
        cols = line.split()
        if len(cols) <= file_name_column:
            # bug fix: was index_file_name() with no argument, which raised
            # TypeError instead of returning the bad-format error
            return False, 'Index file {} has bad format'.format(index_name)
        file_names_in_index.append(cols[file_name_column].decode())
    # Compare index and zip contents
    # Does not include the index itself, nor any directories
    # also extract the filename since the index does not include path info
    file_names_in_zip = list(map(lambda path: os.path.basename(path), [f for f in zf.namelist() if not f.endswith('/') and f != index_name]))
    files_match = collections.Counter(file_names_in_index) == collections.Counter(file_names_in_zip)
    return files_match, '' if files_match else 'Files in index {} do not match files in zip {}'.format(file_names_in_index, file_names_in_zip)
def write_parameters(data, run_dir, is_parallel):
    """Write the parameters file

    Args:
        data (dict): input
        run_dir (py.path): where to write
        is_parallel (bool): run in background?
    Returns:
        The exec command for parallel (MPI) runs, otherwise None.
    """
    pkio.write_text(
        run_dir.join(template_common.PARAMETERS_PYTHON_FILE),
        _trim(_generate_parameters_file(data, run_dir=run_dir))
    )
    if is_parallel:
        return template_common.get_exec_parameters_cmd(_SIM_DATA.is_run_mpi(data))
    return None
def _beamline_animation_percent_complete(run_dir, res):
    """Fill in outputInfo/frameCount/percentComplete for beamlineAnimation.

    One output entry exists for the initial wavefront (id 0) plus one per
    enabled watchpoint; progress is measured by how many wavefront pickle
    files can currently be loaded.
    """
    res.outputInfo = [
        PKDict(
            modelKey='beamlineAnimation0',
            filename=_wavefront_pickle_filename(0),
            id=0,
        ),
    ]
    dm = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME)).models
    for item in dm.beamline:
        if 'isDisabled' in item and item.isDisabled:
            continue
        if item.type == 'watch':
            res.outputInfo.append(PKDict(
                modelKey=f'beamlineAnimation{item.id}',
                filename=_wavefront_pickle_filename(item.id),
                id=item.id,
            ))
    count = 0
    for info in res.outputInfo:
        try:
            with open(info.filename, 'rb') as f:
                #TODO(pjm): instead look at last byte == pickle.STOP, see template_common.read_last_csv_line()
                wfr = pickle.load(f)
                count += 1
        except Exception as e:
            # a missing or partially written pickle means this frame (and all
            # later ones) is not ready yet
            break
    res.frameCount = count
    res.percentComplete = 100 * count / len(res.outputInfo)
    return res
def _compute_material_characteristics(model, photon_energy, prefix=''):
    """Look up refractive index and attenuation length for the model's material.

    Uses the bnlcrl package.  *prefix* selects prefixed field names
    (e.g. prefix 'core' reads/writes coreMaterial, coreRefractiveIndex,
    coreAttenuationLength) so multi-material elements can share this helper.
    'User-defined' materials are left untouched.
    """
    import bnlcrl.pkcli.simulate
    fields_with_prefix = PKDict(
        material='material',
        refractiveIndex='refractiveIndex',
        attenuationLength='attenuationLength',
    )
    if prefix:
        # e.g. material -> coreMaterial for prefix 'core'
        for k in fields_with_prefix.keys():
            fields_with_prefix[k] = '{}{}{}'.format(
                prefix,
                fields_with_prefix[k][0].upper(),
                fields_with_prefix[k][1:],
            )
    if model[fields_with_prefix.material] == 'User-defined':
        return model
    # Index of refraction:
    kwargs = PKDict(
        energy=photon_energy,
    )
    if model.method == 'server':
        kwargs.precise = True
        kwargs.formula = model[fields_with_prefix.material]
    elif model.method == 'file':
        kwargs.precise = True
        kwargs.data_file = '{}_delta.dat'.format(model[fields_with_prefix.material])
    else:
        kwargs.calc_delta = True
        kwargs.formula = model[fields_with_prefix.material]
    delta = bnlcrl.pkcli.simulate.find_delta(**kwargs)
    model[fields_with_prefix.refractiveIndex] = delta['characteristic_value']
    # Attenuation length:
    kwargs.characteristic = 'atten'
    if model.method == 'file':
        kwargs.precise = True
        kwargs.data_file = '{}_atten.dat'.format(model[fields_with_prefix.material])
    if model.method == 'calculation':
        # The method 'calculation' in bnlcrl library is not supported yet for attenuation length calculation.
        pass
    else:
        atten = bnlcrl.pkcli.simulate.find_delta(**kwargs)
        model[fields_with_prefix.attenuationLength] = atten['characteristic_value']
    return model
def _compute_PGM_value(model):
    """Compute plane grating monochromator values.

    Depending on model.computeParametersFrom, derives grazingAngle from cff
    ('1') or cff from grazingAngle ('2'), sets the orientation from the roll
    angle and updates the grating orientation vectors.  On failure the
    derived field is reset to None.
    """
    # (removed unused local parms_list)
    try:
        mirror = srwlib.SRWLOptMirPl(
            _size_tang=model.tangentialSize,
            _size_sag=model.sagittalSize,
            _nvx=model.nvx,
            _nvy=model.nvy,
            _nvz=model.nvz,
            _tvx=model.tvx,
            _tvy=model.tvy,
            _x=model.horizontalOffset,
            _y=model.verticalOffset,
        )
        # existing data may have photonEnergy as a string
        model.energyAvg = float(model.energyAvg)
        if model.computeParametersFrom == '1':
            # derive the grazing angle (mrad) from cff
            opGr = srwlib.SRWLOptG(
                _mirSub=mirror,
                _m=model.diffractionOrder,
                _grDen=model.grooveDensity0,
                _grDen1=model.grooveDensity1,
                _grDen2=model.grooveDensity2,
                _grDen3=model.grooveDensity3,
                _grDen4=model.grooveDensity4,
                _e_avg=model.energyAvg,
                _cff=model.cff,
                _ang_graz=0,
                _ang_roll=model.rollAngle,
            )
            grAng, defAng = opGr.cff2ang(_en=model.energyAvg, _cff=model.cff)
            model.grazingAngle = grAng * 1000.0
        elif model.computeParametersFrom == '2':
            # derive cff from the grazing angle (model value is in mrad)
            opGr = srwlib.SRWLOptG(
                _mirSub=mirror,
                _m=model.diffractionOrder,
                _grDen=model.grooveDensity0,
                _grDen1=model.grooveDensity1,
                _grDen2=model.grooveDensity2,
                _grDen3=model.grooveDensity3,
                _grDen4=model.grooveDensity4,
                _e_avg=model.energyAvg,
                _cff=1.5, # model['cff'],
                _ang_graz=model.grazingAngle,
                _ang_roll=model.rollAngle,
            )
            cff, defAng = opGr.ang2cff(_en=model.energyAvg, _ang_graz=model.grazingAngle/1000.0)
            model.cff = cff
        # a roll near 0 or pi deflects vertically; otherwise horizontally
        angroll = model.rollAngle
        if abs(angroll) < np.pi/4 or abs(angroll-np.pi) < np.pi/4:
            model.orientation = 'y'
        else:
            model.orientation = 'x'
        _compute_grating_orientation(model)
    except Exception:
        pkdlog('\n{}', traceback.format_exc())
        if model.computeParametersFrom == '1': model.grazingAngle = None
        elif model.computeParametersFrom == '2': model.cff = None
    return model
def _compute_grating_orientation(model):
    """Recompute the grating's orientation vectors from its grazing angle.

    Builds the grating via srwlib and copies the resulting normal/tangential
    vectors and output frame vectors back onto the model.  If grazingAngle is
    missing the model is returned unchanged; on any srwlib failure all
    orientation fields are reset to None.
    """
    if not model.grazingAngle:
        pkdlog("grazingAngle is missing, return old data")
        return model
    parms_list = ['nvx', 'nvy', 'nvz', 'tvx', 'tvy', 'outoptvx', 'outoptvy', 'outoptvz', 'outframevx', 'outframevy']
    try:
        mirror = srwlib.SRWLOptMirPl(
            _size_tang=model.tangentialSize,
            _size_sag=model.sagittalSize,
            _nvx=model.nvx,
            _nvy=model.nvy,
            _nvz=model.nvz,
            _tvx=model.tvx,
            _tvy=model.tvy,
            _x=model.horizontalOffset,
            _y=model.verticalOffset,
        )
        opGr = srwlib.SRWLOptG(
            _mirSub=mirror,
            _m=model.diffractionOrder,
            _grDen=model.grooveDensity0,
            _grDen1=model.grooveDensity1,
            _grDen2=model.grooveDensity2,
            _grDen3=model.grooveDensity3,
            _grDen4=model.grooveDensity4,
            _e_avg=model.energyAvg,
            _cff=model.cff,
            _ang_graz=model.grazingAngle,
            _ang_roll=model.rollAngle,
        )
        pkdc("updating nvz from {} to {} with grazingAngle= {}mrad", model.nvz, opGr.mirSub.nvz, model.grazingAngle)
        model.nvx = opGr.mirSub.nvx
        model.nvy = opGr.mirSub.nvy
        model.nvz = opGr.mirSub.nvz
        model.tvx = opGr.mirSub.tvx
        model.tvy = opGr.mirSub.tvy
        orientDataGr_pp = opGr.get_orient(_e=model.energyAvg)[1]
        tGr_pp = orientDataGr_pp[0]  # tangential vector to grating surface
        nGr_pp = orientDataGr_pp[2]  # normal vector to grating surface
        model.outoptvx = nGr_pp[0]
        model.outoptvy = nGr_pp[1]
        model.outoptvz = nGr_pp[2]
        model.outframevx = tGr_pp[0]
        model.outframevy = tGr_pp[1]
    except Exception:
        pkdlog('\n{}', traceback.format_exc())
        for key in parms_list:
            model[key] = None
    return model
def _compute_crystal_init(model):
    """Look up d-spacing and susceptibility values for the crystal material.

    The material name encodes the data source: names tagged "(X0h)" are
    resolved via the crystal (X0h) service, names tagged "(SRW)" via the
    srwl_uti_cryst tables; anything else yields None values.  On failure all
    computed fields are reset to None.
    """
    import srwl_uti_cryst
    parms_list = ['dSpacing', 'psi0r', 'psi0i', 'psiHr', 'psiHi', 'psiHBr', 'psiHBi']
    try:
        material_raw = model.material  # name contains either "(SRW)" or "(X0h)"
        material = material_raw.split()[0]  # short name for SRW (e.g., Si), long name for X0h (e.g., Silicon)
        h = int(model.h)
        k = int(model.k)
        l = int(model.l)
        millerIndices = [h, k, l]
        energy = model.energy
        # bug fix: the parens must be escaped — '(X0h)' is a regex group that
        # matched any "X0h"/"SRW" substring instead of the literal tag
        if re.search(r'\(X0h\)', material_raw):
            crystal_parameters = crystal.get_crystal_parameters(material, energy, h, k, l)
            dc = crystal_parameters['d']
            xr0 = crystal_parameters['xr0']
            xi0 = crystal_parameters['xi0']
            xrh = crystal_parameters['xrh']
            xih = crystal_parameters['xih']
        elif re.search(r'\(SRW\)', material_raw):
            dc = srwl_uti_cryst.srwl_uti_cryst_pl_sp(millerIndices, material)
            xr0, xi0, xrh, xih = srwl_uti_cryst.srwl_uti_cryst_pol_f(energy, millerIndices, material)
        else:
            dc = xr0 = xi0 = xrh = xih = None
        model.dSpacing = dc
        model.psi0r = xr0
        model.psi0i = xi0
        model.psiHr = xrh
        model.psiHi = xih
        model.psiHBr = xrh
        model.psiHBi = xih
        # +/- pi/2 diffraction angles deflect horizontally
        if model.diffractionAngle == '-1.57079632' or model.diffractionAngle == '1.57079632':
            model.orientation = 'x'
        else:
            model.orientation = 'y'
    except Exception:
        # see https://github.com/ochubar/SRW/blob/master/env/work/srw_python/srwlib.py
        # bug fix: the previous log format embedded the url as a placeholder
        # and material_raw may be unbound here; log model.material instead
        pkdlog('crystal init failed for material: {}', model.material)
        for key in parms_list:
            model[key] = None
    return model
def _compute_crystal_orientation(model):
    """Recompute the crystal's orientation vectors from its susceptibilities.

    Builds the crystal via srwlib and copies the resulting normal/tangential
    and output frame vectors back onto the model, then recomputes the
    grazing angle.  Returns the model unchanged when dSpacing is unset; on
    failure all orientation fields are reset to None.
    """
    if not model.dSpacing:
        return model
    parms_list = ['nvx', 'nvy', 'nvz', 'tvx', 'tvy', 'outoptvx', 'outoptvy', 'outoptvz', 'outframevx', 'outframevy']
    try:
        opCr = srwlib.SRWLOptCryst(
            _d_sp=model.dSpacing,
            _psi0r=model.psi0r,
            _psi0i=model.psi0i,
            _psi_hr=model.psiHr,
            _psi_hi=model.psiHi,
            _psi_hbr=model.psiHBr,
            _psi_hbi=model.psiHBi,
            _tc=model.crystalThickness,
            _uc=float(model.useCase),
            _ang_as=model.asymmetryAngle,
            _e_avg=model.energy,
            _ang_roll=float(model.diffractionAngle),
        )
        model.nvx = opCr.nvx
        model.nvy = opCr.nvy
        model.nvz = opCr.nvz
        model.tvx = opCr.tvx
        model.tvy = opCr.tvy
        orientDataCr_pp = opCr.get_orient(_e=model.energy)[1]
        tCr_pp = orientDataCr_pp[0]  # Tangential Vector to Crystal surface
        nCr_pp = orientDataCr_pp[2]  # Normal Vector to Crystal surface
        model.outoptvx = nCr_pp[0]
        model.outoptvy = nCr_pp[1]
        model.outoptvz = nCr_pp[2]
        model.outframevx = tCr_pp[0]
        model.outframevy = tCr_pp[1]
        _SIM_DATA.srw_compute_crystal_grazing_angle(model)
    except Exception:
        pkdlog('\n{}', traceback.format_exc())
        for key in parms_list:
            model[key] = None
    return model
def _compute_grazing_orientation(model):
def preserve_sign(item, field, new_value):
old_value = item[field] if field in item else 0
was_negative = float(old_value) < 0
item[field] = float(new_value)
if (was_negative and item[field] > 0) or item[field] < 0:
item[field] = - item[field]
grazing_angle = float(model.grazingAngle) / 1000.0
# z is always negative
model.normalVectorZ = - abs(math.sin(grazing_angle))
if model.autocomputeVectors == 'horizontal':
preserve_sign(model, 'normalVectorX', math.cos(grazing_angle))
preserve_sign(model, 'tangentialVectorX', math.sin(grazing_angle))
model.normalVectorY = 0
model.tangentialVectorY = 0
elif model.autocomputeVectors == 'vertical':
preserve_sign(model, 'normalVectorY', math.cos(grazing_angle))
preserve_sign(model, 'tangentialVectorY', math.sin(grazing_angle))
model.normalVectorX = 0
model.tangentialVectorX = 0
return model
def _create_user_model(data, model_name):
model = data.models[model_name]
if model_name == 'tabulatedUndulator':
model = model.copy()
model.undulator = data.models.undulator
return model
def _delete_user_models(electron_beam, tabulated_undulator):
    """Remove the given beam and undulator models from the saved user model lists.

    Models without an id are skipped.  Returns an empty PKDict.
    """
    for model_name in _USER_MODEL_LIST_FILENAME.keys():
        model = electron_beam if model_name == 'electronBeam' else tabulated_undulator
        if not model or 'id' not in model:
            continue
        user_model_list = _load_user_model_list(model_name)
        for i,m in enumerate(user_model_list):
            if m.id == model.id:
                del user_model_list[i]
                _save_user_model_list(model_name, user_model_list)
                break
    return PKDict()
def _enum_text(name, model, field):
if field in model:
return template_common.enum_text(SCHEMA, name, model[field])
return ''
def _extend_plot(ar2d, x_range, y_range, horizontalStart, horizontalEnd, verticalStart, verticalEnd):
    """Zero-pad a 2d intensity array so it covers the requested plot window.

    x_range/y_range are [min, max, point-count] lists and are mutated in
    place; for each side where the requested window extends beyond the data
    range, a block of zeros is stacked onto the array and the range endpoint
    is moved.  Returns (padded array, x_range, y_range).
    """
    x_step = (x_range[1] - x_range[0]) / x_range[2]
    y_step = (y_range[1] - y_range[0]) / y_range[2]
    if horizontalStart < x_range[0]:
        # prepend zero columns for the extended low-x range
        b = np.zeros((np.shape(ar2d)[0], int((x_range[0] - horizontalStart) / x_step)))
        ar2d = np.hstack((b, ar2d))
        x_range[0] = horizontalStart
    if horizontalEnd > x_range[1]:
        # append zero columns for the extended high-x range
        b = np.zeros((np.shape(ar2d)[0], int((horizontalEnd - x_range[1]) / x_step)))
        ar2d = np.hstack((ar2d, b))
        x_range[1] = horizontalEnd
    if verticalStart < y_range[0]:
        # append zero rows for the extended low-y range
        b = np.zeros((int((y_range[0] - verticalStart) / y_step), np.shape(ar2d)[1]))
        ar2d = np.vstack((ar2d, b))
        y_range[0] = verticalStart
    if verticalEnd > y_range[1]:
        # prepend zero rows for the extended high-y range
        b = np.zeros((int((verticalEnd - y_range[1]) / y_step), np.shape(ar2d)[1]))
        ar2d = np.vstack((b, ar2d))
        y_range[1] = verticalEnd
    # point counts now reflect the padded array shape
    y_range[2], x_range[2] = np.shape(ar2d)
    return (ar2d, x_range, y_range)
def _extract_beamline_orientation(filename):
    """Read beamline element positions and orientations from an SRW file.

    Each row is reshaped into 4 3-vectors: a position point followed by
    three orientation vectors.  x components of the point and the second
    orientation vector are negated because the vtk client renders the x axis
    flipped.
    """
    cols = np.array(uti_io.read_ascii_data_cols(filename, '\t', _i_col_start=1, _n_line_skip=1))
    rows = list(reversed(np.rot90(cols).tolist()))
    rows = np.reshape(rows, (len(rows), 4, 3))
    res = []
    for row in rows:
        # the vtk client renders x axis flipped, so update x position and rotation
        p = row[0].tolist()
        p[0] = -p[0]
        orient = row[1:].tolist()
        orient[1][0] = -orient[1][0]
        orient[1][1] = -orient[1][1]
        orient[1][2] = -orient[1][2]
        res.append(PKDict(
            point=p,
            orient=orient,
        ))
    return PKDict(
        x_range=[],
        elements=res,
    )
def _extract_brilliance_report(model, filename):
    """Build the plot data for the brilliance report from an SRW output file.

    Each fN column in the file is paired with the matching eN column; the fN
    values are scaled by 1000 (presumably keV -> eV — confirm) except in
    spectral-detuning mode where they are used as-is.
    """
    data, _, _, _, _ = uti_plot_com.file_load(filename, multicolumn_data=True)
    label = _enum_text('BrillianceReportType', model, 'reportType')
    if model.reportType in ('3', '4'):
        label += ' [rad]'
    elif model.reportType in ('5', '6'):
        label += ' [m]'
    x_points = []
    points = []
    scale_adjustment = 1000.0
    if 'brightnessComponent' in model and model.brightnessComponent == 'spectral-detuning':
        scale_adjustment = 1.0
    for f in data:
        m = re.search(r'^f(\d+)', f)
        if m:
            x_points.append((np.array(data[f]['data']) * scale_adjustment).tolist())
            points.append(data['e{}'.format(m.group(1))]['data'])
    title = _enum_text('BrightnessComponent', model, 'brightnessComponent')
    if model.brightnessComponent == 'k-tuning':
        # show the harmonic range in the title, collapsed when it is a single harmonic
        if model.initialHarmonic == model.finalHarmonic:
            title += ', Harmonic {}'.format(model.initialHarmonic)
        else:
            title += ', Harmonic {} - {}'.format(model.initialHarmonic, model.finalHarmonic)
    else:
        title += ', Harmonic {}'.format(model.harmonic)
    return PKDict(
        title=title,
        y_label=label,
        x_label='Photon Energy [eV]',
        x_range=[np.amin(x_points), np.amax(x_points)],
        y_range=[np.amin(points), np.amax(points)],
        x_points=x_points,
        points=points,
    )
def _extract_trajectory_report(model, filename):
    """Build the electron trajectory plot from an SRW output file.

    The x axis comes from model.plotAxisX; up to two y-series are plotted
    (plotAxisY and optionally plotAxisY2), with the combined y range.
    """
    data, _, _, _, _ = uti_plot_com.file_load(filename, multicolumn_data=True)
    available_axes = PKDict()
    for s in SCHEMA.enum.TrajectoryPlotAxis:
        available_axes[s[0]] = s[1]
    x_points = data[model.plotAxisX]['data']
    plots = []
    y_range = []
    for f in ('plotAxisY', 'plotAxisY2'):
        if model[f] != 'None':
            points = data[model[f]]['data']
            # grow the shared y range over both series
            if y_range:
                y_range = [min(y_range[0], min(points)), max(y_range[1], max(points))]
            else:
                y_range = [min(points), max(points)]
            plots.append(PKDict(
                points=points,
                label=available_axes[model[f]],
                #TODO(pjm): refactor with template_common.compute_plot_color_and_range()
                color='#ff7f0e' if plots else '#1f77b4',
            ))
    return PKDict(
        title='Electron Trajectory',
        x_range=[min(x_points), max(x_points)],
        x_points=x_points,
        y_label='[{}]'.format(data[model.plotAxisY]['units']),
        x_label=available_axes[model.plotAxisX] + ' [' + data[model.plotAxisX]['units'] + ']',
        y_range=y_range,
        plots=plots,
    )
def _fix_file_header(filename):
    # fixes file header for coherenceXAnimation and coherenceYAnimation reports
    # The collapsed axis' header rows (4-6 or 7-9, whichever starts '#1 ')
    # get their first column copied from the varying axis' rows, and rows 7/8
    # are re-centered to +/-(Vmax-Vmin)/2, so downstream 2d plotting works.
    rows = []
    pkdc('fix header filename: {}', filename)
    with pkio.open_text(filename) as f:
        for line in f:
            rows.append(line)
            if len(rows) == 11:
                pkdc('before header changed rows4: {}',rows[4])
                pkdc('before header changed rows5: {}',rows[5])
                pkdc('before header changed rows6: {}',rows[6])
                pkdc('before header changed rows7: {}',rows[7])
                pkdc('before header changed rows8: {}',rows[8])
                pkdc('before header changed rows9: {}',rows[9])
                #if rows[4] == rows[7]:
                # NOTE(review): rows[6].split()[0] is a string, so `!= 1` is
                # always true — presumably `!= '1'` was intended; confirm
                if rows[6].split()[0] == rows[9].split()[0] and rows[6].split()[0] != 1:
                    # already fixed up
                    return
                col4 = rows[4].split()
                col5 = rows[5].split()
                col6 = rows[6].split()
                col7 = rows[7].split()
                col8 = rows[8].split()
                col9 = rows[9].split()
                #if re.search(r'^\#0 ', rows[4]):
                if re.search(r'^\#1 ', rows[6]):
                    # rows 4-6 describe the collapsed axis: copy from rows 7-9
                    col4[0] = col7[0]
                    rows[4] = ' '.join(col4)+'\n'
                    col5[0] = col8[0]
                    rows[5] = ' '.join(col5)+'\n'
                    col6[0] = col9[0]
                    rows[6] = ' '.join(col6)+'\n'
                else:
                    # rows 7-9 describe the collapsed axis: copy from rows 4-6
                    col7[0] = col4[0]
                    rows[7] = ' '.join(col7)+'\n'
                    col8[0] = col5[0]
                    rows[8] = ' '.join(col8)+'\n'
                    col9[0] = col6[0]
                    rows[9] = ' '.join(col9)+'\n'
                # re-center the vertical range symmetrically around zero
                Vmin = float(rows[7].split()[0][1:])
                Vmax = float(rows[8].split()[0][1:])
                rows[7] = '#'+str((Vmin-Vmax)/2)+' '+' '.join(rows[7].split()[1:])+'\n'
                rows[8] = '#'+str((Vmax-Vmin)/2)+' '+' '.join(rows[8].split()[1:])+'\n'
                pkdc('after header changed rows4:{}',rows[4])
                pkdc('after header changed rows5:{}',rows[5])
                pkdc('after header changed rows6:{}',rows[6])
                pkdc('after header changed rows7:{}',rows[7])
                pkdc('after header changed rows8:{}',rows[8])
                pkdc('after header changed rows9:{}',rows[9])
    pkio.write_text(filename, ''.join(rows))
def _flux_label(model):
if 'fluxType' not in model:
return ''
return 'Flux' if int(model.fluxType) == 1 else 'Intensity'
def _flux_units(model):
if 'fluxType' not in model:
return ''
return 'ph/s/.1%bw' if int(model.fluxType) == 1 else 'ph/s/.1%bw/mm^2'
def _generate_beamline_optics(report, data):
    """Render the beamline optics and propagation-parameter python sources.

    Returns (optics_source, propagation_source, info) where info carries the
    generated element names, a watchpoint name->id map and the id of the
    last element to include for this report.  Implicit drifts are inserted
    between elements at different positions; generation stops after the
    report's watchpoint element.
    """
    res = PKDict(
        names=[],
        last_id=None,
        watches=PKDict()
    )
    models = data.models
    if len(models.beamline) == 0 \
       or not (_SIM_DATA.srw_is_beamline_report(report) or report == 'beamlineAnimation'):
        return '', '', res
    # choose the last element to include: the report's watchpoint,
    # the multiElectronAnimation watchpoint, or the final beamline element
    if _SIM_DATA.is_watchpoint(report):
        res.last_id = _SIM_DATA.watchpoint_id(report)
    if report == 'multiElectronAnimation':
        res.last_id = models.multiElectronAnimation.watchpointId
    has_beamline_elements = len(models.beamline) > 0
    if has_beamline_elements and not res.last_id:
        res.last_id = models.beamline[-1].id
    items = []
    prev = None
    propagation = models.propagation
    max_name_size = 0
    for item in models.beamline:
        is_disabled = 'isDisabled' in item and item.isDisabled
        name = _safe_beamline_item_name(item.title, res.names)
        max_name_size = max(max_name_size, len(name))
        if prev:
            size = item.position - prev.position
            if size != 0:
                # add a drift
                drift_name = _safe_beamline_item_name('{}_{}'.format(prev.name, name), res.names)
                max_name_size = max(max_name_size, len(drift_name))
                res.names.append(drift_name)
                items.append(PKDict(
                    name=drift_name,
                    type='drift',
                    position=prev.position,
                    propagation=prev.drift_propagation,
                    length=size,
                ))
        # propagation holds [element propagation, following-drift propagation]
        pp = propagation[str(item.id)]
        item.propagation = pp[0]
        item.drift_propagation = pp[1]
        item.name = name
        if not is_disabled:
            if item.type == 'watch' and not items:
                # first item is a watch, insert a 0 length drift in front
                items.append(PKDict(
                    name='zero_drift',
                    type='drift',
                    position=item.position,
                    propagation=item.propagation,
                    length=0,
                ))
                res.names.append(items[-1].name)
            if 'heightProfileFile' in item:
                item.heightProfileDimension = _height_profile_dimension(item, data)
            items.append(item)
            res.names.append(name)
            if item.type == 'watch':
                res.watches[name] = item.id
        if int(res.last_id) == int(item.id):
            break
        prev = item
    args = PKDict(
        report=report,
        items=items,
        names=res.names,
        postPropagation=models.postPropagation,
        maxNameSize=max_name_size,
        # nameMap translates schema field names to the srw parameter names
        # used by the jinja templates
        nameMap=PKDict(
            apertureShape='ap_shape',
            asymmetryAngle='ang_as',
            attenuationLength='atten_len',
            complementaryAttenuationLength='atLen2',
            complementaryRefractiveIndex='delta2',
            coreAttenuationLength='atten_len_core',
            coreDiameter='diam_core',
            coreRefractiveIndex='delta_core',
            crystalThickness='tc',
            dSpacing='d_sp',
            diffractionOrder='m',
            externalAttenuationLength='atten_len_ext',
            externalRefractiveIndex='delta_ext',
            energyAvg='e_avg',
            firstFocusLength='p',
            focalLength='q',
            focalPlane='foc_plane',
            grazingAngle='ang',
            gridShape='grid_sh',
            grooveDensity0='grDen',
            grooveDensity1='grDen1',
            grooveDensity2='grDen2',
            grooveDensity3='grDen3',
            grooveDensity4='grDen4',
            heightAmplification='amp_coef',
            heightProfileFile='hfn',
            horizontalApertureSize='apert_h',
            horizontalCenterCoordinate='xc',
            horizontalCenterPosition='xc',
            horizontalFocalLength='Fx',
            horizontalGridDimension='grid_dx',
            horizontalGridPitch='pitch_x',
            horizontalGridsNumber='grid_nx',
            horizontalMaskCoordinate='mask_x0',
            horizontalOffset='x',
            horizontalPixelsNumber='mask_Nx',
            horizontalSamplingInterval='hx',
            horizontalSize='Dx',
            horizontalTransverseSize='size_x',
            imageFile='file_path',
            length='L',
            mainAttenuationLength='atLen1',
            mainRefractiveIndex='delta1',
            maskThickness='thick',
            normalVectorX='nvx',
            normalVectorY='nvy',
            normalVectorZ='nvz',
            numberOfLenses='n',
            numberOfZones='nZones',
            orientation='dim',
            outerRadius='rn',
            radius='r',
            refractiveIndex='delta',
            sagittalRadius='rs',
            sagittalSize='size_sag',
            tangentialRadius='rt',
            tangentialSize='size_tang',
            tangentialVectorX='tvx',
            tangentialVectorY='tvy',
            thickness='thick',
            tipRadius='r_min',
            tipWallThickness='wall_thick',
            transmissionImage='extTransm',
            useCase='uc',
            verticalApertureSize='apert_v',
            verticalCenterCoordinate='yc',
            verticalCenterPosition='yc',
            verticalFocalLength='Fy',
            verticalGridDimension='grid_dy',
            verticalGridPitch='pitch_y',
            verticalGridsNumber='grid_ny',
            verticalMaskCoordinate='mask_y0',
            verticalOffset='y',
            verticalPixelsNumber='mask_Ny',
            verticalSamplingInterval='hy',
            verticalSize='Dy',
            verticalTransverseSize='size_y',
        ),
    )
    optics = template_common.render_jinja(SIM_TYPE, args, 'beamline_optics.py')
    prop = template_common.render_jinja(SIM_TYPE, args, 'beamline_parameters.py')
    return optics, prop, res
def _generate_parameters_file(data, plot_reports=False, run_dir=None):
    """Render the python parameters source for data.report.

    mirrorReport, brillianceReport and backgroundImport have dedicated jinja
    templates; everything else goes through the common template with the
    report-specific variables set by _set_parameters().
    """
    report = data.report
    is_for_rsopt = _is_for_rsopt(report)
    dm = data.models
    # do this before validation or arrays get turned into strings
    if is_for_rsopt:
        rsopt_ctx = _rsopt_jinja_context(dm.exportRsOpt)
    _validate_data(data, SCHEMA)
    _update_model_fields(dm)
    _update_models_for_report(report, dm)
    res, v = template_common.generate_parameters_file(
        data,
        is_run_mpi=_SIM_DATA.is_run_mpi(data),
    )
    v.rs_type = dm.simulation.sourceType
    # a 'u_i' tabulated undulator is generated as a plain undulator source
    if v.rs_type == 't' and dm.tabulatedUndulator.undulatorType == 'u_i':
        v.rs_type = 'u'
    if is_for_rsopt:
        v.update(rsopt_ctx)
        # rsopt uses this as a lookup param so want it in one place
        v.ws_fni_desc = 'file name for saving propagated single-e intensity distribution vs horizontal and vertical position'
    if report == 'mirrorReport':
        v.mirrorOutputFilename = _OUTPUT_FOR_MODEL[report].filename
        return template_common.render_jinja(SIM_TYPE, v, 'mirror.py')
    if report == 'brillianceReport':
        v.brillianceOutputFilename = _OUTPUT_FOR_MODEL[report].filename
        return template_common.render_jinja(SIM_TYPE, v, 'brilliance.py')
    if report == 'backgroundImport':
        v.tmp_dir = str(run_dir)
        v.python_file = run_dir.join('user_python.py')
        pkio.write_text(v.python_file, dm.backgroundImport.python)
        return template_common.render_jinja(SIM_TYPE, v, 'import.py')
    _set_parameters(v, data, plot_reports, run_dir)
    v.in_server = run_dir is not None
    return _trim(res + template_common.render_jinja(SIM_TYPE, v))
def _generate_srw_main(data, plot_reports, beamline_info):
    """Generate the python main() body for the SRW parameters file.

    Emits srwl_bl option parsing, the v.* flags enabling the calculations
    needed for data.report (or all of them for run-all/rsopt export), and
    the final calc_all() call.  beamlineAnimation runs calc_all once per
    watchpoint, feeding each run the previous watch's saved wavefront.
    """
    report = data.report
    is_for_rsopt = _is_for_rsopt(report)
    source_type = data.models.simulation.sourceType
    run_all = report == _SIM_DATA.SRW_RUN_ALL_MODEL or is_for_rsopt
    vp_var = 'vp' if is_for_rsopt else 'varParam'
    content = [
        f'v = srwl_bl.srwl_uti_parse_options(srwl_bl.srwl_uti_ext_options({vp_var}), use_sys_argv={plot_reports})',
    ]
    if (plot_reports or is_for_rsopt) and _SIM_DATA.srw_uses_tabulated_zipfile(data):
        content.append('setup_magnetic_measurement_files("{}", v)'.format(data.models.tabulatedUndulator.magneticFile))
    if report == 'beamlineAnimation':
        content.append("v.si_fn = ''")
        content.append("v.ws_fni = ''")
        if len(beamline_info.watches):
            content.append('v.ws = True')
        else:
            content.append('v.si = True')
            content.append('op = None')
        content.append("v.ws_fne = '{}'".format(_wavefront_pickle_filename(0)))
        prev_wavefront = None
        names = []
        for n in beamline_info.names:
            names.append(n)
            if n in beamline_info.watches:
                is_last_watch = n == beamline_info.names[-1]
                content.append("names = ['" + "','".join(names) + "']")
                names = []
                if prev_wavefront:
                    content.append("v.ws_fnei = '{}'".format(prev_wavefront))
                prev_wavefront = _wavefront_pickle_filename(beamline_info.watches[n])
                content.append("v.ws_fnep = '{}'".format(prev_wavefront))
                content.append('op = set_optics(v, names, {})'.format(is_last_watch))
                if not is_last_watch:
                    content.append('srwl_bl.SRWLBeamline(_name=v.name).calc_all(v, op)')
    elif run_all or (_SIM_DATA.srw_is_beamline_report(report) and len(data.models.beamline)):
        content.append('names = [{}]'.format(
            ','.join(["'{}'".format(name) for name in beamline_info.names]),
        ))
        content.append('op = set_optics(v, names, {})'.format(
            beamline_info.last_id and int(beamline_info.last_id) == int(data.models.beamline[-1].id)))
        content.append('v.ws = True')
        if plot_reports:
            content.append("v.ws_pl = 'xy'")
    else:
        content.append('op = None')
    if is_for_rsopt:
        content.extend([
            'v.ss = False',
            'v.sm = False',
            'v.pw = False',
            'v.si = False',
            'v.tr = False'
        ])
    else:
        if (run_all and source_type != 'g') or report == 'intensityReport':
            content.append('v.ss = True')
            if plot_reports:
                content.append("v.ss_pl = 'e'")
        # bug fix: was `report in 'fluxReport'`, a substring test which also
        # matched any substring of the name (e.g. 'flux')
        if (run_all and source_type not in ('g', 'm')) or report == 'fluxReport':
            content.append('v.sm = True')
            if plot_reports:
                content.append("v.sm_pl = 'e'")
        if (run_all and source_type != 'g') or report == 'powerDensityReport':
            content.append('v.pw = True')
            if plot_reports:
                content.append("v.pw_pl = 'xy'")
        if run_all or report in ['initialIntensityReport', 'sourceIntensityReport']:
            content.append('v.si = True')
            if plot_reports:
                content.append("v.si_pl = 'xy'")
        if (run_all and source_type != 'g') or report == 'trajectoryReport':
            content.append('v.tr = True')
            if plot_reports:
                content.append("v.tr_pl = 'xz'")
        content.append('srwl_bl.SRWLBeamline(_name=v.name).calc_all(v, op)')
    return '\n'.join([f'    {x}' for x in content] + [''] + ([] if is_for_rsopt \
        else ['main()', '']))
def _get_first_element_position(report, data):
    """Return the longitudinal position to use for the first beamline element.

    Preference order: the report's own distanceFromSource, the first
    beamline element's position, the simulation-wide distanceFromSource,
    and finally the template default.
    """
    models = data.models
    if report in models and 'distanceFromSource' in models[report]:
        return models[report].distanceFromSource
    if models.beamline:
        return models.beamline[0].position
    if 'distanceFromSource' in models.simulation:
        return models.simulation.distanceFromSource
    return template_common.DEFAULT_INTENSITY_DISTANCE
def _height_profile_dimension(item, data):
    """Return the dimensionality of *item*'s height profile .dat file.

    A header line with 2 columns means a 1D profile; any other column
    count is treated as 2D (8 columns). Returns 0 when no file is set.
    """
    fn = item.heightProfileFile
    if not fn or fn == 'None':
        return 0
    with _SIM_DATA.lib_file_abspath(fn, data=data).open('r') as f:
        cols = f.readline().strip().split()
    return 1 if len(cols) == 2 else 2
def _intensity_units(sim_in):
    """Return the intensity units label for a simulation.

    Gaussian-beam sources carry a configurable fieldUnits enum value
    (per-report when the report defines one, otherwise the
    simulation-wide value); all other sources use the fixed SRW
    photon flux density units string.
    """
    if 'models' not in sim_in or not _SIM_DATA.srw_is_gaussian_source(sim_in.models.simulation):
        return 'ph/s/.1%bw/mm^2'
    if 'report' in sim_in and sim_in.report in ('intensityReport', 'sourceIntensityReport'):
        units = sim_in.models[sim_in.report].fieldUnits
    else:
        units = sim_in.models.simulation.fieldUnits
    return SCHEMA.enum.FieldUnits[int(units)][1]
def _is_for_rsopt(report):
return report == 'rsoptExport'
def _load_user_model_list(model_name):
    """Load the persisted user-defined model list for *model_name*.

    If the JSON file is missing or unreadable, it is reset to an empty
    list and re-read.
    """
    f = _SIM_DATA.lib_file_write_path(_USER_MODEL_LIST_FILENAME[model_name])
    try:
        if f.exists():
            return simulation_db.read_json(f)
    except Exception:
        pkdlog('user list read failed, resetting contents: {}', f)
    # Missing or corrupt: write an empty list, then re-read it.
    # NOTE(review): recurses indefinitely if the write keeps failing — confirm
    # that an unwritable lib dir is treated as fatal elsewhere.
    _save_user_model_list(model_name, [])
    return _load_user_model_list(model_name)
def _parse_srw_log(run_dir):
    """Extract 'Error: ...' messages from the SRW run log.

    Returns '' when no log file exists, the newline-terminated error
    messages when any are found, and a generic message when the log
    exists but contains no recognizable errors.
    """
    log = run_dir.join(template_common.RUN_LOG)
    if not log.exists():
        return ''
    errors = []
    with pkio.open_text(log) as f:
        for line in f:
            match = re.search(r'Error: (.*)', line)
            if match:
                errors.append(match.group(1))
    if errors:
        return '\n'.join(errors) + '\n'
    return 'An unknown error occurred'
def _process_image(data, tmp_dir):
    """Process a sample image via SRW's sample utilities and return the result.

    Args:
        data (dict): description of simulation; uses data.baseImage and
            data.model (the sample's settings)
        tmp_dir: directory in which the processed image is written
    Returns:
        py.path.local: the processed image file
    """
    # This should just be a basename, but this ensures it.
    import srwl_uti_smp
    path = str(_SIM_DATA.lib_file_abspath(sirepo.util.secure_filename(data.baseImage)))
    m = data.model
    with pkio.save_chdir(tmp_dir):
        if m.sampleSource == 'file':
            # Transform the user-supplied image (crop, rotate, background
            # removal, inversion, tiling, shift) with SRW's sample helper,
            # which saves the processed image into tmp_dir.
            s = srwl_uti_smp.SRWLUtiSmp(
                file_path=path,
                area=None if not int(m.cropArea) else (m.areaXStart, m.areaXEnd, m.areaYStart, m.areaYEnd),
                rotate_angle=float(m.rotateAngle),
                rotate_reshape=int(m.rotateReshape),
                cutoff_background_noise=float(m.cutoffBackgroundNoise),
                background_color=int(m.backgroundColor),
                invert=int(m.invert),
                tile=None if not int(m.tileImage) else (m.tileRows, m.tileColumns),
                shift_x=m.shiftX,
                shift_y=m.shiftY,
                is_save_images=True,
                prefix=str(tmp_dir),
                output_image_format=m.outputImageFormat,
            )
            return pkio.py_path(s.processed_image_name)
        assert m.sampleSource == 'randomDisk'
        # Generate a synthetic image of randomly placed 2D objects; the
        # optical constants are zeroed because only the image is wanted
        # (_ret='img'), not a transmission object.
        s = srwl_uti_smp.srwl_opt_setup_smp_rnd_obj2d(
            _thickness=0,
            _delta=0,
            _atten_len=0,
            _dens=m.dens,
            _rx=m.rx,
            _ry=m.ry,
            _obj_type=int(m.obj_type),
            _r_min_bw_obj=m.r_min_bw_obj,
            _obj_size_min=m.obj_size_min,
            _obj_size_max=m.obj_size_max,
            _size_dist=int(m.size_dist),
            _ang_min=m.ang_min,
            _ang_max=m.ang_max,
            _ang_dist=int(m.ang_dist),
            _rand_alg=int(m.rand_alg),
            _obj_par1=m.obj_size_ratio if m.obj_type in ('1', '2', '3') \
                else m.poly_sides if m.obj_type == '4' \
                else m.rand_shapes,
            _obj_par2=m.rand_obj_size == '1' if m.obj_type in ('1', '2', '3') \
                else m.rand_poly_side == '1' if m.obj_type == '4' \
                else None,
            _ret='img',
        )
        filename = 'sample_processed.{}'.format(m.outputImageFormat)
        s.save(filename)
        return pkio.py_path(filename)
def _process_rsopt_elements(els):
    """Filter rsopt elements to the enabled ones and parse their offsets.

    For every enabled element, each known rsopt parameter present on the
    element gets its comma-delimited '<param>Offsets' string parsed into
    a list of floats (stored in place on the element).
    """
    enabled = []
    for el in els:
        if el.enabled and el.enabled != '0':
            enabled.append(el)
    for el in enabled:
        for param in _RSOPT_PARAMS:
            if param in el:
                el[param].offsets = sirepo.util.split_comma_delimited_string(
                    el['{}Offsets'.format(param)], float)
    return enabled
def _remap_3d(info, allrange, out, report):
    """Reshape a flat SRW intensity array into a 2D heatmap result dict.

    Applies, in order: optional plot-range cropping, optional intensity
    clamping, downsampling to the plot width, and optional rotation.
    Returns a PKDict suitable for the heatmap plot response.
    """
    # allrange[3:6] / allrange[6:9] are (start, stop, count) for x / y.
    x_range = [allrange[3], allrange[4], allrange[5]]
    y_range = [allrange[6], allrange[7], allrange[8]]
    ar2d = info.points
    totLen = int(x_range[2] * y_range[2])
    # Guard against a points array shorter than the declared grid size.
    n = len(ar2d) if totLen > len(ar2d) else totLen
    ar2d = np.reshape(ar2d[0:n], (int(y_range[2]), int(x_range[2])))
    if report.get('usePlotRange', '0') == '1':
        ar2d, x_range, y_range = _update_report_range(report, ar2d, x_range, y_range)
    if report.get('useIntensityLimits', '0') == '1':
        # Clamp (not mask) values outside the requested intensity window.
        ar2d[ar2d < report.minIntensityLimit] = report.minIntensityLimit
        ar2d[ar2d > report.maxIntensityLimit] = report.maxIntensityLimit
    ar2d, x_range, y_range = _resize_report(report, ar2d, x_range, y_range)
    if report.get('rotateAngle', 0):
        ar2d, x_range, y_range = _rotate_report(report, ar2d, x_range, y_range, info)
    if out.units[2]:
        out.labels[2] = u'{} [{}]'.format(out.labels[2], out.units[2])
    if report.get('useIntensityLimits', '0') == '1':
        z_range = [report.minIntensityLimit, report.maxIntensityLimit]
    else:
        z_range = [np.min(ar2d), np.max(ar2d)]
    return PKDict(
        x_range=x_range,
        y_range=y_range,
        x_label=info.x_label,
        y_label=info.y_label,
        z_label=_superscript(out.labels[2]),
        title=info.title,
        subtitle=_superscript_2(info.subtitle),
        z_matrix=ar2d.tolist(),
        z_range=z_range,
        summaryData=info.summaryData,
    )
def _resize_report(report, ar2d, x_range, y_range):
    """Downsample *ar2d* so neither dimension exceeds the plot width limit.

    The limit is the report's intensityPlotsWidth (or the browser canvas
    max), further reduced so the JSON-encoded response stays under the
    job system's message size cap. Returns the (possibly) resized array
    and the ranges with updated point counts.
    """
    width_pixels = int(report.intensityPlotsWidth)
    if not width_pixels:
        # upper limit is browser's max html canvas size
        width_pixels = _CANVAS_MAX_SIZE
    job.init()
    # roughly 20x size increase for json
    if ar2d.size * _JSON_MESSAGE_EXPANSION > job.cfg.max_message_bytes:
        max_width = int(math.sqrt(job.cfg.max_message_bytes / _JSON_MESSAGE_EXPANSION))
        if max_width < width_pixels:
            pkdc(
                'auto scaling dimensions to fit message size. size: {}, max_width: {}',
                ar2d.size,
                max_width,
            )
            width_pixels = max_width
    # rescale width and height to maximum of width_pixels
    if width_pixels and (width_pixels < x_range[2] or width_pixels < y_range[2]):
        from scipy import ndimage
        x_resize = 1.0
        y_resize = 1.0
        if width_pixels < x_range[2]:
            x_resize = float(width_pixels) / float(x_range[2])
        if width_pixels < y_range[2]:
            y_resize = float(width_pixels) / float(y_range[2])
        pkdc('Size before: {} Dimensions: {}, Resize: [{}, {}]', ar2d.size, ar2d.shape, y_resize, x_resize)
        # Bilinear interpolation (order=1) is enough for display purposes.
        ar2d = ndimage.zoom(ar2d, [y_resize, x_resize], order=1)
        pkdc('Size after : {} Dimensions: {}', ar2d.size, ar2d.shape)
        x_range[2] = ar2d.shape[1]
        y_range[2] = ar2d.shape[0]
    return ar2d, x_range, y_range
def _rotate_report(report, ar2d, x_range, y_range, info):
    """Rotate the heatmap by the report's rotateAngle (degrees).

    When reshape is enabled the array grows to hold the rotated image,
    and the x/y extents are scaled by the same ratio so axis labels stay
    proportional. The subtitle is annotated with the rotation (except
    for Power Density reports).
    """
    from scipy import ndimage
    rotate_angle = report.rotateAngle
    rotate_reshape = report.get('rotateReshape', '0') == '1'
    pkdc('Size before: {} Dimensions: {}', ar2d.size, ar2d.shape)
    shape_before = list(ar2d.shape)
    ar2d = ndimage.rotate(ar2d, float(rotate_angle), reshape = rotate_reshape, mode='constant', order = 3)
    pkdc('Size after rotate: {} Dimensions: {}', ar2d.size, ar2d.shape)
    shape_rotate = list(ar2d.shape)
    pkdc('x_range and y_range before rotate is [{},{}] and [{},{}]', x_range[0], x_range[1], y_range[0], y_range[1])
    # Scale the axis extents by the shape growth ratio from the rotation.
    x_range[0] = shape_rotate[0]/shape_before[0]*x_range[0]
    x_range[1] = shape_rotate[0]/shape_before[0]*x_range[1]
    y_range[0] = shape_rotate[1]/shape_before[1]*y_range[0]
    y_range[1] = shape_rotate[1]/shape_before[1]*y_range[1]
    pkdc('x_range and y_range after rotate is [{},{}] and [{},{}]', x_range[0], x_range[1], y_range[0], y_range[1])
    x_range[2] = ar2d.shape[1]
    y_range[2] = ar2d.shape[0]
    if info.title != 'Power Density':
        # '^0' is later converted to a degree sign by _superscript_2.
        info.subtitle = info.subtitle + ' Image Rotate {}^0'.format(rotate_angle)
    return ar2d, x_range, y_range
def _rsopt_jinja_context(model):
    """Build the jinja context for generating an rsopt scan script."""
    import multiprocessing
    e = _process_rsopt_elements(model.elements)
    return PKDict(
        forRSOpt=True,
        numCores=int(model.numCores),
        # Leave one core free for the controlling process.
        numWorkers=max(1, multiprocessing.cpu_count() - 1),
        numSamples=int(model.numSamples),
        rsOptElements=e,
        rsOptParams=_RSOPT_PARAMS,
        rsOptParamsNoRot=_RSOPT_PARAMS_NO_ROTATION,
        rsOptOutFileName='scan_results',
        scanType=model.scanType,
        totalSamples=model.totalSamples,
    )
def _rsopt_main():
return [
'import sys',
'if len(sys.argv[1:]) > 0:',
' set_rsopt_params(*sys.argv[1:])',
' del sys.argv[1:]',
'else:',
' exit(0)'
]
def _safe_beamline_item_name(name, names):
name = re.sub(r'\W+', '_', name)
name = re.sub(r'_+', '_', name)
name = re.sub(r'^_|_$', '', name)
name = re.sub(r'^_+', '', name)
name = re.sub(r'_+$', '', name)
name = re.sub(r'^op_', '', name)
if not name or name == 'fin':
name = 'element'
idx = 2
current = name
while current in names:
current = '{}{}'.format(name, idx)
idx += 1
return current
def _save_user_model_list(model_name, beam_list):
    """Persist the user-defined model list for *model_name* as JSON."""
    pkdc('saving {} list', model_name)
    simulation_db.write_json(
        _SIM_DATA.lib_file_write_path(_USER_MODEL_LIST_FILENAME[model_name]),
        beam_list,
    )
def _set_magnetic_measurement_parameters(run_dir, v):
    """Extract tabulated-undulator measurement files and record them on *v*.

    Unzips only the index file and the .dat files it lists into the run
    directory, then sets the extracted directory and index file name on
    the template variables.
    """
    src_zip = str(run_dir.join(v.tabulatedUndulator_magneticFile)) if run_dir else \
        str(_SIM_DATA.lib_file_abspath(v.tabulatedUndulator_magneticFile))
    # NOTE(review): callers may pass run_dir='' — then str.join() on the
    # next line degenerates to the bare directory name (relative to cwd);
    # confirm that is the intended behavior for the rsopt path.
    target_dir = str(run_dir.join(_TABULATED_UNDULATOR_DATA_DIR))
    # The MagnMeasZip class defined above has convenient properties we can use here
    mmz = MagnMeasZip(src_zip)
    zindex = _zip_path_for_file(mmz.z, mmz.index_file)
    zdata = map(lambda fn: _zip_path_for_file(mmz.z, fn), mmz.dat_files)
    # extract only the index file and the data files it lists
    mmz.z.extract(zindex, target_dir)
    for df in zdata:
        mmz.z.extract(df, target_dir)
    v.magneticMeasurementsDir = _TABULATED_UNDULATOR_DATA_DIR + '/' + mmz.index_dir
    v.magneticMeasurementsIndexFile = mmz.index_file
def _set_parameters(v, data, plot_reports, run_dir):
    """Populate the template variables *v* for SRW script generation.

    Sets beamline optics, output filenames, the generated main routine,
    magnetic measurement files (when applicable), and the multi-electron
    / coherent-modes specific knobs.
    """
    report = data.report
    is_for_rsopt = _is_for_rsopt(report)
    dm = data.models
    v.beamlineOptics, v.beamlineOpticsParameters, beamline_info = _generate_beamline_optics(report, data)
    v.beamlineFirstElementPosition = _get_first_element_position(report, data)
    # 1: auto-undulator 2: auto-wiggler
    v.energyCalculationMethod = 1 if _SIM_DATA.srw_is_undulator_source(dm.simulation) else 2
    # Flag the active report in the generated script's variables.
    v[report] = 1
    for k in _OUTPUT_FOR_MODEL:
        v['{}Filename'.format(k)] = _OUTPUT_FOR_MODEL[k].filename
    v.setupMagneticMeasurementFiles = (plot_reports or is_for_rsopt) and _SIM_DATA.srw_uses_tabulated_zipfile(data)
    v.srwMain = _generate_srw_main(data, plot_reports, beamline_info)
    if (run_dir or is_for_rsopt) and _SIM_DATA.srw_uses_tabulated_zipfile(data):
        _set_magnetic_measurement_parameters(run_dir or '', v)
    if _SIM_DATA.srw_is_background_report(report) and 'beamlineAnimation' not in report:
        if report in dm and dm[report].get('jobRunMode', '') == 'sbatch':
            v.sbatchBackup = '1'
        # Number of "iterations" per save is best set to num processes
        v.multiElectronNumberOfIterations = sirepo.mpi.cfg.cores
        if report == 'multiElectronAnimation':
            if dm.multiElectronAnimation.calcCoherence == '1':
                # SRW characteristic 41: mutual intensity / degree of coherence
                v.multiElectronCharacteristic = 41
            if dm.multiElectronAnimation.wavefrontSource == 'cmd':
                if not dm.multiElectronAnimation.coherentModesFile:
                    raise AssertionError('No Coherent Modes File selected')
                v.coherentModesFile = dm.multiElectronAnimation.coherentModesFile
        elif report == 'coherentModesAnimation':
            v.multiElectronAnimation = 1
            v.multiElectronCharacteristic = 61
            v.mpiMasterCount = max(2, int(sirepo.mpi.cfg.cores / 4))
            v.multiElectronFileFormat = 'h5'
            v.multiElectronAnimationFilename = _OUTPUT_FOR_MODEL[report].basename
def _superscript(val):
return re.sub(r'\^2', u'\u00B2', val)
def _superscript_2(val):
return re.sub(r'\^0', u'\u00B0', val)
def _trim(v):
res = ''
for l in v.split('\n'):
res += l.rstrip() + '\n'
x = res.rstrip('\n') + '\n'
return x
def _unique_name(items, field, template):
#TODO(pjm): this is the same logic as sirepo.js uniqueName()
values = PKDict()
for item in items:
values[item[field]] = True
index = 1
while True:
found_it = False
id = template.replace('{}', str(index))
if id in values:
index += 1
else:
return id
def _update_model_fields(models):
    """Normalize derived model fields before script generation.

    Picks the magnetic field mode from the source type, zeroes the
    sample factor when the sampling method is manual, and converts the
    tabulated undulator gap/phase to the mm units the SRW API expects.
    """
    # Ensure method and magnetic field values are valid
    st = models.simulation.sourceType
    ut = models.tabulatedUndulator.undulatorType
    # 1: approximate/analytic field, 2: tabulated measurement field
    magnetic_field = 1
    if st == 'a' \
       or _SIM_DATA.srw_is_tabulated_undulator_with_magnetic_file(st, ut):
        magnetic_field = 2
    models.intensityReport.magneticField = magnetic_field
    models.sourceIntensityReport.magneticField = magnetic_field
    models.trajectoryReport.magneticField = magnetic_field
    models.powerDensityReport.magneticField = magnetic_field
    is_ideal_undulator = _SIM_DATA.srw_is_idealized_undulator(st, ut)
    if is_ideal_undulator:
        models.fluxAnimation.magneticField = magnetic_field
    if _SIM_DATA.srw_is_tabulated_undulator_source(models.simulation):
        if is_ideal_undulator:
            models.tabulatedUndulator.gap = 0.0
    if int(models.simulation.samplingMethod) == 2:
        models.simulation.sampleFactor = 0
    if int(models.sourceIntensityReport.samplingMethod) == 2:
        models.sourceIntensityReport.sampleFactor = 0
    # und_g and und_ph API units are mm rather than m
    models.tabulatedUndulator.gap *= 1000
    models.tabulatedUndulator.phase *= 1000
def _update_models_for_report(report, models):
    """Adjust *models* in place so the generated template fits *report*.

    Copies report-specific settings into the template slots the jinja
    template actually reads, and expands the multi-electron photon
    energy band into a (start, final) energy pair.
    """
    if report == 'fluxAnimation':
        models.fluxReport = models[report].copy()
    elif _SIM_DATA.is_watchpoint(report) or report == 'sourceIntensityReport':
        # render the watchpoint report settings in the initialIntensityReport template slot
        models.initialIntensityReport = models[report].copy()
    if report == 'sourceIntensityReport':
        models.simulation.update(models.sourceIntensityReport)
    elif report == 'coherentModesAnimation':
        models.simulation.update(models.coherentModesAnimation)
        models.multiElectronAnimation.numberOfMacroElectrons = models.coherentModesAnimation.numberOfMacroElectrons
    if report == 'multiElectronAnimation' and models.multiElectronAnimation.photonEnergyBandWidth > 0:
        # Integrate over a band centered on the simulation photon energy.
        models.multiElectronAnimation.photonEnergyIntegration = 1
        half_width = float(models.multiElectronAnimation.photonEnergyBandWidth) / 2.0
        models.simulation.photonEnergy = float(models.simulation.photonEnergy)
        models.simulation.finalPhotonEnergy = models.simulation.photonEnergy + half_width
        models.simulation.photonEnergy -= half_width
    else:
        models.multiElectronAnimation.photonEnergyIntegration = 0
        # -1 disables the final energy in the SRW API.
        models.simulation.finalPhotonEnergy = -1.0
def _update_report_labels(out, vals):
def _template_text(text):
return text.format(**vals)
for f in ('title', 'subtitle', 'units', 'labels', 'filename'):
if f not in out:
continue
if type(out[f]) == list:
for idx in range(len(out[f])):
out[f][idx] = _template_text(out[f][idx])
else:
out[f] = _template_text(out[f])
def _update_report_range(report, ar2d, x_range, y_range):
    """Crop the heatmap to the report's requested plot window.

    Offsets/sizes are given in mm and converted to meters (* 1e-3).
    The plot is first extended (via _extend_plot) so the window can
    exceed the computed data, then rows/columns outside the window are
    dropped. Returns the cropped array and updated ranges.
    """
    horizontalStart = (report.horizontalOffset - report.horizontalSize/2) * 1e-3
    horizontalEnd = (report.horizontalOffset + report.horizontalSize/2) * 1e-3
    verticalStart = (report.verticalOffset - report.verticalSize/2) * 1e-3
    verticalEnd = (report.verticalOffset + report.verticalSize/2) * 1e-3
    ar2d, x_range, y_range = _extend_plot(ar2d, x_range, y_range, horizontalStart, horizontalEnd, verticalStart, verticalEnd)
    # Clamp the data extents to the requested window.
    x_left, x_right = np.clip(x_range[:2], horizontalStart, horizontalEnd)
    y_left, y_right = np.clip(y_range[:2], verticalStart, verticalEnd)
    x = np.linspace(x_range[0], x_range[1], int(x_range[2]))
    y = np.linspace(y_range[0], y_range[1], int(y_range[2]))
    xsel = ((x >= x_left) & (x <= x_right))
    ysel = ((y >= y_left) & (y <= y_right))
    # axis=0 is the vertical (y) axis, axis=1 the horizontal (x) axis.
    ar2d = np.compress(xsel, np.compress(ysel, ar2d, axis=0), axis=1)
    return (
        ar2d,
        [x_left, x_right, np.shape(ar2d)[1]],
        [y_left, y_right, np.shape(ar2d)[0]],
    )
def _user_model_map(model_list, field):
    """Index *model_list* by each model's *field* value.

    Later models with duplicate keys overwrite earlier ones.
    """
    res = PKDict()
    for model in model_list:
        res[model[field]] = model
    return res
def _validate_data(data, schema):
    """Validate a simulation's models against *schema* and coerce types.

    Runs the generic model validation, then converts each element's two
    propagation parameter lists and the post-propagation list to their
    numeric types.
    """
    # ensure enums match, convert ints/floats, apply scaling
    template_common.validate_models(data, schema)
    for item_id in data.models.propagation:
        _validate_propagation(data.models.propagation[item_id][0])
        _validate_propagation(data.models.propagation[item_id][1])
    _validate_propagation(data.models.postPropagation)
def _validate_propagation(prop):
for i in range(len(prop)):
prop[i] = int(prop[i]) if i in (0, 1, 3, 4) else float(prop[i])
def _validate_safe_zip(zip_file_name, target_dir='.', *args):
"""Determine whether a zip file is safe to extract from
Performs the following checks:
- Each file must end up at or below the target directory
- Files must be 100MB or smaller
- If possible to determine, disallow "non-regular" and executable files
- Existing files cannot be overwritten
Args:
zip_file_name (str): name of the zip file to examine
target_dir (str): name of the directory to extract into (default to current directory)
*args: list of validator functions taking a zip file as argument and returning True or False and a string
Throws:
AssertionError if any test fails, otherwise completes silently
"""
def path_is_sub_path(path, dir_name):
real_dir = os.path.realpath(dir_name)
end_path = os.path.realpath(real_dir + '/' + path)
return end_path.startswith(real_dir)
def file_exists_in_dir(file_name, dir_name):
return os.path.exists(os.path.realpath(dir_name + '/' + file_name))
def file_attrs_ok(attrs):
# ms-dos attributes only use two bytes and don't contain much useful info, so pass them
if attrs < 2 << 16:
return True
# UNIX file attributes live in the top two bytes
mask = attrs >> 16
is_file_or_dir = mask & (0o0100000 | 0o0040000) != 0
no_exec = mask & (0o0000100 | 0o0000010 | 0o0000001) == 0
return is_file_or_dir and no_exec
# 100MB
max_file_size = 100000000
zip_file = zipfile.ZipFile(zip_file_name)
for f in zip_file.namelist():
i = zip_file.getinfo(f)
s = i.file_size
attrs = i.external_attr
assert path_is_sub_path(f, target_dir), 'Cannot extract {} above target directory'.format(f)
assert s <= max_file_size, '{} too large ({} > {})'.format(f, str(s), str(max_file_size))
assert file_attrs_ok(attrs), '{} not a normal file or is executable'.format(f)
assert not file_exists_in_dir(f, target_dir), 'Cannot overwrite file {} in target directory {}'.format(f, target_dir)
for validator in args:
res, err_string = validator(zip_file)
assert res, '{} failed validator: {}'.format(os.path.basename(zip_file_name), err_string)
def _wavefront_pickle_filename(el_id):
if el_id:
return f'wid-{el_id}.pkl'
return 'initial.pkl'
def _zip_path_for_file(zf, file_to_find):
"""Find the full path of the specified file within the zip.
For a zip zf containing:
foo1
foo2
bar/
bar/foo3
_zip_path_for_file(zf, 'foo3') will return 'bar/foo3'
Args:
zf(zipfile.ZipFile): the zip file to examine
file_to_find (str): name of the file to find
Returns:
The first path in the zip that matches the file name, or None if no match is found
"""
# Get the base file names from the zip (directories have a basename of '')
file_names_in_zip = [os.path.basename(x) for x in zf.namelist()]
return zf.namelist()[file_names_in_zip.index(file_to_find)]
|
radiasoft/sirepo
|
sirepo/template/srw.py
|
Python
|
apache-2.0
| 87,600
|
[
"CRYSTAL",
"VTK"
] |
a7e942a2e9e8391116b520e8b5121e9aeb215c180febcf922a1924372fa69853
|
#
# Copyright (C) 2010, 2015, 2016, 2017, 2018, 2019, 2020, 2021
# Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import logging
import os
import sys
import warnings
import numpy
import sherpa.ui.utils
from sherpa.astro.instrument import create_arf, create_delta_rmf, \
create_non_delta_rmf, has_pha_response
from sherpa.ui.utils import _argument_type_error, _check_type
from sherpa.utils import SherpaInt, SherpaFloat, sao_arange, \
send_to_pager
from sherpa.utils.err import ArgumentErr, ArgumentTypeErr, DataErr, \
IdentifierErr, ImportErr, IOErr, ModelErr
from sherpa.data import Data1D, Data1DAsymmetricErrs
import sherpa.astro.all
import sherpa.astro.plot
from sherpa.astro.ui import serialize
from sherpa.sim import NormalParameterSampleFromScaleMatrix
from sherpa.stats import Cash, CStat, WStat
from sherpa.models.basic import TableModel
from sherpa.astro import fake
from sherpa.astro.data import DataPHA
warning = logging.getLogger(__name__).warning
info = logging.getLogger(__name__).info
string_types = (str, )
__all__ = ('Session',)
class Session(sherpa.ui.utils.Session):
###########################################################################
# Standard methods
###########################################################################
    def __init__(self):
        """Create an astronomy-aware Sherpa session.

        clean() runs first so the astro-specific plot objects and model
        dictionaries exist before the parent initializer executes.
        """
        self.clean()
        sherpa.ui.utils.Session.__init__(self)
###########################################################################
# High-level utilities
###########################################################################
    def _fix_background_id(self, id, bkg_id):
        """Validate the background id.

        The identifier has the same restrictions as the dataset
        identifier.

        Parameters
        ----------
        id : int or str or None
            The dataset identifier. This is only used if bkg_id is
            None and must refer to a DataPHA dataset.
        bkg_id : int or str or None
            The identifier to check. If None then the default background
            identifier will be used, taken from the id dataset.

        Returns
        -------
        bkg_id : int or str
            The background identifier to use (it will only differ from
            the input parameter was set to None).

        Raises
        ------
        sherpa.utils.err.ArgumentTypeErr
            If the identifier was not a string or an integer.
        sherpa.utils.err.IdentifierErr
            If the identifier was invalid.

        See Also
        --------
        _fix_id

        Notes
        -----
        Since there is currently no way to set the default background
        id of the DataPHA class (e.g. in unpack_pha) we do not use the
        _default_id setting here.
        """
        if bkg_id is None:
            # The assumption here is that if we are asking about a
            # background identifier then there must already be a
            # loaded PHA dataset.
            data = self._get_pha_data(id)
            return data.default_background_id
            # return self._default_id

        # We rely on the validation made by _fix_id
        return self._fix_id(bkg_id)
    def __setstate__(self, state):
        """Restore pickled session state.

        Handles pickles written before the '_background_models'
        attribute was renamed to '_background_sources'.
        """
        if '_background_sources' not in state:
            self.__dict__['_background_sources'] = state.pop(
                '_background_models')

        sherpa.ui.utils.Session.__setstate__(self, state)
    def clean(self):
        """Clear out the current Sherpa session.

        The `clean` function removes all data sets and model
        assignments, and restores the default settings for the
        optimisation and fit statistic.

        See Also
        --------
        save : Save the current Sherpa session to a file.
        restore : Load in a Sherpa session from a file.
        save_all : Save the Sherpa session as an ASCII file.

        Examples
        --------

        >>> clean()
        """
        # Astro-specific model bookkeeping (per-dataset dictionaries).
        self._pileup_models = {}
        self._background_models = {}
        self._background_sources = {}

        # Astro-specific plot objects.
        self._dataphaplot = sherpa.astro.plot.DataPHAPlot()
        self._astrosourceplot = sherpa.astro.plot.SourcePlot()
        self._astrocompsrcplot = sherpa.astro.plot.ComponentSourcePlot()
        self._astrocompmdlplot = sherpa.astro.plot.ComponentModelPlot()
        self._modelhisto = sherpa.astro.plot.ModelHistogram()
        self._bkgmodelhisto = sherpa.astro.plot.BkgModelHistogram()

        # self._bkgdataplot = sherpa.astro.plot.DataPHAPlot()
        self._bkgdataplot = sherpa.astro.plot.BkgDataPlot()
        self._bkgmodelplot = sherpa.astro.plot.BkgModelPHAHistogram()
        self._bkgfitplot = sherpa.astro.plot.BkgFitPlot()
        self._bkgchisqrplot = sherpa.astro.plot.BkgChisqrPlot()
        self._bkgdelchiplot = sherpa.astro.plot.BkgDelchiPlot()
        self._bkgresidplot = sherpa.astro.plot.BkgResidPlot()
        self._bkgratioplot = sherpa.astro.plot.BkgRatioPlot()
        self._bkgsourceplot = sherpa.astro.plot.BkgSourcePlot()
        self._arfplot = sherpa.astro.plot.ARFPlot()
        self._orderplot = sherpa.astro.plot.OrderPlot()
        self._energyfluxplot = sherpa.astro.plot.EnergyFluxHistogram()
        self._photonfluxplot = sherpa.astro.plot.PhotonFluxHistogram()

        # This is a new dictionary of XSPEC module settings.  It
        # is meant only to be populated by the save function, so
        # that the user's XSPEC settings can be saved in the pickle
        # file.  Then, restore can peel out settings from the
        # restored _xspec_state variable, and set abundance,
        # cross-section, etc. in the XSPEC module.
        #
        # TODO: it should probably not be reset by clean since there's
        #       no way to clear the XSPEC state (we could try and
        #       unset all the changes but it's not guaranteed we can
        #       do so).
        #
        self._xspec_state = None

        # Run the parent clean first, then register the astro plot
        # objects with the (freshly reset) plot-type tables.
        sherpa.ui.utils.Session.clean(self)

        self._pyblocxs = sherpa.astro.sim.MCMC()

        self._plot_types['order'] = [self._orderplot]
        self._plot_types['energy'] = [self._energyfluxplot]
        self._plot_types['photon'] = [self._photonfluxplot]

        # Astro variants are appended so they take precedence for
        # astro data while the base plots remain available.
        self._plot_types['compsource'].append(self._astrocompsrcplot)
        self._plot_types['compmodel'].append(self._astrocompmdlplot)
        self._plot_types['data'].append(self._dataphaplot)
        self._plot_types['source'].append(self._astrosourceplot)
        self._plot_types['model'].append(self._modelhisto)

        self._plot_types['arf'] = [self._arfplot]
        self._plot_types['bkg'] = [self._bkgdataplot]
        self._plot_types['bkgmodel'] = [self._bkgmodelhisto]
        self._plot_types['bkgfit'] = [self._bkgfitplot]
        self._plot_types['bkgsource'] = [self._bkgsourceplot]
        self._plot_types['bkgratio'] = [self._bkgratioplot]
        self._plot_types['bkgresid'] = [self._bkgresidplot]
        self._plot_types['bkgdelchi'] = [self._bkgdelchiplot]
        self._plot_types['bkgchisqr'] = [self._bkgchisqrplot]

        self._plot_type_names['order'] = 'order'
        # self._plot_type_names['energy'] = 'energy'  - how to do energy/flux plots?
        # self._plot_type_names['photon'] = 'photon'

        self._plot_type_names['astrocompsource'] = 'source_component'
        self._plot_type_names['astrocompmodel'] = 'model_componentl'

        self._plot_type_names['astrodata'] = 'data'
        self._plot_type_names['astrosource'] = 'source'  # is this meaningful anymore
        self._plot_type_names['astromodel'] = 'model'  # is this meaningful anymore

        self._plot_type_names['arf'] = 'arf'
        self._plot_type_names['bkg'] = 'bkg'
        self._plot_type_names['bkgmodel'] = 'bkg_model'
        self._plot_type_names['bkgfit'] = 'bkg_fit'
        self._plot_type_names['bkgsource'] = 'bkg_source'
        self._plot_type_names['bkgratio'] = 'bkg_ratio'
        self._plot_type_names['bkgresid'] = 'bkg_resid'
        self._plot_type_names['bkgdelchi'] = 'bkg_delchi'
        self._plot_type_names['bkgchisqr'] = 'bkg_chisqr'
# Add ability to save attributes sepcific to the astro package.
# Save XSPEC module settings that need to be restored.
    def save(self, filename='sherpa.save', clobber=False):
        """Save the current Sherpa session to a file.

        Parameters
        ----------
        filename : str, optional
            The name of the file to write the results to. The default
            is 'sherpa.save'.
        clobber : bool, optional
            This flag controls whether an existing file can be
            overwritten (``True``) or if it raises an exception (``False``,
            the default setting).

        Raises
        ------
        sherpa.utils.err.IOErr
            If `filename` already exists and `clobber` is ``False``.

        See Also
        --------
        clean : Clear all stored session data.
        restore : Load in a Sherpa session from a file.
        save_all : Save the Sherpa session as an ASCII file.

        Notes
        -----
        The current Sherpa session is saved using the Python `pickle`
        module. The output is a binary file, which may not be portable
        between versions of Sherpa, but is platform independent, and
        contains all the data. This means that files created by `save`
        can be sent to collaborators to share results.

        Examples
        --------
        Save the current session to the file 'sherpa.save'.

        >>> save()

        Save the current session to the file 'bestfit.sherpa',
        overwriting any existing version of the file.

        >>> save('bestfit.sherpa', clobber=True)
        """
        # Capture the current XSPEC settings (if the module is available)
        # so restore() can re-apply them; see the note in clean().
        if hasattr(sherpa.astro, "xspec"):
            self._xspec_state = sherpa.astro.xspec.get_xsstate()
        else:
            self._xspec_state = None

        sherpa.ui.utils.Session.save(self, filename, clobber)
    def restore(self, filename='sherpa.save'):
        """Load in a Sherpa session from a file.

        Parameters
        ----------
        filename : str, optional
            The name of the file to read the results from. The default
            is 'sherpa.save'.

        Raises
        ------
        IOError
            If `filename` does not exist.

        See Also
        --------
        clean : Clear all stored session data.
        save : Save the current Sherpa session to a file.

        Notes
        -----
        The input to `restore` must have been created with the `save`
        command. This is a binary file, which may not be portable
        between versions of Sherpa, but is platform independent. A
        warning message may be created if a file saved by an older
        (or newer) version of Sherpa is loaded. An example of such
        a message is::

            WARNING: Could not determine whether the model is discrete.
            This probably means that you have restored a session saved with a previous version of Sherpa.
            Falling back to assuming that the model is continuous.

        Examples
        --------
        Load in the Sherpa session from 'sherpa.save'.

        >>> restore()

        Load in the session from the given file:

        >>> restore('/data/m31/setup.sherpa')
        """
        sherpa.ui.utils.Session.restore(self, filename)

        # Re-apply any XSPEC settings captured by save(), then drop them
        # so they are not applied again by a subsequent restore.
        if hasattr(sherpa.astro, "xspec"):
            if self._xspec_state is not None:
                sherpa.astro.xspec.set_xsstate(self._xspec_state)
                self._xspec_state = None
    def _get_show_data(self, id=None):
        """Return a text description of the data set(s).

        When *id* is None every loaded data set is described; PHA data
        sets additionally report background scaling, noticed channels,
        responses (ARF/RMF), and their backgrounds.
        """
        data_str = ''
        ids = self.list_data_ids()
        if id is not None:
            ids = [self._fix_id(id)]
        for id in ids:
            data = self.get_data(id)

            data_str += 'Data Set: %s\n' % id
            data_str += 'Filter: %s\n' % data.get_filter_expr()
            if isinstance(data, sherpa.astro.data.DataPHA):

                nbkg = len(data.background_ids)
                for bkg_id in data.background_ids:
                    # Apply grouping/filtering if set
                    scale = data.get_background_scale(bkg_id)
                    if scale is None:
                        continue

                    data_str += 'Bkg Scale'
                    # Only label the component when there are several or
                    # it is not the default (1).
                    if nbkg > 1 or bkg_id != 1:
                        data_str += ' {}'.format(bkg_id)

                    data_str += ': '
                    if numpy.isscalar(scale):
                        data_str += '{:g}'.format(float(scale))
                    else:
                        # would like to use sherpa.utils/print_fields style output
                        # but not available and I don't feel like it's
                        # worth it
                        data_str += '{}[{}]'.format(scale.dtype, scale.size)

                    data_str += '\n'

                data_str += 'Noticed Channels: %s\n' % data.get_noticed_expr()

            data_str += data.__str__() + '\n\n'

            if isinstance(data, sherpa.astro.data.DataPHA):
                for resp_id in data.response_ids:
                    # ARF or RMF could be None
                    arf, rmf = data.get_response(resp_id)
                    if rmf is not None:
                        data_str += 'RMF Data Set: %s:%s\n' % (id, resp_id)
                        data_str += rmf.__str__() + '\n\n'
                    if arf is not None:
                        data_str += 'ARF Data Set: %s:%s\n' % (id, resp_id)
                        data_str += arf.__str__() + '\n\n'

            data_str += self._get_show_bkg(id)
        return data_str
    def _get_show_bkg(self, id=None, bkg_id=None):
        """Return a text description of the PHA background data set(s).

        Non-PHA data sets are skipped. When *bkg_id* is None every
        background component of each data set is described, including
        its responses (ARF/RMF).
        """
        data_str = ''
        ids = self.list_data_ids()
        if id is not None:
            ids = [self._fix_id(id)]
        for id in ids:
            data = self.get_data(id)
            if not isinstance(data, sherpa.astro.data.DataPHA):
                continue

            bkg_ids = data.background_ids
            if bkg_id is not None:
                # NOTE(review): this calls _fix_background_id on the DataPHA
                # object, not self._fix_background_id(id, bkg_id) — confirm
                # DataPHA provides this method with a single-argument form.
                bkg_ids = [data._fix_background_id(bkg_id)]

            for bkg_id in bkg_ids:
                bkg = self.get_bkg(id, bkg_id)
                data_str += 'Background Data Set: %s:%s\n' % (id, bkg_id)
                data_str += 'Filter: %s\n' % bkg.get_filter_expr()
                data_str += 'Noticed Channels: %s\n' % bkg.get_noticed_expr()
                data_str += bkg.__str__() + '\n\n'

                for bk_rp_id in bkg.response_ids:
                    # ARF or RMF could be None
                    arf, rmf = bkg.get_response(bk_rp_id)
                    if rmf is not None:
                        data_str += ('Background RMF Data Set: %s:%s\n' %
                                     (id, bkg_id))
                        data_str += rmf.__str__() + '\n\n'
                    if arf is not None:
                        data_str += ('Background ARF Data Set: %s:%s\n' %
                                     (id, bkg_id))
                        data_str += arf.__str__() + '\n\n'

        return data_str
    def _get_show_bkg_model(self, id=None, bkg_id=None):
        """Return a text description of the background model(s).

        The background ids for a data set are collected from both the
        model and source dictionaries (deduplicated), since a background
        can be set via either set_bkg_model or set_bkg_source.
        """
        model_str = ''
        ids = self.list_data_ids()
        if id is not None:
            ids = [self._fix_id(id)]
        for id in ids:
            if bkg_id is not None:
                bkg_ids = [bkg_id]
            else:
                bkg_ids = list(self._background_models.get(id, {}).keys())
                bkg_ids.extend(self._background_sources.get(id, {}).keys())
                bkg_ids = list(set(bkg_ids))

            for bkg_id in bkg_ids:
                model_str += 'Background Model: %s:%s\n' % (id, bkg_id)
                model_str += self.get_bkg_model(id, bkg_id).__str__() + '\n\n'

        return model_str
    def _get_show_bkg_source(self, id=None, bkg_id=None):
        """Return a text description of the background source expression(s).

        Unlike _get_show_bkg_model, only backgrounds with an explicit
        source expression (set_bkg_source) are reported.
        """
        model_str = ''
        ids = self.list_data_ids()
        if id is not None:
            ids = [self._fix_id(id)]
        for id in ids:
            if bkg_id is not None:
                bkg_ids = [bkg_id]
            else:
                bkg_ids = self._background_sources.get(id, {}).keys()

            for bkg_id in bkg_ids:
                model_str += 'Background Source: %s:%s\n' % (id, bkg_id)
                model_str += self.get_bkg_source(id, bkg_id).__str__() + '\n\n'

        return model_str
def show_bkg(self, id=None, bkg_id=None, outfile=None, clobber=False):
    """Show the details of the PHA background data sets.
    This displays information about the background, or
    backgrounds, for the loaded data sets. This includes: any
    filters, the grouping settings, mission-specific header
    keywords, and the details of any associated instrument
    responses files (ARF, RMF).
    Parameters
    ----------
    id : int or str, optional
        The data set. If not given then all background data sets
        are displayed.
    bkg_id : int or str, optional
        The background component to display. The default is all
        components.
    outfile : str, optional
        If not given the results are displayed to the screen,
        otherwise it is taken to be the name of the file to
        write the results to.
    clobber : bool, optional
        If `outfile` is not ``None``, then this flag controls
        whether an existing file can be overwritten (``True``)
        or if it raises an exception (``False``, the default
        setting).
    Raises
    ------
    sherpa.utils.err.IOErr
        If `outfile` already exists and `clobber` is ``False``.
    See Also
    --------
    list_model_ids : List of all the data sets with a source expression.
    load_bkg : Load the background from a file and add it to a PHA data set.
    show_all : Report the current state of the Sherpa session.
    """
    # Build the report and hand it straight to the pager/file writer.
    send_to_pager(self._get_show_bkg(id, bkg_id), outfile, clobber)
def show_bkg_source(self, id=None, bkg_id=None, outfile=None, clobber=False):
    """Display the background model expression for a data set.
    This displays the background model for a data set, that is,
    the expression set by `set_bkg_model` or `set_bkg_source`, as
    well as the parameter values for the model. The
    `show_bkg_model` function displays the model that is fit to
    the data; that is, it includes any instrument responses.
    Parameters
    ----------
    id : int or str, optional
        The data set. If not given then all background expressions
        are displayed.
    bkg_id : int or str, optional
        The background component to display. The default is all
        components.
    outfile : str, optional
        If not given the results are displayed to the screen,
        otherwise it is taken to be the name of the file to
        write the results to.
    clobber : bool, optional
        If `outfile` is not ``None``, then this flag controls
        whether an existing file can be overwritten (``True``)
        or if it raises an exception (``False``, the default
        setting).
    Raises
    ------
    sherpa.utils.err.IOErr
        If `outfile` already exists and `clobber` is ``False``.
    See Also
    --------
    list_model_ids : List of all the data sets with a source expression.
    set_bkg_model : Set the background model expression for a data set.
    show_all : Report the current state of the Sherpa session.
    show_model : Display the model expression used to fit a data set.
    show_bkg_model : Display the background model expression used to fit a data set.
    """
    # Build the report and hand it straight to the pager/file writer.
    send_to_pager(self._get_show_bkg_source(id, bkg_id), outfile, clobber)
def show_bkg_model(self, id=None, bkg_id=None, outfile=None, clobber=False):
    """Display the background model expression used to fit a data set.
    This displays the model used to fit the background data set,
    that is, the expression set by `set_bkg_model` or
    `set_bkg_source` combined with any instrumental responses,
    together with the parameter values for the model. The
    `show_bkg_source` function displays just the background model,
    without the instrument components (if any).
    Parameters
    ----------
    id : int or str, optional
        The data set. If not given then all background expressions are
        displayed.
    bkg_id : int or str, optional
        The background component to display. The default is all
        components.
    outfile : str, optional
        If not given the results are displayed to the screen,
        otherwise it is taken to be the name of the file to
        write the results to.
    clobber : bool, optional
        If `outfile` is not ``None``, then this flag controls
        whether an existing file can be overwritten (``True``)
        or if it raises an exception (``False``, the default
        setting).
    Raises
    ------
    sherpa.utils.err.IOErr
        If `outfile` already exists and `clobber` is ``False``.
    See Also
    --------
    list_model_ids : List of all the data sets with a source expression.
    set_bkg_model : Set the background model expression for a data set.
    show_all : Report the current state of the Sherpa session.
    show_model : Display the model expression used to fit a data set.
    show_bkg_source : Display the background model expression for a data set.
    """
    txt = self._get_show_bkg_model(id, bkg_id)
    send_to_pager(txt, outfile, clobber)
###########################################################################
# Data
###########################################################################
# DOC-NOTE: also in sherpa.utils
def dataspace1d(self, start, stop, step=1, numbins=None,
                id=None, bkg_id=None, dstype=sherpa.data.Data1DInt):
    """Create the independent axis for a 1D data set.
    Create an "empty" one-dimensional data set by defining the
    grid on which the points are defined (the independent axis).
    The values are set to 0.
    Parameters
    ----------
    start : number
        The minimum value of the axis.
    stop : number
        The maximum value of the axis.
    step : number, optional
        The separation between each grid point. This is not used if
        ``numbins`` is set.
    numbins : int, optional
        The number of grid points. This over-rides the ``step``
        setting.
    id : int or str, optional
        The identifier for the data set to use. If not given then
        the default identifier is used, as returned by
        `get_default_id`.
    bkg_id : int or str, optional
        If set, the grid is for the background component of the
        data set.
    dstype : data class to use, optional
        What type of data is to be used. Supported values include
        `Data1DInt` (the default), `Data1D`, and `DataPHA`.
    See Also
    --------
    dataspace2d : Create the independent axis for a 2D data set.
    get_dep : Return the dependent axis of a data set.
    get_indep : Return the independent axes of a data set.
    set_dep : Set the dependent axis of a data set.
    Notes
    -----
    The meaning of the ``stop`` parameter depends on whether it is a
    binned or unbinned data set (as set by the ``dstype``
    parameter).
    Examples
    --------
    Create a binned data set, starting at 1 and with a
    bin-width of 1.
    >>> dataspace1d(1, 5, 1)
    >>> print(get_indep())
    (array([ 1.,  2.,  3.,  4.]), array([ 2.,  3.,  4.,  5.]))
    This time for an un-binned data set:
    >>> dataspace1d(1, 5, 1, dstype=Data1D)
    >>> print(get_indep())
    (array([ 1., 2., 3., 4., 5.]),)
    Specify the number of bins rather than the grid spacing:
    >>> dataspace1d(1, 5, numbins=5, id=2)
    >>> (xlo, xhi) = get_indep(2)
    >>> xlo
    array([ 1. ,  1.8,  2.6,  3.4,  4.2])
    >>> xhi
    array([ 1.8,  2.6,  3.4,  4.2,  5. ])
    >>> dataspace1d(1, 5, numbins=5, id=3, dstype=Data1D)
    >>> (x, ) = get_indep(3)
    >>> x
    array([ 1.,  2.,  3.,  4.,  5.])
    Create a grid for a PHA data set called 'jet', and
    for its background component:
    >>> dataspace1d(0.01, 11, 0.01, id='jet', dstype=DataPHA)
    >>> dataspace1d(0.01, 11, 0.01, id='jet', bkg_id=1,
    ...             dstype=DataPHA)
    """
    # support non-integrated grids with inclusive boundaries
    # We do NOT want to use an isinstance check since Data1DInt is
    # derived from Data1D.
    # NOTE: the identity-style membership test below is deliberate;
    # extending stop by one step makes the unbinned grid include the
    # requested stop value once the bin edges are converted to points.
    if dstype in (sherpa.data.Data1D, sherpa.astro.data.DataPHA):
        stop += step
    # The utility routine returns low edges, high edges, and a
    # zero-filled dependent axis.
    xlo, xhi, y = sherpa.utils.dataspace1d(start, stop, step=step,
                                           numbins=numbins)
    args = [xlo, xhi, y]
    kwargs = {}
    if dstype is sherpa.astro.data.DataPHA:
        # PHA data is indexed by channel number, starting at 1.
        channel = numpy.arange(1, len(xlo) + 1, dtype=float)
        args = [channel, y]
        # kwargs['bin_lo'] = xlo
        # kwargs['bin_hi'] = xhi
    elif dstype is not sherpa.data.Data1DInt:
        # Unbinned types only take the point positions, not bin edges.
        args = [xlo, y]
    if bkg_id is not None:
        # Attach the grid as a background component of an existing
        # PHA data set rather than creating a new data set.
        self._get_pha_data(id).set_background(dstype('bkg_dataspace1d',
                                                     *args, **kwargs),
                                              bkg_id)
    else:
        self.set_data(id, dstype('dataspace1d', *args, **kwargs))
# DOC-NOTE: also in sherpa.utils
def dataspace2d(self, dims, id=None, dstype=sherpa.astro.data.DataIMG):
    """Create the independent axis for a 2D data set.
    Create an "empty" two-dimensional data set by defining the
    grid on which the points are defined (the independent axis).
    The values are set to 0.
    Parameters
    ----------
    dims : sequence of 2 number
        The dimensions of the grid in ``(width,height)`` order.
    id : int or str, optional
        The identifier for the data set to use. If not given then
        the default identifier is used, as returned by
        `get_default_id`.
    dstype : data class to use, optional
        What type of data is to be used. Supported values include
        `DataIMG` (the default), `Data2D`, and `Data2DInt`.
    See Also
    --------
    dataspace1d : Create the independent axis for a 1D data set.
    get_dep : Return the dependent axis of a data set.
    get_indep : Return the independent axes of a data set.
    set_dep : Set the dependent axis of a data set.
    Examples
    --------
    Create a 200 pixel by 150 pixel grid (number of columns by
    number of rows) and display it (each pixel has a value of 0):
    >>> dataspace2d([200, 150])
    >>> image_data()
    Create a data space called "fakeimg":
    >>> dataspace2d([nx, ny], id="fakeimg")
    """
    x0, x1, y, shape = sherpa.utils.dataspace2d(dims)
    # Integrated ("Int") classes need the bin edges, which are the
    # pixel centres offset by half a pixel in each direction.
    integrated = issubclass(dstype, (sherpa.astro.data.DataIMGInt,
                                     sherpa.data.Data2DInt))
    if integrated:
        dataset = dstype('dataspace2d', x0 - 0.5, x1 - 0.5,
                         x0 + 0.5, x1 + 0.5, y, shape)
    else:
        dataset = dstype('dataspace2d', x0, x1, y, shape)
    self.set_data(id, dataset)
# DOC-NOTE: also in sherpa.utils
# DOC-TODO: how to describe Crates and/or AstroPy?
def unpack_arrays(self, *args):
    """Create a sherpa data object from arrays of data.
    The object returned by `unpack_arrays` can be used in a
    `set_data` call.
    Parameters
    ----------
    args : array_like
        Arrays of data. The order, and number, is determined by
        the `dstype` parameter, and listed in the `load_arrays`
        routine.
    dstype
        The data set type. The default is `Data1D` and values
        include: `Data1D`, `Data1DInt`, `Data2D`, `Data2DInt`,
        `DataPHA`, and `DataIMG`. The class is expected to
        be derived from `sherpa.data.BaseData`.
    Returns
    -------
    instance
        The data set object matching the requested `dstype`
        parameter.
    See Also
    --------
    get_data : Return the data set by identifier.
    load_arrays : Create a data set from array values.
    set_data : Set a data set.
    unpack_data : Create a sherpa data object from a file.
    Examples
    --------
    Create a 1D (unbinned) data set from the values in
    the x and y arrays. Use the returned object to create
    a data set labelled "oned":
    >>> x = [1, 3, 7, 12]
    >>> y = [2.3, 3.2, -5.4, 12.1]
    >>> dat = unpack_arrays(x, y)
    >>> set_data("oned", dat)
    Include statistical errors on the data:
    >>> edat = unpack_arrays(x, y, dy)
    Create a "binned" 1D data set, giving the low,
    and high edges of the independent axis (xlo
    and xhi respectively) and the dependent values
    for this grid (y):
    >>> hdat = unpack_arrays(xlo, xhi, y, Data1DInt)
    Create a 3 column by 4 row image:
    >>> ivals = np.arange(12)
    >>> y, x = np.mgrid[0:3, 0:4]
    >>> x = x.flatten()
    >>> y = y.flatten()
    >>> idat = unpack_arrays(x, y, ivals, (3, 4), DataIMG)
    """
    try:
        return sherpa.astro.io.read_arrays(*args)
    except AttributeError:
        # if the astro backend is not set, fall back on io module version.
        return sherpa.io.read_arrays(*args)
# DOC-NOTE: also in sherpa.utils
# DOC-TODO: rework the Data type notes section (also needed for
# unpack_arrays)
def load_arrays(self, id, *args):
    """Create a data set from array values.
    Parameters
    ----------
    id : int or str
        The identifier for the data set to use.
    *args
        Two or more arrays, followed by the type of data set to
        create.
    Warnings
    --------
    Sherpa currently does not support numpy masked arrays. Use the
    set_filter function and note that it follows a different convention by
    default (a positive value or True for a "bad" channel, 0 or False for
    a good channel).
    See Also
    --------
    copy_data : Copy a data set to a new identifier.
    delete_data : Delete a data set by identifier.
    get_data : Return the data set by identifier.
    load_data : Create a data set from a file.
    set_data : Set a data set.
    unpack_arrays : Create a sherpa data object from arrays of data.
    Notes
    -----
    The data type identifier, which defaults to `Data1D`,
    determines the number, and order, of the required inputs.
    +------------+-----------------+--------------------+
    | Identifier | Required Fields |  Optional Fields   |
    +============+=================+====================+
    | Data1D     | x, y            | statistical error, |
    |            |                 | systematic error   |
    +------------+-----------------+--------------------+
    | Data1DInt  | xlo, xhi, y     | statistical error, |
    |            |                 | systematic error   |
    +------------+-----------------+--------------------+
    | Data2D     | x0, x1, y       | shape,             |
    |            |                 | statistical error, |
    |            |                 | systematic error   |
    +------------+-----------------+--------------------+
    | Data2DInt  | x0lo, x1lo,     | shape,             |
    |            | x0hi, x1hi, y   | statistical error, |
    |            |                 | systematic error   |
    +------------+-----------------+--------------------+
    | DataPHA    | channel, counts | statistical error, |
    |            |                 | systematic error,  |
    |            |                 | bin_lo, bin_hi,    |
    |            |                 | grouping, quality  |
    +------------+-----------------+--------------------+
    | DataIMG    | x0, x1, y       | shape,             |
    |            |                 | statistical error, |
    |            |                 | systematic error   |
    +------------+-----------------+--------------------+
    The ``shape`` argument should be a tuple giving the size of
    the data ``(ny,nx)``, and for the ``DataIMG`` case the arrays
    are 1D, not 2D.
    Examples
    --------
    Create a 1D data set with three points:
    >>> load_arrays(1, [10, 12, 15], [4.2, 12.1, 8.4])
    Create a 1D data set, with the identifier 'prof', from the
    arrays ``x`` (independent axis), ``y`` (dependent axis), and
    ``dy`` (statistical error on the dependent axis):
    >>> load_arrays('prof', x, y, dy)
    Explicitly define the type of the data set:
    >>> load_arrays('prof', x, y, dy, Data1D)
    Data set 1 is a histogram, where the bins cover the range
    1-3, 3-5, and 5-7 with values 4, 5, and 9 respectively.
    >>> load_arrays(1, [1, 3, 5], [3, 5, 7], [4, 5, 9], Data1DInt)
    Create an image data set:
    >>> ivals = np.arange(12)
    >>> y, x = np.mgrid[0:3, 0:4]
    >>> x = x.flatten()
    >>> y = y.flatten()
    >>> load_arrays('img', x, y, ivals, (3, 4), DataIMG)
    """
    # Delegate the array-to-dataset conversion, then register the result.
    data = self.unpack_arrays(*args)
    self.set_data(id, data)
# DOC-TODO: should unpack_ascii be merged into this?
def unpack_table(self, filename, ncols=2, colkeys=None, dstype=Data1D):
    """Unpack a FITS binary file into a data structure.
    Parameters
    ----------
    filename
        Identify the file to read: a file name, or a data structure
        representing the data to use, as used by the I/O backend in
        use by Sherpa: a ``TABLECrate`` for crates, as used by CIAO,
        or a list of AstroPy HDU objects.
    ncols : int, optional
        The number of columns to read in (the first `ncols` columns
        in the file). The meaning of the columns is determined by
        the `dstype` parameter.
    colkeys : array of str, optional
        An array of the column name to read in. The default is
        ``None``.
    dstype : optional
        The data class to use. The default is `Data1D` and it
        is expected to be derived from `sherpa.data.BaseData`.
    Returns
    -------
    instance
        The class of the returned object is controlled by the
        `dstype` parameter.
    See Also
    --------
    load_table : Load a FITS binary file as a data set.
    set_data : Set a data set.
    unpack_ascii : Unpack an ASCII file into a data structure.
    Examples
    --------
    Read in the first two columns of the file, as the independent
    (X) and dependent (Y) columns of a data set:
    >>> d = unpack_table('sources.fits')
    Read in the first three columns (the third column is taken to
    be the error on the dependent variable):
    >>> d = unpack_table('sources.fits', ncols=3)
    Read in from columns 'RMID' and 'SUR_BRI':
    >>> d = unpack_table('rprof.fits', colkeys=['RMID', 'SUR_BRI'])
    The first three columns are taken to be the two independent
    axes of a two-dimensional data set (``x0`` and ``x1``) and
    the dependent value (``y``):
    >>> d = unpack_table('fields.fits', ncols=3,
    ...                  dstype=sherpa.astro.data.Data2D)
    When using the Crates I/O library, the file name can include
    CIAO Data Model syntax, such as column selection. This can
    also be done using the `colkeys` parameter, as shown above:
    >>> d = unpack_table('rprof.fits[cols rmid,sur_bri,sur_bri_err]',
    ...                  ncols=3)
    """
    # All the work is done by the I/O backend.
    tbl = sherpa.astro.io.read_table(filename, ncols, colkeys, dstype)
    return tbl
# DOC-TODO: the field listing really should be somewhere else
# as it's needed in multiple places (ideally in the
# DataX class documentation, but users may not find it)
# DOC-TODO: what do the shape arguments for Data2D/Data2DInt mean?
def load_table(self, id, filename=None, ncols=2, colkeys=None,
               dstype=Data1D):
    """Load a FITS binary file as a data set.
    Parameters
    ----------
    id : int or str, optional
        The identifier for the data set to use. If not given then
        the default identifier is used, as returned by
        `get_default_id`.
    filename
        Identify the file to read: a file name, or a data structure
        representing the data to use, as used by the I/O backend in
        use by Sherpa: a ``TABLECrate`` for crates, as used by CIAO,
        or a list of AstroPy HDU objects.
    ncols : int, optional
        The number of columns to read in (the first ``ncols`` columns
        in the file). The meaning of the columns is determined by
        the ``dstype`` parameter.
    colkeys : array of str, optional
        An array of the column name to read in. The default is
        ``None``.
    dstype : optional
        The data class to use. The default is `Data1D`.
    See Also
    --------
    load_arrays : Create a data set from array values.
    load_ascii : Load an ASCII file as a data set.
    load_image : Load an image as a data set.
    set_data : Set a data set.
    unpack_table : Unpack a FITS binary table into a data structure.
    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `filename` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `filename` parameters,
    respectively. The remaining parameters are expected to be
    given as named arguments.
    The column order for the different data types are as follows,
    where ``x`` indicates an independent axis and ``y`` the dependent
    axis:
    +------------+-----------------+--------------------+
    | Identifier | Required Fields |  Optional Fields   |
    +============+=================+====================+
    | Data1D     | x, y            | statistical error, |
    |            |                 | systematic error   |
    +------------+-----------------+--------------------+
    | Data1DInt  | xlo, xhi, y     | statistical error, |
    |            |                 | systematic error   |
    +------------+-----------------+--------------------+
    | Data2D     | x0, x1, y       | shape,             |
    |            |                 | statistical error, |
    |            |                 | systematic error   |
    +------------+-----------------+--------------------+
    | Data2DInt  | x0lo, x1lo,     | shape,             |
    |            | x0hi, x1hi, y   | statistical error, |
    |            |                 | systematic error   |
    +------------+-----------------+--------------------+
    Examples
    --------
    Read in the first two columns of the file, as the independent
    (X) and dependent (Y) columns of the default data set:
    >>> load_table('sources.fits')
    Read in the first three columns (the third column is taken to
    be the error on the dependent variable):
    >>> load_table('sources.fits', ncols=3)
    Read in from columns 'RMID' and 'SUR_BRI' into data set
    'prof':
    >>> load_table('prof', 'rprof.fits',
    ...            colkeys=['RMID', 'SUR_BRI'])
    The first three columns are taken to be the two independent
    axes of a two-dimensional data set (``x0`` and ``x1``) and
    the dependent value (``y``):
    >>> load_table('fields.fits', ncols=3,
    ...            dstype=sherpa.astro.data.Data2D)
    When using the Crates I/O library, the file name can include
    CIAO Data Model syntax, such as column selection. This can
    also be done using the ``colkeys`` parameter, as shown above:
    >>> load_table('prof',
    ...            'rprof.fits[cols rmid,sur_bri,sur_bri_err]',
    ...            ncols=3)
    """
    # Single-argument form: the first positional argument is really
    # the file name, so shift it over and use the default identifier.
    if filename is None:
        filename = id
        id = None
    data = self.unpack_table(filename, ncols, colkeys, dstype)
    self.set_data(id, data)
# DOC-TODO: should unpack_ascii be merged into unpack_table?
# DOC-TODO: I am going to ignore the crates support here as
# it is somewhat meaningless, since the crate could
# have been read from a FITS binary table.
def unpack_ascii(self, filename, ncols=2, colkeys=None,
                 dstype=Data1D, sep=' ', comment='#'):
    """Unpack an ASCII file into a data structure.
    Parameters
    ----------
    filename : str
        The name of the file to read in. Selection of the relevant
        column depends on the I/O library in use (Crates or
        AstroPy).
    ncols : int, optional
        The number of columns to read in (the first `ncols` columns
        in the file). The meaning of the columns is determined by
        the `dstype` parameter.
    colkeys : array of str, optional
        An array of the column name to read in. The default is
        ``None``.
    sep : str, optional
        The separator character. The default is ``' '``.
    comment : str, optional
        The comment character. The default is ``'#'``.
    dstype : optional
        The data class to use. The default is `Data1D` and it
        is expected to be derived from `sherpa.data.BaseData`.
    Returns
    -------
    instance
        The type of the returned object is controlled by the
        `dstype` parameter.
    See Also
    --------
    load_ascii : Load an ASCII file as a data set.
    set_data : Set a data set.
    unpack_table : Unpack a FITS binary file into a data structure.
    Examples
    --------
    Read in the first two columns of the file, as the independent
    (X) and dependent (Y) columns of a data set:
    >>> d = unpack_ascii('sources.dat')
    Read in the first three columns (the third column is taken to
    be the error on the dependent variable):
    >>> d = unpack_ascii('sources.dat', ncols=3)
    Read in from columns 'col2' and 'col3':
    >>> d = unpack_ascii('tbl.dat', colkeys=['col2', 'col3'])
    The first three columns are taken to be the two independent
    axes of a two-dimensional data set (``x0`` and ``x1``) and
    the dependent value (``y``):
    >>> d = unpack_ascii('fields.dat', ncols=3,
    ...                  dstype=sherpa.astro.data.Data2D)
    When using the Crates I/O library, the file name can include
    CIAO Data Model syntax, such as column selection. This can
    also be done using the `colkeys` parameter, as shown above:
    >>> d = unpack_ascii('tbl.dat[cols rmid,sur_bri,sur_bri_err]',
    ...                  ncols=3)
    """
    # All the work is done by the I/O backend.
    data = sherpa.astro.io.read_ascii(filename, ncols, colkeys, dstype,
                                      sep=sep, comment=comment)
    return data
# DOC-TODO: I am going to ignore the crates support here as
# it is somewhat meaningless, since the crate could
# have been read from a FITS binary table.
# DOC-TODO: how best to include datastack support?
# DOC-TODO: what does shape mean here (how is it encoded)?
def load_ascii(self, id, filename=None, ncols=2, colkeys=None,
               dstype=Data1D, sep=' ', comment='#'):
    """Load an ASCII file as a data set.
    The standard behavior is to create a single data set, but
    multiple data sets can be loaded with this command, as
    described in the `sherpa.astro.datastack` module.
    Parameters
    ----------
    id : int or str, optional
        The identifier for the data set to use. If not given then
        the default identifier is used, as returned by
        `get_default_id`.
    filename : str
        The name of the file to read in. Selection of the relevant
        column depends on the I/O library in use (Crates or
        AstroPy).
    ncols : int, optional
        The number of columns to read in (the first ``ncols`` columns
        in the file). The meaning of the columns is determined by
        the ``dstype`` parameter.
    colkeys : array of str, optional
        An array of the column name to read in. The default is
        ``None``.
    sep : str, optional
        The separator character. The default is ``' '``.
    comment : str, optional
        The comment character. The default is ``'#'``.
    dstype : optional
        The data class to use. The default is `Data1D`.
    See Also
    --------
    load_ascii_with_errors : Load an ASCII file with asymmetric errors as a data set.
    load_table : Load a FITS binary file as a data set.
    load_image : Load an image as a data set.
    set_data : Set a data set.
    unpack_ascii : Unpack an ASCII file into a data structure.
    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `filename` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `filename` parameters,
    respectively. The remaining parameters are expected to be
    given as named arguments.
    The column order for the different data types are as follows,
    where ``x`` indicates an independent axis and ``y`` the
    dependent axis.
    +------------+-----------------+--------------------+
    | Identifier | Required Fields |  Optional Fields   |
    +============+=================+====================+
    | Data1D     | x, y            | statistical error, |
    |            |                 | systematic error   |
    +------------+-----------------+--------------------+
    | Data1DInt  | xlo, xhi, y     | statistical error, |
    |            |                 | systematic error   |
    +------------+-----------------+--------------------+
    | Data2D     | x0, x1, y       | shape,             |
    |            |                 | statistical error, |
    |            |                 | systematic error   |
    +------------+-----------------+--------------------+
    | Data2DInt  | x0lo, x1lo,     | shape,             |
    |            | x0hi, x1hi, y   | statistical error, |
    |            |                 | systematic error   |
    +------------+-----------------+--------------------+
    Examples
    --------
    Read in the first two columns of the file, as the independent
    (X) and dependent (Y) columns of the default data set:
    >>> load_ascii('sources.dat')
    Read in the first three columns (the third column is taken to
    be the error on the dependent variable):
    >>> load_ascii('sources.dat', ncols=3)
    Read in from columns 'RMID' and 'SUR_BRI' into data set
    'prof':
    >>> load_ascii('prof', 'rprof.dat',
    ...            colkeys=['RMID', 'SUR_BRI'])
    The first three columns are taken to be the two independent
    axes of a two-dimensional data set (``x0`` and ``x1``) and
    the dependent value (``y``):
    >>> load_ascii('fields.txt', ncols=3,
    ...            dstype=sherpa.astro.data.Data2D)
    When using the Crates I/O library, the file name can include
    CIAO Data Model syntax, such as column selection. This can
    also be done using the ``colkeys`` parameter, as shown above:
    >>> load_ascii('prof',
    ...            'rprof.dat[cols rmid,sur_bri,sur_bri_err]',
    ...            ncols=3)
    """
    # Single-argument form: the first positional argument is really
    # the file name, so shift it over and use the default identifier.
    if filename is None:
        filename = id
        id = None
    data = self.unpack_ascii(filename, ncols=ncols, colkeys=colkeys,
                             dstype=dstype, sep=sep, comment=comment)
    self.set_data(id, data)
# DOC-NOTE: also in sherpa.utils
def unpack_data(self, filename, *args, **kwargs):
    """Create a sherpa data object from a file.
    The object returned by `unpack_data` can be used in a
    `set_data` call. The data types supported are those
    supported by `unpack_pha`, `unpack_image`, `unpack_table`,
    and `unpack_ascii`.
    Parameters
    ----------
    filename
        A file name or a data structure representing the data to
        use, as used by the I/O backend in use by Sherpa: e.g.  a
        ``PHACrateDataset``, ``TABLECrate``, or ``IMAGECrate`` for
        crates, as used by CIAO, or a list of AstroPy HDU objects.
    args
        The arguments supported by `unpack_pha`, `unpack_image`,
        `unpack_table`, and `unpack_ascii`.
    kwargs
        The keyword arguments supported by `unpack_pha`, `unpack_image`,
        `unpack_table`, and `unpack_ascii`.
    Returns
    -------
    instance
        The data set object.
    See Also
    --------
    get_data : Return the data set by identifier.
    load_arrays : Create a data set from array values.
    set_data : Set a data set.
    unpack_arrays : Create a sherpa data object from arrays of data.
    unpack_ascii : Unpack an ASCII file into a data structure.
    unpack_image : Create an image data structure.
    unpack_pha : Create a PHA data structure.
    unpack_table : Unpack a FITS binary file into a data structure.
    Examples
    --------
    Create a data object from the contents of the file "src.dat"
    and use it to create a Sherpa data set called "src":
    >>> dat = unpack_data('src.dat')
    >>> set_data('src', dat)
    """
    # Try each reader in turn, falling through to the next on any
    # failure. Catch Exception rather than using a bare except so
    # that KeyboardInterrupt/SystemExit are not swallowed.
    for unpack in (self.unpack_pha, self.unpack_image,
                   self.unpack_table):
        try:
            return unpack(filename, *args, **kwargs)
        except Exception:
            continue
    # If this errors out then so be it
    return self.unpack_ascii(filename, *args, **kwargs)
def load_ascii_with_errors(self, id, filename=None, colkeys=None, sep=' ',
                           comment='#', func=numpy.average, delta=False):
    """Load an ASCII file with asymmetric errors as a data set.
    Parameters
    ----------
    id : int or str, optional
        The identifier for the data set to use. If not given then
        the default identifier is used, as returned by
        `get_default_id`.
    filename : str
        The name of the file to read in. Selection of the relevant
        column depends on the I/O library in use (Crates or
        AstroPy).
    colkeys : array of str, optional
        An array of the column name to read in. The default is
        ``None``.
    sep : str, optional
        The separator character. The default is ``' '``.
    comment : str, optional
        The comment character. The default is ``'#'``.
    func: python function, optional
        The function used to combine the lo and hi values to estimate
        an error. The function should take two arguments ``(lo, hi)``
        and return a single NumPy array, giving the per-bin error.
        The default function used is numpy.average.
    delta: boolean, optional
        The flag is used to indicate if the asymmetric errors for the
        third and fourth columns are delta values from the second (y)
        column or not.
        The default value is False
    See Also
    --------
    load_ascii: Load an ASCII file as a data set.
    load_arrays : Create a data set from array values.
    load_table : Load a FITS binary file as a data set.
    load_image : Load an image as a data set.
    resample_data : Resample data with asymmetric error bars.
    set_data : Set a data set.
    unpack_ascii : Unpack an ASCII file into a data structure.
    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `filename` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `filename` parameters,
    respectively. The remaining parameters are expected to be
    given as named arguments.
    The column order for the different data types are as follows,
    where ``x`` indicates an independent axis, ``y`` the dependent
    axis, the asymmetric errors ``elo`` and ``ehi``.
    +----------------------+-----------------+--------------------+
    | Identifier           | Required Fields |  Optional Fields   |
    +======================+=================+====================+
    | Data1DAsymmetricErrs | x, y, elo, ehi  |                    |
    +----------------------+-----------------+--------------------+
    Examples
    --------
    Read in the first four columns of the file, as the independent
    (X), dependent (Y), error low (ELO) and error high (EHI) columns
    of the default data set:
    >>> load_ascii_with_errors('sources.dat')
    Read in the first four columns (x, y, elo, ehi) where elo and ehi
    are of the form y - delta_lo and y + delta_hi, respectively.
    >>> load_ascii_with_errors('sources.dat', delta=True)
    Read in the first four columns (x, y, elo, ehi) where elo and ehi
    are of the form delta_lo and delta_hi, respectively.
    >>> def rms(lo, hi):
    ...     return numpy.sqrt(lo * lo + hi * hi)
    ...
    >>> load_ascii_with_errors('sources.dat', func=rms)
    Read in the first four columns (x, y, elo, ehi) where elo and ehi
    are of the form delta_lo and delta_hi, respectively. The `func`
    argument is used to calculate the error based on the elo and ehi
    column values.
    """
    # Single-argument form: treat the first positional value as the
    # file name and use the default data-set identifier.
    if filename is None:
        id, filename = filename, id
    self.set_data(id, self.unpack_ascii(filename, ncols=4,
                                        colkeys=colkeys,
                                        dstype=Data1DAsymmetricErrs,
                                        sep=sep, comment=comment))
    data = self.get_data(id)
    # When delta=False the file holds absolute bounds, so convert
    # them into offsets from y before combining them.
    if not delta:
        data.elo = data.y - data.elo
        data.ehi = data.ehi - data.y
    # numpy.average takes a sequence of arrays (averaged along
    # axis=0); user-supplied functions take the (lo, hi) pair as
    # two separate arguments, per the docstring contract.
    if func is numpy.average:
        staterror = func([data.elo, data.ehi], axis=0)
    else:
        staterror = func(data.elo, data.ehi)
    data.staterror = staterror
def _load_data(self, id, datasets):
    """Store one or more datasets under sequential identifiers.

    Shared implementation used by `load_data` and `load_pha`.

    Parameters
    ----------
    id : int or str, optional
        The identifier for the data set to use. For multi-dataset
        files, currently only PHA2, the id value indicates the
        first dataset: if it is an integer then the numbering
        starts at id, and if a string then a suffix of 1 to n is
        added. If not given then the default identifier is used,
        as returned by `get_default_id`.
    datasets : Data instance or iterable of Data instances
        The data to load, either as a single item or, for
        multiple-dataset files, an iterable of them.
    """
    # A single dataset is stored directly under the given id.
    if not numpy.iterable(datasets):
        self.set_data(id, datasets)
        return

    if id is None:
        id = self.get_default_id()

    # NOTE: len() is taken up front, so 'datasets' must be a sized
    # iterable here (e.g. a list, not a generator).
    ndata = len(datasets)

    # One quirk: a single dataset with a string id ends up as
    # "foo1" rather than "foo" (when id="foo"). DJB thinks we can
    # live with this.
    assigned = []
    for offset, dataset in enumerate(datasets):
        try:
            key = id + offset
        except TypeError:
            # id is assumed to be a string; use a 1-based suffix.
            key = id + str(offset + 1)

        self.set_data(key, dataset)
        assigned.append(key)

    if ndata > 1:
        info("Multiple data sets have been input: " +
             "{}-{}".format(assigned[0], assigned[-1]))
    else:
        info("One data set has been input: {}".format(assigned[0]))
# DOC-NOTE: also in sherpa.utils without the support for
# multiple datasets.
#
def load_data(self, id, filename=None, *args, **kwargs):
    # pylint: disable=W1113
    """Load a data set from a file, guessing its type.

    The file is unpacked by trying, in order, `load_pha`,
    `load_image`, `load_table`, and `load_ascii`, and the
    result is stored under the given identifier.

    .. versionchanged:: 4.13.1
       The id argument is now used to define the first identifier
       when loading in a PHA2 file to match `load_pha` (previously
       the range always started at 1).

    Parameters
    ----------
    id : int or str, optional
        The identifier for the data set to use. For multi-dataset
        files, currently only PHA2, the id value indicates the
        first dataset: an integer id starts the numbering at id,
        while a string id has a suffix of 1 to n added. If not
        given then the default identifier is used, as returned by
        `get_default_id`.
    filename
        A file name or a data structure representing the data to
        use, as used by the I/O backend in use by Sherpa: e.g. a
        ``PHACrateDataset``, ``TABLECrate``, or ``IMAGECrate`` for
        crates, as used by CIAO, or a list of AstroPy HDU objects.
    args
        The arguments supported by `load_pha`, `load_image`,
        `load_table`, and `load_ascii`.
    kwargs
        The keyword arguments supported by `load_pha`,
        `load_image`, `load_table`, and `load_ascii`.

    See Also
    --------
    load_arrays : Create a data set from array values.
    load_ascii : Load an ASCII file as a data set.
    load_image : Load an image as a data set.
    load_pha : Load a PHA data set.
    load_table : Load a FITS binary file as a data set.
    set_data : Set a data set.
    unpack_data : Create a sherpa data object from a file.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use:
    a single un-named argument is taken to be `filename`, while
    two un-named arguments are interpreted as `id` and `filename`,
    respectively. The remaining parameters are expected to be
    given as named arguments.

    Examples
    --------
    >>> load_data('tbl.dat')

    >>> load_data('hist.dat', dstype=Data1DInt)

    >>> load_data('img', 'img.fits')
    >>> load_data('bg', 'img_bg.fits')

    >>> cols = ['rmid', 'sur_bri', 'sur_bri_err']
    >>> load_data(2, 'profile.fits', colkeys=cols)

    """
    # Single-argument form: the value given is the file, not the id.
    if filename is None:
        id, filename = filename, id

    self._load_data(id, self.unpack_data(filename, *args, **kwargs))
def unpack_image(self, arg, coord='logical',
                 dstype=sherpa.astro.data.DataIMG):
    """Create an image data structure.

    Parameters
    ----------
    arg
        Identify the data: a file name, or a data structure
        representing the data to use, as used by the I/O backend
        in use by Sherpa: an ``IMAGECrate`` for crates, as used by
        CIAO, or a list of AstroPy HDU objects.
    coord : { 'logical', 'image', 'physical', 'world', 'wcs' }, optional
        Ensure that the image contains the given coordinate system.
    dstype : optional
        The image class to use. The default is `DataIMG`.

    Returns
    -------
    img
        An instance of the class given by the ``dstype``
        parameter.

    Raises
    ------
    sherpa.utils.err.DataErr
        If the image does not contain the requested coordinate
        system.

    See Also
    --------
    load_image : Load an image as a data set.
    set_data : Set a data set.

    Examples
    --------
    >>> img1 = unpack_img("img.fits")
    >>> set_data(img1)

    >>> img = unpack_img('img.fits', 'physical')

    Read in an image using Crates:

    >>> cr = pycrates.read_file('broad.img')
    >>> idata = unpack_img(cr)

    Read in an image using AstroPy:

    >>> hdus = astropy.io.fits.open('broad.img')
    >>> idata = unpack_img(hdus)

    """
    # All the actual reading is delegated to the I/O backend.
    image = sherpa.astro.io.read_image(arg, coord, dstype)
    return image
def load_image(self, id, arg=None, coord='logical',
               dstype=sherpa.astro.data.DataIMG):
    """Load an image as a data set.

    Parameters
    ----------
    id : int or str, optional
        The identifier for the data set to use. If not given then
        the default identifier is used, as returned by
        `get_default_id`.
    arg
        Identify the image data: a file name, or a data structure
        representing the data to use, as used by the I/O backend
        in use by Sherpa: an ``IMAGECrate`` for crates, as used by
        CIAO, or a list of AstroPy HDU objects.
    coord : { 'logical', 'image', 'physical', 'world', 'wcs' }
        The coordinate system to use. The 'image' option is the
        same as 'logical', and 'wcs' the same as 'world'.
    dstype : optional
        The data class to use. The default is `DataIMG`.

    See Also
    --------
    load_arrays : Create a data set from array values.
    load_ascii : Load an ASCII file as a data set.
    load_table : Load a FITS binary file as a data set.
    set_coord : Set the coordinate system to use for image analysis.
    set_data : Set a data set.
    unpack_image : Create an image data structure.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use:
    a single un-named argument is taken to be `arg`, while two
    un-named arguments are interpreted as `id` and `arg`,
    respectively. The remaining parameters are expected to be
    given as named arguments.

    Examples
    --------
    Load the image from the file "img.fits" into the default data
    set:

    >>> load_image('img.fits')

    Set the 'bg' data set to the contents of the file
    "img_bg.fits":

    >>> load_image('bg', 'img_bg.fits')

    """
    # Single-argument form: the value given is the image, not the id.
    if arg is None:
        id, arg = arg, id

    data = self.unpack_image(arg, coord, dstype)
    self.set_data(id, data)
# DOC-TODO: what does this return when given a PHA2 file?
def unpack_pha(self, arg, use_errors=False):
    """Create a PHA data structure.

    Any instrument or background data sets referenced in the
    header of the PHA file - e.g. with the ANCRFILE, RESPFILE,
    and BACKFILE keywords - will also be loaded.

    Parameters
    ----------
    arg
        Identify the PHA file: a file name, or a data structure
        representing the data to use, as used by the I/O backend in
        use by Sherpa: a ``TABLECrate`` for crates, as used by CIAO,
        or a list of AstroPy HDU objects.
    use_errors : bool, optional
        If ``True`` then the statistical errors are taken from the
        input data, rather than calculated by Sherpa from the
        count values. The default is ``False``.

    Returns
    -------
    pha : a `sherpa.astro.data.DataPHA` instance

    See Also
    --------
    load_pha : Load a file as a PHA data set.
    pack_pha : Convert a PHA data set into a file structure.
    set_data : Set a data set.

    Examples
    --------

    >>> pha1 = unpack_pha("src1.pi")
    >>> pha2 = unpack_pha("field.pi")
    >>> set_data(1, pha1)
    >>> set_bkg(1, pha2)

    Read in a PHA file using Crates:

    >>> cr = pycrates.read_file("src.fits")
    >>> pha = unpack_pha(cr)

    Read in a PHA file using AstroPy:

    >>> hdus = astropy.io.fits.open("src.fits")
    >>> pha = unpack_pha(hdus)

    """
    # Normalize to a Python bool before handing off to the backend.
    use_errors = sherpa.utils.bool_cast(use_errors)
    return sherpa.astro.io.read_pha(arg, use_errors)
# DOC-TODO: what does this return when given a PHA2 file?
def unpack_bkg(self, arg, use_errors=False):
    """Create a PHA data structure for a background data set.

    Any instrument information referenced in the header of the PHA
    file - e.g. with the ANCRFILE and RESPFILE, keywords - will
    also be loaded. Unlike `unpack_pha`, background files will not
    be loaded.

    Parameters
    ----------
    arg
        Identify the PHA file: a file name, or a data structure
        representing the data to use, as used by the I/O backend in
        use by Sherpa: a ``TABLECrate`` for crates, as used by CIAO,
        or a list of AstroPy HDU objects.
    use_errors : bool, optional
        If ``True`` then the statistical errors are taken from the
        input data, rather than calculated by Sherpa from the
        count values. The default is ``False``.

    Returns
    -------
    pha : a `sherpa.astro.data.DataPHA` instance

    See Also
    --------
    load_bkg : Load the background from a file and add it to a PHA data set.
    set_bkg : Set the background for a data set.

    Examples
    --------

    >>> pha1 = unpack_pha("src1.pi")
    >>> pha2 = unpack_bkg("field.pi")
    >>> set_data(1, pha1)
    >>> set_bkg(1, pha2)

    Read in a PHA file using Crates:

    >>> cr = pycrates.read_file("bg.fits")
    >>> pha = unpack_bkg(cr)

    Read in a PHA file using AstroPy:

    >>> hdus = astropy.io.fits.open("bg.fits")
    >>> pha = unpack_bkg(hdus)

    """
    # Normalize to a Python bool; the final True flag tells the
    # backend to skip loading any background files.
    use_errors = sherpa.utils.bool_cast(use_errors)
    return sherpa.astro.io.read_pha(arg, use_errors, True)
# DOC-TODO: how best to include datastack support?
def load_pha(self, id, arg=None, use_errors=False):
    """Load a PHA data set.

    This will load the PHA data and any related information, such
    as ARF, RMF, and background. The background is loaded but
    *not* subtracted. Any grouping information in the file will be
    applied to the data. The quality information is read in, but
    *not* automatically applied. See `subtract` and `ignore_bad`.

    The standard behavior is to create a single data set, but
    multiple data sets can be loaded with this command, as
    described in the `sherpa.astro.datastack` module.

    .. versionchanged:: 4.12.2
       The id argument is now used to define the first identifier
       when loading in a PHA2 file (previously they always used
       the range 1 to number of files).

    Parameters
    ----------
    id : int or str, optional
        The identifier for the data set to use. For PHA2 files,
        that is those that contain multiple datasets, the id value
        indicates the first dataset: if it is an integer then the
        numbering starts at id, and if a string then a suffix of 1
        to n is added. If not given then the default identifier is
        used, as returned by `get_default_id`.
    arg
        Identify the data to read: a file name, or a data structure
        representing the data to use, as used by the I/O backend in
        use by Sherpa: a ``PHACrateDataset`` for crates, as used by
        CIAO, or a list of AstroPy HDU objects.
    use_errors : bool, optional
        If ``True`` then the statistical errors are taken from the
        input data, rather than calculated by Sherpa from the
        count values. The default is ``False``.

    See Also
    --------
    ignore_bad : Exclude channels marked as bad in a PHA data set.
    load_arf : Load an ARF from a file and add it to a PHA data set.
    load_bkg : Load the background from a file and add it to a PHA data set.
    load_rmf : Load a RMF from a file and add it to a PHA data set.
    pack_pha : Convert a PHA data set into a file structure.
    save_pha : Save a PHA data set to a file.
    subtract : Subtract the background estimate from a data set.
    unpack_pha : Create a PHA data structure.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `arg` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `arg` parameters,
    respectively. The remaining parameters are expected to be
    given as named arguments.

    The `minimum_energy` setting of the `ogip` section of the
    Sherpa configuration file determines the behavior when an
    ARF with a minimum energy of 0 is read in. The default is
    to replace the 0 by the value 1e-10, which will also cause
    a warning message to be displayed.

    Examples
    --------
    Load the PHA file 'src.pi' into the default data set, and
    automatically load the ARF, RMF, and background from the files
    pointed to by the ANCRFILE, RESPFILE, and BACKFILE keywords in
    the file. The background is then subtracted and any 'bad
    quality' bins are removed:

    >>> load_pha('src.pi')
    read ARF file src.arf
    read RMF file src.rmf
    read background file src_bkg.pi
    >>> subtract()
    >>> ignore_bad()

    Load two files into data sets 'src' and 'bg':

    >>> load_pha('src', 'x1.fits')
    >>> load_pha('bg', 'x2.fits')

    If a type II PHA data set is loaded, then multiple data sets
    will be created, one for each order. The default behavior is
    to use the dataset identifiers 1 to the number of files.

    >>> clean()
    >>> load_pha('src.pha2')
    >>> list_data_ids()
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]

    If given an identifier as the first argument then this is used
    to start the numbering scheme for PHA2 files. If id is an
    integer then the numbers go from id:

    >>> clean()
    >>> load_pha(20, 'src.pha2')
    >>> list_data_ids()
    [20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]

    If the id is a string then the identifier is formed by adding
    the number of the dataset (starting at 1) to the end of
    id. Note that the `list_data_ids` routine does not guarantee
    an ordering to the output (as shown below):

    >>> clean()
    >>> load_pha('x', 'src.pha2')
    >>> list_data_ids()
    ['x1', 'x10', 'x11', 'x12', 'x2', 'x3', 'x4', 'x5', 'x6',
     'x7', 'x8', 'x9']

    Create the data set from the data read in by Crates:

    >>> pha = pycrates.read_pha('src.pi')
    >>> load_pha(pha)
    read ARF file src.arf
    read RMF file src.rmf
    read background file src_bkg.pi

    Create the data set from the data read in by AstroPy:

    >>> hdus = astropy.io.fits.open('src.pi')
    >>> load_pha(hdus)
    read ARF file src.arf
    read RMF file src.rmf
    read background file src_bkg.pi

    The default behavior is to calculate the errors based on the
    counts values and the choice of statistic -
    e.g. ``chi2gehrels`` or ``chi2datavar`` - but the statistical
    errors from the input file can be used instead by setting
    ``use_errors`` to ``True``:

    >>> load_pha('source.fits', use_errors=True)

    """
    # Single-argument form: the value given is the data, not the id.
    if arg is None:
        id, arg = arg, id

    # unpack_pha returns one dataset or, for a PHA2 file, a list of
    # them; _load_data handles assigning identifiers in both cases.
    phasets = self.unpack_pha(arg, use_errors)
    self._load_data(id, phasets)
def _get_pha_data(self, id):
    """Return the dataset for id, which must be a PHA dataset."""
    dset = self.get_data(id)
    if isinstance(dset, sherpa.astro.data.DataPHA):
        return dset

    raise ArgumentErr('nopha', self._fix_id(id))
def _get_img_data(self, id):
    """Return the dataset for id, which must be an image."""
    dset = self.get_data(id)
    if isinstance(dset, sherpa.astro.data.DataIMG):
        return dset

    raise ArgumentErr('noimg', self._fix_id(id))
# def _read_error(self, filename, *args, **kwargs):
# err = None
# try:
# err = sherpa.astro.io.backend.get_ascii_data(filename, *args,
# **kwargs)[1].pop()
# except:
# try:
# err = sherpa.astro.io.backend.get_table_data(filename, *args,
# **kwargs)[1].pop()
# except:
# try:
# err = sherpa.astro.io.read_image(filename, *args, **kwargs)
# err = err.get_dep()
# except:
# raise
# return err
# DOC-NOTE: also in sherpa.utils
# DOC-TODO: does ncols make sense here? (have removed for now)
#
def load_filter(self, id, filename=None, bkg_id=None, ignore=False,
                ncols=2, *args, **kwargs):
    # pylint: disable=W1113
    """Load the filter array from a file and add to a data set.

    Parameters
    ----------
    id : int or str, optional
        The identifier for the data set to use. If not given then
        the default identifier is used, as returned by
        `get_default_id`.
    filename : str
        The name of the file that contains the filter
        information. This file can be a FITS table or an ASCII
        file. Selection of the relevant column depends on the I/O
        library in use (Crates or AstroPy).
    bkg_id : int or str, optional
        Set if the filter array should be associated with the
        background associated with the data set.
    ignore : bool, optional
        If ``False`` (the default) then include bins with a non-zero
        filter value, otherwise exclude these bins.
    colkeys : array of str, optional
        An array of the column name to read in. The default is
        ``None``.
    sep : str, optional
        The separator character. The default is ``' '``.
    comment : str, optional
        The comment character. The default is ``'#'``.

    See Also
    --------
    get_filter : Return the filter expression for a data set.
    ignore : Exclude data from the fit.
    notice : Include data in the fit.
    save_filter : Save the filter array to a file.
    set_filter : Set the filter array of a data set.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use:
    a single un-named argument is taken to be `filename`, while
    two un-named arguments are interpreted as `id` and `filename`,
    respectively. The remaining parameters are expected to be
    given as named arguments.

    Examples
    --------
    Read in the first column of the file and apply it to the
    default data set:

    >>> load_filter('filt.dat')

    Select the FILTER column of the file:

    >>> load_filter(2, 'filt.dat', colkeys=['FILTER'])

    When using Crates as the I/O library, the above can
    also be written as

    >>> load_filter(2, 'filt.dat[cols filter]')

    Read in a filter for an image. The image must match the size
    of the data and, as ``ignore=True``, pixels with a non-zero
    value are excluded (rather than included):

    >>> load_filter('img', 'filt.img', ignore=True)

    """
    # Single-argument form: the value given is the file, not the id.
    if filename is None:
        id, filename = filename, id

    # The second element of the returned tuple is the column data.
    vals = self._read_user_model(filename, *args, **kwargs)[1]
    self.set_filter(id, vals, bkg_id=bkg_id, ignore=ignore)
# DOC-TODO: does ncols make sense here? (have removed for now)
# DOC-TODO: prob. needs a review as the existing ahelp documentation
# talks about 2 cols, but experimentation suggests 1 col.
#
def load_grouping(self, id, filename=None, bkg_id=None, *args, **kwargs):
    # pylint: disable=W1113
    """Load the grouping scheme from a file and add to a PHA data set.

    This function sets the grouping column but does not
    automatically group the data, since the quality array may also
    need updating. The `group` function will apply the grouping
    information.

    Parameters
    ----------
    id : int or str, optional
        The identifier for the data set to use. If not given then
        the default identifier is used, as returned by
        `get_default_id`.
    filename : str
        The name of the file that contains the grouping
        information. This file can be a FITS table or an ASCII
        file. Selection of the relevant column depends on the I/O
        library in use (Crates or AstroPy).
    bkg_id : int or str, optional
        Set if the grouping scheme should be associated with the
        background associated with the data set.
    colkeys : array of str, optional
        An array of the column name to read in. The default is
        ``None``.
    sep : str, optional
        The separator character. The default is ``' '``.
    comment : str, optional
        The comment character. The default is ``'#'``.

    See Also
    --------
    get_grouping : Return the grouping array for a PHA data set.
    group : Turn on the grouping for a PHA data set.
    load_quality : Load the quality array from a file and add to a PHA data set.
    save_grouping : Save the grouping scheme to a file.
    set_grouping : Apply a set of grouping flags to a PHA data set.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `filename` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `filename` parameters,
    respectively. The remaining parameters are expected to be
    given as named arguments.

    There is no check made to see if the grouping array contains
    valid data.

    Examples
    --------
    When using Crates as the I/O library, select the grouping
    column from the file 'src.pi', and use it to set the
    values in the default data set:

    >>> load_grouping('src.pi[cols grouping]')

    Use the ``colkeys`` option to define the column in the input
    file:

    >>> load_grouping('src.pi', colkeys=['grouping'])

    Load the first column in 'grp.dat' and use it to populate
    the grouping array of the data set called 'core'.

    >>> load_grouping('core', 'grp.dat')

    Use `group_counts` to calculate a grouping scheme for the
    data set labelled 'src1', save this scheme to the file
    'grp.dat', and then load this scheme in for data set
    'src2'.

    >>> group_counts('src1', 10)
    >>> save_grouping('src1', 'grp.dat')
    >>> load_grouping('src2', 'grp.dat', colkeys=['groups'])

    """
    # Single-argument form: the value given is the file, not the id.
    if filename is None:
        id, filename = filename, id

    # The second element of the returned tuple is the column data.
    grouping = self._read_user_model(filename, *args, **kwargs)[1]
    self.set_grouping(id, grouping, bkg_id=bkg_id)
def load_quality(self, id, filename=None, bkg_id=None, *args, **kwargs):
    # pylint: disable=W1113
    """Load the quality array from a file and add to a PHA data set.

    This function sets the quality column but does not
    automatically ignore any columns marked as "bad". Use the
    `ignore_bad` function to apply the new quality information.

    Parameters
    ----------
    id : int or str, optional
        The identifier for the data set to use. If not given then
        the default identifier is used, as returned by
        `get_default_id`.
    filename : str
        The name of the file that contains the quality
        information. This file can be a FITS table or an ASCII
        file. Selection of the relevant column depends on the I/O
        library in use (Crates or AstroPy).
    bkg_id : int or str, optional
        Set if the quality array should be associated with the
        background associated with the data set.
    colkeys : array of str, optional
        An array of the column name to read in. The default is
        ``None``.
    sep : str, optional
        The separator character. The default is ``' '``.
    comment : str, optional
        The comment character. The default is ``'#'``.

    See Also
    --------
    get_quality : Return the quality array for a PHA data set.
    ignore_bad : Exclude channels marked as bad in a PHA data set.
    load_grouping : Load the grouping scheme from a file and add to a PHA data set.
    save_quality : Save the quality array to a file.
    set_quality : Apply a set of quality flags to a PHA data set.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `filename` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `filename` parameters,
    respectively. The remaining parameters are expected to be
    given as named arguments.

    There is no check made to see if the quality array contains
    valid data.

    Examples
    --------
    When using Crates as the I/O library, select the quality
    column from the file 'src.pi', and use it to set the
    values in the default data set:

    >>> load_quality('src.pi[cols quality]')

    Use the ``colkeys`` option to define the column in the input
    file:

    >>> load_quality('src.pi', colkeys=['quality'])

    Load the first column in 'grp.dat' and use it to populate
    the quality array of the data set called 'core'.

    >>> load_quality('core', 'grp.dat')

    """
    # Single-argument form: the value given is the file, not the id.
    if filename is None:
        id, filename = filename, id

    # The second element of the returned tuple is the column data.
    self.set_quality(id,
                     self._read_user_model(filename, *args, **kwargs)[1],
                     bkg_id=bkg_id)
def set_filter(self, id, val=None, bkg_id=None, ignore=False):
    """Set the filter array of a data set.

    Parameters
    ----------
    id : int or str, optional
        The data set to use. If not given then the default
        identifier is used, as returned by `get_default_id`.
    val : array
        The array of filter values (``0`` or ``1``). The size should
        match the array returned by `get_dep`.
    bkg_id : int or str, optional
        Set to identify which background component to set. The
        default value (``None``) means that this is for the source
        component of the data set.
    ignore : bool, optional
        If ``False`` (the default) then include bins with a non-zero
        filter value, otherwise exclude these bins.

    Raises
    ------
    sherpa.utils.err.DataErr
        If the length of ``val`` does not match the data set.

    See Also
    --------
    get_dep : Return the dependent axis of a data set.
    get_filter : Return the filter expression for a data set.
    ignore : Exclude data from the fit.
    load_filter : Load the filter array from a file and add to a data set.
    notice : Include data in the fit.
    save_filter : Save the filter array to a file.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `val` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `val` parameters,
    respectively.

    Examples
    --------
    Ignore those bins with a value less than 20.

    >>> d = get_dep()
    >>> set_filter(d >= 20)

    """
    # Single-argument form: the value given is the filter, not the id.
    if val is None:
        val, id = id, val

    # Renamed from 'filter' to avoid shadowing the builtin.
    flags = numpy.asarray(val, dtype=numpy.bool_)

    d = self.get_data(id)
    if bkg_id is not None:
        d = self.get_bkg(id, bkg_id)

    if numpy.iterable(d.mask):
        # The data already has a mask: combine the new filter with
        # it (OR to include, AND-NOT to exclude).
        if len(d.mask) != len(flags):
            raise DataErr('mismatch', len(d.mask), len(flags))

        if not ignore:
            d.mask |= flags
        else:
            d.mask &= ~flags
    else:
        # No existing mask: the filter (or its inverse) becomes it.
        if len(d.get_y(False)) != len(flags):
            raise DataErr('mismatch', len(d.get_y(False)), len(flags))

        if not ignore:
            d.mask = flags
        else:
            d.mask = ~flags
# DOC-NOTE: also in sherpa.utils
# DOC-TODO: does ncols make sense here? (have removed for now)
def load_staterror(self, id, filename=None, bkg_id=None, *args, **kwargs):
    # pylint: disable=W1113
    """Load the statistical errors from a file.

    Read in a column or image from a file and use the values
    as the statistical errors for a data set. This over rides
    the errors calculated by any statistic, such as
    ``chi2gehrels`` or ``chi2datavar``.

    Parameters
    ----------
    id : int or str, optional
        The identifier for the data set to use. If not given then
        the default identifier is used, as returned by
        `get_default_id`.
    filename : str
        The name of the file to read in. Supported formats depends
        on the I/O library in use (Crates or AstroPy) and the
        type of data set (e.g. 1D or 2D).
    bkg_id : int or str, optional
        Set to identify which background component to set. The
        default value (``None``) means that this is for the source
        component of the data set.
    colkeys : array of str, optional
        An array of the column name to read in. The default is
        ``None``.
    sep : str, optional
        The separator character. The default is ``' '``.
    comment : str, optional
        The comment character. The default is ``'#'``.

    See Also
    --------
    get_staterror : Return the statistical error on the dependent axis of a data set.
    load_syserror : Load the systematic errors from a file.
    set_staterror : Set the statistical errors on the dependent axis of a data set.
    set_stat : Set the statistical method.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use:
    a single un-named argument is taken to be `filename`, while
    two un-named arguments are interpreted as `id` and `filename`,
    respectively. The remaining parameters are expected to be
    given as named arguments.

    Examples
    --------
    Read in the first column from 'tbl.dat':

    >>> load_staterror('tbl.dat')

    Use the column labelled 'col3'

    >>> load_staterror('tbl.dat', colkeys=['col3'])

    When using the Crates I/O library, the file name can include
    CIAO Data Model syntax, such as column selection:

    >>> load_staterror('tbl.dat[cols col3]')

    Read in the first column from the file 'errors.fits' as the
    statistical errors for the 'core' data set:

    >>> load_staterror('core', 'errors.fits')

    The data set labelled 'img' is loaded from the file
    'image.fits' and the statistical errors from 'err.fits'.
    The dimensions of the two images must be the same.

    >>> load_image('img', 'image.fits')
    >>> load_staterror('img', 'err.fits')

    """
    # Single-argument form: the value given is the file, not the id.
    if filename is None:
        id, filename = filename, id

    # The second element of the returned tuple is the column data.
    errvals = self._read_user_model(filename, *args, **kwargs)[1]
    self.set_staterror(id, errvals, bkg_id=bkg_id)
# DOC-NOTE: also in sherpa.utils
# DOC-NOTE: is ncols really 2 here? Does it make sense?
def load_syserror(self, id, filename=None, bkg_id=None, *args, **kwargs):
    # pylint: disable=W1113
    """Load the systematic errors from a file.

    Read in a column or image from a file and use the values
    as the systematic errors for a data set.

    Parameters
    ----------
    id : int or str, optional
        The identifier for the data set to use. If not given then
        the default identifier is used, as returned by
        `get_default_id`.
    filename : str
        The name of the file to read in. Supported formats depends
        on the I/O library in use (Crates or AstroPy) and the
        type of data set (e.g. 1D or 2D).
    bkg_id : int or str, optional
        Set to identify which background component to set. The
        default value (``None``) means that this is for the source
        component of the data set.
    ncols : int, optional
        The number of columns to read in (the first ``ncols`` columns
        in the file).
    colkeys : array of str, optional
        An array of the column name to read in. The default is
        ``None``.
    sep : str, optional
        The separator character. The default is ``' '``.
    comment : str, optional
        The comment character. The default is ``'#'``.

    See Also
    --------
    get_syserror : Return the systematic error on the dependent axis of a data set.
    load_staterror : Load the statistical errors from a file.
    set_syserror : Set the systematic errors on the dependent axis of a data set.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use:
    a single un-named argument is taken to be `filename`, while
    two un-named arguments are interpreted as `id` and `filename`,
    respectively. The remaining parameters are expected to be
    given as named arguments.

    Examples
    --------
    Read in the first column from 'tbl.dat':

    >>> load_syserror('tbl.dat')

    Use the column labelled 'col3'

    >>> load_syserror('tbl.dat', colkeys=['col3'])

    When using the Crates I/O library, the file name can include
    CIAO Data Model syntax, such as column selection:

    >>> load_syserror('tbl.dat[cols col3]')

    Read in the first column from the file 'errors.fits' as the
    systematic errors for the 'core' data set:

    >>> load_syserror('core', 'errors.fits')

    The data set labelled 'img' is loaded from the file
    'image.fits' and the systematic errors from 'syserr.fits'.
    The dimensions of the two images must be the same.

    >>> load_image('img', 'image.fits')
    >>> load_syserror('img', 'syserr.fits')

    """
    # Single-argument form: the value given is the file, not the id.
    if filename is None:
        id, filename = filename, id

    # The second element of the returned tuple is the column data.
    errvals = self._read_user_model(filename, *args, **kwargs)[1]
    self.set_syserror(id, errvals, bkg_id=bkg_id)
# also in sherpa.utils
def set_dep(self, id, val=None, bkg_id=None):
    """Set the dependent axis of a data set.

    Parameters
    ----------
    id : int or str, optional
        The data set to use. If not given then the default
        identifier is used, as returned by `get_default_id`.
    val : array or scalar
        The array of values for the dependent axis. A scalar is
        broadcast to every bin of the data set.
    bkg_id : int or str, optional
        Set to identify which background component to set. The
        default value (``None``) means that this is for the source
        component of the data set.

    See Also
    --------
    dataspace1d : Create the independent axis for a 1D data set.
    dataspace2d : Create the independent axis for a 2D data set.
    get_dep : Return the dependent axis of a data set.
    load_arrays : Create a data set from array values.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `val` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `val` parameters,
    respectively.

    Examples
    --------
    Create a 1D data set with values at (0,4), (2,10), (4,12),
    (6,8), (8,2), and (10,12):

    >>> dataspace1d(0, 10, 2, dstype=Data1D)
    >>> set_dep([4, 10, 12, 8, 2, 12])

    Set the values for the source and background of the data set
    'src':

    >>> set_dep('src', y1)
    >>> set_dep('src', bg1, bkg_id=1)

    """
    # Single-argument form: the value given is the data, not the id.
    if val is None:
        val, id = id, val

    d = self.get_data(id)
    if bkg_id is not None:
        d = self.get_bkg(id, bkg_id)

    # PHA data stores the dependent axis in 'counts' (sized by the
    # channel array); other data types use 'y' (sized by the first
    # independent axis).
    is_pha = isinstance(d, sherpa.astro.data.DataPHA)

    if numpy.iterable(val):
        dep = numpy.asarray(val, SherpaFloat)
    else:
        # Broadcast a scalar to the length of the dataset.
        val = SherpaFloat(val)
        nbins = len(d.channel) if is_pha else len(d.get_indep()[0])
        dep = numpy.array([val] * nbins)

    if is_pha:
        d.counts = dep
    else:
        d.y = dep

set_counts = set_dep
# DOC-NOTE: also in sherpa.utils
def set_staterror(self, id, val=None, fractional=False, bkg_id=None):
    """Set the statistical errors on the dependent axis of a data set.

    These values over-ride the errors calculated by any statistic,
    such as ``chi2gehrels`` or ``chi2datavar``.

    Parameters
    ----------
    id : int or str, optional
       The identifier for the data set to use. If not given then
       the default identifier is used, as returned by
       `get_default_id`.
    val : array or scalar
       The statistical error.
    fractional : bool, optional
       If ``False`` (the default value), then the `val` parameter is
       the absolute value, otherwise the `val` parameter
       represents the fractional error, so the absolute value is
       calculated as ``get_dep() * val`` (and `val` must be
       a scalar).
    bkg_id : int or str, optional
       Set to identify which background component to set. The
       default value (``None``) means that this is for the source
       component of the data set.

    See Also
    --------
    load_staterror : Load the statistical errors from a file.
    load_syserror : Load the systematic errors from a file.
    set_syserror : Set the systematic errors on the dependent axis of a data set.
    get_error : Return the errors on the dependent axis of a data set.

    Notes
    -----
    This function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument it is taken to be
    the `val` parameter; when given two un-named arguments they
    are interpreted as `id` and `val`, respectively.

    Examples
    --------
    Set the statistical error for the default data set to the value
    in ``dys`` (a scalar or an array):

    >>> set_staterror(dys)

    Set the statistical error on the 'core' data set to be 5% of
    the data values:

    >>> set_staterror('core', 0.05, fractional=True)

    """
    # Single un-named argument: treat it as val, not id.
    if val is None:
        val, id = id, val

    data = self.get_data(id)
    if bkg_id is not None:
        data = self.get_bkg(id, bkg_id)

    fractional = sherpa.utils.bool_cast(fractional)

    errs = None
    if numpy.iterable(val):
        errs = numpy.asarray(val, SherpaFloat)
    elif val is not None:
        scale = SherpaFloat(val)
        if fractional:
            # Fractional error: scale the data values.
            errs = scale * data.get_dep()
        else:
            # Absolute scalar error: replicate over all points.
            errs = numpy.array([scale] * len(data.get_dep()))

    data.staterror = errs
# DOC-NOTE: also in sherpa.utils
def set_syserror(self, id, val=None, fractional=False, bkg_id=None):
    """Set the systematic errors on the dependent axis of a data set.

    Parameters
    ----------
    id : int or str, optional
       The identifier for the data set to use. If not given then
       the default identifier is used, as returned by
       `get_default_id`.
    val : array or scalar
       The systematic error.
    fractional : bool, optional
       If ``False`` (the default value), then the `val` parameter is
       the absolute value, otherwise the `val` parameter
       represents the fractional error, so the absolute value is
       calculated as ``get_dep() * val`` (and `val` must be
       a scalar).
    bkg_id : int or str, optional
       Set to identify which background component to set. The
       default value (``None``) means that this is for the source
       component of the data set.

    See Also
    --------
    load_staterror : Load the statistical errors from a file.
    load_syserror : Load the systematic errors from a file.
    set_staterror : Set the statistical errors on the dependent axis of a data set.
    get_error : Return the errors on the dependent axis of a data set.

    Notes
    -----
    This function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument it is taken to be
    the `val` parameter; when given two un-named arguments they
    are interpreted as `id` and `val`, respectively.

    Examples
    --------
    Set the systematic error for the default data set to the value
    in ``dys`` (a scalar or an array):

    >>> set_syserror(dys)

    Set the systematic error on the 'core' data set to be 5% of
    the data values:

    >>> set_syserror('core', 0.05, fractional=True)

    """
    # Single un-named argument: treat it as val, not id.
    if val is None:
        val, id = id, val

    data = self.get_data(id)
    if bkg_id is not None:
        data = self.get_bkg(id, bkg_id)

    fractional = sherpa.utils.bool_cast(fractional)

    errs = None
    if numpy.iterable(val):
        errs = numpy.asarray(val, SherpaFloat)
    elif val is not None:
        scale = SherpaFloat(val)
        if fractional:
            # Fractional error: scale the data values.
            errs = scale * data.get_dep()
        else:
            # Absolute scalar error: replicate over all points.
            errs = numpy.array([scale] * len(data.get_dep()))

    data.syserror = errs
def set_exposure(self, id, exptime=None, bkg_id=None):
    """Change the exposure time of a PHA data set.

    The exposure time of a PHA data set is taken from the
    ``EXPOSURE`` keyword in its header, but it can be changed
    once the file has been loaded.

    Parameters
    ----------
    id : int or str, optional
       The identifier for the data set to use. If not given then
       the default identifier is used, as returned by
       `get_default_id`.
    exptime : num
       The exposure time, in seconds.
    bkg_id : int or str, optional
       Set to identify which background component to set. The
       default value (``None``) means that this is for the source
       component of the data set.

    See Also
    --------
    get_exposure : Return the exposure time of a PHA data set.
    set_areascal : Change the fractional area factor of a PHA data set.
    set_backscal : Change the area scaling of a PHA data set.

    Notes
    -----
    This function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument it is taken to be
    the `exptime` parameter; when given two un-named arguments
    they are interpreted as `id` and `exptime`, respectively. The
    remaining parameters are expected to be given as named
    arguments.

    Examples
    --------
    Increase the exposure time of the default data set by 5 per
    cent.

    >>> etime = get_exposure()
    >>> set_exposure(etime * 1.05)

    Use the EXPOSURE value from the ARF, rather than the
    value from the PHA file, for data set 2:

    >>> set_exposure(2, get_arf(2).exposure)

    Set the exposure time of the second background component
    of the 'jet' data set.

    >>> set_exposure('jet', 12324.45, bkg_id=2)

    """
    # Single un-named argument: treat it as exptime, not id.
    if exptime is None:
        exptime, id = id, exptime

    value = None if exptime is None else SherpaFloat(exptime)

    if bkg_id is not None:
        target = self.get_bkg(id, bkg_id)
    else:
        target = self._get_pha_data(id)
    target.exposure = value
def set_backscal(self, id, backscale=None, bkg_id=None):
    """Change the area scaling of a PHA data set.

    The area scaling factor of a PHA data set is taken from the
    BACKSCAL keyword or column, but it can be changed once the
    file has been loaded.

    Parameters
    ----------
    id : int or str, optional
       The identifier for the data set to use. If not given then
       the default identifier is used, as returned by
       `get_default_id`.
    backscale : number or array
       The scaling factor.
    bkg_id : int or str, optional
       Set to identify which background component to set. The
       default value (``None``) means that this is for the source
       component of the data set.

    See Also
    --------
    get_backscal : Return the area scaling of a PHA data set.
    set_areascal : Change the fractional area factor of a PHA data set.
    set_exposure : Change the exposure time of a PHA data set.

    Notes
    -----
    This function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument it is taken to be
    the `backscale` parameter; when given two un-named arguments
    they are interpreted as `id` and `backscale`, respectively.
    The remaining parameters are expected to be given as named
    arguments.

    """
    # Single un-named argument: treat it as backscale, not id.
    if backscale is None:
        backscale, id = id, backscale

    # BACKSCAL may be a scalar or a per-channel array.
    if numpy.iterable(backscale):
        value = numpy.asarray(backscale)
    elif backscale is not None:
        value = SherpaFloat(backscale)
    else:
        value = None

    if bkg_id is not None:
        target = self.get_bkg(id, bkg_id)
    else:
        target = self._get_pha_data(id)
    target.backscal = value
# DOC-TODO: the description needs improving.
def set_areascal(self, id, area=None, bkg_id=None):
    """Change the fractional area factor of a PHA data set.

    The area scaling factor of a PHA data set is taken from the
    AREASCAL keyword, but it can be changed once the file has been
    loaded.

    Parameters
    ----------
    id : int or str, optional
       The identifier for the data set to use. If not given then
       the default identifier is used, as returned by
       `get_default_id`.
    area : number
       The scaling factor.
    bkg_id : int or str, optional
       Set to identify which background component to set. The
       default value (``None``) means that this is for the source
       component of the data set.

    See Also
    --------
    get_areascal : Return the fractional area factor of a PHA data set.
    set_backscal : Change the area scaling of a PHA data set.
    set_exposure : Change the exposure time of a PHA data set.

    Notes
    -----
    This function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument it is taken to be
    the `area` parameter; when given two un-named arguments they
    are interpreted as `id` and `area`, respectively. The
    remaining parameters are expected to be given as named
    arguments.

    """
    # Single un-named argument: treat it as area, not id.
    if area is None:
        area, id = id, area

    value = None if area is None else SherpaFloat(area)

    if bkg_id is not None:
        target = self.get_bkg(id, bkg_id)
    else:
        target = self._get_pha_data(id)
    target.areascal = value
# DOC-NOTE: also in sherpa.utils, where it does not have
# the bkg_id parameter.
#
def get_staterror(self, id=None, filter=False, bkg_id=None):
    """Return the statistical error on the dependent axis of a data set.

    The function returns the statistical errors on the values
    (dependent axis) of a data set, or its background. These may
    have been set explicitly - either when the data set was
    created or with a call to `set_staterror` - or as defined by
    the chosen fit statistic (such as "chi2gehrels").

    Parameters
    ----------
    id : int or str, optional
       The identifier for the data set to use. If not given then
       the default identifier is used, as returned by
       `get_default_id`.
    filter : bool, optional
       Should the filter attached to the data set be applied to
       the return value or not. The default is ``False``.
    bkg_id : int or str, optional
       Set if the values returned should be from the given
       background component, instead of the source data set.

    Returns
    -------
    staterrors : array
       The statistical error for each data point. This may be
       estimated from the data (e.g. with the ``chi2gehrels``
       statistic) or have been set explicitly (`set_staterror`).
       For PHA data sets, the return array will match the grouping
       scheme applied to the data set. The size of this array
       depends on the `filter` argument.

    Raises
    ------
    sherpa.utils.err.IdentifierErr
       If the data set does not exist.

    See Also
    --------
    get_error : Return the errors on the dependent axis of a data set.
    get_indep : Return the independent axis of a data set.
    get_syserror : Return the systematic errors on the dependent axis of a data set.
    list_data_ids : List the identifiers for the loaded data sets.
    set_staterror : Set the statistical errors on the dependent axis of a data set.

    Notes
    -----
    The default behavior is to not apply any filter defined on the
    independent axes to the results, so that the return value is for
    all points (or bins) in the data set. Set the `filter` argument
    to `True` to apply this filter.

    Examples
    --------
    If not explicitly given, the statistical errors on a data set
    may be calculated from the data values (the independent axis),
    depending on the chosen statistic:

    >>> load_arrays(1, [10, 15, 19], [4, 5, 9])
    >>> set_stat('chi2datavar')
    >>> get_staterror()
    array([ 2.        ,  2.23606798,  3.        ])
    >>> set_stat('chi2gehrels')
    >>> get_staterror()
    array([ 3.17944947,  3.39791576,  4.122499  ])

    If the statistical errors are set - either when the data set
    is created or with a call to `set_staterror` - then these values
    will be used, no matter the statistic:

    >>> load_arrays(1, [10, 15, 19], [4, 5, 9], [2, 3, 5])
    >>> set_stat('chi2datavar')
    >>> get_staterror()
    array([2, 3, 5])
    >>> set_stat('chi2gehrels')
    >>> get_staterror()
    array([2, 3, 5])

    """
    data = self.get_data(id)
    if bkg_id is not None:
        data = self.get_bkg(id, bkg_id)

    # Fall back on the current statistic's error estimate when no
    # explicit errors have been set on the data object.
    return data.get_staterror(filter, self.get_stat().calc_staterror)
# DOC-NOTE: also in sherpa.utils, where it does not have
# the bkg_id parameter.
#
def get_syserror(self, id=None, filter=False, bkg_id=None):
    """Return the systematic error on the dependent axis of a data set.

    The function returns the systematic errors on the values
    (dependent axis) of a data set, or its background. It is an
    error to call it on a data set with no systematic errors
    (which are set with `set_syserror`).

    Parameters
    ----------
    id : int or str, optional
       The identifier for the data set to use. If not given then
       the default identifier is used, as returned by
       `get_default_id`.
    filter : bool, optional
       Should the filter attached to the data set be applied to
       the return value or not. The default is ``False``.
    bkg_id : int or str, optional
       Set if the values returned should be from the given
       background component, instead of the source data set.

    Returns
    -------
    syserrors : array
       The systematic error for each data point. The size of this
       array depends on the `filter` argument.

    Raises
    ------
    sherpa.utils.err.DataErr
       If the data set has no systematic errors.
    sherpa.utils.err.IdentifierErr
       If the data set does not exist.

    See Also
    --------
    get_error : Return the errors on the dependent axis of a data set.
    get_indep : Return the independent axis of a data set.
    get_staterror : Return the statistical errors on the dependent axis of a data set.
    list_data_ids : List the identifiers for the loaded data sets.
    set_syserror : Set the systematic errors on the dependent axis of a data set.

    Notes
    -----
    The default behavior is to not apply any filter defined on the
    independent axes to the results, so that the return value is for
    all points (or bins) in the data set. Set the `filter` argument
    to `True` to apply this filter.

    Examples
    --------
    Return the systematic error for the default data set:

    >>> yerr = get_syserror()

    Return an array that has been filtered to match the data:

    >>> yerr = get_syserror(filter=True)

    Return the filtered errors for data set "core":

    >>> yerr = get_syserror("core", filter=True)

    """
    data = self.get_data(id)
    # Normalize the identifier for use in any error message below.
    id = self._fix_id(id)
    if bkg_id is not None:
        data = self.get_bkg(id, bkg_id)

    errs = data.get_syserror(filter)
    # Unlike statistical errors, there is no fallback estimate.
    if errs is None or not numpy.iterable(errs):
        raise DataErr('nosyserr', id)

    return errs
# DOC-NOTE: also in sherpa.utils, where it does not have
# the bkg_id parameter.
#
def get_error(self, id=None, filter=False, bkg_id=None):
    """Return the errors on the dependent axis of a data set.

    The function returns the total errors (a quadrature addition
    of the statistical and systematic errors) on the values
    (dependent axis) of a data set or its background. The
    individual components can be retrieved with the
    `get_staterror` and `get_syserror` functions.

    Parameters
    ----------
    id : int or str, optional
       The identifier for the data set to use. If not given then
       the default identifier is used, as returned by
       `get_default_id`.
    filter : bool, optional
       Should the filter attached to the data set be applied to
       the return value or not. The default is ``False``.
    bkg_id : int or str, optional
       Set if the values returned should be from the given
       background component, instead of the source data set.

    Returns
    -------
    errors : array
       The error for each data point, formed by adding the
       statistical and systematic errors in quadrature.
       For PHA data sets, the return array will match the grouping
       scheme applied to the data set. The size of this array
       depends on the `filter` argument.

    Raises
    ------
    sherpa.utils.err.IdentifierErr
       If the data set does not exist.

    See Also
    --------
    get_dep : Return the dependent axis of a data set.
    get_staterror : Return the statistical errors on the dependent axis of a data set.
    get_syserror : Return the systematic errors on the dependent axis of a data set.
    list_data_ids : List the identifiers for the loaded data sets.

    Notes
    -----
    The default behavior is to not apply any filter defined on the
    independent axes to the results, so that the return value is for
    all points (or bins) in the data set. Set the `filter` argument
    to `True` to apply this filter.

    Examples
    --------
    Return the error values for the default data set, ignoring any
    filter applied to it:

    >>> err = get_error()

    Ensure that the return values are for the selected (filtered)
    points in the default data set (the return array may be smaller
    than in the previous example):

    >>> err = get_error(filter=True)

    Find the errors for the "core" data set and its two background
    components:

    >>> err = get_error('core', filter=True)
    >>> berr1 = get_error('core', bkg_id=1, filter=True)
    >>> berr2 = get_error('core', bkg_id=2, filter=True)

    """
    data = self.get_data(id)
    if bkg_id is not None:
        data = self.get_bkg(id, bkg_id)

    # The statistic is needed in case no explicit statistical
    # errors have been set on the data object.
    return data.get_error(filter, self.get_stat().calc_staterror)
# DOC-NOTE: also in sherpa.utils
def get_indep(self, id=None, filter=False, bkg_id=None):
    """Return the independent axes of a data set.

    This function returns the coordinates of each point, or pixel,
    in the data set. The `get_axes` function may be preferred in
    some situations.

    Parameters
    ----------
    id : int or str, optional
       The identifier for the data set to use. If not given then
       the default identifier is used, as returned by
       `get_default_id`.
    filter : bool, optional
       Should the filter attached to the data set be applied to
       the return value or not. The default is ``False``.
    bkg_id : int or str, optional
       Set if the values returned should be from the given
       background component, instead of the source data set.

    Returns
    -------
    axis : tuple of arrays
       The independent axis values. These are the values at which
       the model is evaluated during fitting. The values returned
       depend on the coordinate system in use for the data set (as
       set by `set_coord`). For PHA data sets the value returned
       is always in channels, whatever the `set_analysis` setting
       is, and does not follow any grouping setting for the data
       set.

    Raises
    ------
    sherpa.utils.err.IdentifierErr
       If the data set does not exist.

    See Also
    --------
    get_axes : Return information about the independent axes of a data set.
    get_dep : Return the dependent axis of a data set.
    list_data_ids : List the identifiers for the loaded data sets.
    set_coord : Set the coordinate system to use for image analysis.

    Notes
    -----
    For a two-dimensional image, with size n by m pixels, the
    `get_dep` function will return two arrays, each of size n * m,
    which contain the coordinate of the center of each pixel. The
    `get_axes` function will instead return the coordinates of
    each axis separately, i.e. arrays of size n and m.

    Examples
    --------
    For a one-dimensional data set, the X values are returned:

    >>> load_arrays(1, [10, 15, 19], [4, 5, 9], Data1D)
    >>> get_indep()
    (array([10, 15, 19]),)

    For a 2D data set the X0 and X1 values are returned:

    >>> x0 = [10, 15, 12, 19]
    >>> x1 = [12, 14, 10, 17]
    >>> y = [4, 5, 9, -2]
    >>> load_arrays(2, x0, x1, y, Data2D)
    >>> get_indep(2)
    (array([10, 15, 12, 19]), array([12, 14, 10, 17]))

    For PHA data sets the return value is in channel units:

    >>> load_pha('spec', 'src.pi')
    >>> set_analysis('spec', 'energy')
    >>> (chans,) = get_indep('spec')
    >>> chans[0:6]
    array([ 1.,  2.,  3.,  4.,  5.,  6.])

    If the ``filter`` flag is set then the return will be limited to
    the data that is used in the fit:

    >>> notice_id('spec', 0.5, 7)
    >>> (nchans,) = get_indep('spec', filter=True)
    >>> nchans[0:5]
    array([ 35.,  36.,  37.,  38.,  39.])

    For images the pixel coordinates of each pixel are returned,
    as 1D arrays, one value for each pixel:

    >>> load_image('img', 'image.fits')
    >>> (xvals, yvals) = get_indep('img')
    >>> xvals.shape
    (65536,)
    >>> yvals.shape
    (65536,)
    >>> xvals[0:5]
    array([ 1.,  2.,  3.,  4.,  5.])
    >>> yvals[0:5]
    array([ 1.,  1.,  1.,  1.,  1.])

    The coordinate system for image axes is determined by the
    `set_coord` setting for the data set:

    >>> set_coord('img', 'physical')
    >>> (avals, bvals) = get_indep('img')
    >>> avals[0:5]
    array([  16.5,   48.5,   80.5,  112.5,  144.5])

    """
    data = self.get_data(id)
    if bkg_id is not None:
        data = self.get_bkg(id, bkg_id)
    return data.get_indep(filter=filter)
def get_axes(self, id=None, bkg_id=None):
    """Return information about the independent axes of a data set.

    This function returns the coordinates of each point, or pixel,
    in the data set. The `get_indep` function may be preferred in
    some situations.

    Parameters
    ----------
    id : int or str, optional
       The identifier for the data set to use. If not given then
       the default identifier is used, as returned by
       `get_default_id`.
    bkg_id : int or str, optional
       Set if the values returned should be from the given
       background component, instead of the source data set.

    Returns
    -------
    axis : tuple of arrays
       The independent axis values. The difference to `get_dep` is
       that this represents the "alternate grid" for the axis. For
       PHA data, this is the energy grid (E_MIN and E_MAX). For
       image data it is an array for each axis, of the length of
       the axis, using the current coordinate system for the data
       set.

    Raises
    ------
    sherpa.utils.err.IdentifierErr
       If the data set does not exist.

    See Also
    --------
    get_indep : Return the independent axis of a data set.
    list_data_ids : List the identifiers for the loaded data sets.

    Examples
    --------
    For 1D data sets, the "alternate" view is the same as the
    independent axis:

    >>> load_arrays(1, [10, 15, 19], [4, 5, 9], Data1D)
    >>> get_indep()
    array([10, 15, 19])
    >>> get_axes()
    array([10, 15, 19])

    For a PHA data set, the approximate energy grid of the
    channels is returned (this is determined by the EBOUNDS
    extension of the RMF).

    >>> load_pha('core', 'src.pi')
    read ARF file src.arf
    read RMF file src.rmf
    read background file src_bkg.pi
    >>> (chans,) = get_indep()
    >>> (elo, ehi) = get_axes()
    >>> chans[0:5]
    array([ 1.,  2.,  3.,  4.,  5.])
    >>> elo[0:5]
    array([ 0.0073,  0.0146,  0.0292,  0.0438,  0.0584])
    >>> ehi[0:5]
    array([ 0.0146,  0.0292,  0.0438,  0.0584,  0.073 ])

    The image has 101 columns by 108 rows. The `get_indep`
    function returns one-dimensional arrays, for the full dataset,
    whereas `get_axes` returns values for the individual axis:

    >>> load_image('img', 'img.fits')
    >>> get_data('img').shape
    (108, 101)
    >>> set_coord('img', 'physical')
    >>> (x0, x1) = get_indep('img')
    >>> (a0, a1) = get_axes('img')
    >>> (x0.size, x1.size)
    (10908, 10908)
    >>> (a0.size, a1.size)
    (101, 108)
    >>> np.all(x0[:101] == a0)
    True
    >>> np.all(x1[::101] == a1)
    True

    """
    data = self.get_data(id)
    if bkg_id is not None:
        data = self.get_bkg(id, bkg_id)

    # PHA data: return the (approximate) energy grid of the
    # channels, ungrouped.
    if isinstance(data, sherpa.astro.data.DataPHA):
        return data._get_ebins(group=False)

    # 2D/image data: per-axis coordinate arrays.
    if isinstance(data, (sherpa.data.Data2D,
                         sherpa.astro.data.DataIMG)):
        return data.get_axes()

    # 1D data: the alternate grid is just the independent axis.
    return data.get_indep()
# DOC-NOTE: also in sherpa.utils
def get_dep(self, id=None, filter=False, bkg_id=None):
    """Return the dependent axis of a data set.

    This function returns the data values (the dependent axis)
    for each point or pixel in the data set.

    Parameters
    ----------
    id : int or str, optional
       The identifier for the data set to use. If not given then
       the default identifier is used, as returned by
       `get_default_id`.
    filter : bool, optional
       Should the filter attached to the data set be applied to
       the return value or not. The default is ``False``.
    bkg_id : int or str, optional
       Set if the values returned should be from the given
       background component, instead of the source data set.

    Returns
    -------
    axis : array
       The dependent axis values. The model estimate is compared
       to these values during fitting. For PHA data sets, the
       return array will match the grouping scheme applied to
       the data set. This array is one-dimensional, even for
       two dimensional (e.g. image) data.

    Raises
    ------
    sherpa.utils.err.IdentifierErr
       If the data set does not exist.

    See Also
    --------
    get_error : Return the errors on the dependent axis of a data set.
    get_indep : Return the independent axis of a data set.
    get_rate : Return the count rate of a PHA data set.
    list_data_ids : List the identifiers for the loaded data sets.

    Examples
    --------
    >>> load_arrays(1, [10, 15, 19], [4, 5, 9], Data1D)
    >>> get_dep()
    array([4, 5, 9])

    >>> x0 = [10, 15, 12, 19]
    >>> x1 = [12, 14, 10, 17]
    >>> y = [4, 5, 9, -2]
    >>> load_arrays(2, x0, x1, y, Data2D)
    >>> get_dep(2)
    array([4, 5, 9, -2])

    If the ``filter`` flag is set then the return will be limited to
    the data that is used in the fit:

    >>> load_arrays(1, [10, 15, 19], [4, 5, 9])
    >>> ignore_id(1, 17, None)
    >>> get_dep()
    array([4, 5, 9])
    >>> get_dep(filter=True)
    array([4, 5])

    An example with a PHA data set named 'spec':

    >>> notice_id('spec', 0.5, 7)
    >>> yall = get_dep('spec')
    >>> yfilt = get_dep('spec', filter=True)
    >>> yall.size
    1024
    >>> yfilt.size
    446

    For images, the data is returned as a one-dimensional array:

    >>> load_image('img', 'image.fits')
    >>> ivals = get_dep('img')
    >>> ivals.shape
    (65536,)

    """
    d = self.get_data(id)
    if bkg_id is not None:
        d = self.get_bkg(id, bkg_id)

    if isinstance(d, sherpa.astro.data.DataPHA):
        # Temporarily force the PHA data object to report counts
        # rather than a rate. Use try/finally so the _rate flag is
        # restored even if get_y raises.
        old = d._rate
        d._rate = False
        try:
            dep = d.get_y(filter)
        finally:
            d._rate = old
    else:
        dep = d.get_y(filter)

    return dep

get_counts = get_dep
def get_rate(self, id=None, filter=False, bkg_id=None):
    """Return the count rate of a PHA data set.

    Return an array of count-rate values for each bin in the
    data set. The units of the returned values depends on the
    values set by the `set_analysis` routine for the data
    set.

    Parameters
    ----------
    id : int or str, optional
       The identifier for the data set to use. If not given then
       the default identifier is used, as returned by
       `get_default_id`.
    filter : bool, optional
       Should the filter attached to the data set be applied to
       the return value or not. The default is ``False``.
    bkg_id : int or str, optional
       Set if the rate should be taken from the background
       associated with the data set.

    Returns
    -------
    rate : array
       The rate array. The output matches the grouping of the data
       set. The units are controlled by the `set_analysis` setting
       for this data set; that is, the units used in `plot_data`,
       except that the `type` argument to `set_analysis` is ignored.
       The return array will match the grouping scheme applied to
       the data set.

    Raises
    ------
    sherpa.utils.err.ArgumentErr
       If the data set does not contain PHA data.

    See Also
    --------
    get_dep : Return the data for a data set.
    ignore : Exclude data from the fit.
    notice : Include data in the fit.
    plot_data : Plot the data values.
    set_analysis : Set the units used when fitting and displaying spectral data.

    Examples
    --------
    Return the count-rate for the default data set. For a PHA
    data set, where `set_analysis` has not been called, the return
    value will be in units of count/second/keV, and a value for
    each group in the data set is returned.

    >>> rate = get_rate()

    The return value is grouped to match the data, but is not
    filtered (with the default `filter` argument). The data
    set used here has 46 groups in it, but after filtering only has
    40 groups, but the call to `get_rate` returns a 46-element array
    unless `filter` is explicitly set to `True`:

    >>> notice()
    >>> get_rate().size
    46
    >>> ignore(None, 0.5)
    >>> ignore(7, None)
    >>> get_rate().size
    46
    >>> get_rate(filter=True).size
    40

    The rate of data set 2 will be in units of count/s/Angstrom
    and only cover the range 20 to 22 Angstroms:

    >>> set_analysis(2, 'wave')
    >>> notice_id(2, 20, 22)
    >>> r2 = get_rate(2, filter=True)

    The returned rate is now in units of count/s (the return value
    is multiplied by `binwidth^factor`, where `factor` is normally
    0):

    >>> set_analysis(2, 'wave', factor=1)
    >>> r2 = get_rate(2, filter=True)

    Return the count rate for the second background component of
    data set "grating":

    >>> get_rate(id="grating", bkg_id=2)

    """
    d = self._get_pha_data(id)
    if bkg_id is not None:
        d = self.get_bkg(id, bkg_id)

    # Temporarily force the PHA data object to report a count rate.
    # Use try/finally so the _rate flag is restored even if get_y
    # raises.
    old = d._rate
    d._rate = True
    try:
        rate = d.get_y(filter)
    finally:
        d._rate = old

    return rate
# DOC-TODO: how to get the corresponding x bins for this data?
# i.e. what are the X values for these points
def get_specresp(self, id=None, filter=False, bkg_id=None):
    """Return the effective area values for a PHA data set.

    Parameters
    ----------
    id : int or str, optional
       The identifier for the data set to use. If not given then
       the default identifier is used, as returned by
       `get_default_id`.
    filter : bool, optional
       Should the filter attached to the data set be applied to
       the ARF or not. The default is ``False``.
    bkg_id : int or str, optional
       Set if the ARF should be taken from a background set
       associated with the data set.

    Returns
    -------
    arf : array
       The effective area values for the data set (or background
       component).

    Examples
    --------
    Return the effective-area values for the default data set:

    >>> arf = get_specresp()

    Return the area for the second background component of the
    data set with the id "eclipse":

    >>> barf = get_specresp("eclipse", bkg_id=2)

    """
    data = self._get_pha_data(id)
    if bkg_id is not None:
        data = self.get_bkg(id, bkg_id)
    return data.get_specresp(filter)
def get_exposure(self, id=None, bkg_id=None):
    """Return the exposure time of a PHA data set.

    The exposure time of a PHA data set is taken from the
    EXPOSURE keyword in its header, but it can be changed
    once the file has been loaded.

    Parameters
    ----------
    id : int or str, optional
       The identifier for the data set to use. If not given then
       the default identifier is used, as returned by
       `get_default_id`.
    bkg_id : int or str, optional
       Set to identify which background component to use. The
       default value (``None``) means that the time is for the
       source component of the data set.

    Returns
    -------
    exposure : number
       The exposure time, in seconds.

    See Also
    --------
    get_areascal : Return the fractional area factor of a PHA data set.
    get_backscal : Return the area scaling of a PHA data set.
    set_exposure : Change the exposure time of a PHA data set.

    Examples
    --------
    Return the exposure time for the default data set.

    >>> t = get_exposure()

    Return the exposure time for the data set with identifier 2:

    >>> t2 = get_exposure(2)

    Return the exposure time for the first background component
    of data set "core":

    >>> tbkg = get_exposure('core', bkg_id=1)

    """
    if bkg_id is None:
        return self._get_pha_data(id).exposure
    return self.get_bkg(id, bkg_id).exposure
def get_backscal(self, id=None, bkg_id=None):
    """Return the BACKSCAL scaling of a PHA data set.

    Return the BACKSCAL setting [1]_ for the source or background
    component of a PHA data set.

    Parameters
    ----------
    id : int or str, optional
       The identifier for the data set to use. If not given then
       the default identifier is used, as returned by
       `get_default_id`.
    bkg_id : int or str, optional
       Set to identify which background component to use. The
       default value (``None``) means that the value is for the
       source component of the data set.

    Returns
    -------
    backscal : number or ndarray
       The BACKSCAL value, which can be a scalar or a 1D array.

    See Also
    --------
    get_areascal : Return the fractional area factor of a PHA data set.
    get_bkg_scale : Return the background scaling factor for a PHA data set.
    set_backscal : Change the area scaling of a PHA data set.

    Notes
    -----
    The BACKSCAL value can be defined as the ratio of the area of
    the source (or background) extraction region in image pixels
    to the total number of image pixels. The fact that there is no
    ironclad definition for this quantity does not matter so long
    as the value for a source dataset and its associated
    background dataset are defined in the similar manner, because
    only the ratio of source and background BACKSCAL values is
    used. It can be a scalar or be an array.

    References
    ----------
    .. [1] "The OGIP Spectral File Format", Arnaud, K. & George, I.
           http://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/spectra/ogip_92_007/ogip_92_007.html

    Examples
    --------
    >>> get_backscal()
    7.8504301607718007e-06
    >>> get_backscal(bkg_id=1)
    0.00022745132446289

    """
    if bkg_id is None:
        return self._get_pha_data(id).backscal
    return self.get_bkg(id, bkg_id).backscal
def get_bkg_scale(self, id=None, bkg_id=1, units='counts',
group=True, filter=False):
"""Return the background scaling factor for a background data set.
Return the factor applied to the background component to scale
it to match it to the source, either when subtracting the
background (units='counts'), or fitting it simultaneously
(units='rate').
.. versionchanged:: 4.12.2
The bkg_id, counts, group, and filter parameters have been
added and the routine no-longer calculates the average
scaling for all the background components but just for the
given component.
Parameters
----------
id : int or str, optional
The identifier for the data set to use. If not given then
the default identifier is used, as returned by
`get_default_id`.
bkg_id : int or str, optional
Set to identify which background component to use. The
default value is 1.
units : {'counts', 'rate'}, optional
The correction is applied to a model defined as counts, the
default, or a rate. The latter should be used when
calculating the correction factor for adding the background
data to the source aperture.
group : bool, optional
Should the values be grouped to match the data?
filter : bool, optional
Should the values be filtered to match the data?
Returns
-------
ratio : number or array
The scaling factor. The result can vary per channel, in which case
an array is returned.
See Also
--------
get_areascal : Return the fractional area factor of a PHA data set.
get_backscal : Return the area scaling factor for a PHA data set.
set_backscal : Change the area scaling of a PHA data set.
set_full_model : Define the convolved model expression for a data set.
set_bkg_full_model : Define the convolved background model expression for a PHA data set.
Notes
-----
The scale factor when units='counts' is::
exp_src * bscale_src * areascal_src /
(exp_bgnd * bscale_bgnd * areascal_ngnd) /
nbkg
where ``exp_x``, ``bscale_x``. and ``areascal_x`` are the
exposure, BACKSCAL, and AREASCAL values for the source
(``x=src``) and background (``x=bgnd``) regions, respectively,
and ``nbkg`` is the number of background datasets associated
with the source aperture. When units='rate', the exposure and
areascal corrections are not included.
Examples
--------
Return the background-scaling factor for the default dataset (this
assumes there's only one background component).
>>> get_bkg_scale()
0.034514770047217924
Return the factor for dataset "pi":
>>> get_bkg_scale('pi')
0.034514770047217924
Calculate the factors for the first two background components
of the default dataset, valid for combining the source
and background models to fit the source aperture:
>>> scale1 = get_bkg_scale(units='rate')
>>> scale2 = get_bkg_scale(units='rate', bkg_id=2)
"""
dset = self._get_pha_data(id)
scale = dset.get_background_scale(bkg_id, units=units,
group=group, filter=filter)
if scale is None:
# TODO: need to add bkg_id?
raise DataErr('nobkg', self._fix_id(id))
return scale
def get_areascal(self, id=None, bkg_id=None):
"""Return the fractional area factor of a PHA data set.
Return the AREASCAL setting [1]_ for the source or background
component of a PHA data set.
Parameters
----------
id : int or str, optional
The identifier for the data set to use. If not given then
the default identifier is used, as returned by
`get_default_id`.
bkg_id : int or str, optional
Set to identify which background component to use. The
default value (``None``) means that the value is for the
source component of the data set.
Returns
-------
areascal : number or ndarray
The AREASCAL value, which can be a scalar or a 1D array.
See Also
--------
get_backscal : Return the area scaling of a PHA data set.
set_areascal : Change the fractional area factor of a PHA data set.
Notes
-----
The fractional area scale is normally set to 1, with the ARF used
to scale the model.
References
----------
.. [1] "The OGIP Spectral File Format", Arnaud, K. & George, I.
http://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/spectra/ogip_92_007/ogip_92_007.html
Examples
--------
Return the AREASCAL value for the default data set:
>>> get_areascal()
Return the AREASCAL value for the first background component
of dataset 2:
>>> get_areascal(id=2, bkg_id=1)
"""
if bkg_id is not None:
return self.get_bkg(id, bkg_id).areascal
return self._get_pha_data(id).areascal
    def _save_type(self, objtype, id, filename, bkg_id=None, **kwargs):
        """Write out a derived quantity (model, source, resid, ...) to file.

        Dispatches to ``get_<objtype>_image`` for 2D/image data and to
        ``get_[bkg_]<objtype>_plot`` for everything else, then writes
        the result with the astro I/O backend.

        Parameters
        ----------
        objtype : str
            The quantity to save (e.g. 'model', 'source', 'resid',
            'delchi'); used to build the getter name looked up below.
        id : int, str, or None
            The data-set identifier (may actually hold the filename;
            see the argument swap below).
        filename : str or None
            The output file name. If ``None``, the single-argument
            calling convention was used and `id` holds the filename.
        bkg_id : int or str, optional
            If set, operate on this background component instead of
            the source data.
        **kwargs
            Passed through to the I/O layer (e.g. ascii, clobber).
        """
        # Interactive calling convention: save_xxx('file') means the
        # single un-named argument is the filename, not the id.
        if filename is None:
            id, filename = filename, id
        _check_type(filename, string_types, 'filename', 'a string')
        d = self.get_data(id)
        if bkg_id is not None:
            d = self.get_bkg(id, bkg_id)
        # Image-like data is written out as an image rather than as
        # column arrays.
        if isinstance(d, (sherpa.astro.data.DataIMG,
                          sherpa.astro.data.DataIMGInt,
                          sherpa.data.Data2D, sherpa.data.Data2DInt)):
            # Keep the original dependent axis so it can be restored
            # after temporarily replacing it with the derived values.
            backup = d.y
            if objtype == 'delchi':
                raise AttributeError("save_delchi() does not apply for images")
            imgtype = getattr(self, 'get_' + objtype + '_image', None)
            if imgtype is None:
                raise AttributeError("'get_%s_image()' not found" % objtype)
            obj = imgtype(id)
            try:
                # write out the array using the source dataset,
                # include WCS etc.
                d.y = obj.y.ravel()
                sherpa.astro.io.write_image(filename, d, **kwargs)
            finally:
                # Restore the data even if the write fails.
                d.y = backup
            return
        # 1D case: pick the matching plot object, using the background
        # variant of the getter when a background component was asked for.
        funcname = 'get_'
        if bkg_id is not None:
            funcname += 'bkg_'
        plottype = getattr(self, funcname + objtype + '_plot', None)
        if plottype is None:
            raise AttributeError("'%s%s_plot()' not found" % (funcname,
                                                              objtype))
        obj = plottype(id)
        if bkg_id is not None:
            obj = plottype(id, bkg_id=bkg_id)
        args = None
        fields = None
        # if type(d) in (sherpa.data.Data1DInt, sherpa.astro.data.DataPHA):
        #     args = [obj.xlo, obj.xhi, obj.y]
        #     fields = ["XLO", "XHI", str(objtype).upper()]
        # PHA model/source plots are defined on bin edges, so write the
        # low/high columns; everything else gets a single X column.
        if isinstance(d, sherpa.astro.data.DataPHA) and \
           objtype in ('model', 'source'):
            args = [obj.xlo, obj.xhi, obj.y]
            fields = ["XLO", "XHI", str(objtype).upper()]
        else:
            args = [obj.x, obj.y]
            fields = ["X", str(objtype).upper()]
        sherpa.astro.io.write_arrays(filename, args, fields=fields, **kwargs)
# To fix bug report 13536, save many kinds of data to ASCII by default,
# and let user override if they want FITS (or vice-versa). The new defaults
# as of CIAO 4.6 are:
#
# ascii = False (i.e., write to FITS):
#
# save_pha
# save_image
# save_table
#
# ascii = True (i.e., write to ASCII):
#
# save_arrays
# save_source
# save_model
# save_resid
# save_delchi
# save_filter
# save_staterror
# save_syserror
# save_error
# save_grouping
# save_quality
# save_data
#
# My logic is that in the former group, you are focused on writing to a
# specific kind of file (PHA, image or table) so use FITS by default.
# In the latter group, you are not focused on file type, but on some
# data or model attribute you want to write out and read into some other
# program with ease. ASCII is probably better for that.
# SMD 05/15/13
#
# DOC-NOTE: also in sherpa.utils with a different interface
def save_arrays(self, filename, args, fields=None, ascii=True,
clobber=False):
"""Write a list of arrays to a file.
Parameters
----------
filename : str
The name of the file to write the array to.
args : array of arrays
The arrays to write out.
fields : array of str
The column names (should match the size of `args`).
ascii : bool, optional
If ``False`` then the data is written as a FITS format binary
table. The default is ``True``. The exact format of the
output file depends on the I/O library in use (Crates or
AstroPy).
clobber : bool, optional
This flag controls whether an existing file can be
overwritten (``True``) or if it raises an exception (``False``,
the default setting).
Raises
------
sherpa.utils.err.IOErr
If `filename` already exists and `clobber` is ``False``.
See Also
--------
save_data : Save the data to a file.
save_image : Save the pixel values of a 2D data set to a file.
save_table : Save a data set to a file as a table.
Examples
--------
Write the x and y columns from the default data set to the
file 'src.dat':
>>> x = get_indep()
>>> y = get_dep()
>>> save_arrays('src.dat', [x, y])
Use the column names "r" and "surbri" for the columns:
>>> save_arrays('prof.fits', [x,y], fields=["r", "surbri"],
... ascii=False, clobber=True)
"""
clobber = sherpa.utils.bool_cast(clobber)
ascii = sherpa.utils.bool_cast(ascii)
sherpa.astro.io.write_arrays(filename, args, fields=fields,
ascii=ascii, clobber=clobber)
# DOC-NOTE: also in sherpa.utils with a different API
    def save_source(self, id, filename=None, bkg_id=None, ascii=False,
                    clobber=False):
        """Save the model values to a file.

        The model is evaluated on the grid of the data set, but does
        not include any instrument response (such as a PSF or ARF and
        RMF).

        Parameters
        ----------
        id : int or str, optional
            The identifier for the data set to use. If not given then
            the default identifier is used, as returned by
            `get_default_id`.
        filename : str
            The name of the file to write the array to. The format
            is determined by the `ascii` argument.
        bkg_id : int or str, optional
            Set if the background model should be written out
            rather than the source.
        ascii : bool, optional
            If ``False`` then the data is written as a FITS format binary
            table. The default is ``False``. The exact format of the
            output file depends on the I/O library in use (Crates or
            AstroPy).
        clobber : bool, optional
            This flag controls whether an existing file can be
            overwritten (``True``) or if it raises an exception
            (``False``, the default setting).

        Raises
        ------
        sherpa.utils.err.IdentifierErr
            If no model has been set for this data set.
        sherpa.utils.err.IOErr
            If `filename` already exists and `clobber` is ``False``.

        See Also
        --------
        save_data : Save the data to a file.
        save_model : Save the model values to a file.
        set_model : Set the source model expression for a data set.
        set_full_model : Define the convolved model expression for a data set.

        Notes
        -----
        The function does not follow the normal Python standards for
        parameter use, since it is designed for easy interactive use.
        When called with a single un-named argument, it is taken to be
        the `filename` parameter. If given two un-named arguments, then
        they are interpreted as the `id` and `filename` parameters,
        respectively. The remaining parameters are expected to be
        given as named arguments.

        The output file contains the columns ``X`` and ``SOURCE`` for 1D
        data sets. The residuals array respects any filter or (for PHA
        files), grouping settings.

        Examples
        --------
        Write the model values for the default data set to the file
        "model.fits":

        >>> save_source('model.fits')

        Write the model from the data set 'jet' to the ASCII file
        "model.dat":

        >>> save_source('jet', "model.dat", ascii=True)

        For 2D (image) data sets, the model is written out as an
        image:

        >>> save_source('img', 'model.img')

        """
        clobber = sherpa.utils.bool_cast(clobber)
        ascii = sherpa.utils.bool_cast(ascii)
        self._save_type('source', id, filename, ascii=ascii, clobber=clobber,
                        bkg_id=bkg_id)
# DOC-NOTE: also in sherpa.utils with a different API
    def save_model(self, id, filename=None, bkg_id=None, ascii=False,
                   clobber=False):
        """Save the model values to a file.

        The model is evaluated on the grid of the data set, including
        any instrument response (such as a PSF or ARF and RMF).

        Parameters
        ----------
        id : int or str, optional
            The identifier for the data set to use. If not given then
            the default identifier is used, as returned by
            `get_default_id`.
        filename : str
            The name of the file to write the array to. The format
            is determined by the `ascii` argument.
        bkg_id : int or str, optional
            Set if the background model should be written out
            rather than the source.
        ascii : bool, optional
            If ``False`` then the data is written as a FITS format binary
            table. The default is ``False``. The exact format of the
            output file depends on the I/O library in use (Crates or
            AstroPy).
        clobber : bool, optional
            This flag controls whether an existing file can be
            overwritten (``True``) or if it raises an exception
            (``False``, the default setting).

        Raises
        ------
        sherpa.utils.err.IdentifierErr
            If no model has been set for this data set.
        sherpa.utils.err.IOErr
            If `filename` already exists and `clobber` is ``False``.

        See Also
        --------
        save_data : Save the data to a file.
        save_source : Save the model values to a file.
        set_model : Set the source model expression for a data set.
        set_full_model : Define the convolved model expression for a data set.

        Notes
        -----
        The function does not follow the normal Python standards for
        parameter use, since it is designed for easy interactive use.
        When called with a single un-named argument, it is taken to be
        the `filename` parameter. If given two un-named arguments, then
        they are interpreted as the `id` and `filename` parameters,
        respectively. The remaining parameters are expected to be
        given as named arguments.

        The output file contains the columns ``X`` and ``MODEL`` for 1D
        data sets. The residuals array respects any filter or (for PHA
        files), grouping settings.

        Examples
        --------
        Write the model values for the default data set to the file
        "model.fits":

        >>> save_model('model.fits')

        Write the model from the data set 'jet' to the ASCII file
        "model.dat":

        >>> save_model('jet', "model.dat", ascii=True)

        For 2D (image) data sets, the model is written out as an
        image:

        >>> save_model('img', 'model.img')

        """
        clobber = sherpa.utils.bool_cast(clobber)
        ascii = sherpa.utils.bool_cast(ascii)
        self._save_type('model', id, filename, ascii=ascii, clobber=clobber,
                        bkg_id=bkg_id)
# DOC-NOTE: also in sherpa.utils with a different API
    def save_resid(self, id, filename=None, bkg_id=None, ascii=False,
                   clobber=False):
        """Save the residuals (data-model) to a file.

        Parameters
        ----------
        id : int or str, optional
            The identifier for the data set to use. If not given then
            the default identifier is used, as returned by
            `get_default_id`.
        filename : str
            The name of the file to write the array to. The format
            is determined by the `ascii` argument.
        bkg_id : int or str, optional
            Set if the background residuals should be written out
            rather than the source.
        ascii : bool, optional
            If ``False`` then the data is written as a FITS format binary
            table. The default is ``False``. The exact format of the
            output file depends on the I/O library in use (Crates or
            AstroPy).
        clobber : bool, optional
            This flag controls whether an existing file can be
            overwritten (``True``) or if it raises an exception
            (``False``, the default setting).

        Raises
        ------
        sherpa.utils.err.IdentifierErr
            If no model has been set for this data set.
        sherpa.utils.err.IOErr
            If `filename` already exists and `clobber` is ``False``.

        See Also
        --------
        save_data : Save the data to a file.
        save_delchi : Save the ratio of residuals (data-model) to error to a file.

        Notes
        -----
        The function does not follow the normal Python standards for
        parameter use, since it is designed for easy interactive use.
        When called with a single un-named argument, it is taken to be
        the `filename` parameter. If given two un-named arguments, then
        they are interpreted as the `id` and `filename` parameters,
        respectively. The remaining parameters are expected to be
        given as named arguments.

        The output file contains the columns ``X`` and ``RESID``. The
        residuals array respects any filter or (for PHA files),
        grouping settings.

        Examples
        --------
        Write the residuals to the file "resid.fits":

        >>> save_resid('resid.fits')

        Write the residuals from the data set 'jet' to the
        ASCII file "resid.dat":

        >>> save_resid('jet', "resid.dat", ascii=True)

        """
        clobber = sherpa.utils.bool_cast(clobber)
        ascii = sherpa.utils.bool_cast(ascii)
        self._save_type('resid', id, filename, ascii=ascii, clobber=clobber,
                        bkg_id=bkg_id)
# DOC-NOTE: also in sherpa.utils with a different API
    def save_delchi(self, id, filename=None, bkg_id=None, ascii=True,
                    clobber=False):
        """Save the ratio of residuals (data-model) to error to a file.

        Parameters
        ----------
        id : int or str, optional
            The identifier for the data set to use. If not given then
            the default identifier is used, as returned by
            `get_default_id`.
        filename : str
            The name of the file to write the array to. The format
            is determined by the `ascii` argument.
        bkg_id : int or str, optional
            Set if the background residuals should be written out
            rather than the source.
        ascii : bool, optional
            If ``False`` then the data is written as a FITS format binary
            table. The default is ``True``. The exact format of the
            output file depends on the I/O library in use (Crates or
            AstroPy).
        clobber : bool, optional
            This flag controls whether an existing file can be
            overwritten (``True``) or if it raises an exception
            (``False``, the default setting).

        Raises
        ------
        sherpa.utils.err.IdentifierErr
            If no model has been set for this data set.
        sherpa.utils.err.IOErr
            If `filename` already exists and `clobber` is ``False``.

        See Also
        --------
        save_data : Save the data to a file.
        save_resid : Save the residuals (data-model) to a file.

        Notes
        -----
        The function does not follow the normal Python standards for
        parameter use, since it is designed for easy interactive use.
        When called with a single un-named argument, it is taken to be
        the `filename` parameter. If given two un-named arguments, then
        they are interpreted as the `id` and `filename` parameters,
        respectively. The remaining parameters are expected to be
        given as named arguments.

        The output file contains the columns ``X`` and ``DELCHI``. The
        residuals array respects any filter or (for PHA files),
        grouping settings.

        Examples
        --------
        Write the residuals to the file "delchi.dat":

        >>> save_delchi('delchi.dat')

        Write the residuals from the data set 'jet' to the
        FITS file "delchi.fits":

        >>> save_delchi('jet', "delchi.fits", ascii=False)

        """
        clobber = sherpa.utils.bool_cast(clobber)
        ascii = sherpa.utils.bool_cast(ascii)
        self._save_type('delchi', id, filename, ascii=ascii, clobber=clobber,
                        bkg_id=bkg_id)
# DOC-NOTE: also in sherpa.utils with a different interface
    def save_filter(self, id, filename=None, bkg_id=None, ascii=True,
                    clobber=False):
        """Save the filter array to a file.

        Parameters
        ----------
        id : int or str, optional
            The identifier for the data set to use. If not given then
            the default identifier is used, as returned by
            `get_default_id`.
        filename : str
            The name of the file to write the array to. The format
            is determined by the `ascii` argument.
        bkg_id : int or str, optional
            Set if the background should be written out rather
            than the source.
        ascii : bool, optional
            If ``False`` then the data is written as a FITS
            format binary table. The default is ``True``. The
            exact format of the output file depends on the
            I/O library in use (Crates or AstroPy).
        clobber : bool, optional
            This flag controls whether an existing file can be
            overwritten (``True``) or if it raises an exception
            (``False``, the default setting).

        Raises
        ------
        sherpa.utils.err.DataErr
            If the data set has not been filtered.
        sherpa.utils.err.IOErr
            If `filename` already exists and `clobber` is ``False``.

        See Also
        --------
        load_filter : Load the filter array from a file and add to a data set.
        save_data : Save the data to a file.

        Notes
        -----
        The function does not follow the normal Python standards for
        parameter use, since it is designed for easy interactive use.
        When called with a single un-named argument, it is taken to be
        the `filename` parameter. If given two un-named arguments, then
        they are interpreted as the `id` and `filename` parameters,
        respectively. The remaining parameters are expected to be
        given as named arguments.

        The output file contains the columns ``X`` and ``FILTER``.

        Examples
        --------
        Write the filter from the default data set as an ASCII file:

        >>> save_filter('filt.dat')

        Write the filter for data set 'src' to a FITS format file:

        >>> save_filter('src', 'filter.fits', ascii=False)

        """
        clobber = sherpa.utils.bool_cast(clobber)
        ascii = sherpa.utils.bool_cast(ascii)
        # Interactive calling convention: a single un-named argument
        # is the filename, not the id.
        if filename is None:
            id, filename = filename, id
        _check_type(filename, string_types, 'filename', 'a string')
        id = self._fix_id(id)
        if bkg_id is not None:
            d = self.get_bkg(id, bkg_id)
        else:
            d = self.get_data(id)
        # Leave this check as d.mask is False since d.mask need not be a boolean
        # and we want different errors if mask is True or False (and leave as
        # the iterable check to catch 'd.mask' is True or any other value that
        # could cause the following code to fall over).
        #
        if d.mask is False:
            raise DataErr('notmask')
        if not numpy.iterable(d.mask):
            raise DataErr('nomask', id)
        # PHA data uses the (grouped) energy bins as the X axis;
        # everything else uses the unfiltered independent axis.
        if isinstance(d, sherpa.astro.data.DataPHA):
            x = d._get_ebins(group=True)[0]
        else:
            x = d.get_indep(filter=False)[0]
        # Write the mask as 0/1 integers rather than booleans.
        mask = numpy.asarray(d.mask, int)
        self.save_arrays(filename, [x, mask], fields=['X', 'FILTER'],
                         ascii=ascii, clobber=clobber)
# DOC-NOTE: also in sherpa.utils with a different interface
    def save_staterror(self, id, filename=None, bkg_id=None, ascii=True,
                       clobber=False):
        """Save the statistical errors to a file.

        If the statistical errors have not been set explicitly, then
        the values calculated by the statistic - such as ``chi2gehrels``
        or ``chi2datavar`` - will be used.

        Parameters
        ----------
        id : int or str, optional
            The identifier for the data set to use. If not given then
            the default identifier is used, as returned by
            `get_default_id`.
        filename : str
            The name of the file to write the array to. The format
            is determined by the `ascii` argument.
        bkg_id : int or str, optional
            Set if the background should be written out rather
            than the source.
        ascii : bool, optional
            If ``False`` then the data is written as a FITS
            format binary table. The default is ``True``. The
            exact format of the output file depends on the
            I/O library in use (Crates or AstroPy).
        clobber : bool, optional
            This flag controls whether an existing file can be
            overwritten (``True``) or if it raises an exception
            (``False``, the default setting).

        Raises
        ------
        sherpa.utils.err.IOErr
            If `filename` already exists and `clobber` is ``False``.

        See Also
        --------
        load_staterror : Load the statistical errors from a file.
        save_error : Save the errors to a file.
        save_syserror : Save the systematic errors to a file.

        Notes
        -----
        The function does not follow the normal Python standards for
        parameter use, since it is designed for easy interactive use.
        When called with a single un-named argument, it is taken to be
        the `filename` parameter. If given two un-named arguments, then
        they are interpreted as the `id` and `filename` parameters,
        respectively. The remaining parameters are expected to be
        given as named arguments.

        The output file contains the columns ``X`` and ``STAT_ERR``.

        Examples
        --------
        Write out the statistical errors from the default data set to the
        file 'errs.dat'.

        >>> save_staterror('errs.dat')

        Over-write the file if it already exists, and take the data
        from the data set "jet":

        >>> save_staterror('jet', 'err.out', clobber=True)

        Write the data out in FITS format:

        >>> save_staterror('staterr.fits', ascii=False)

        """
        clobber = sherpa.utils.bool_cast(clobber)
        ascii = sherpa.utils.bool_cast(ascii)
        # Interactive calling convention: a single un-named argument
        # is the filename, not the id.
        if filename is None:
            id, filename = filename, id
        _check_type(filename, string_types, 'filename', 'a string')
        d = self.get_data(id)
        if bkg_id is not None:
            d = self.get_bkg(id, bkg_id)
        id = self._fix_id(id)
        # PHA data uses the (grouped) energy bins as the X axis;
        # everything else uses the unfiltered independent axis.
        if isinstance(d, sherpa.astro.data.DataPHA):
            x = d._get_ebins(group=True)[0]
        else:
            x = d.get_indep(filter=False)[0]
        err = self.get_staterror(id, filter=False, bkg_id=bkg_id)
        self.save_arrays(filename, [x, err], fields=['X', 'STAT_ERR'],
                         ascii=ascii, clobber=clobber)
# DOC-NOTE: also in sherpa.utils with a different interface
    def save_syserror(self, id, filename=None, bkg_id=None, ascii=True,
                      clobber=False):
        """Save the systematic errors to a file.

        Parameters
        ----------
        id : int or str, optional
            The identifier for the data set to use. If not given then
            the default identifier is used, as returned by
            `get_default_id`.
        filename : str
            The name of the file to write the array to. The format
            is determined by the `ascii` argument.
        bkg_id : int or str, optional
            Set if the background should be written out rather
            than the source.
        ascii : bool, optional
            If ``False`` then the data is written as a FITS
            format binary table. The default is ``True``. The
            exact format of the output file depends on the
            I/O library in use (Crates or AstroPy).
        clobber : bool, optional
            This flag controls whether an existing file can be
            overwritten (``True``) or if it raises an exception
            (``False``, the default setting).

        Raises
        ------
        sherpa.utils.err.DataErr
            If the data set does not contain any systematic errors.
        sherpa.utils.err.IOErr
            If `filename` already exists and `clobber` is ``False``.

        See Also
        --------
        load_syserror : Load the systematic errors from a file.
        save_error : Save the errors to a file.
        save_staterror : Save the statistical errors to a file.

        Notes
        -----
        The function does not follow the normal Python standards for
        parameter use, since it is designed for easy interactive use.
        When called with a single un-named argument, it is taken to be
        the `filename` parameter. If given two un-named arguments, then
        they are interpreted as the `id` and `filename` parameters,
        respectively. The remaining parameters are expected to be
        given as named arguments.

        The output file contains the columns ``X`` and ``SYS_ERR``.

        Examples
        --------
        Write out the systematic errors from the default data set to the
        file 'errs.dat'.

        >>> save_syserror('errs.dat')

        Over-write the file if it already exists, and take the data
        from the data set "jet":

        >>> save_syserror('jet', 'err.out', clobber=True)

        Write the data out in FITS format:

        >>> save_syserror('syserr.fits', ascii=False)

        """
        clobber = sherpa.utils.bool_cast(clobber)
        ascii = sherpa.utils.bool_cast(ascii)
        # Interactive calling convention: a single un-named argument
        # is the filename, not the id.
        if filename is None:
            id, filename = filename, id
        _check_type(filename, string_types, 'filename', 'a string')
        d = self.get_data(id)
        if bkg_id is not None:
            d = self.get_bkg(id, bkg_id)
        id = self._fix_id(id)
        # PHA data uses the (grouped) energy bins as the X axis;
        # everything else uses the unfiltered independent axis.
        if isinstance(d, sherpa.astro.data.DataPHA):
            x = d._get_ebins(group=True)[0]
        else:
            x = d.get_indep(filter=False)[0]
        err = self.get_syserror(id, filter=False, bkg_id=bkg_id)
        self.save_arrays(filename, [x, err], fields=['X', 'SYS_ERR'],
                         ascii=ascii, clobber=clobber)
# DOC-NOTE: also in sherpa.utils with a different interface
    def save_error(self, id, filename=None, bkg_id=None, ascii=True,
                   clobber=False):
        """Save the errors to a file.

        The total errors for a data set are the quadrature combination
        of the statistical and systematic errors. The systematic
        errors can be 0. If the statistical errors have not been set
        explicitly, then the values calculated by the statistic - such
        as ``chi2gehrels`` or ``chi2datavar`` - will be used.

        Parameters
        ----------
        id : int or str, optional
            The identifier for the data set to use. If not given then
            the default identifier is used, as returned by
            `get_default_id`.
        filename : str
            The name of the file to write the array to. The format
            is determined by the `ascii` argument.
        bkg_id : int or str, optional
            Set if the background should be written out rather
            than the source.
        ascii : bool, optional
            If ``False`` then the data is written as a FITS
            format binary table. The default is ``True``. The
            exact format of the output file depends on the
            I/O library in use (Crates or AstroPy).
        clobber : bool, optional
            This flag controls whether an existing file can be
            overwritten (``True``) or if it raises an exception
            (``False``, the default setting).

        Raises
        ------
        sherpa.utils.err.IOErr
            If `filename` already exists and `clobber` is ``False``.

        See Also
        --------
        get_error : Return the errors on the dependent axis of a data set.
        load_staterror : Load the statistical errors from a file.
        load_syserror : Load the systematic errors from a file.
        save_data : Save the data to a file.
        save_staterror : Save the statistical errors to a file.
        save_syserror : Save the systematic errors to a file.

        Notes
        -----
        The function does not follow the normal Python standards for
        parameter use, since it is designed for easy interactive use.
        When called with a single un-named argument, it is taken to be
        the `filename` parameter. If given two un-named arguments, then
        they are interpreted as the `id` and `filename` parameters,
        respectively. The remaining parameters are expected to be
        given as named arguments.

        The output file contains the columns ``X`` and ``ERR``.

        Examples
        --------
        Write out the errors from the default data set to the file
        'errs.dat'.

        >>> save_error('errs.dat')

        Over-write the file if it already exists, and take the data
        from the data set "jet":

        >>> save_error('jet', 'err.out', clobber=True)

        Write the data out in FITS format:

        >>> save_error('err.fits', ascii=False)

        """
        clobber = sherpa.utils.bool_cast(clobber)
        ascii = sherpa.utils.bool_cast(ascii)
        # Interactive calling convention: a single un-named argument
        # is the filename, not the id.
        if filename is None:
            id, filename = filename, id
        _check_type(filename, string_types, 'filename', 'a string')
        d = self.get_data(id)
        if bkg_id is not None:
            d = self.get_bkg(id, bkg_id)
        id = self._fix_id(id)
        # PHA data uses the (grouped) energy bins as the X axis;
        # everything else uses the unfiltered independent axis.
        if isinstance(d, sherpa.astro.data.DataPHA):
            x = d._get_ebins(group=True)[0]
        else:
            x = d.get_indep(filter=False)[0]
        err = self.get_error(id, filter=False, bkg_id=bkg_id)
        self.save_arrays(filename, [x, err], fields=['X', 'ERR'],
                         ascii=ascii, clobber=clobber)
    def save_pha(self, id, filename=None, bkg_id=None, ascii=False,
                 clobber=False):
        """Save a PHA data set to a file.

        Parameters
        ----------
        id : int or str, optional
            The identifier for the data set to use. If not given then
            the default identifier is used, as returned by
            `get_default_id`.
        filename : str
            The name of the file to write the array to. The format
            is determined by the `ascii` argument.
        bkg_id : int or str, optional
            Set if the background should be written out rather
            than the source.
        ascii : bool, optional
            If ``False`` then the data is written as a FITS
            format binary table. The default is ``False``. The
            exact format of the output file depends on the
            I/O library in use (Crates or AstroPy).
        clobber : bool, optional
            This flag controls whether an existing file can be
            overwritten (``True``) or if it raises an exception
            (``False``, the default setting).

        Raises
        ------
        sherpa.utils.err.ArgumentErr
            If the data set does not contain PHA data.
        sherpa.utils.err.IOErr
            If `filename` already exists and `clobber` is ``False``.

        See Also
        --------
        load_pha : Load a PHA data set.

        Notes
        -----
        The function does not follow the normal Python standards for
        parameter use, since it is designed for easy interactive use.
        When called with a single un-named argument, it is taken to be
        the `filename` parameter. If given two un-named arguments, then
        they are interpreted as the `id` and `filename` parameters,
        respectively. The remaining parameters are expected to be
        given as named arguments.

        Examples
        --------
        Write out the PHA data from the default data set to the
        file 'src.pi':

        >>> save_pha('src.pi')

        Over-write the file if it already exists, and take the data
        from the data set "jet":

        >>> save_pha('jet', 'out.pi', clobber=True)

        Write the data out as an ASCII file:

        >>> save_pha('pi.dat', ascii=True)

        """
        clobber = sherpa.utils.bool_cast(clobber)
        ascii = sherpa.utils.bool_cast(ascii)
        # Interactive calling convention: a single un-named argument
        # is the filename, not the id.
        if filename is None:
            id, filename = filename, id
        _check_type(filename, string_types, 'filename', 'a string')
        d = self._get_pha_data(id)
        if bkg_id is not None:
            d = self.get_bkg(id, bkg_id)
        sherpa.astro.io.write_pha(filename, d, ascii=ascii, clobber=clobber)
def save_grouping(self, id, filename=None, bkg_id=None, ascii=True, clobber=False):
    """Save the grouping scheme to a file.

    The output is a two-column file containing the channel and
    grouping columns from the data set.

    Parameters
    ----------
    id : int or str, optional
        The identifier for the data set to use. If not given then
        the default identifier is used, as returned by
        `get_default_id`.
    filename : str
        The name of the file to write the array to. The format
        is determined by the `ascii` argument.
    bkg_id : int or str, optional
        Set if the grouping array should be taken from the
        background associated with the data set.
    ascii : bool, optional
        If ``False`` then the data is written as a FITS format
        binary table. The default is ``True``. The exact format of
        the output file depends on the I/O library in use (Crates
        or AstroPy).
    clobber : bool, optional
        This flag controls whether an existing file can be
        overwritten (``True``) or if it raises an exception
        (``False``, the default setting).

    Raises
    ------
    sherpa.utils.err.IOErr
        If `filename` already exists and `clobber` is ``False``.

    See Also
    --------
    get_grouping : Return the grouping array for a PHA data set.
    load_quality : Load the quality array from a file and add to a PHA data set.
    set_grouping : Apply a set of grouping flags to a PHA data set.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `filename` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `filename` parameters,
    respectively. The remaining parameters are expected to be
    given as named arguments.

    The column names are 'CHANNEL' and 'GROUPS'.

    Examples
    --------
    Save the channel and grouping columns from the default data
    set to the file 'group.dat' as an ASCII file:

    >>> save_grouping('group.dat')

    Over-write the 'grp.fits' file, if it exists, and write
    out the grouping data from the 'jet' data set, as a FITS
    format file:

    >>> save_grouping('jet', 'grp.fits', ascii=False, clobber=True)
    """
    clobber = sherpa.utils.bool_cast(clobber)
    ascii = sherpa.utils.bool_cast(ascii)
    # Single-argument form: save_grouping(filename).
    if filename is None:
        id, filename = filename, id
    _check_type(filename, string_types, 'filename', 'a string')
    id = self._fix_id(id)
    # Pick either the requested background component or the source.
    d = self.get_bkg(id, bkg_id) if bkg_id is not None else self._get_pha_data(id)
    if d.grouping is None or not numpy.iterable(d.grouping):
        raise DataErr('nogrouping', id)
    sherpa.astro.io.write_arrays(filename, [d.channel, d.grouping],
                                 fields=['CHANNEL', 'GROUPS'],
                                 ascii=ascii, clobber=clobber)
def save_quality(self, id, filename=None, bkg_id=None, ascii=True, clobber=False):
    """Save the quality array to a file.

    The output is a two-column file containing the channel and
    quality columns from the data set.

    Parameters
    ----------
    id : int or str, optional
        The identifier for the data set to use. If not given then
        the default identifier is used, as returned by
        `get_default_id`.
    filename : str
        The name of the file to write the array to. The format
        is determined by the `ascii` argument.
    bkg_id : int or str, optional
        Set if the quality array should be taken from the
        background associated with the data set.
    ascii : bool, optional
        If ``False`` then the data is written as a FITS format
        binary table. The default is ``True``. The exact format of
        the output file depends on the I/O library in use (Crates
        or AstroPy).
    clobber : bool, optional
        This flag controls whether an existing file can be
        overwritten (``True``) or if it raises an exception
        (``False``, the default setting).

    Raises
    ------
    sherpa.utils.err.IOErr
        If `filename` already exists and `clobber` is ``False``.

    See Also
    --------
    get_quality : Return the quality array for a PHA data set.
    load_quality : Load the quality array from a file and add to a PHA data set.
    set_quality : Apply a set of quality flags to a PHA data set.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `filename` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `filename` parameters,
    respectively. The remaining parameters are expected to be
    given as named arguments.

    The column names are 'CHANNEL' and 'QUALITY'.

    Examples
    --------
    Save the channel and quality columns from the default data
    set to the file 'quality.dat' as an ASCII file:

    >>> save_quality('quality.dat')

    Over-write the 'qual.fits' file, if it exists, and write
    out the quality array from the 'jet' data set, as a FITS
    format file:

    >>> save_quality('jet', 'qual.fits', ascii=False, clobber=True)
    """
    clobber = sherpa.utils.bool_cast(clobber)
    ascii = sherpa.utils.bool_cast(ascii)
    # Single-argument form: save_quality(filename).
    if filename is None:
        id, filename = filename, id
    _check_type(filename, string_types, 'filename', 'a string')
    id = self._fix_id(id)
    # Pick either the requested background component or the source.
    d = self.get_bkg(id, bkg_id) if bkg_id is not None else self._get_pha_data(id)
    if d.quality is None or not numpy.iterable(d.quality):
        raise DataErr('noquality', id)
    sherpa.astro.io.write_arrays(filename, [d.channel, d.quality],
                                 fields=['CHANNEL', 'QUALITY'],
                                 ascii=ascii, clobber=clobber)
# DOC-TODO: setting ascii=True is not supported for crates
# and in pyfits it seems to just be a 1D array (needs thinking about)
def save_image(self, id, filename=None, ascii=False, clobber=False):
    """Save the pixel values of a 2D data set to a file.

    Parameters
    ----------
    id : int or str, optional
        The identifier for the data set to use. If not given then
        the default identifier is used, as returned by
        `get_default_id`.
    filename : str
        The name of the file to write the data to. The format
        is determined by the `ascii` argument.
    ascii : bool, optional
        If ``False`` then the data is written as a FITS format
        binary table. The default is ``False``. The exact format
        of the output file depends on the I/O library in use
        (Crates or AstroPy).
    clobber : bool, optional
        This flag controls whether an existing file can be
        overwritten (``True``) or if it raises an exception
        (``False``, the default setting).

    Raises
    ------
    sherpa.utils.err.IOErr
        If `filename` already exists and `clobber` is ``False``.
        If the data set does not contain 2D data.

    See Also
    --------
    save_data : Save the data to a file.
    save_model : Save the model values to a file.
    save_source : Save the model values to a file.
    save_table : Save a data set to a file as a table.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `filename` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `filename` parameters,
    respectively. The remaining parameters are expected to be
    given as named arguments.

    Examples
    --------
    Write the pixel values to the file "img.fits":

    >>> save_image('resid.fits')

    Write the data from the data set 'jet' to the file "jet.img":

    >>> save_image('jet', 'jet.img', clobber=True)
    """
    clobber = sherpa.utils.bool_cast(clobber)
    ascii = sherpa.utils.bool_cast(ascii)
    # Single-argument form: save_image(filename).
    if filename is None:
        id, filename = filename, id
    _check_type(filename, string_types, 'filename', 'a string')
    data = self.get_data(id)
    sherpa.astro.io.write_image(filename, data, ascii=ascii, clobber=clobber)
# DOC-TODO: the output for an image is "excessive"
def save_table(self, id, filename=None, ascii=False, clobber=False):
    """Save a data set to a file as a table.

    Parameters
    ----------
    id : int or str, optional
        The identifier for the data set to use. If not given then
        the default identifier is used, as returned by
        `get_default_id`.
    filename : str
        The name of the file to write the data to. The format
        is determined by the `ascii` argument.
    ascii : bool, optional
        If ``False`` then the data is written as a FITS format
        binary table. The default is ``False``. The exact format
        of the output file depends on the I/O library in use
        (Crates or AstroPy).
    clobber : bool, optional
        This flag controls whether an existing file can be
        overwritten (``True``) or if it raises an exception
        (``False``, the default setting).

    Raises
    ------
    sherpa.utils.err.IOErr
        If `filename` already exists and `clobber` is ``False``.

    See Also
    --------
    save_data : Save the data to a file.
    save_image : Save the pixel values of a 2D data set to a file.
    save_pha : Save a PHA data set to a file.
    save_model : Save the model values to a file.
    save_source : Save the model values to a file.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `filename` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `filename` parameters,
    respectively. The remaining parameters are expected to be
    given as named arguments.

    Examples
    --------
    Write the data set to the file "table.fits":

    >>> save_table('table.fits')

    Write the data from the data set 'jet' to the file "jet.dat",
    as an ASCII file:

    >>> save_table('jet', 'jet.dat', ascii=True, clobber=True)
    """
    clobber = sherpa.utils.bool_cast(clobber)
    ascii = sherpa.utils.bool_cast(ascii)
    # Single-argument form: save_table(filename).
    if filename is None:
        id, filename = filename, id
    id = self._fix_id(id)
    _check_type(filename, string_types, 'filename', 'a string')
    data = self.get_data(id)
    sherpa.astro.io.write_table(filename, data, ascii=ascii, clobber=clobber)
# DOC-NOTE: also in sherpa.utils
def save_data(self, id, filename=None, bkg_id=None, ascii=True, clobber=False):
    """Save the data to a file.

    Parameters
    ----------
    id : int or str, optional
        The identifier for the data set to use. If not given then
        the default identifier is used, as returned by
        `get_default_id`.
    filename : str
        The name of the file to write the array to. The data is
        written out as an ASCII file.
    bkg_id : int or str, optional
        Set if the background should be written out rather
        than the source (for a PHA data set).
    ascii : bool, optional
        If ``False`` then the data is written as a FITS format
        binary table. The default is ``True``. The exact format of
        the output file depends on the I/O library in use (Crates
        or AstroPy).
    clobber : bool, optional
        This flag controls whether an existing file can be
        overwritten (``True``) or if it raises an exception
        (``False``, the default setting).

    Raises
    ------
    sherpa.utils.err.IdentifierErr
        If there is no matching data set.
    sherpa.utils.err.IOErr
        If `filename` already exists and `clobber` is ``False``.

    See Also
    --------
    save_arrays : Write a list of arrays to a file.
    save_delchi : Save the ratio of residuals (data-model) to error to a file.
    save_error : Save the errors to a file.
    save_filter : Save the filter array to a file.
    save_grouping : Save the grouping scheme to a file.
    save_image : Save the pixel values of a 2D data set to a file.
    save_pha : Save a PHA data set to a file.
    save_quality : Save the quality array to a file.
    save_resid : Save the residuals (data-model) to a file.
    save_staterror : Save the statistical errors to a file.
    save_syserror : Save the systematic errors to a file.
    save_table : Save a data set to a file as a table.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `filename` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `filename` parameters,
    respectively. The remaining parameters are expected to be
    given as named arguments.

    Examples
    --------
    Write the default data set out to the ASCII file 'src.dat':

    >>> save_data('src.dat')

    Write the 'rprof' data out to the FITS file 'prof.fits',
    over-writing it if it already exists:

    >>> save_data('rprof', 'prof.fits', clobber=True, ascii=True)
    """
    clobber = sherpa.utils.bool_cast(clobber)
    ascii = sherpa.utils.bool_cast(ascii)
    # Single-argument form: save_data(filename).
    if filename is None:
        id, filename = filename, id
    _check_type(filename, string_types, 'filename', 'a string')
    d = self.get_bkg(id, bkg_id) if bkg_id is not None else self.get_data(id)
    # Try the writers in order of specificity: PHA, then image,
    # then table; each IOErr means "wrong data type, try the next".
    for writer in (sherpa.astro.io.write_pha,
                   sherpa.astro.io.write_image,
                   sherpa.astro.io.write_table):
        try:
            writer(filename, d, ascii=ascii, clobber=clobber)
            return
        except IOErr:
            continue
    # Final fallback; if this errors out then so be it.
    sherpa.io.write_data(filename, d, clobber=clobber)
def pack_pha(self, id=None):
    """Convert a PHA data set into a file structure.

    Parameters
    ----------
    id : int or str, optional
        The data set to use. If not given then the default
        identifier is used, as returned by `get_default_id`.

    Returns
    -------
    pha
        The return value depends on the I/O library in use.

    Raises
    ------
    sherpa.utils.err.ArgumentErr
        If the data set does not contain PHA data.

    See Also
    --------
    load_pha : Load a file as a PHA data set.
    set_data : Set a data set.
    unpack_pha : Create a PHA data structure.
    """
    pha = self._get_pha_data(id)
    return sherpa.astro.io.pack_pha(pha)
def pack_image(self, id=None):
    """Convert a data set into an image structure.

    Parameters
    ----------
    id : int or str, optional
        The data set to use. If not given then the default
        identifier is used, as returned by `get_default_id`.

    Returns
    -------
    img
        The return value depends on the I/O library in use.

    See Also
    --------
    load_image : Load an image as a data set.
    set_data : Set a data set.
    unpack_image : Create an image data structure.
    """
    data = self.get_data(id)
    return sherpa.astro.io.pack_image(data)
def pack_table(self, id=None):
    """Convert a data set into a table structure.

    Parameters
    ----------
    id : int or str, optional
        The data set to use. If not given then the default
        identifier is used, as returned by `get_default_id`.

    Returns
    -------
    tbl
        The return value depends on the I/O library in use and the
        type of data (such as `Data1D`, `Data2D`).

    See Also
    --------
    load_table : Load a FITS binary file as a data set.
    set_data : Set a data set.
    unpack_table : Unpack a FITS binary file into a data structure.
    """
    data = self.get_data(id)
    return sherpa.astro.io.pack_table(data)
@staticmethod
def create_arf(elo, ehi, specresp=None, exposure=None, ethresh=None,
               name='test-arf'):
    """Create an ARF.

    .. versionadded:: 4.10.1

    Parameters
    ----------
    elo, ehi : numpy.ndarray
        The energy bins (low and high, in keV) for the ARF. It is
        assumed that ehi_i > elo_i, elo_j > 0, the energy bins are
        either ascending - so elo_i+1 > elo_i - or descending
        (elo_i+1 < elo_i), and that there are no overlaps.
    specresp : None or array, optional
        The spectral response (in cm^2) for the ARF. It is assumed
        to be >= 0. If not given a flat response of 1.0 is used.
    exposure : number or None, optional
        If not None, the exposure of the ARF in seconds.
    ethresh : number or None, optional
        Passed through to the DataARF call. It controls whether
        zero-energy bins are replaced.
    name : str, optional
        The name of the ARF data set.

    Returns
    -------
    arf : DataARF instance

    See Also
    --------
    create_rmf, get_arf, set_arf, unpack_arf

    Examples
    --------
    Create a flat ARF, with a value of 1.0 cm^2 for each bin,
    over the energy range 0.1 to 10 keV, with a bin spacing of
    0.01 keV.

    >>> egrid = np.arange(0.1, 10, 0.01)
    >>> arf = create_arf(egrid[:-1], egrid[1:])

    Create an ARF that has 10 percent more area than the ARF
    from the default data set:

    >>> arf1 = get_arf()
    >>> elo = arf1.energ_lo
    >>> ehi = arf1.energ_hi
    >>> y = 1.1 * arf1.specresp
    >>> arf2 = create_arf(elo, ehi, y, exposure=arf1.exposure)
    """
    # Delegate to the module-level helper of the same name.
    arf = create_arf(elo, ehi, specresp, exposure, ethresh, name)
    return arf
@staticmethod
def create_rmf(rmflo, rmfhi, startchan=1, e_min=None, e_max=None,
               ethresh=None, fname=None, name='delta-rmf'):
    """Create an RMF.

    If fname is set to `None` then this creates a "perfect" RMF,
    which has a delta-function response (so each channel uniquely
    maps to a single energy bin), otherwise the RMF is taken from
    the image data stored in the file pointed to by `fname`.

    .. versionadded:: 4.10.1

    Parameters
    ----------
    rmflo, rmfhi : array
        The energy bins (low and high, in keV) for the RMF.
        It is assumed that rmfhi_i > rmflo_i, rmflo_j > 0, that the
        energy bins are either ascending, so rmflo_i+1 > rmflo_i,
        or descending (rmflo_i+1 < rmflo_i), and that there are no
        overlaps. These correspond to the Elow and Ehigh columns
        (represented by the ENERG_LO and ENERG_HI columns of the
        MATRIX block) of the OGIP standard.
    startchan : int, optional
        The starting channel number: expected to be 0 or 1 but this
        is not enforced.
    e_min, e_max : None or array, optional
        The E_MIN and E_MAX columns of the EBOUNDS block of the
        RMF.
    ethresh : number or None, optional
        Passed through to the DataRMF call. It controls whether
        zero-energy bins are replaced.
    fname : None or str, optional
        If None then a "perfect" RMF is generated, otherwise it
        gives the name of the two-dimensional image file which
        stores the response information (the format of this file
        matches that created by the CIAO tool rmfimg [1]_).
    name : str, optional
        The name of the RMF data set.

    Returns
    -------
    rmf : DataRMF instance

    See Also
    --------
    create_arf, get_rmf, set_rmf, unpack_rmf

    References
    ----------
    .. [1] http://cxc.harvard.edu/ciao/ahelp/rmfimg.html
    """
    # A file name selects the image-based response; otherwise fall
    # back to the ideal delta-function response.
    if fname is not None:
        return create_non_delta_rmf(rmflo, rmfhi, fname,
                                    offset=startchan, e_min=e_min,
                                    e_max=e_max, ethresh=ethresh,
                                    name=name)
    return create_delta_rmf(rmflo, rmfhi, offset=startchan,
                            e_min=e_min, e_max=e_max,
                            ethresh=ethresh, name=name)
def get_arf(self, id=None, resp_id=None, bkg_id=None):
    """Return the ARF associated with a PHA data set.

    Parameters
    ----------
    id : int or str, optional
        The data set to use. If not given then the default
        identifier is used, as returned by `get_default_id`.
    resp_id : int or str, optional
        The identifier for the ARF within this data set, if there
        are multiple responses.
    bkg_id : int or str, optional
        Set this to return the given background component.

    Returns
    -------
    arf : a `sherpa.astro.instrument.ARF1D` instance
        This is a reference to the ARF, rather than a copy, so that
        changing the fields of the object will change the values in
        the data set.

    See Also
    --------
    fake_pha : Simulate a PHA data set from a model.
    get_response : Return the response information applied to a PHA data set.
    load_arf : Load an ARF from a file and add it to a PHA data set.
    load_pha : Load a file as a PHA data set.
    set_full_model : Define the convolved model expression for a data set.
    set_arf : Set the ARF for use by a PHA data set.
    set_rmf : Set the RMF for use by a PHA data set.
    unpack_arf : Read in an ARF from a file.

    Examples
    --------
    Return the exposure field of the ARF from the default data
    set:

    >>> get_arf().exposure

    Copy the ARF from the default data set to data set 2:

    >>> arf1 = get_arf()
    >>> set_arf(2, arf1)

    Retrieve the ARF associated to the second background
    component of the 'core' data set:

    >>> bgarf = get_arf('core', 'bkg.arf', bkg_id=2)

    Retrieve the ARF and RMF for the default data set and
    use them to create a model expression which includes
    a power-law component (pbgnd) that is not convolved by the
    response:

    >>> arf = get_arf()
    >>> rmf = get_rmf()
    >>> src_expr = xsphabs.abs1 * powlaw1d.psrc
    >>> set_full_model(rmf(arf(src_expr)) + powlaw1d.pbgnd)
    >>> print(get_model())
    """
    # Always validate that the id refers to PHA data, then switch
    # to the background component when requested.
    data = self._get_pha_data(id)
    if bkg_id is not None:
        data = self.get_bkg(id, bkg_id)

    arf, rmf = data.get_response(resp_id)
    if arf is None:
        emsg = 'in PHA data set %s has not been set' % str(self._fix_id(id))
        raise IdentifierErr('getitem', 'ARF data set',
                            data._fix_response_id(resp_id), emsg)

    # Wrap the bare DataARF in the user-facing ARF1D class.
    if isinstance(arf, sherpa.astro.data.DataARF):
        return sherpa.astro.instrument.ARF1D(arf, data, rmf)
    return arf
# DOC-TODO: add an example of a grating/multiple response
def set_arf(self, id, arf=None, resp_id=None, bkg_id=None):
    """Set the ARF for use by a PHA data set.

    Set the effective area curve for a PHA data set, or its
    background.

    Parameters
    ----------
    id : int or str, optional
        The data set to use. If not given then the default
        identifier is used, as returned by `get_default_id`.
    arf
        An ARF, such as returned by `get_arf` or `unpack_arf`.
    resp_id : int or str, optional
        The identifier for the ARF within this data set, if there
        are multiple responses.
    bkg_id : int or str, optional
        Set this to identify the ARF as being for use with the
        background.

    See Also
    --------
    get_arf : Return the ARF associated with a PHA data set.
    load_arf : Load an ARF from a file and add it to a PHA data set.
    load_pha : Load a file as a PHA data set.
    set_full_model : Define the convolved model expression for a data set.
    set_rmf : Set the RMF for use by a PHA data set.
    unpack_arf : Read in an ARF from a file.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `arf` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `arf` parameters,
    respectively. The remaining parameters are expected to be
    given as named arguments.

    If a PHA data set has an associated ARF - either from when the
    data was loaded or explicitly with the `set_arf` function -
    then the model fit to the data will include the effect of the
    ARF when the model is created with `set_model` or
    `set_source`. In this case the `get_source` function returns
    the user model, and `get_model` the model that is fit to the
    data (i.e. it includes any response information; that is the
    ARF and RMF, if set). To include the ARF explicitly, use
    `set_full_model`.

    Examples
    --------
    Copy the ARF from the default data set to data set 2:

    >>> arf1 = get_arf()
    >>> set_arf(2, arf1)

    Read in an ARF from the file 'bkg.arf' and set it as the
    ARF for the background model of data set "core":

    >>> arf = unpack_arf('bkg.arf')
    >>> set_arf('core', arf, bkg_id=1)
    """
    # Single-argument form: set_arf(arf).
    if arf is None:
        id, arf = arf, id

    # Store only the underlying ARF dataset in the PHA response
    # dict. Use isinstance rather than an exact type check so that
    # subclasses of ARF1D are also unwrapped.
    if isinstance(arf, sherpa.astro.instrument.ARF1D):
        arf = arf._arf
    _check_type(arf, sherpa.astro.data.DataARF, 'arf', 'an ARF data set')

    data = self._get_pha_data(id)
    if bkg_id is not None:
        data = self.get_bkg(id, bkg_id)
    data.set_arf(arf, resp_id)
    # Set units of source dataset from channel to energy
    if data.units == 'channel':
        data._set_initial_quantity()
def unpack_arf(self, arg):
    """Create an ARF data structure.

    Parameters
    ----------
    arg
        Identify the ARF: a file name, or a data structure
        representing the data to use, as used by the I/O backend in
        use by Sherpa: a ``TABLECrate`` for crates, as used by CIAO,
        or a list of AstroPy HDU objects.

    Returns
    -------
    arf : a `sherpa.astro.instrument.ARF1D` instance

    See Also
    --------
    get_arf : Return the ARF associated with a PHA data set.
    load_arf : Load an ARF from a file and add it to a PHA data set.
    load_bkg_arf : Load an ARF from a file and add it to the background of a PHA data set.
    load_multi_arfs : Load multiple ARFs for a PHA data set.
    load_pha : Load a file as a PHA data set.
    load_rmf : Load a RMF from a file and add it to a PHA data set.
    set_full_model : Define the convolved model expression for a data set.

    Notes
    -----
    The `minimum_energy` setting of the `ogip` section of the
    Sherpa configuration file determines the behavior when an
    ARF with a minimum energy of 0 is read in. The default is
    to replace the 0 by the value 1e-10, which will also cause
    a warning message to be displayed.

    Examples
    --------
    >>> arf1 = unpack_arf("arf1.fits")
    >>> arf2 = unpack_arf("arf2.fits")

    Read in an ARF using Crates:

    >>> acr = pycrates.read_file("src.arf")
    >>> arf = unpack_arf(acr)

    Read in an ARF using AstroPy:

    >>> hdus = astropy.io.fits.open("src.arf")
    >>> arf = unpack_arf(hdus)
    """
    # Read the raw DataARF and wrap it for interactive use.
    raw = sherpa.astro.io.read_arf(arg)
    return sherpa.astro.instrument.ARF1D(raw)
# DOC-TODO: add an example of a grating/multiple response
# DOC-TODO: how to describe I/O backend support?
def load_arf(self, id, arg=None, resp_id=None, bkg_id=None):
    """Load an ARF from a file and add it to a PHA data set.

    Load in the effective area curve for a PHA data set, or its
    background. The `load_bkg_arf` function can be used for
    setting most background ARFs.

    Parameters
    ----------
    id : int or str, optional
        The data set to use. If not given then the default
        identifier is used, as returned by `get_default_id`.
    arg
        Identify the ARF: a file name, or a data structure
        representing the data to use, as used by the I/O backend in
        use by Sherpa: a ``TABLECrate`` for crates, as used by CIAO,
        or a list of AstroPy HDU objects.
    resp_id : int or str, optional
        The identifier for the ARF within this data set, if there
        are multiple responses.
    bkg_id : int or str, optional
        Set this to identify the ARF as being for use with the
        background.

    See Also
    --------
    get_arf : Return the ARF associated with a PHA data set.
    load_bkg_arf : Load an ARF from a file and add it to the background of a PHA data set.
    load_multi_arfs : Load multiple ARFs for a PHA data set.
    load_pha : Load a file as a PHA data set.
    load_rmf : Load a RMF from a file and add it to a PHA data set.
    set_full_model : Define the convolved model expression for a data set.
    set_arf : Set the ARF for use by a PHA data set.
    unpack_arf : Create an ARF data structure.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `arg` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `arg` parameters,
    respectively. The remaining parameters are expected to be
    given as named arguments.

    If a PHA data set has an associated ARF - either from when the
    data was loaded or explicitly with the `set_arf` function -
    then the model fit to the data will include the effect of the
    ARF when the model is created with `set_model` or
    `set_source`. In this case the `get_source` function returns
    the user model, and `get_model` the model that is fit to the
    data (i.e. it includes any response information; that is the
    ARF and RMF, if set). To include the ARF explicitly, use
    `set_full_model`.

    The `minimum_energy` setting of the `ogip` section of the
    Sherpa configuration file determines the behavior when an
    ARF with a minimum energy of 0 is read in. The default is
    to replace the 0 by the value 1e-10, which will also cause
    a warning message to be displayed.

    Examples
    --------
    Use the contents of the file 'src.arf' as the ARF for the
    default data set.

    >>> load_arf('src.arf')

    Read in an ARF from the file 'bkg.arf' and set it as the
    ARF for the background model of data set "core":

    >>> load_arf('core', 'bkg.arf', bkg_id=1)
    """
    # Single-argument form: load_arf(arg).
    if arg is None:
        id, arg = arg, id
    arf = self.unpack_arf(arg)
    self.set_arf(id, arf, resp_id, bkg_id)
def get_bkg_arf(self, id=None):
    """Return the background ARF associated with a PHA data set.

    This is for the case when there is only one background
    component and one background response. If this does not hold,
    use `get_arf` and use the ``bkg_id`` and ``resp_id`` arguments.

    Parameters
    ----------
    id : int or str, optional
        The data set to use. If not given then the default
        identifier is used, as returned by `get_default_id`.

    Returns
    -------
    arf : a `sherpa.astro.instrument.ARF1D` instance
        This is a reference to the ARF, rather than a copy, so that
        changing the fields of the object will change the values in
        the data set.

    See Also
    --------
    fake_pha : Simulate a PHA data set from a model.
    load_bkg_arf : Load an ARF from a file and add it to the background of a PHA data set.
    load_pha : Load a file as a PHA data set.
    set_full_model : Define the convolved model expression for a data set.
    set_arf : Set the ARF for use by a PHA data set.
    set_rmf : Set the RMF for use by a PHA data set.
    unpack_arf : Read in an ARF from a file.

    Examples
    --------
    Return the exposure field of the ARF from the background of
    the default data set:

    >>> get_bkg_arf().exposure

    Copy the ARF from the default data set to data set 2,
    as the first component:

    >>> arf1 = get_bkg_arf()
    >>> set_arf(2, arf1, bkg_id=1)
    """
    # Look up the PHA data set once rather than once per attribute.
    data = self._get_pha_data(id)
    bkg_id = data.default_background_id
    resp_id = data.primary_response_id
    return self.get_arf(id, resp_id, bkg_id)
# DOC-TODO: how to describe I/O backend support?
def load_bkg_arf(self, id, arg=None):
    """Load an ARF from a file and add it to the background of a
    PHA data set.

    Load in the ARF to the background of the given data set. It
    is only for use when there is only one background component,
    and one response, for the source. For multiple backgrounds
    or responses, use `load_arf`.

    Parameters
    ----------
    id : int or str, optional
        The data set to use. If not given then the default
        identifier is used, as returned by `get_default_id`.
    arg
        Identify the ARF: a file name, or a data structure
        representing the data to use, as used by the I/O backend in
        use by Sherpa: a ``TABLECrate`` for crates, as used by CIAO,
        or a list of AstroPy HDU objects.

    See Also
    --------
    load_arf : Load an ARF from a file and add it to a PHA data set.
    load_bkg_rmf : Load a RMF from a file and add it to the background of a PHA data set.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `arg` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `arg` parameters,
    respectively. The remaining parameters are expected to be
    given as named arguments.

    The `minimum_energy` setting of the `ogip` section of the
    Sherpa configuration file determines the behavior when an
    ARF with a minimum energy of 0 is read in. The default is
    to replace the 0 by the value 1e-10, which will also cause
    a warning message to be displayed.

    Examples
    --------
    Use the contents of the file 'bkg.arf' as the ARF for the
    background of the default data set.

    >>> load_bkg_arf('bkg.arf')

    Set 'core_bkg.arf' as the ARF for the background of data set
    'core':

    >>> load_bkg_arf('core', 'core_bkg.arf')
    """
    # Single-argument form: load_bkg_arf(arg).
    if arg is None:
        id, arg = arg, id
    # Look up the PHA data set once rather than once per attribute.
    data = self._get_pha_data(id)
    bkg_id = data.default_background_id
    resp_id = data.primary_response_id
    self.set_arf(id, self.unpack_arf(arg), resp_id, bkg_id)
def load_multi_arfs(self, id, filenames, resp_ids=None):
    """Load multiple ARFs for a PHA data set.

    A grating observation - such as a Chandra LETGS data set - may
    require multiple responses if the detector has insufficient
    energy resolution to sort the photons into orders. In this
    case, the extracted spectrum will contain the signal from more
    than one diffraction order.

    This function lets the multiple ARFs for such a data set be
    loaded with one command. The `load_arf` function can instead
    be used to load them in individually.

    Parameters
    ----------
    id : int or str, optional
        The data set to use. If not given then the default
        identifier is used, as returned by `get_default_id`.
    filenames : iterable of str
        An array of file names.
    resp_ids : iterable of int or str
        The identifiers for the ARF within this data set.
        The length should match the filenames argument.

    Raises
    ------
    sherpa.utils.err.ArgumentErr
        If `filenames` and `resp_ids` have different lengths.

    See Also
    --------
    load_arf : Load an ARF from a file and add it to a PHA data set.
    load_multi_rmfs : Load multiple RMFs for a PHA data set.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with two arguments, they are assumed to be
    ``filenames`` and ``resp_ids``, and three positional arguments
    means `id`, ``filenames``, and ``resp_ids``.

    The `minimum_energy` setting of the `ogip` section of the
    Sherpa configuration file determines the behavior when an
    ARF with a minimum energy of 0 is read in. The default is
    to replace the 0 by the value 1e-10, which will also cause
    a warning message to be displayed.

    Examples
    --------
    Load three ARFs into the default data set, using response ids
    of 1, 2, and 3 for the LETG/HRC-S orders 1, 2, and 3
    respectively:

    >>> arfs = ['leg_p1.arf', 'leg_p2.arf', 'leg_p3.arf']
    >>> load_multi_arfs(arfs, [1, 2, 3])

    Load in the ARFs to the data set with the identifier
    'lowstate':

    >>> load_multi_arfs('lowstate', arfs, [1, 2, 3])
    """
    # Two-argument form: load_multi_arfs(filenames, resp_ids).
    if resp_ids is None:
        id, filenames, resp_ids = resp_ids, id, filenames

    fnames = list(filenames)
    rids = list(resp_ids)
    if len(fnames) != len(rids):
        raise ArgumentErr('multirsp')

    for fname, rid in zip(fnames, rids):
        self.load_arf(id, fname, rid)
def get_rmf(self, id=None, resp_id=None, bkg_id=None):
    """Return the RMF associated with a PHA data set.

    Parameters
    ----------
    id : int or str, optional
       The data set to use. If not given then the default
       identifier is used, as returned by `get_default_id`.
    resp_id : int or str, optional
       The identifier for the RMF within this data set, if there
       are multiple responses.
    bkg_id : int or str, optional
       Set this to return the given background component.

    Returns
    -------
    rmf : a `sherpa.astro.instrument.RMF1D` instance
       This is a reference to the RMF, rather than a copy, so that
       changing the fields of the object will change the values in
       the data set.

    See Also
    --------
    fake_pha : Simulate a PHA data set from a model.
    get_response: Return the response information applied to a PHA data set.
    load_pha : Load a file as a PHA data set.
    load_rmf : Load a RMF from a file and add it to a PHA data set.
    set_full_model : Define the convolved model expression for a data set.
    set_arf : Set the ARF for use by a PHA data set.
    set_rmf : Set the RMF for use by a PHA data set.
    unpack_rmf : Read in a RMF from a file.

    Examples
    --------
    Copy the RMF from the default data set to data set 2:

    >>> rmf1 = get_rmf()
    >>> set_rmf(2, rmf1)

    Retrieve the RMF associated to the second background
    component of the 'core' data set:

    >>> bgrmf = get_rmf('core', 'bkg.rmf', bkg_id=2)

    Retrieve the ARF and RMF for the default data set and
    use them to create a model expression which includes
    a power-law component (pbgnd) that is not convolved by the
    response:

    >>> arf = get_arf()
    >>> rmf = get_rmf()
    >>> src_expr = xsphabs.abs1 * powlaw1d.psrc
    >>> set_full_model(rmf(arf(src_expr)) + powlaw1d.pbgnd)
    >>> print(get_model())

    """
    data = self._get_pha_data(id)
    if bkg_id is not None:
        # Query the background component rather than the source region.
        data = self.get_bkg(id, bkg_id)
    arf, rmf = data.get_response(resp_id)
    if rmf is None:
        raise IdentifierErr('getitem', 'RMF data set',
                            data._fix_response_id(resp_id),
                            'in PHA data set %s has not been set' %
                            str(self._fix_id(id)))
    if isinstance(rmf, sherpa.astro.data.DataRMF):
        # Wrap the raw DataRMF so the caller gets the higher-level
        # RMF1D interface (ties the RMF to its data set and ARF).
        rmf = sherpa.astro.instrument.RMF1D(rmf, data, arf)
    return rmf
# DOC-TODO: add an example of a grating/multiple response
def set_rmf(self, id, rmf=None, resp_id=None, bkg_id=None):
    """Set the RMF for use by a PHA data set.

    Set the redistribution matrix for a PHA data set, or its
    background.

    Parameters
    ----------
    id : int or str, optional
       The data set to use. If not given then the default
       identifier is used, as returned by `get_default_id`.
    rmf
       An RMF, such as returned by `get_rmf` or `unpack_rmf`.
    resp_id : int or str, optional
       The identifier for the RMF within this data set, if there
       are multiple responses.
    bkg_id : int or str, optional
       Set this to identify the RMF as being for use with the
       background.

    See Also
    --------
    get_rmf : Return the RMF associated with a PHA data set.
    load_pha : Load a file as a PHA data set.
    load_rmf : Load a RMF from a file and add it to a PHA data set.
    set_full_model : Define the convolved model expression for a data set.
    set_arf : Set the ARF for use by a PHA data set.
    unpack_rmf : Create a RMF data structure.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `rmf` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `rmf` parameters,
    respectively. The remaining parameters are expected to be
    given as named arguments.

    If a PHA data set has an associated RMF - either from when the
    data was loaded or explicitly with the `set_rmf` function -
    then the model fit to the data will include the effect of the
    RMF when the model is created with `set_model` or
    `set_source`. In this case the `get_source` function returns
    the user model, and `get_model` the model that is fit to the
    data (i.e. it includes any response information; that is the
    ARF and RMF, if set). To include the RMF explicitly, use
    `set_full_model`.

    Examples
    --------
    Copy the RMF from the default data set to data set 2:

    >>> rmf1 = get_rmf()
    >>> set_rmf(2, rmf1)

    Read in a RMF from the file 'bkg.rmf' and set it as the
    RMF for the background model of data set "core":

    >>> rmf = unpack_rmf('bkg.rmf')
    >>> set_rmf('core', rmf, bkg_id=1)

    """
    # Interactive-use convenience: a single argument is the RMF, not
    # the identifier.
    if rmf is None:
        id, rmf = rmf, id

    # Store only the bare RMF data set in the PHA response dict.
    # Use isinstance rather than an exact type check so that any
    # sub-class of RMF1D is also unwrapped.
    if isinstance(rmf, sherpa.astro.instrument.RMF1D):
        rmf = rmf._rmf
    _check_type(rmf, sherpa.astro.data.DataRMF, 'rmf', 'an RMF data set')

    data = self._get_pha_data(id)
    if bkg_id is not None:
        data = self.get_bkg(id, bkg_id)
    data.set_rmf(rmf, resp_id)
    # Set units of source dataset from channel to energy
    if data.units == 'channel':
        data._set_initial_quantity()
def unpack_rmf(self, arg):
    """Create a RMF data structure.

    Parameters
    ----------
    arg
       Identify the RMF: a file name, or a data structure
       representing the data to use, as used by the I/O backend in
       use by Sherpa: a ``RMFCrateDataset`` for crates, as used by
       CIAO, or a list of AstroPy HDU objects.

    Returns
    -------
    rmf : a `sherpa.astro.instrument.RMF1D` instance

    See Also
    --------
    get_rmf : Return the RMF associated with a PHA data set.
    load_arf : Load an ARF from a file and add it to a PHA data set.
    load_bkg_rmf : Load a RMF from a file and add it to the background of a PHA data set.
    load_multi_rmfs : Load multiple RMFs for a PHA data set.
    load_pha : Load a file as a PHA data set.
    load_rmf : Load a RMF from a file and add it to a PHA data set.
    set_full_model : Define the convolved model expression for a data set.

    Notes
    -----
    The `minimum_energy` setting of the `ogip` section of the
    Sherpa configuration file determines the behavior when an
    RMF with a minimum energy of 0 is read in. The default is
    to replace the 0 by the value 1e-10, which will also cause
    a warning message to be displayed.

    Examples
    --------
    >>> rmf1 = unpack_rmf("rmf1.fits")
    >>> rmf2 = unpack_rmf("rmf2.fits")

    Read in a RMF using Crates:

    >>> acr = pycrates.read_rmf("src.rmf")
    >>> rmf = unpack_rmf(acr)

    Read in a RMF using AstroPy:

    >>> hdus = astropy.io.fits.open("src.rmf")
    >>> rmf = unpack_rmf(hdus)

    """
    # Read via the active I/O backend and wrap in the high-level
    # RMF1D interface; the RMF is not attached to any data set here.
    return sherpa.astro.instrument.RMF1D(sherpa.astro.io.read_rmf(arg))
# DOC-TODO: add an example of a grating/multiple response
# DOC-TODO: how to describe I/O backend support?
def load_rmf(self, id, arg=None, resp_id=None, bkg_id=None):
    """Load a RMF from a file and add it to a PHA data set.

    Load in the redistribution matrix function for a PHA data set,
    or its background. The `load_bkg_rmf` function can be used for
    setting most background RMFs.

    Parameters
    ----------
    id : int or str, optional
       The data set to use. If not given then the default
       identifier is used, as returned by `get_default_id`.
    arg
       Identify the RMF: a file name, or a data structure
       representing the data to use, as used by the I/O
       backend in use by Sherpa: a ``RMFCrateDataset`` for
       crates, as used by CIAO, or an AstroPy ``HDUList`` object.
    resp_id : int or str, optional
       The identifier for the RMF within this data set, if there
       are multiple responses.
    bkg_id : int or str, optional
       Set this to identify the RMF as being for use with the
       background.

    See Also
    --------
    get_rmf : Return the RMF associated with a PHA data set.
    load_bkg_rmf : Load a RMF from a file and add it to the background of a PHA data set.
    load_arf : Load an ARF from a file and add it to a PHA data set.
    load_multi_rmfs : Load multiple RMFs for a PHA data set.
    load_pha : Load a file as a PHA data set.
    set_full_model : Define the convolved model expression for a data set.
    set_rmf : Set the RMF for use by a PHA data set.
    unpack_rmf : Read in a RMF from a file.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `arg` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `arg` parameters,
    respectively. The remaining parameters are expected to be
    given as named arguments.

    If a PHA data set has an associated RMF - either from when the
    data was loaded or explicitly with the `set_rmf` function -
    then the model fit to the data will include the effect of the
    RMF when the model is created with `set_model` or
    `set_source`. In this case the `get_source` function returns
    the user model, and `get_model` the model that is fit to the
    data (i.e. it includes any response information; that is the
    ARF and RMF, if set). To include the RMF explicitly, use
    `set_full_model`.

    The `minimum_energy` setting of the `ogip` section of the
    Sherpa configuration file determines the behavior when an
    RMF with a minimum energy of 0 is read in. The default is
    to replace the 0 by the value 1e-10, which will also cause
    a warning message to be displayed.

    Examples
    --------
    Use the contents of the file 'src.rmf' as the RMF for the
    default data set.

    >>> load_rmf('src.rmf')

    Read in a RMF from the file 'bkg.rmf' and set it as the
    RMF for the background model of data set "core":

    >>> load_rmf('core', 'bkg.rmf', bkg_id=1)

    """
    # Interactive-use convenience: a single argument is the file
    # or data structure, not the identifier.
    if arg is None:
        id, arg = arg, id
    self.set_rmf(id, self.unpack_rmf(arg), resp_id, bkg_id)
def get_bkg_rmf(self, id=None):
    """Return the background RMF associated with a PHA data set.

    This is for the case when there is only one background
    component and one background response. If this does not hold,
    use `get_rmf` and use the ``bkg_id`` and ``resp_id`` arguments.

    Parameters
    ----------
    id : int or str, optional
       The data set to use. If not given then the default
       identifier is used, as returned by `get_default_id`.

    Returns
    -------
    rmf : a `sherpa.astro.instrument.RMF1D` instance
       This is a reference to the RMF, rather than a copy, so that
       changing the fields of the object will change the values in
       the data set.

    See Also
    --------
    fake_pha : Simulate a PHA data set from a model.
    load_bkg_rmf : Load a RMF from a file and add it to the background of a PHA data set.
    load_pha : Load a file as a PHA data set.
    set_full_model : Define the convolved model expression for a data set.
    set_arf : Set the ARF for use by a PHA data set.
    set_rmf : Set the RMF for use by a PHA data set.
    unpack_rmf : Read in a RMF from a file.

    Examples
    --------
    Copy the RMF from the default data set to data set 2,
    as the first component:

    >>> rmf1 = get_bkg_rmf()
    >>> set_rmf(2, rmf1, bkg_id=1)

    """
    # Assume a single background and a single response, so the
    # default/primary identifiers can be used.
    bkg_id = self._get_pha_data(id).default_background_id
    resp_id = self._get_pha_data(id).primary_response_id
    return self.get_rmf(id, resp_id, bkg_id)
# DOC-TODO: how to describe I/O backend support?
def load_bkg_rmf(self, id, arg=None):
    """Load a RMF from a file and add it to the background of a
    PHA data set.

    Load in the RMF to the background of the given data set. It
    is only for use when there is only one background component,
    and one response, for the source. For multiple backgrounds
    or responses, use `load_rmf`.

    Parameters
    ----------
    id : int or str, optional
       The data set to use. If not given then the default
       identifier is used, as returned by `get_default_id`.
    arg
       Identify the RMF: a file name, or a data structure
       representing the data to use, as used by the I/O
       backend in use by Sherpa: a ``RMFCrateDataset`` for
       crates, as used by CIAO, or an AstroPy ``HDUList`` object.

    See Also
    --------
    load_rmf : Load a RMF from a file and add it to a PHA data set.
    load_bkg_arf : Load an ARF from a file and add it to the background of a PHA data set.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `arg` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `arg` parameters,
    respectively. The remaining parameters are expected to be
    given as named arguments.

    The `minimum_energy` setting of the `ogip` section of the
    Sherpa configuration file determines the behavior when an
    RMF with a minimum energy of 0 is read in. The default is
    to replace the 0 by the value 1e-10, which will also cause
    a warning message to be displayed.

    Examples
    --------
    Use the contents of the file 'bkg.rmf' as the RMF for the
    background of the default data set.

    >>> load_bkg_rmf('bkg.rmf')

    Set 'core_bkg.rmf' as the RMF for the background of data set
    'core':

    >>> load_bkg_rmf('core', 'core_bkg.rmf')

    """
    # Interactive-use convenience: a single argument is the file
    # or data structure, not the identifier.
    if arg is None:
        id, arg = arg, id
    # Assume a single background and a single response, so the
    # default/primary identifiers can be used.
    bkg_id = self._get_pha_data(id).default_background_id
    resp_id = self._get_pha_data(id).primary_response_id
    self.set_rmf(id, self.unpack_rmf(arg), resp_id, bkg_id)
def load_multi_rmfs(self, id, filenames, resp_ids=None):
    """Load multiple RMFs for a PHA data set.

    A grating observation - such as a Chandra LETGS data set - may
    require multiple responses if the detector has insufficient energy
    resolution to sort the photons into orders. In this case, the
    extracted spectrum will contain the signal from more than one
    diffraction orders.

    This function lets the multiple RMFs for such a data set be loaded
    with one command. The `load_rmf` function can instead be used
    to load them in individually.

    Parameters
    ----------
    id : int or str, optional
       The data set to use. If not given then the default
       identifier is used, as returned by `get_default_id`.
    filenames : iterable of str
       An array of file names.
    resp_ids : iterable of int or str
       The identifiers for the RMF within this data set.
       The length should match the filenames argument.

    See Also
    --------
    load_rmf : Load a RMF from a file and add it to a PHA data set.
    load_multi_arfs : Load multiple ARFs for a PHA data set.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with two arguments, they are assumed to be
    ``filenames`` and ``resp_ids``, and three positional arguments
    means `id`, ``filenames``, and ``resp_ids``.

    The `minimum_energy` setting of the `ogip` section of the
    Sherpa configuration file determines the behavior when an
    RMF with a minimum energy of 0 is read in. The default is
    to replace the 0 by the value 1e-10, which will also cause
    a warning message to be displayed.

    Examples
    --------
    Load three RMFs into the default data set, using response ids of
    1, 2, and 3 for the LETG/HRC-S orders 1, 2, and 3 respectively:

    >>> rmfs = ['leg_p1.rmf', 'leg_p2.rmf', 'leg_p3.rmf']
    >>> load_multi_rmfs(rmfs, [1, 2, 3])

    Load in the RMFs to the data set with the identifier
    'lowstate':

    >>> load_multi_rmfs('lowstate', rmfs, [1, 2, 3])

    """
    # if type(filenames) not in (list, tuple):
    #     raise ArgumentError('Filenames must be contained in a list')
    # if type(resp_ids) not in (list, tuple):
    #     raise ArgumentError('Response IDs must be contained in a list')

    # Interactive-use convenience: with only two arguments they are
    # the filenames and response ids, not the identifier.
    if resp_ids is None:
        id, filenames, resp_ids = resp_ids, id, filenames

    filenames = list(filenames)
    resp_ids = list(resp_ids)

    if len(filenames) != len(resp_ids):
        raise ArgumentErr('multirsp')

    for filename, resp_id in zip(filenames, resp_ids):
        self.load_rmf(id, filename, resp_id)
def get_bkg(self, id=None, bkg_id=None):
    """Return the background for a PHA data set.

    Function to return the background for a PHA data set.
    The object returned by the call can be used to query and
    change properties of the background.

    Parameters
    ----------
    id : int or str, optional
       The data set. If not given then the default
       identifier is used, as returned by `get_default_id`.
    bkg_id : int or str, optional
       The identifier for this background, which is needed if
       there are multiple background estimates for the source.

    Returns
    -------
    data : a sherpa.astro.data.DataPHA object

    Raises
    ------
    sherpa.utils.err.ArgumentErr
       If the data set does not contain a PHA data set.
    sherpa.utils.err.IdentifierErr
       If no data set is associated with this identifier.

    See Also
    --------
    get_data : Return the data set by identifier.
    load_bkg : Load the background from a file and add it to a PHA data set.
    set_bkg : Set the background for a PHA data set.

    Examples
    --------
    >>> bg = get_bkg()

    >>> bg = get_bkg('flare', 2)

    """
    pha = self._get_pha_data(id)
    bkg = pha.get_background(bkg_id)
    if bkg is not None:
        return bkg

    # No background stored for this (id, bkg_id) pair.
    raise IdentifierErr('getitem', 'background data set',
                        pha._fix_background_id(bkg_id),
                        'in PHA data set %s has not been set' %
                        str(self._fix_id(id)))
def set_bkg(self, id, bkg=None, bkg_id=None):
    """Set the background for a PHA data set.

    The background can either be fit with a model - using
    `set_bkg_model` - or removed from the data before fitting,
    using `subtract`.

    Parameters
    ----------
    id : int or str, optional
       The data set to use. If not given then the default
       identifier is used, as returned by `get_default_id`.
    bkg
       A PHA data set, such as returned by `get_data` or
       `unpack_pha`.
    bkg_id : int or str, optional
       The identifier for this background, which is needed if
       there are multiple background estimates for the source.

    See Also
    --------
    get_bkg : Return the background for a PHA data set.
    load_bkg : Load the background from a file and add it to a PHA data set.
    load_pha : Load a file as a PHA data set.
    set_bkg_model : Set the background model expression for a data set.
    subtract : Subtract the background estimate from a data set.
    unpack_pha : Create a PHA data structure.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `bkg` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `bkg` parameters,
    respectively. The remaining parameters are expected to be
    given as named arguments.

    If the background has no grouping of quality arrays then they
    are copied from the source region. If the background has no
    response information (ARF or RMF) then the response is copied
    from the source region.

    Examples
    --------
    Copy the background from the default data set to data set 2:

    >>> bkg1 = get_bkg()
    >>> set_bkg(2, bkg1)

    Read in the PHA data from the file 'bkg.pi' and set it as the
    second background component of data set "core":

    >>> bkg = unpack_pha('bkg.pi')
    >>> set_bkg('core', bkg, bkg_id=2)

    """
    # Interactive-use convenience: a single argument is the
    # background data set, not the identifier.
    if bkg is None:
        id, bkg = bkg, id
    data = self._get_pha_data(id)
    _check_type(bkg, sherpa.astro.data.DataPHA, 'bkg', 'a PHA data set')
    # DataPHA.set_background handles copying grouping/quality and
    # response information from the source region when missing.
    data.set_background(bkg, bkg_id)
def list_bkg_ids(self, id=None):
    """List all the background identifiers for a data set.

    A PHA data set can contain multiple background datasets, each
    identified by an integer or string. This function returns a
    list of these identifiers for a data set.

    Parameters
    ----------
    id : int or str, optional
       The data set to query. If not given then the default
       identifier is used, as returned by `get_default_id`.

    Returns
    -------
    ids : array of int or str
       The identifiers for the background data sets for the data
       set. In many cases this will just be ``[1]``.

    See Also
    --------
    list_response_ids : List all the response identifiers of a data set.
    load_bkg : Load the background of a PHA data set.

    """
    data = self._get_pha_data(id)
    # Iterating a dict yields its keys; materialize them as a list.
    return [key for key in data._backgrounds]
def list_response_ids(self, id=None, bkg_id=None):
    """List all the response identifiers of a data set.

    A PHA data set can contain multiple responses, that is,
    pairs of ARF and RMF, each of which has an identifier.
    This function returns a list of these identifiers
    for a data set.

    Parameters
    ----------
    id : int or str, optional
       The data set to query. If not given then the default
       identifier is used, as returned by `get_default_id`.
    bkg_id : int or str, optional
       Set this to identify the background component to query.

    Returns
    -------
    ids : array of int or str
       The identifiers for the response information for the data
       set. In many cases this will just be ``[1]``.

    See Also
    --------
    list_bkg_ids : List all the background identifiers for a data set.
    load_arf : Load an ARF from a file and add it to a PHA data set.
    load_rmf : Load a RMF from a file and add it to a PHA data set.

    """
    pha = self._get_pha_data(id)
    if bkg_id is not None:
        # Query the background component rather than the source.
        pha = self.get_bkg(id, bkg_id)
    # Iterating a dict yields its keys; materialize them as a list.
    return [resp_id for resp_id in pha._responses]
# DOC-TODO: docs need to be added to sherpa.astro.data.set_analysis
# DOC-TODO: should the arguments be renamed to better match optional
# nature of the routine (e.g. can call set_analysis('energy'))?
def set_analysis(self, id, quantity=None, type='rate', factor=0):
    """Set the units used when fitting and displaying spectral data.

    The set_analysis command sets the units for spectral
    analysis. Note that in order to change the units of a data set
    from 'channel' to 'energy' or 'wavelength', the appropriate
    ARF and RMF instrument response files must be loaded for that
    data set. The ``type`` and ``factor`` arguments control how
    the data is plotted.

    Parameters
    ----------
    id : int or str
       If only one argument is given then this is taken to be the
       quantity argument (in which case, the change is made to
       all data sets). If multiple arguments are given then this
       is the identifier for the data set to change.
    quantity : { 'channel', 'chan', 'bin', 'energy', 'ener', 'wavelength', 'wave' }
       The units to use for the analysis.
    type : { 'rate', 'counts' }, optional
       The units to use on the Y axis of plots. The default
       is 'rate'.
    factor : int, optional
       The Y axis of plots is multiplied by Energy^factor or
       Wavelength^factor before display. The default is 0.

    Raises
    ------
    sherpa.utils.err.IdentifierErr
       If the `id` argument is not recognized or no data sets
       have been loaded.

    See Also
    --------
    get_analysis : Return the analysis setting for a data set.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `quantity` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `quantity` parameters,
    respectively.

    Examples
    --------
    Set all loaded data sets to use wavelength for any future
    fitting or display.

    >>> set_analysis('wave')

    Set the data set with an identifier of 2 to use energy
    units.

    >>> set_analysis(2, 'energy')

    Set data set 1 to use channel units. Plots will use a Y
    axis of count/bin rather than the default count/s/bin.

    >>> set_analysis(1, 'bin', 'counts')

    Set data set 1 to use energy units. Plots of this data set
    will display keV on the X axis and counts keV (i.e.
    counts/keV * keV^2) in the Y axis.

    >>> set_analysis(1, 'energy', 'counts', 2)

    """
    # Interactive-use convenience: a single argument is the
    # quantity, applied to every loaded data set.
    if quantity is None:
        id, quantity = quantity, id

    _check_type(quantity, string_types, 'quantity', 'a string')
    _check_type(type, string_types, 'type', 'a string')

    ids = self.list_data_ids()
    if id is not None:
        ids = [id]

    # Fail loudly rather than silently doing nothing when there is
    # no data to change (matches the behavior of set_coord).
    if len(ids) == 0:
        raise IdentifierErr('nodatasets')

    for id in ids:
        self._get_pha_data(id).set_analysis(quantity, type, factor)
def get_analysis(self, id=None):
    """Return the units used when fitting spectral data.

    Parameters
    ----------
    id : int or str, optional
       The data set to query. If not given then the default
       identifier is used, as returned by `get_default_id`.

    Returns
    -------
    setting : { 'channel', 'energy', 'wavelength' }
       The analysis setting for the data set.

    Raises
    ------
    sherpa.utils.err.ArgumentErr
       If the data set does not contain PHA data.
    sherpa.utils.err.IdentifierErr
       If the `id` argument is not recognized.

    See Also
    --------
    get_default_id : Return the default data set identifier.
    set_analysis : Change the analysis setting.

    Examples
    --------
    Display the analysis setting for the default data set:

    >>> print(get_analysis())

    Check whether the data set labelled 'SgrA' is using the
    wavelength setting:

    >>> is_wave = get_analysis('SgrA') == 'wavelength'

    """
    pha = self._get_pha_data(id)
    return pha.get_analysis()
# DOC-TODO: docs need to be added to sherpa.astro.data.set_coord
# DOC-TODO: how best to document the wcssubs support?
def set_coord(self, id, coord=None):
    """Set the coordinate system to use for image analysis.

    The default coordinate system - that is, the mapping between
    pixel position and coordinate value, for images (2D data sets)
    is 'logical'. This function can change this setting, so that
    model parameters can be fit using other systems. This setting
    is also used by the `notice2d` and `ignore2d` series of
    commands.

    Parameters
    ----------
    id : int or str
       The data set to change. If not given then the default
       identifier is used, as returned by `get_default_id`.
    coord : { 'logical', 'image', 'physical', 'world', 'wcs' }
       The coordinate system to use. The 'image' option is the
       same as 'logical', and 'wcs' the same as 'world'.

    See Also
    --------
    get_coord : Get the coordinate system used for image analysis.
    guess : Estimate the parameter values and ranges given the loaded data.
    ignore2d : Exclude a spatial region from an image.
    notice2d : Include a spatial region of an image.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `coord` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `coord` parameters,
    respectively.

    Any limits or values already set for model parameters, such as
    those made by `guess`, may need to be changed after changing
    the coordinate system.

    The 'logical' system is one in which the center of the
    lower-left pixel has coordinates ``(1, 1)`` and the center of the
    top-right pixel has coordinates ``(nx, ny)``, for a ``nx``
    (columns) by ``ny`` (rows) pixel image. The pixels have a side
    of length 1, so the first pixel covers the range ``x=0.5`` to
    ``x=1.5`` and ``y=0.5`` to ``y=1.5``.

    The 'physical' and 'world' coordinate systems rely on FITS
    World Coordinate System (WCS) standard [1]_. The 'physical'
    system refers to a linear transformation, with possible
    offset, of the 'logical' system. The 'world' system refers to
    the mapping to a celestial coordinate system.

    References
    ----------
    .. [1] http://fits.gsfc.nasa.gov/fits_wcs.html

    Examples
    --------
    Change the coordinate system of the default data set to
    the world system ('wcs' is a synonym for 'world').

    >>> set_coord('wcs')

    Change the data set with the id of 'm82' to use the
    physical coordinate system.

    >>> set_coord('m82', 'physical')

    """
    # Interactive-use convenience: a single argument is the
    # coordinate system, applied to every loaded data set.
    if coord is None:
        id, coord = coord, id

    _check_type(coord, string_types, 'coord', 'a string')

    ids = self.list_data_ids()
    if id is not None:
        ids = [id]

    if len(ids) == 0:
        raise IdentifierErr('nodatasets')

    for id in ids:
        self._get_img_data(id).set_coord(coord)
# DOC-TODO: docs need to be added to sherpa.astro.data.get_coord
def get_coord(self, id=None):
    """Get the coordinate system used for image analysis.

    Parameters
    ----------
    id : int or str, optional
       The data set to query. If not given then the default
       identifier is used, as returned by `get_default_id`.

    Returns
    -------
    coord : { 'logical', 'physical', 'world' }

    Raises
    ------
    sherpa.utils.err.ArgumentErr
       If the data set does not contain image data.
    sherpa.utils.err.IdentifierErr
       If the `id` argument is not recognized.

    See Also
    --------
    get_default_id : Return the default data set identifier.
    set_coord : Set the coordinate system to use for image analysis.

    """
    img = self._get_img_data(id)
    return img.coord
def ignore_bad(self, id=None, bkg_id=None):
    """Exclude channels marked as bad in a PHA data set.

    Ignore any bin in the PHA data set which has a quality value
    that is larger than zero.

    Parameters
    ----------
    id : int or str, optional
       The data set to change. If not given then the default
       identifier is used, as returned by `get_default_id`.
    bkg_id : int or str, optional
       The identifier for the background (the default of ``None``
       uses the first component).

    Raises
    ------
    sherpa.utils.err.DataErr
       If the data set has no quality array.

    See Also
    --------
    ignore : Exclude data from the fit.
    notice : Include data in the fit.
    set_quality : Apply a set of quality flags to a PHA data set.

    Notes
    -----
    The `load_pha` command - and others that create a PHA data set
    - do not exclude these bad-quality bins automatically.

    If the data set has been grouped, then calling `ignore_bad`
    will remove any filter applied to the data set. If this
    happens a warning message will be displayed.

    Examples
    --------
    Remove any bins that are marked bad in the default data set:

    >>> load_pha('src.pi')
    >>> ignore_bad()

    The data set 'jet' is grouped, and a filter applied. After
    ignoring the bad-quality points, the filter has been removed
    and will need to be re-applied:

    >>> group_counts('jet', 20)
    >>> notice_id('jet', 0.5, 7)
    >>> get_filter('jet')
    '0.496399998665:7.212399959564'
    >>> ignore_bad('jet')
    WARNING: filtering grouped data with quality flags, previous filters deleted
    >>> get_filter('jet')
    '0.001460000058:14.950400352478'

    """
    data = self._get_pha_data(id)
    if bkg_id is not None:
        # Apply the filter to the background component instead.
        data = self.get_bkg(id, bkg_id)
    data.ignore_bad()
def _notice_warning(self):
    """Warn if the loaded PHA data sets use different analysis settings.

    A notice/ignore limit is interpreted in each data set's own
    analysis units, so mixed settings are likely a user error.
    """
    settings = [dset.get_analysis()
                for dset in self._data.values()
                if isinstance(dset, sherpa.astro.data.DataPHA)]
    if len(settings) > 1 and \
            any(setting != settings[0] for setting in settings[1:]):
        warning("not all PHA datasets have equal analysis quantities")
def notice(self, lo=None, hi=None, **kwargs):
    # Warn before filtering: the limits are interpreted in each PHA
    # data set's own analysis units, so mixed settings are suspect.
    if lo is not None or hi is not None:
        self._notice_warning()
    sherpa.ui.utils.Session.notice(self, lo, hi, **kwargs)

# Re-use the base-class documentation for the interactive help.
notice.__doc__ = sherpa.ui.utils.Session.notice.__doc__
def ignore(self, lo=None, hi=None, **kwargs):
    # Warn before filtering: the limits are interpreted in each PHA
    # data set's own analysis units, so mixed settings are suspect.
    if lo is not None or hi is not None:
        self._notice_warning()
    sherpa.ui.utils.Session.ignore(self, lo, hi, **kwargs)

# Re-use the base-class documentation for the interactive help.
ignore.__doc__ = sherpa.ui.utils.Session.ignore.__doc__
# DOC-TODO: how best to document the region support?
# DOC-TODO: I have not mentioned the support for radii in arcsec/minutes/degrees
# or sexagessimal formats. Is this supported here?
def notice2d(self, val=None):
    """Include a spatial region of all data sets.

    Select a spatial region to include in the fit. The filter is
    applied to all data sets.

    Parameters
    ----------
    val : str, optional
       A region specification as a string or the name of a file
       containing a region filter. The coordinates system of the
       filter is taken from the coordinate setting of the data
       sets (`set_coord`). If ``None``, then all points are
       included.

    See Also
    --------
    ignore2d : Exclude a spatial region from all data sets.
    ignore2d_id : Exclude a spatial region from a data set.
    ignore2d_image : Select the region to exclude from the image viewer.
    notice2d_id : Include a spatial region of a data set.
    notice2d_image : Select the region to include from the image viewer.
    set_coord : Set the coordinate system to use for image analysis.

    Notes
    -----
    The region syntax support is provided by the CIAO region
    library [1]_, and supports the following shapes (the
    capitalized parts of the name indicate the minimum length of
    the name that is supported):

    =========  ===================================================
    Name       Arguments
    =========  ===================================================
    RECTangle  (xmin,ymin,xmax,ymax)
    BOX        (xcenter,ycenter,width,height)
    BOX        (xcenter,ycenter,width,height,angle)
    ROTBOX     (xcenter,ycenter,width,height,angle)
    CIRcle     (xcenter,ycenter,radius)
    ANNULUS    (xcenter,ycenter,iradius,oradius)
    ELLipse    (xcenter,ycenter,xradius,yradius,angle)
    SECTor     (xcenter,ycenter,minangle,maxangle)
    PIE        (xcenter,ycenter,iradius,oradius,minangle,maxangle)
    POLYgon    (x1,y1,x2,y2,x3,y3,...)
    POInt      (xcenter,ycenter)
    REGION     (file)
    FIELD      ()
    =========  ===================================================

    Angles are measured in degrees from the X axis, with a
    positive value indicating a counter-clockwise direction.

    Only simple polygons are supported, which means that a polygon
    can not intersect itself. The last point does not need to
    equal the first point (i.e. polygons are automatically closed
    if necessary).

    The shapes can be combined using AND (intersection), OR
    (union), or NOT (negation):

    intersection::

       shape1()*shape2()
       shape1()&shape2()

    union::

       shape1()+shape2()
       shape1()|shape2()
       shape1()shape2()

    negation::

       !shape1()
       shape1()-shape2()
       shape1()*!shape1()

    The precedence uses the same rules as the mathematical
    operators ``+`` and ``*`` (with ``-`` replaced by ``*!``),
    so that::

       circle(0,0,10)+rect(10,-10,20,10)-circle(10,0,10)

    means that the second circle is only excluded from the
    rectangle, and not the first circle. To remove it from both
    shapes requires writing::

       circle(0,0,10)-circle(10,0,10)+rect(10,-10,20,10)-circle(10,0,10)

    A point is included if the center of the pixel lies within
    the region. The comparison is done using the selected
    coordinate system for the image, so a pixel may not
    have a width and height of 1.

    The REGION specifier is only supported when using CIAO.
    Unfortunately you can not combine region shapes using this
    syntax. That is ``region(s1.reg)+region(s2.reg)`` is not
    supported.

    References
    ----------
    .. [1] http://cxc.harvard.edu/ciao/ahelp/dmregions.html

    Examples
    --------
    Include the data points that lie within a circle centered
    at 4324.5,3827.5 with a radius of 430:

    >>> set_coord('physical')
    >>> notice2d('circle(4324.5,3827.5,430)')

    Read in the filter from the file ``ds9.reg``, using either:

    >>> notice2d('ds9.reg')

    or, when using CIAO,

    >>> notice2d('region(ds9.reg)')

    Select those points that lie both within the rotated box and
    the annulus (i.e. an intersection of the two shapes):

    >>> notice2d('rotbox(100,200,50,40,45)*annulus(120,190,20,60)')

    Select those points that lie within the rotated box or the
    annulus (i.e. a union of the two shapes):

    >>> notice2d('rotbox(100,200,50,40,45)+annulus(120,190,20,60)')

    All existing spatial filters are removed:

    >>> notice2d()

    """
    # Apply the (inclusive) filter to every loaded data set; each
    # must be an image data set or _check_type raises.
    for d in self._data.values():
        _check_type(d, sherpa.astro.data.DataIMG, 'img',
                    'a image data set')
        # False => notice (include) rather than ignore.
        d.notice2d(val, False)
def ignore2d(self, val=None):
"""Exclude a spatial region from all data sets.
Select a spatial region to exclude in the fit. The filter is
applied to all data sets.
Parameters
----------
val : str, optional
A region specification as a string or the name of a file
containing a region filter. The coordinates system of the
filter is taken from the coordinate setting of the data
sets (`set_coord`). If ``None``, then all points are
included.
See Also
--------
ignore2d_id : Exclude a spatial region from a data set.
ignore2d_image : Select the region to exclude from the image viewer.
notice2d : Include a spatial region from all data sets.
notice2d_id : Include a spatial region of a data set.
notice2d_image : Select the region to include from the image viewer.
set_coord : Set the coordinate system to use for image analysis.
Notes
-----
The region syntax is described in the `notice2d` function.
Examples
--------
Exclude points that fall within the two regions:
>>> ignore2d('ellipse(200,300,40,30,-34)')
>>> ignore2d('box(40,100,30,40)')
Use a region file called 'reg.fits', by using either:
>>> ignore2d('reg.fits')
or
>>> ignore2d('region(reg.fits)')
Exclude all points.
>>> ignore2d()
"""
for d in self._data.values():
_check_type(d, sherpa.astro.data.DataIMG, 'img',
'a image data set')
d.notice2d(val, True)
def notice2d_id(self, ids, val=None):
"""Include a spatial region of a data set.
Select a spatial region to include in the fit. The filter is
applied to the given data set, or sets.
Parameters
----------
ids : int or str, or array of int or str
The data set, or sets, to use.
val : str, optional
A region specification as a string or the name of a file
containing a region filter. The coordinates system of the
filter is taken from the coordinate setting of the data
sets (`set_coord`). If ``None``, then all points are
included.
See Also
--------
ignore2d : Exclude a spatial region from all data sets.
ignore2d_id : Exclude a spatial region from a data set.
ignore2d_image : Select the region to exclude from the image viewer.
notice2d : Include a spatial region of all data sets.
notice2d_image : Select the region to include from the image viewer.
set_coord : Set the coordinate system to use for image analysis.
Notes
-----
The region syntax is described in the `notice2d` function.
Examples
--------
Select all the pixels in the default data set:
>>> notice2d_id(1)
Select all the pixels in data sets 'i1' and 'i2':
>>> notice2d_id(['i1', 'i2'])
Apply the filter to the 'img' data set:
>>> notice2d_id('img', 'annulus(4324.2,3982.2,40.2,104.3)')
Use the regions in the file `srcs.reg` for data set 1:
>>> notice2d_id(1, 'srcs.reg')
or
>>> notice2d_id(1, 'region(srcs.reg)')
"""
if self._valid_id(ids):
ids = (ids,)
else:
try:
ids = tuple(ids)
except TypeError:
_argument_type_error('ids',
'an identifier or list of identifiers')
for id in ids:
_check_type(self.get_data(id), sherpa.astro.data.DataIMG,
'img', 'a image data set')
self.get_data(id).notice2d(val, False)
def ignore2d_id(self, ids, val=None):
"""Exclude a spatial region from a data set.
Select a spatial region to exclude in the fit. The filter is
applied to the given data set, or sets.
Parameters
----------
ids : int or str, or array of int or str
The data set, or sets, to use.
val : str, optional
A region specification as a string or the name of a file
containing a region filter. The coordinates system of the
filter is taken from the coordinate setting of the data
sets (`set_coord`). If ``None``, then all points are
included.
See Also
--------
ignore2d : Exclude a spatial region from all data sets.
ignore2d_image : Select the region to exclude from the image viewer.
notice2d : Include a spatial region of all data sets.
notice2d_id : Include a spatial region from a data set.
notice2d_image : Select the region to include from the image viewer.
set_coord : Set the coordinate system to use for image analysis.
Notes
-----
The region syntax is described in the `notice2d` function.
Examples
--------
Ignore the pixels within the rectangle from data set 1:
>>> ignore2d_id(1, 'rect(10,10,20,290)')
Ignore the spatial region in the file `srcs.reg`:
>>> ignore2d_id(1, 'srcs.reg')
or
>>> ignore2d_id(1, 'region(srcs.reg)')
"""
if self._valid_id(ids):
ids = (ids,)
else:
try:
ids = tuple(ids)
except TypeError:
_argument_type_error('ids',
'an identifier or list of identifiers')
for id in ids:
_check_type(self.get_data(id), sherpa.astro.data.DataIMG,
'img', 'a image data set')
self.get_data(id).notice2d(val, True)
def notice2d_image(self, ids=None):
"""Include pixels using the region defined in the image viewer.
Include points that lie within the region defined in the image
viewer.
Parameters
----------
ids : int or str, or sequence of int or str, optional
The data set, or sets, to use. If ``None`` (the default)
then the default identifier is used, as returned by
`get_default_id`.
See Also
--------
ignore2d : Exclude a spatial region from an image.
ignore2d_image : Exclude pixels using the region defined in the image viewer.
notice2d : Include a spatial region of an image.
set_coord : Set the coordinate system to use for image analysis.
Notes
-----
The region definition is converted into the coordinate system
relevant to the data set before it is applied.
Examples
--------
Use the region in the image viewer to include points from the
default data set.
>>> notice2d_image()
Include points in the data set labelled "2".
>>> notice2d_image(2)
Include points in data sets "src" and "bg".
>>> notice2d_image(["src", "bg"])
"""
if ids is None:
ids = self._default_id
if self._valid_id(ids):
ids = (ids,)
else:
try:
ids = tuple(ids)
except TypeError:
_argument_type_error('ids',
'an identifier or list of identifiers')
for id in ids:
_check_type(self.get_data(id), sherpa.astro.data.DataIMG,
'img', 'a image data set')
coord = self.get_coord(id)
if coord == 'logical':
coord = 'image'
elif coord == 'world':
coord = 'wcs'
regions = self.image_getregion(coord).replace(';', '')
self.notice2d_id(id, regions)
def ignore2d_image(self, ids=None):
"""Exclude pixels using the region defined in the image viewer.
Exclude points that lie within the region defined in the image
viewer.
Parameters
----------
ids : int or str, or sequence of int or str, optional
The data set, or sets, to ignore. If ``None`` (the default)
then the default identifier is used, as returned by
`get_default_id`.
See Also
--------
ignore2d : Exclude a spatial region from an image.
notice2d : Include a spatial region of an image.
notice2d_image : Include pixels using the region defined in the image viewer.
set_coord : Set the coordinate system to use for image analysis.
Notes
-----
The region definition is converted into the coordinate system
relevant to the data set before it is applied.
Examples
--------
Use the region in the image viewer to ignore points from the
default data set.
>>> ignore2d_image()
Ignore points in the data set labelled "2".
>>> ignore2d_image(2)
Ignore points in data sets "src" and "bg".
>>> ignore2d_image(["src", "bg"])
"""
if ids is None:
ids = self._default_id
if self._valid_id(ids):
ids = (ids,)
else:
try:
ids = tuple(ids)
except TypeError:
_argument_type_error('ids',
'an identifier or list of identifiers')
for id in ids:
_check_type(self.get_data(id), sherpa.astro.data.DataIMG,
'img', 'a image data set')
coord = self.get_coord(id)
if coord == 'logical':
coord = 'image'
elif coord == 'world':
coord = 'wcs'
regions = self.image_getregion(coord).replace(';', '')
self.ignore2d_id(id, regions)
# DOC-TODO: how best to include datastack support? How is it handled here?
def load_bkg(self, id, arg=None, use_errors=False, bkg_id=None):
"""Load the background from a file and add it to a PHA data set.
This will load the PHA data and any response information - so
ARF and RMF - and add it as a background component to the
PHA data set.
Parameters
----------
id : int or str, optional
The identifier for the data set to use. If not given then
the default identifier is used, as returned by
`get_default_id`.
arg
Identify the data to read: a file name, or a data structure
representing the data to use, as used by the I/O backend in
use by Sherpa: a ``PHACrateDataset`` for crates, as used by
CIAO, or a list of AstroPy HDU objects.
use_errors : bool, optional
If ``True`` then the statistical errors are taken from the
input data, rather than calculated by Sherpa from the
count values. The default is ``False``.
bkg_id : int or str, optional
The identifier for the background (the default of ``None``
uses the first component).
See Also
--------
load_bkg_arf : Load an ARF from a file and add it to the background of a PHA data set.
load_bkg_rmf : Load a RMF from a file and add it to the background of a PHA data set.
load_pha : Load a PHA data set.
Notes
-----
The function does not follow the normal Python standards for
parameter use, since it is designed for easy interactive use.
When called with a single un-named argument, it is taken to be
the `arg` parameter. If given two un-named arguments, then
they are interpreted as the `id` and `arg` parameters,
respectively. The remaining parameters are expected to be
given as named arguments.
Examples
--------
Load a source and background data set:
>>> load_pha('src.pi')
read ARF file src.arf
read RMF file src.rmf
>>> load_bkg('src_bkg.pi')
Read in the background via Crates:
>>> bpha = pycrates.read_pha('src_bkg.pi')
>>> load_bkg(bpha)
Create the data set from the data read in by AstroPy:
>>> bhdus = astropy.io.fits.open('src_bkg.pi')
>>> load_bkg(bhdus)
"""
if arg is None:
id, arg = arg, id
bkgsets = self.unpack_bkg(arg, use_errors)
if numpy.iterable(bkgsets):
for bkgid, bkg in enumerate(bkgsets):
self.set_bkg(id, bkg, bkgid + 1)
else:
self.set_bkg(id, bkgsets, bkg_id)
def group(self, id=None, bkg_id=None):
"""Turn on the grouping for a PHA data set.
A PHA data set can be grouped either because it contains
grouping information [1]_, which is automatically applied when
the data is read in with `load_pha` or `load_data`, or because
the `group` set of routines has been used to dynamically
re-group the data. The `ungroup` function removes this
grouping (however it was created). The `group` function
re-applies this grouping. The grouping scheme can be
changed dynamically, using the ``group_xxx`` series of
routines.
Parameters
----------
id : int or str, optional
The identifier for the data set to use. If not given then
the default identifier is used, as returned by
`get_default_id`.
bkg_id : int or str, optional
Set to group the background associated with the data set.
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain a PHA data set.
See Also
--------
fit : Fit one or more data sets.
group_adapt : Adaptively group to a minimum number of counts.
group_adapt_snr : Adaptively group to a minimum signal-to-noise ratio.
group_bins : Group into a fixed number of bins.
group_counts : Group into a minimum number of counts per bin.
group_snr : Group into a minimum signal-to-noise ratio.
group_width : Group into a fixed bin width.
set_grouping : Apply a set of grouping flags to a PHA data set.
set_quality : Apply a set of quality flags to a PHA data set.
ungroup : Turn off the grouping for a PHA data set.
Notes
-----
PHA data is often grouped to improve the signal to noise of
the data, by decreasing the number of bins, so that a
chi-square statistic can be used when fitting the data. After
calling `group`, anything that uses the data set - such as a
plot, fit, or error analysis - will use the grouped data
values. Models should be re-fit if `group` is called; the
increase in the signal of the bins may mean that a chi-square
statistic can now be used.
The grouping is implemented by separate arrays to the main
data - the information is stored in the ``grouping`` and
``quality`` arrays of the PHA data set - so that a data set can
be grouped and ungrouped many times, without losing
information. The `group` command does not create this
information; this is either created by modifying the PHA file
before it is read in, or by using the ``group_xxx`` routines
once the data has been loaded.
The ``grouped`` field of a PHA data set is set to ``True`` when
the data is grouped.
References
----------
.. [1] Arnaud., K. & George, I., "The OGIP Spectral File
Format",
http://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/spectra/ogip_92_007/ogip_92_007.html
Examples
--------
Group the data in the default data set:
>>> group()
>>> get_data().grouped
True
Group the first background component of the 'core' data set:
>>> group('core', bkg_id=1)
>>> get_bkg('core', bkg_id=1).grouped
True
The data is fit using the ungrouped data, and then plots of
the data and best-fit, and the residuals, are created. The
first plot uses the ungrouped data, and the second plot uses
the grouped data.
>>> ungroup()
>>> fit()
>>> plot_fit_resid()
>>> group()
>>> plot_fit_resid()
"""
data = self._get_pha_data(id)
if bkg_id is not None:
data = self.get_bkg(id, bkg_id)
if bkg_id is None:
# First, group backgrounds associated with the
# data set ID; report if background(s) already grouped.
for bid in data.background_ids:
try:
self.group(id, bid)
except DataErr as e:
info(str(e))
# Now check if data is already grouped, and send error message
# if so
if not data.grouped:
data.group()
def set_grouping(self, id, val=None, bkg_id=None):
"""Apply a set of grouping flags to a PHA data set.
A group is indicated by a sequence of flag values starting
with ``1`` and then ``-1`` for all the channels in the group,
following [1]_. Setting the grouping column automatically
turns on the grouping flag for that data set.
Parameters
----------
id : int or str, optional
The identifier for the data set to use. If not given then
the default identifier is used, as returned by
`get_default_id`.
val : array of int
This must be an array of grouping values of the same length
as the data array.
bkg_id : int or str, optional
Set to group the background associated with the data set.
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain a PHA data set.
See Also
--------
fit : Fit one or more data sets.
get_grouping : Return the grouping flags for a PHA data set.
group : Turn on the grouping for a PHA data set.
group_adapt : Adaptively group to a minimum number of counts.
group_adapt_snr : Adaptively group to a minimum signal-to-noise ratio.
group_bins : Group into a fixed number of bins.
group_counts : Group into a minimum number of counts per bin.
group_snr : Group into a minimum signal-to-noise ratio.
group_width : Group into a fixed bin width.
load_grouping : Load the grouping scheme from a file and add to a PHA data set.
set_quality : Apply a set of quality flags to a PHA data set.
ungroup : Turn off the grouping for a PHA data set.
Notes
-----
The function does not follow the normal Python standards for
parameter use, since it is designed for easy interactive use.
When called with a single un-named argument, it is taken to be
the `val` parameter. If given two un-named arguments, then
they are interpreted as the `id` and `val` parameters,
respectively.
The meaning of the grouping column is taken from [1]_, which says
that +1 indicates the start of a bin, -1 if the channel is part
of group, and 0 if the data grouping is undefined for all channels.
References
----------
.. [1] "The OGIP Spectral File Format", https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/spectra/ogip_92_007/ogip_92_007.html
Examples
--------
Copy the grouping array from data set 2 into the default data
set:
>>> grp = get_grouping(2)
>>> set_grouping(grp)
Copy the grouping from data set "src1" to the source and the
first background data set of "src2":
>>> grp = get_grouping("src1")
>>> set_grouping("src2", grp)
>>> set_grouping("src2", grp, bkg_id=1)
"""
if val is None:
id, val = val, id
data = self._get_pha_data(id)
if bkg_id is not None:
data = self.get_bkg(id, bkg_id)
if val is None:
data.grouping = None
else:
if(type(val) in (numpy.ndarray,) and
issubclass(val.dtype.type, numpy.integer)):
data.grouping = numpy.asarray(val)
else:
data.grouping = numpy.asarray(val, SherpaInt)
def get_grouping(self, id=None, bkg_id=None):
"""Return the grouping array for a PHA data set.
The function returns the grouping value for each channel in
the PHA data set.
Parameters
----------
id : int or str, optional
The identifier for the data set to use. If not given then
the default identifier is used, as returned by
`get_default_id`.
bkg_id : int or str, optional
Set if the grouping flags should be taken from a background
associated with the data set.
Returns
-------
grouping : ndarray or ``None``
A value of ``1`` indicates the start of a new group, and ``-1``
indicates that the bin is part of the group. This array is
not filtered - that is, there is one element for each channel
in the PHA data set. Changes to the elements of this array will
change the values in the dataset (it is a reference to the values
used to define the quality, not a copy).
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain a PHA data set.
See Also
--------
fit : Fit one or more data sets.
get_quality : Return the quality array for a PHA data set.
ignore_bad : Exclude channels marked as bad in a PHA data set.
load_grouping: Load the grouping scheme from a file and add to a PHA data set.
set_grouping : Apply a set of grouping flags to a PHA data set.
Notes
-----
The meaning of the grouping column is taken from [1]_, which says
that +1 indicates the start of a bin, -1 if the channel is part
of group, and 0 if the data grouping is undefined for all channels.
References
----------
.. [1] "The OGIP Spectral File Format", https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/spectra/ogip_92_007/ogip_92_007.html
Examples
--------
Copy the grouping array from the default data set to data set 2:
>>> grp1 = get_grouping()
>>> set_grouping(2, grp1)
Return the grouping array of the background component labelled
2 for the 'histate' data set:
>>> grp = get_grouping('histate', bkg_id=2)
"""
data = self._get_pha_data(id)
if bkg_id is not None:
data = self.get_bkg(id, bkg_id)
return data.grouping
def set_quality(self, id, val=None, bkg_id=None):
"""Apply a set of quality flags to a PHA data set.
A quality value of 0 indicates a good channel,
otherwise (values >=1) the channel is considered bad and can be
excluded using the `ignore_bad` function, as discussed
in [1]_.
Parameters
----------
id : int or str, optional
The identifier for the data set to use. If not given then
the default identifier is used, as returned by
`get_default_id`.
val : array of int
This must be an array of quality values of the same length
as the data array.
bkg_id : int or str, optional
Set if the quality values should be associated with the
background associated with the data set.
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain a PHA data set.
See Also
--------
fit : Fit one or more data sets.
get_quality : Return the quality array for a PHA data set.
ignore_bad : Exclude channels marked as bad in a PHA data set.
load_quality : Load the quality array from a file and add to a PHA data set.
set_grouping : Apply a set of grouping flags to a PHA data set.
Notes
-----
The function does not follow the normal Python standards for
parameter use, since it is designed for easy interactive use.
When called with a single un-named argument, it is taken to be
the `val` parameter. If given two un-named arguments, then
they are interpreted as the `id` and `val` parameters,
respectively.
The meaning of the quality column is taken from [1]_, which says
that 0 indicates a "good" channel, 1 and 2 are for channels that
are identified as "bad" or "dubious" (respectively) by software,
5 indicates a "bad" channel set by the user, and values of 3 or 4
are not used.
References
----------
.. [1] "The OGIP Spectral File Format", https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/spectra/ogip_92_007/ogip_92_007.html
Examples
--------
Copy the quality array from data set 2 into the default data
set, and then ensure that any 'bad' channels are ignored:
>>> qual = get_data(2).quality
>>> set_quality(qual)
>>> ignore_bad()
Copy the quality array from data set "src1" to the source and
background data sets of "src2":
>>> qual = get_data("src1").quality
>>> set_quality("src2", qual)
>>> set_quality("src2", qual, bkg_id=1)
"""
if val is None:
id, val = val, id
data = self._get_pha_data(id)
if bkg_id is not None:
data = self.get_bkg(id, bkg_id)
if val is None:
data.quality = None
else:
if(type(val) in (numpy.ndarray,) and
issubclass(val.dtype.type, numpy.integer)):
data.quality = numpy.asarray(val)
else:
data.quality = numpy.asarray(val, SherpaInt)
# DOC TODO: Need to document that routines like get_quality return
# a reference to the data - so can change the data structure
# - and not a copy
# DOC-TODO: explain that many of these can be done with
# direct object access
# get_data().exposure [= ...]
def get_quality(self, id=None, bkg_id=None):
"""Return the quality flags for a PHA data set.
The function returns the quality value for each channel in
the PHA data set.
Parameters
----------
id : int or str, optional
The identifier for the data set to use. If not given then
the default identifier is used, as returned by
`get_default_id`.
bkg_id : int or str, optional
Set if the quality flags should be taken from a background
associated with the data set.
Returns
-------
qual : ndarray or ``None``
The quality value for each channel in the PHA data set.
This array is not grouped or filtered - that is, there
is one element for each channel in the PHA data set. Changes
to the elements of this array will change the values in the
dataset (is is a reference to the values used to define the
quality, not a copy).
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain a PHA data set.
See Also
--------
fit : Fit one or more data sets.
get_grouping : Return the grouping array for a PHA data set.
get_indep : Return the independent axes of a data set.
ignore_bad : Exclude channels marked as bad in a PHA data set.
load_quality : Load the quality array from a file and add to a PHA data set.
set_quality : Apply a set of quality flags to a PHA data set.
Notes
-----
The meaning of the quality column is taken from [1]_, which says
that 0 indicates a "good" channel, 1 and 2 are for channels that
are identified as "bad" or "dubious" (respectively) by software,
5 indicates a "bad" channel set by the user, and values of 3 or 4
are not used.
References
----------
.. [1] "The OGIP Spectral File Format", https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/spectra/ogip_92_007/ogip_92_007.html
Examples
--------
Copy the quality array from the default data set to data set 2:
>>> qual1 = get_quality()
>>> set_quality(2, qual1)
Return the quality array of the background component labelled
2 for the 'histate' data set:
>>> qual = get_quality('histate', bkg_id=2)
Change the quality setting for all channels below 30 in the
default data set to 5 (considered bad by the user):
>>> chans, = get_indep()
>>> qual = get_quality()
>>> qual[chans < 30] = 5
"""
data = self._get_pha_data(id)
if bkg_id is not None:
data = self.get_bkg(id, bkg_id)
return data.quality
def ungroup(self, id=None, bkg_id=None):
"""Turn off the grouping for a PHA data set.
A PHA data set can be grouped either because it contains
grouping information [1]_, which is automatically applied when
the data is read in with `load_pha` or `load_data`, or because
the ``group_xxx`` set of routines has been used to dynamically
re-group the data. The `ungroup` function removes this
grouping (however it was created).
Parameters
----------
id : int or str, optional
The identifier for the data set to use. If not given then
the default identifier is used, as returned by
`get_default_id`.
bkg_id : int or str, optional
Set to ungroup the background associated with the data set.
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain a PHA data set.
See Also
--------
fit : Fit one or more data sets.
group : Turn on the grouping for a PHA data set.
Notes
-----
PHA data is often grouped to improve the signal to noise of
the data, by decreasing the number of bins, so that a
chi-square statistic can be used when fitting the data. After
calling `ungroup`, anything that uses the data set - such as a
plot, fit, or error analysis - will use the original data
values. Models should be re-fit if `ungroup` is called; this
may require a change of statistic depending on the counts per
channel in the spectrum.
The grouping is implemented by separate arrays to the main
data - the information is stored in the ``grouping`` and
``quality`` arrays of the PHA data set - so that a data set
can be grouped and ungrouped many times, without losing
information.
The ``grouped`` field of a PHA data set is set to ``False`` when
the data is not grouped.
If subtracting the background estimate from a data set, the
grouping applied to the source data set is used for both
source and background data sets.
References
----------
.. [1] Arnaud., K. & George, I., "The OGIP Spectral File
Format",
http://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/spectra/ogip_92_007/ogip_92_007.html
Examples
--------
Ungroup the data in the default data set:
>>> ungroup()
>>> get_data().grouped
False
Ungroup the first background component of the 'core' data set:
>>> ungroup('core', bkg_id=1)
>>> get_bkg('core', bkg_id=1).grouped
False
"""
data = self._get_pha_data(id)
if bkg_id is not None:
data = self.get_bkg(id, bkg_id)
if bkg_id is None:
# First, ungroup backgrounds associated with the
# data set ID; report if background(s) already ungrouped.
for bid in data.background_ids:
try:
self.ungroup(id, bid)
except DataErr as e:
info(str(e))
# Now check if data is already ungrouped, and send error message
# if so
if data.grouped:
data.ungroup()
# DOC-TODO: need to document somewhere that this ignores existing
# quality flags and how to use tabStops to include
# this information
# DOC-TODO: how to set the quality if using tabstops to indicate
# "bad" channels, rather than ones to ignore
def group_bins(self, id, num=None, bkg_id=None, tabStops=None):
"""Group into a fixed number of bins.
Combine the data so that there `num` equal-width bins (or
groups). The binning scheme is applied to all the channels,
but any existing filter - created by the `ignore` or `notice`
set of functions - is re-applied after the data has been
grouped.
Parameters
----------
id : int or str, optional
The identifier for the data set to use. If not given then
the default identifier is used, as returned by
`get_default_id`.
num : int
The number of bins in the grouped data set. Each bin
will contain the same number of channels.
bkg_id : int or str, optional
Set to group the background associated with the data set.
When ``bkg_id`` is None (which is the default), the
grouping is applied to all the associated background
data sets as well as the source data set.
tabStops : array of int or bool, optional
If set, indicate one or more ranges of channels that should
not be included in the grouped output. The array should
match the number of channels in the data set and non-zero or
``True`` means that the channel should be ignored from the
grouping (use 0 or ``False`` otherwise).
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain a PHA data set.
See Also
--------
group_adapt : Adaptively group to a minimum number of counts.
group_adapt_snr : Adaptively group to a minimum signal-to-noise ratio.
group_counts : Group into a minimum number of counts per bin.
group_snr : Group into a minimum signal-to-noise ratio.
group_width : Group into a fixed bin width.
set_grouping : Apply a set of grouping flags to a PHA data set.
set_quality : Apply a set of quality flags to a PHA data set.
Notes
-----
The function does not follow the normal Python standards for
parameter use, since it is designed for easy interactive use.
When called with a single un-named argument, it is taken to be
the `num` parameter. If given two un-named arguments, then
they are interpreted as the `id` and `num` parameters,
respectively. The remaining parameters are expected to be
given as named arguments.
Unlike `group`, it is possible to call `group_bins` multiple
times on the same data set without needing to call `ungroup`.
Since the bin width is an integer number of channels, it is
likely that some channels will be "left over". This is even
more likely when the ``tabStops`` parameter is set. If this
happens, a warning message will be displayed to the screen and
the quality value for these channels will be set to 2. This
information can be found with the `get_quality` command.
Examples
--------
Group the default data set so that there are 50 bins.
>>> group_bins(50)
Group the 'jet' data set to 50 bins and plot the result,
then re-bin to 100 bins and overplot the data:
>>> group_bins('jet', 50)
>>> plot_data('jet')
>>> group_bins('jet', 100)
>>> plot_data('jet', overplot=True)
The grouping is applied to the full data set, and then
the filter - in this case defined over the range 0.5
to 8 keV - will be applied. This means that the
noticed data range will likely contain less than
50 bins.
>>> set_analysis('energy')
>>> notice(0.5, 8)
>>> group_bins(50)
>>> plot_data()
Do not group any channels numbered less than 20 or
800 or more. Since there are 780 channels to be
grouped, the width of each bin will be 20 channels
and there are no "left over" channels:
>>> notice()
>>> channels = get_data().channel
>>> ign = (channels <= 20) | (channels >= 800)
>>> group_bins(39, tabStops=ign)
>>> plot_data()
"""
if num is None:
id, num = num, id
data = self._get_pha_data(id)
if bkg_id is not None:
data = self.get_bkg(id, bkg_id)
data.group_bins(num, tabStops)
# DOC-TODO: should num= be renamed val= to better match
# underlying code/differ from group_bins?
def group_width(self, id, num=None, bkg_id=None, tabStops=None):
"""Group into a fixed bin width.
Combine the data so that each bin contains `num` channels.
The binning scheme is applied to all the channels, but any
existing filter - created by the `ignore` or `notice` set of
functions - is re-applied after the data has been grouped.
Parameters
----------
id : int or str, optional
The identifier for the data set to use. If not given then
the default identifier is used, as returned by
`get_default_id`.
num : int
The number of channels to combine into a group.
bkg_id : int or str, optional
Set to group the background associated with the data set.
When ``bkg_id`` is None (which is the default), the
grouping is applied to all the associated background
data sets as well as the source data set.
tabStops : array of int or bool, optional
If set, indicate one or more ranges of channels that should
not be included in the grouped output. The array should
match the number of channels in the data set and non-zero or
``True`` means that the channel should be ignored from the
grouping (use 0 or ``False`` otherwise).
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain a PHA data set.
See Also
--------
group_adapt : Adaptively group to a minimum number of counts.
group_adapt_snr : Adaptively group to a minimum signal-to-noise ratio.
group_bins : Group into a fixed number of bins.
group_counts : Group into a minimum number of counts per bin.
group_snr : Group into a minimum signal-to-noise ratio.
set_grouping : Apply a set of grouping flags to a PHA data set.
set_quality : Apply a set of quality flags to a PHA data set.
Notes
-----
The function does not follow the normal Python standards for
parameter use, since it is designed for easy interactive use.
When called with a single un-named argument, it is taken to be
the `num` parameter. If given two un-named arguments, then
they are interpreted as the `id` and `num` parameters,
respectively. The remaining parameters are expected to be
given as named arguments.
Unlike `group`, it is possible to call `group_width` multiple
times on the same data set without needing to call `ungroup`.
Unless the requested bin width is a factor of the number of
channels (and no ``tabStops`` parameter is given), then some
channels will be "left over". If this happens, a warning
message will be displayed to the screen and the quality value
for these channels will be set to 2. This information can be
found with the `get_quality` command.
Examples
--------
Group the default data set so that each bin contains 20
channels:
>>> group_width(20)
Plot two versions of the 'jet' data set: the first uses
20 channels per group and the second is 50 channels per
group:
>>> group_width('jet', 20)
>>> plot_data('jet')
>>> group_width('jet', 50)
>>> plot_data('jet', overplot=True)
The grouping is applied to the full data set, and then
the filter - in this case defined over the range 0.5
to 8 keV - will be applied.
>>> set_analysis('energy')
>>> notice(0.5, 8)
>>> group_width(50)
>>> plot_data()
The grouping is not applied to channels 101 to
149, inclusive:
>>> notice()
>>> channels = get_data().channel
>>> ign = (channels > 100) & (channels < 150)
>>> group_width(40, tabStops=ign)
>>> plot_data()
"""
if num is None:
id, num = num, id
data = self._get_pha_data(id)
if bkg_id is not None:
data = self.get_bkg(id, bkg_id)
data.group_width(num, tabStops)
def group_counts(self, id, num=None, bkg_id=None,
maxLength=None, tabStops=None):
"""Group into a minimum number of counts per bin.
Combine the data so that each bin contains `num` or more
counts. The binning scheme is applied to all the channels, but
any existing filter - created by the `ignore` or `notice` set
of functions - is re-applied after the data has been grouped.
The background is *not* included in this calculation; the
calculation is done on the raw data even if `subtract` has
been called on this data set.
Parameters
----------
id : int or str, optional
The identifier for the data set to use. If not given then
the default identifier is used, as returned by
`get_default_id`.
num : int
The number of channels to combine into a group.
bkg_id : int or str, optional
Set to group the background associated with the data set.
When ``bkg_id`` is None (which is the default), the
grouping is applied to all the associated background
data sets as well as the source data set.
maxLength : int, optional
The maximum number of channels that can be combined into a
single group.
tabStops : array of int or bool, optional
If set, indicate one or more ranges of channels that should
not be included in the grouped output. The array should
match the number of channels in the data set and non-zero or
``True`` means that the channel should be ignored from the
grouping (use 0 or ``False`` otherwise).
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain a PHA data set.
See Also
--------
group_adapt : Adaptively group to a minimum number of counts.
group_adapt_snr : Adaptively group to a minimum signal-to-noise ratio.
group_bins : Group into a fixed number of bins.
group_snr : Group into a minimum signal-to-noise ratio.
group_width : Group into a fixed bin width.
set_grouping : Apply a set of grouping flags to a PHA data set.
set_quality : Apply a set of quality flags to a PHA data set.
Notes
-----
The function does not follow the normal Python standards for
parameter use, since it is designed for easy interactive use.
When called with a single un-named argument, it is taken to be
the `num` parameter. If given two un-named arguments, then
they are interpreted as the `id` and `num` parameters,
respectively. The remaining parameters are expected to be
given as named arguments.
Unlike `group`, it is possible to call `group_counts` multiple
times on the same data set without needing to call `ungroup`.
If channels can not be placed into a "valid" group, then a
warning message will be displayed to the screen and the
quality value for these channels will be set to 2. This
information can be found with the `get_quality` command.
Examples
--------
Group the default data set so that each bin contains at
least 20 counts:
>>> group_counts(20)
Plot two versions of the 'jet' data set: the first uses
20 counts per group and the second is 50:
>>> group_counts('jet', 20)
>>> plot_data('jet')
>>> group_counts('jet', 50)
>>> plot_data('jet', overplot=True)
The grouping is applied to the full data set, and then
the filter - in this case defined over the range 0.5
to 8 keV - will be applied.
>>> set_analysis('energy')
>>> notice(0.5, 8)
>>> group_counts(30)
>>> plot_data()
If a channel has more than 30 counts then do not group,
otherwise group channels so that they contain at least 40
counts. The `group_adapt` and `group_adapt_snr` functions
provide similar functionality to this example. A maximum
length of 10 channels is enforced, to avoid bins getting too
large when the signal is low.
>>> notice()
>>> counts = get_data().counts
>>> ign = counts > 30
>>> group_counts(40, tabStops=ign, maxLength=10)
"""
if num is None:
id, num = num, id
data = self._get_pha_data(id)
if bkg_id is not None:
data = self.get_bkg(id, bkg_id)
data.group_counts(num, maxLength, tabStops)
# DOC-TODO: check the Poisson stats claim; I'm guessing it means
# gaussian (i.e. sqrt(n))
def group_snr(self, id, snr=None, bkg_id=None,
maxLength=None, tabStops=None, errorCol=None):
"""Group into a minimum signal-to-noise ratio.
Combine the data so that each bin has a signal-to-noise ratio
of at least `snr`. The binning scheme is applied to all the
channels, but any existing filter - created by the `ignore` or
`notice` set of functions - is re-applied after the data has
been grouped. The background is *not* included in this
calculation; the calculation is done on the raw data even if
`subtract` has been called on this data set.
Parameters
----------
id : int or str, optional
The identifier for the data set to use. If not given then
the default identifier is used, as returned by
`get_default_id`.
snr : number
The minimum signal-to-noise ratio that must be reached
to form a group of channels.
bkg_id : int or str, optional
Set to group the background associated with the data set.
When ``bkg_id`` is None (which is the default), the
grouping is applied to all the associated background
data sets as well as the source data set.
maxLength : int, optional
The maximum number of channels that can be combined into a
single group.
tabStops : array of int or bool, optional
If set, indicate one or more ranges of channels that should
not be included in the grouped output. The array should
match the number of channels in the data set and non-zero or
``True`` means that the channel should be ignored from the
grouping (use 0 or ``False`` otherwise).
errorCol : array of num, optional
If set, the error to use for each channel when calculating
the signal-to-noise ratio. If not given then Poisson
statistics is assumed. A warning is displayed for each
zero-valued error estimate.
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain a PHA data set.
See Also
--------
group_adapt : Adaptively group to a minimum number of counts.
group_adapt_snr : Adaptively group to a minimum signal-to-noise ratio.
group_bins : Group into a fixed number of bins.
group_counts : Group into a minimum number of counts per bin.
group_width : Group into a fixed bin width.
set_grouping : Apply a set of grouping flags to a PHA data set.
set_quality : Apply a set of quality flags to a PHA data set.
Notes
-----
The function does not follow the normal Python standards for
parameter use, since it is designed for easy interactive use.
When called with a single un-named argument, it is taken to be
the `snr` parameter. If given two un-named arguments, then
they are interpreted as the `id` and `snr` parameters,
respectively. The remaining parameters are expected to be
given as named arguments.
Unlike `group`, it is possible to call `group_snr` multiple
times on the same data set without needing to call `ungroup`.
If channels can not be placed into a "valid" group, then a
warning message will be displayed to the screen and the
quality value for these channels will be set to 2. This
information can be found with the `get_quality` command.
Examples
--------
Group the default data set so that each bin has a
signal-to-noise ratio of at least 5:
>>> group_snr(20)
Plot two versions of the 'jet' data set: the first uses
a signal-to-noise ratio of 3 and the second 5:
>>> group_snr('jet', 3)
>>> plot_data('jet')
>>> group_snr('jet', 5)
>>> plot_data('jet', overplot=True)
"""
if snr is None:
id, snr = snr, id
data = self._get_pha_data(id)
if bkg_id is not None:
data = self.get_bkg(id, bkg_id)
data.group_snr(snr, maxLength, tabStops, errorCol)
def group_adapt(self, id, min=None, bkg_id=None,
maxLength=None, tabStops=None):
"""Adaptively group to a minimum number of counts.
Combine the data so that each bin contains `min` or more
counts. The difference to `group_counts` is that this
algorithm starts with the bins with the largest signal, in
order to avoid over-grouping bright features, rather than at
the first channel of the data. The adaptive nature means that
low-count regions between bright features may not end up in
groups with the minimum number of counts. The binning scheme
is applied to all the channels, but any existing filter -
created by the `ignore` or `notice` set of functions - is
re-applied after the data has been grouped.
Parameters
----------
id : int or str, optional
The identifier for the data set to use. If not given then
the default identifier is used, as returned by
`get_default_id`.
min : int
The number of channels to combine into a group.
bkg_id : int or str, optional
Set to group the background associated with the data set.
When ``bkg_id`` is ``None`` (which is the default), the
grouping is applied to all the associated background
data sets as well as the source data set.
maxLength : int, optional
The maximum number of channels that can be combined into a
single group.
tabStops : array of int or bool, optional
If set, indicate one or more ranges of channels that should
not be included in the grouped output. The array should
match the number of channels in the data set and non-zero or
``True`` means that the channel should be ignored from the
grouping (use 0 or ``False`` otherwise).
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain a PHA data set.
See Also
--------
group_adapt_snr : Adaptively group to a minimum signal-to-noise ratio.
group_bins : Group into a fixed number of bins.
group_counts : Group into a minimum number of counts per bin.
group_snr : Group into a minimum signal-to-noise ratio.
group_width : Group into a fixed bin width.
set_grouping : Apply a set of grouping flags to a PHA data set.
set_quality : Apply a set of quality flags to a PHA data set.
Notes
-----
The function does not follow the normal Python standards for
parameter use, since it is designed for easy interactive use.
When called with a single un-named argument, it is taken to be
the `min` parameter. If given two un-named arguments, then
they are interpreted as the `id` and `min` parameters,
respectively. The remaining parameters are expected to be
given as named arguments.
Unlike `group`, it is possible to call `group_adapt` multiple
times on the same data set without needing to call `ungroup`.
If channels can not be placed into a "valid" group, then a
warning message will be displayed to the screen and the
quality value for these channels will be set to 2. This
information can be found with the `get_quality` command.
Examples
--------
Group the default data set so that each bin contains at
least 20 counts:
>>> group_adapt(20)
Plot two versions of the 'jet' data set: the first uses
an adaptive scheme of 20 counts per bin, the second
the `group_counts` method:
>>> group_adapt('jet', 20)
>>> plot_data('jet')
>>> group_counts('jet', 20)
>>> plot_data('jet', overplot=True)
"""
if min is None:
id, min = min, id
data = self._get_pha_data(id)
if bkg_id is not None:
data = self.get_bkg(id, bkg_id)
data.group_adapt(min, maxLength, tabStops)
    # DOC-TODO: shouldn't this be snr=None rather than min=None
    def group_adapt_snr(self, id, min=None, bkg_id=None,
                        maxLength=None, tabStops=None, errorCol=None):
        """Adaptively group to a minimum signal-to-noise ratio.

        Combine the data so that each bin has a signal-to-noise ratio
        of at least `min`. The difference to `group_snr` is that this
        algorithm starts with the bins with the largest signal, in
        order to avoid over-grouping bright features, rather than at
        the first channel of the data. The adaptive nature means that
        low-count regions between bright features may not end up in
        groups with the minimum number of counts. The binning scheme
        is applied to all the channels, but any existing filter -
        created by the `ignore` or `notice` set of functions - is
        re-applied after the data has been grouped.

        Parameters
        ----------
        id : int or str, optional
            The identifier for the data set to use. If not given then
            the default identifier is used, as returned by
            `get_default_id`.
        min : number
            The minimum signal-to-noise ratio that must be reached
            to form a group of channels.
        bkg_id : int or str, optional
            Set to group the background associated with the data set.
            When ``bkg_id`` is ``None`` (which is the default), the
            grouping is applied to all the associated background
            data sets as well as the source data set.
        maxLength : int, optional
            The maximum number of channels that can be combined into a
            single group.
        tabStops : array of int or bool, optional
            If set, indicate one or more ranges of channels that should
            not be included in the grouped output. The array should
            match the number of channels in the data set and non-zero or
            ``True`` means that the channel should be ignored from the
            grouping (use 0 or ``False`` otherwise).
        errorCol : array of num, optional
            If set, the error to use for each channel when calculating
            the signal-to-noise ratio. If not given then Poisson
            statistics is assumed. A warning is displayed for each
            zero-valued error estimate.

        Raises
        ------
        sherpa.utils.err.ArgumentErr
            If the data set does not contain a PHA data set.

        See Also
        --------
        group_adapt : Adaptively group to a minimum number of counts.
        group_bins : Group into a fixed number of bins.
        group_counts : Group into a minimum number of counts per bin.
        group_snr : Group into a minimum signal-to-noise ratio.
        group_width : Group into a fixed bin width.
        set_grouping : Apply a set of grouping flags to a PHA data set.
        set_quality : Apply a set of quality flags to a PHA data set.

        Notes
        -----
        The function does not follow the normal Python standards for
        parameter use, since it is designed for easy interactive use.
        When called with a single un-named argument, it is taken to be
        the `min` parameter. If given two un-named arguments, then
        they are interpreted as the `id` and `min` parameters,
        respectively. The remaining parameters are expected to be
        given as named arguments.

        Unlike `group`, it is possible to call `group_adapt_snr`
        multiple times on the same data set without needing to call
        `ungroup`.

        If channels can not be placed into a "valid" group, then a
        warning message will be displayed to the screen and the
        quality value for these channels will be set to 2. This
        information can be found with the `get_quality` command.

        Examples
        --------
        Group the default data set so that each bin contains
        a signal-to-noise ratio of at least 5:

        >>> group_adapt_snr(5)

        Plot two versions of the 'jet' data set: the first uses an
        adaptive scheme and the second the non-adaptive version:

        >>> group_adapt_snr('jet', 4)
        >>> plot_data('jet')
        >>> group_snr('jet', 4)
        >>> plot_data('jet', overplot=True)

        """
        if min is None:
            id, min = min, id
        data = self._get_pha_data(id)
        if bkg_id is not None:
            data = self.get_bkg(id, bkg_id)
        data.group_adapt_snr(min, maxLength, tabStops, errorCol)
def subtract(self, id=None):
"""Subtract the background estimate from a data set.
The ``subtract`` function performs a channel-by-channel
subtraction of the background estimate from the data. After
this command, anything that uses the data set - such as a
plot, fit, or error analysis - will use the subtracted
data. Models should be re-fit if ``subtract`` is called.
Parameters
----------
id : int or str, optional
The identifier for the data set to use. If not given then
the default identifier is used, as returned by
`get_default_id`.
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain a PHA data set.
See Also
--------
fit : Fit one or more data sets.
unsubtract : Undo any background subtraction for the data set.
Notes
-----
Unlike X-Spec [1]_, Sherpa does not automatically subtract
the background estimate from the data.
Background subtraction can only be performed when data and
background are of the same length. If the data and background
are ungrouped, both must have same number of channels. If
they are grouped, data and background can start with different
numbers of channels, but must have the same number of groups
after grouping.
The equation for the subtraction is::
src_counts - bg_counts * (src_exposure * src_backscal)
-----------------------------
(bg_exposure * bg_backscal)
where src_exposure and bg_exposure are the source and
background exposure times, and src_backscal and bg_backscal
are the source and background backscales. The backscale, read
from the ``BACKSCAL`` header keyword of the PHA file [2]_, is
the ratio of data extraction area to total detector area.
The ``subtracted`` field of a dataset is set to ``True`` when
the background is subtracted.
References
----------
.. [1] https://heasarc.gsfc.nasa.gov/xanadu/xspec/manual/XspecSpectralFitting.html
.. [2] https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/spectra/ogip_92_007/node5.html
Examples
--------
Background subtract the default data set.
>>> subtract()
>>> get_data().subtracted
True
Remove the background from the data set labelled 'src':
>>> subtract('src')
>>> get_data('src').subtracted
True
Overplot the background-subtracted data on the original
data for the default data set:
>>> plot_data()
>>> subtract()
>>> plot_data(overplot=True)
"""
if not self._get_pha_data(id).subtracted:
self._get_pha_data(id).subtract()
def unsubtract(self, id=None):
"""Undo any background subtraction for the data set.
The `unsubtract` function undoes any changes made by
`subtract`. After this command, anything that uses the data
set - such as a plot, fit, or error analysis - will use the
original data values. Models should be re-fit if `subtract` is
called.
Parameters
----------
id : int or str, optional
The identifier for the data set to use. If not given then
the default identifier is used, as returned by
`get_default_id`.
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain a PHA data set.
See Also
--------
fit : Fit one or more data sets.
subtract : Subtract the background estimate from a data set.
Notes
-----
The ``subtracted`` field of a PHA data set is set to ``False``
when the background is not subtracted.
Examples
--------
Remove the background subtraction from the default data set.
>>> subtract()
>>> get_data().subtracted
False
Remove the background subtraction from the data set labelled
'src':
>>> subtract('src')
>>> get_data('src').subtracted
False
"""
if self._get_pha_data(id).subtracted:
self._get_pha_data(id).unsubtract()
    def fake_pha(self, id, arf, rmf, exposure, backscal=None, areascal=None,
                 grouping=None, grouped=False, quality=None, bkg=None):
        """Simulate a PHA data set from a model.

        The function creates a simulated PHA data set based on a source
        model, instrument response (given as an ARF and RMF), and exposure
        time, along with a Poisson noise term. A background component can
        be included.

        Parameters
        ----------
        id : int or str
            The identifier for the data set to create. If it already
            exists then it is assumed to contain a PHA data set and the
            counts will be over-written.
        arf : filename or ARF object or list of filenames
            The name of the ARF, or an ARF data object (e.g. as
            returned by `get_arf` or `unpack_arf`). A list of filenames
            can be passed in for instruments that require multiple ARFs.
            Set this to `None` to use any arf that is already set for
            the data set given by id.
        rmf : filename or RMF object or list of filenames
            The name of the RMF, or an RMF data object (e.g. as
            returned by `get_rmf` or `unpack_rmf`). A list of filenames
            can be passed in for instruments that require multiple RMFs.
            Set this to `None` to use any rmf that is already set for
            the data set given by id.
        exposure : number
            The exposure time, in seconds.
        backscal : number, optional
            The 'BACKSCAL' value for the data set.
        areascal : number, optional
            The 'AREASCAL' value for the data set.
        grouping : array, optional
            The grouping array for the data (see `set_grouping`).
        grouped : bool, optional
            Should the simulated data be grouped (see `group`)? The
            default is ``False``. This value is only used if the
            `grouping` parameter is set.
        quality : array, optional
            The quality array for the data (see `set_quality`).
        bkg : optional
            If left empty, then only the source emission is simulated.
            If set to a PHA data object, then the counts from this data
            set are scaled appropriately and added to the simulated
            source signal. To use background model, set ``bkg="model"``. In
            that case a background dataset with ``bkg_id=1`` has to be set
            before calling ``fake_pha``. That background dataset needs to
            include the data itself (not used in this function), the
            background model, and the response.

        Raises
        ------
        sherpa.utils.err.ArgumentErr
            If the data set already exists and does not contain PHA
            data.

        See Also
        --------
        fake : Simulate a data set.
        get_arf : Return the ARF associated with a PHA data set.
        get_rmf : Return the RMF associated with a PHA data set.
        get_dep : Return the dependent axis of a data set.
        load_arrays : Create a data set from array values.
        set_model : Set the source model expression for a data set.

        Notes
        -----
        A model expression is created by using the supplied ARF and RMF
        to convolve the source expression for the dataset (the return
        value of `get_source` for the supplied `id` parameter). This
        expression is evaluated for each channel to create the expectation
        values, which is then passed to a Poisson random number generator
        to determine the observed number of counts per channel. Any
        background component is scaled by appropriate terms (exposure
        time, area scaling, and the backscal value) before it is passed to
        a Poisson random number generator. The simulated background is
        added to the simulated data.

        Examples
        --------
        Estimate the signal from a 5000 second observation using the
        ARF and RMF from "src.arf" and "src.rmf" respectively:

        >>> set_source(1, xsphabs.gal * xsapec.clus)
        >>> gal.nh = 0.12
        >>> clus.kt, clus.abundanc = 4.5, 0.3
        >>> clus.redshift = 0.187
        >>> clus.norm = 1.2e-3
        >>> fake_pha(1, 'src.arf', 'src.rmf', 5000)

        Simulate a 1 mega second observation for the data and model
        from the default data set. The simulated data will include an
        estimated background component based on scaling the existing
        background observations for the source. The simulated data
        set, which has the same grouping as the default set, for
        easier comparison, is created with the 'sim' label and then
        written out to the file 'sim.pi':

        >>> arf = get_arf()
        >>> rmf = get_rmf()
        >>> bkg = get_bkg()
        >>> bscal = get_backscal()
        >>> grp = get_grouping()
        >>> qual = get_quality()
        >>> texp = 1e6
        >>> set_source('sim', get_source())
        >>> fake_pha('sim', arf, rmf, texp, backscal=bscal, bkg=bkg,
        ...          grouping=grp, quality=qual, grouped=True)
        >>> save_pha('sim', 'sim.pi')

        Sometimes, the background dataset is noisy because there are not
        enough photons in the background region. In this case, the background
        model can be used to generate the photons that the background
        contributes to the source spectrum. To do this, a background model
        must be passed in. This model is then convolved with the ARF and RMF
        (which must be set before) of the default background data set:

        >>> set_bkg_source('sim', 'const1d.con1')
        >>> load_arf('sim', 'bkg.arf.fits', bkg_id=1)
        >>> load_rmf('sim', 'bkg_rmf.fits', bkg_id=1)
        >>> fake_pha('sim', arf, rmf, texp, backscal=bscal, bkg='model',
        ...          grouping=grp, quality=qual, grouped=True)
        >>> save_pha('sim', 'sim.pi')

        """
        id = self._fix_id(id)
        # Re-use an existing PHA dataset or create an empty one to fill in.
        if id in self._data:
            d = self._get_pha_data(id)
        else:
            d = sherpa.astro.data.DataPHA('', None, None)
            self.set_data(id, d)
        # Without an RMF (new or already attached) there is no way to map
        # the model to channels, so simulation cannot proceed.
        if rmf is None and len(d.response_ids) == 0:
            raise DataErr('normffake', id)
        # Filenames are unpacked here; lists of filenames are handled later
        # via load_multi_arfs/load_multi_rmfs.
        if type(rmf) in (str, numpy.string_):
            if os.path.isfile(rmf):
                rmf = self.unpack_rmf(rmf)
            else:
                raise IOErr("filenotfound", rmf)
        if type(arf) in (str, numpy.string_):
            if os.path.isfile(arf):
                arf = self.unpack_arf(arf)
            else:
                raise IOErr("filenotfound", arf)
        # A new response replaces any previously attached responses.
        if not (rmf is None and arf is None):
            for resp_id in d.response_ids:
                d.delete_response(resp_id)
        # Get one rmf for testing the channel number
        # This would be a lot simpler if I could just raise the
        # incompatibleresp error on the OO layer (that happens, but the id
        # is not in the error message).
        if rmf is None:
            rmf0 = d.get_rmf()
        elif numpy.iterable(rmf):
            rmf0 = self.unpack_rmf(rmf[0])
        else:
            rmf0 = rmf
        if d.channel is None:
            d.channel = sao_arange(1, rmf0.detchans)
        else:
            if len(d.channel) != rmf0.detchans:
                raise DataErr('incompatibleresp', rmf.name, str(id))
        # at this point, we can be sure that arf is not a string, because
        # if it was, it would have gone through load_arf already above.
        if not (rmf is None and arf is None):
            if numpy.iterable(arf):
                self.load_multi_arfs(id, arf, range(len(arf)))
            else:
                self.set_arf(id, arf)
            if numpy.iterable(rmf):
                self.load_multi_rmfs(id, rmf, range(len(rmf)))
            else:
                self.set_rmf(id, rmf)
        d.exposure = exposure
        if backscal is not None:
            d.backscal = backscal
        if areascal is not None:
            d.areascal = areascal
        if quality is not None:
            d.quality = quality
        if grouping is not None:
            d.grouping = grouping
        if d.grouping is not None:
            if sherpa.utils.bool_cast(grouped):
                d.group()
            else:
                d.ungroup()
        # Update background here. bkg contains a new background;
        # delete the old background (if any) and add the new background
        # to the simulated data set, BEFORE simulating data, and BEFORE
        # adding scaled background counts to the simulated data.
        bkg_models = {}
        if bkg is not None:
            if bkg == 'model':
                bkg_models = {1: self.get_bkg_source(id)}
            else:
                for bkg_id in d.background_ids:
                    d.delete_background(bkg_id)
                self.set_bkg(id, bkg)
        # Calculate the source model, and take a Poisson draw based on
        # the source model. That becomes the simulated data.
        m = self.get_model(id)
        fake.fake_pha(d, m, is_source=False, add_bkgs=bkg is not None,
                      id=str(id), bkg_models=bkg_models)
        d.name = 'faked'
###########################################################################
# PSF
###########################################################################
def load_psf(self, modelname, filename_or_model, *args, **kwargs):
kernel = filename_or_model
if isinstance(filename_or_model, string_types):
try:
kernel = self._eval_model_expression(filename_or_model)
except:
kernel = self.unpack_data(filename_or_model,
*args, **kwargs)
psf = sherpa.astro.instrument.PSFModel(modelname, kernel)
if isinstance(kernel, sherpa.models.Model):
self.freeze(kernel)
self._add_model_component(psf)
self._psf_models.append(psf)
load_psf.__doc__ = sherpa.ui.utils.Session.load_psf.__doc__
    ###########################################################################
    # Models
    ###########################################################################

    # DOC-NOTE: also in sherpa.utils
    def set_full_model(self, id, model=None):
        """Define the convolved model expression for a data set.

        The model expression created by `set_model` can be modified by
        "instrumental effects", such as PSF, ARF and RMF for PHA data
        sets, or a pile up model. These can be set automatically - for
        example, the ARF and RMF can be set up when the source data is
        loaded - or explicitly with calls to routines like `set_psf`,
        `set_arf`, `set_rmf`, and `set_pileup_model`. The
        `set_full_model` function is for when this is not sufficient,
        and full control is needed. Examples of when this would be
        needed include: if different PSF models should be applied to
        different source components; some source components need to
        include the ARF and RMF but some do not.

        Parameters
        ----------
        id : int or str, optional
            The data set containing the source expression. If not given
            then the default identifier is used, as returned by
            `get_default_id`.
        model : str or sherpa.models.Model object
            This defines the model used to fit the data. It can be a
            Python expression or a string version of it.

        See Also
        --------
        fit : Fit one or more data sets.
        set_bkg_full_model : Define the convolved background model expression for a PHA data set.
        set_pileup_model : Include a model of the Chandra ACIS pile up when fitting PHA data.
        set_psf : Add a PSF model to a data set.
        set_model : Set the source model expression for a data set.

        Notes
        -----
        The function does not follow the normal Python standards for
        parameter use, since it is designed for easy interactive use.
        When called with a single un-named argument, it is taken to be
        the `model` parameter. If given two un-named arguments, then
        they are interpreted as the `id` and `model` parameters,
        respectively.

        Some functions - such as `plot_source` and `calc_energy_flux`
        - may not work for model expressions created by
        `set_full_model`.

        Examples
        --------
        Extract the response - the combined RMF and ARF - for a PHA
        data set - and apply it to a model (`xsphabs` * `xsapec`) and
        then include a `powlaw1d` component that only includes the
        RMF and a gaussian that has no instrumental response:

        >>> rsp = get_response()
        >>> rmf = get_rmf()
        >>> smodel = xsphabs.galabs * xsapec.emiss
        >>> bmodel = powlaw1d.pbgnd
        >>> set_full_model(rsp(smodel) + rmf(bmodel) + gauss1d.iline)

        Apply different PSFs to different components, as well as an
        unconvolved component:

        >>> load_psf("psf1", "psf1.fits")
        >>> load_psf("psf2", "psf2.fits")
        >>> smodel = psf1(gauss2d.src1) + psf2(beta2d.src2) + const2d.bgnd
        >>> set_full_model("src", smodel)

        """
        # The base-class call does the actual work (including the
        # id/model argument swap and storing the model); the remainder
        # of this method only decides whether to warn the user.
        sherpa.ui.utils.Session.set_full_model(self, id, model)
        if model is None:
            id, model = model, id
        data = self.get_data(id)
        if isinstance(data, sherpa.astro.data.DataPHA):
            model = self.get_model(id)
            # Only warn when the dataset has a response but neither the
            # full model nor any of its top-level parts is wrapped in an
            # instrument (response/pile-up) model.
            if data._responses:
                instruments = (sherpa.astro.instrument.RSPModel,
                               sherpa.astro.instrument.RMFModel,
                               sherpa.astro.instrument.ARFModel,
                               sherpa.astro.instrument.MultiResponseSumModel,
                               sherpa.astro.instrument.PileupRMFModel)
                do_warning = True
                # if type(model) in instruments:
                # if isinstance(model, instruments):
                if sherpa.ui.utils._is_subclass(type(model), instruments):
                    do_warning = False
                for part in model:
                    # if type(part) in instruments:
                    # if isinstance(part, instruments):
                    if sherpa.ui.utils._is_subclass(type(part), instruments):
                        do_warning = False
                if do_warning:
                    warning("PHA source model '%s' \ndoes not" %
                            model.name +
                            " have an associated instrument model; " +
                            "consider using \nset_source() instead of" +
                            " set_full_model() to include associated " +
                            "\ninstrument automatically")
    # NOTE: the docstring above is replaced at class-creation time by the
    # base Session docstring.
    set_full_model.__doc__ = sherpa.ui.utils.Session.set_full_model.__doc__
def _add_convolution_models(self, id, data, model, is_source):
"""Add in "hidden" components to the model expression.
This includes PSF and pileup models and, for PHA data sets,
it adds in any background terms and the response function.
Notes
-----
If a background is added to a PHA data set using a vector,
rather than scalar, value, the code has to convert from
the model evaluation grid (e.g. keV or Angstroms) to the
scale array, which will be in channels. The only way to do
this is to apply the instrument response to the background
model separately from the source model, which will fail if
the instrument model is not linear, such as the jdpileup
model.
"""
id = self._fix_id(id)
# Add any convolution components from the sherpa.ui layer
model = super()._add_convolution_models(id, data, model, is_source)
# If we don't need to deal with DataPHA issues we can return
if not isinstance(data, sherpa.astro.data.DataPHA) or not is_source:
return model
return sherpa.astro.background.add_response(self, id, data, model)
def _get_response(self, id, pha):
"""Calculate the response for the dataset.
Parameter
---------
id : int or str
The identifier (this is required to be valid).
pha : sherpa.astro.data.DataPHA
The dataset
Returns
-------
response
The return value depends on whether an ARF, RMF, or pile up
model has been associated with the data set.
"""
pileup_model = self._pileup_models.get(id)
return pha.get_full_response(pileup_model)
def get_response(self, id=None, bkg_id=None):
    """Return the response information applied to a PHA data set.

    For a PHA data set, the source model - created by `set_model`
    - is modified by a model representing the instrumental effects
    - such as the effective area of the mirror, the energy
    resolution of the detector, and any model of pile up - which
    is collectively known as the instrument response. The
    `get_response` function returns the instrument response model.

    Parameters
    ----------
    id : int or str, optional
        The data set containing the instrument response. If not given
        then the default identifier is used, as returned by
        `get_default_id`.
    bkg_id : int or str, optional
        If given, return the response for the given background
        component, rather than the source.

    Returns
    -------
    response
        The return value depends on whether an ARF, RMF, or pile up
        model has been associated with the data set.

    Raises
    ------
    sherpa.utils.err.ArgumentErr
        If the data set does not contain PHA data.

    See Also
    --------
    get_arf : Return the ARF associated with a PHA data set.
    get_pileup_model : Return the pile up model for a data set.
    get_rmf : Return the RMF associated with a PHA data set.
    set_bkg_full_model : Define the convolved background model expression for a PHA data set.
    set_full_model : Define the convolved model expression for a data set.

    Examples
    --------
    Create an empty PHA data set, load in an ARF and RMF, and then
    retrieve the response. The response is then used to model the
    instrument response applied to a `powlaw1d` model component,
    along with a constant component (`bgnd`) that does not
    "pass through" the instrument response:

    >>> dataspace1d(1, 1024, 1, dstype=DataPHA)
    >>> load_arf('src.arf')
    >>> load_rmf('src.rmf')
    >>> rsp = get_response()
    >>> set_full_model(rsp(powlaw1d.pl) + const1d.bgnd)
    """
    idval = self._fix_id(id)
    # Pick the source dataset unless a background component was
    # explicitly requested.
    if bkg_id is None:
        pha = self._get_pha_data(idval)
    else:
        pha = self.get_bkg(idval, bkg_id)
    return self._get_response(idval, pha)
def get_pileup_model(self, id=None):
    """Return the pile up model for a data set.

    Return the pile up model set by a call to `set_pileup_model`.

    Parameters
    ----------
    id : int or str, optional
        The data set containing the source expression. If not given
        then the default identifier is used, as returned by
        `get_default_id`.

    Returns
    -------
    model : a `sherpa.astro.models.JDPileup` instance

    Raises
    ------
    sherpa.utils.err.IdentifierErr
        If no pile up model has been set for the data set.

    See Also
    --------
    delete_pileup_model : Delete the pile up model for a data set.
    fit : Fit one or more data sets.
    get_model : Return the model expression for a data set.
    get_source : Return the source model expression for a data set.
    sherpa.astro.models.JDPileup : The ACIS pile up model.
    list_pileup_model_ids : List of all the data sets with a pile up model.
    set_pileup_model : Include a model of the Chandra ACIS pile up when fitting PHA data.

    Examples
    --------
    >>> jdp1 = get_pileup_model()
    >>> jdp2 = get_pileup_model(2)
    """
    # _get_item validates the id and raises IdentifierErr with the
    # given message fragments when no model has been stored.
    return self._get_item(id, self._pileup_models, 'pileup model',
                          'has not been set')
def delete_pileup_model(self, id=None):
    """Delete the pile up model for a data set.

    Remove the pile up model applied to a source model.

    .. versionadded:: 4.12.2

    Parameters
    ----------
    id : int or str, optional
        The data set. If not given then the
        default identifier is used, as returned by `get_default_id`.

    See Also
    --------
    get_pileup_model : Return the pile up model for a data set.
    list_pileup_model_ids : List of all the data sets with a pile up model.
    set_pileup_model : Add a pile up model to a data set.

    Examples
    --------
    >>> delete_pileup_model()
    >>> delete_pileup_model('core')
    """
    # It is not an error if no pileup model exists for the id.
    self._pileup_models.pop(self._fix_id(id), None)
def list_pileup_model_ids(self):
    """List of all the data sets with a pile up model.

    .. versionadded:: 4.12.2

    Returns
    -------
    ids : list of int or str
        The identifiers for all the data sets which have a pile up
        model set by `set_pileup_model`.

    See Also
    --------
    list_data_ids : List the identifiers for the loaded data sets.
    list_model_ids : List of all the data sets with a source expression.
    set_pileup_model : Add a pile up model to a data set.
    """
    # Identifiers can mix ints and strings, so sort on the string
    # form to get a stable, comparable ordering.
    return sorted(self._pileup_models.keys(), key=str)
# DOC-NOTE: should this be made a general function, since it
# presumably does not care about pileup, just adds the
# given model into the expression? Or is it PHA specific?
def set_pileup_model(self, id, model=None):
    """Include a model of the Chandra ACIS pile up when fitting PHA data.

    Chandra observations of bright sources can be affected by
    pileup, so that there is a non-linear correlation between
    the source model and the predicted counts. This process can
    be modelled by including the `jdpileup` model for a
    data set, using the `set_pileup_model`.

    Parameters
    ----------
    id : int or str, optional
        The data set containing the source expression. If not given
        then the default identifier is used, as returned by
        `get_default_id`.
    model : an instance of the `sherpa.astro.models.JDPileup` class

    See Also
    --------
    delete_pileup_model : Delete the pile up model for a data set.
    fit : Fit one or more data sets.
    get_pileup_model : Return the pile up model for a data set.
    sherpa.astro.models.JDPileup : The ACIS pile up model.
    list_pileup_model_ids : List of all the data sets with a pile up model.
    set_full_model : Define the convolved model expression for a data set.
    set_model : Set the source model expression for a data set.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `model` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `model` parameters,
    respectively.

    This is a generic function, and can be used to model other
    non-linear detector effects, but at present the only available
    model is for the ACIS pile up provided by the jdpileup model.

    Examples
    --------
    Plot up the model (an xsphabs model multiplied by a powlaw1d
    component) and then overplot the same expression but including
    the effects of pile up in the Chandra ACIS instrument:

    >>> load_pha('src.pi')
    >>> set_source(xsphabs.gal * powlaw1d.pl)
    >>> plot_model()
    >>> set_pileup_model(jdpileup.jpd)
    >>> plot_model(overplot=True)
    """
    # Support set_pileup_model(model): a single argument is the
    # model, not the id (see the Notes section above).
    if model is None:
        id, model = model, id
    # A string argument is evaluated as a model expression.
    if isinstance(model, string_types):
        model = self._eval_model_expression(model)
    self._set_item(id, model, self._pileup_models, sherpa.models.Model,
                   'model', 'a model object or model expression string')
def get_bkg_source(self, id=None, bkg_id=None):
    """Return the model expression for the background of a PHA data set.

    This returns the model expression created by `set_bkg_model`
    or `set_bkg_source`. It does not include any instrument
    response.

    Parameters
    ----------
    id : int or str, optional
        The data set to use. If not given then the default
        identifier is used, as returned by `get_default_id`.
    bkg_id : int or str, optional
        Identify the background component to use, if there are
        multiple ones associated with the data set.

    Returns
    -------
    model : a sherpa.models.Model object
        This can contain multiple model components. Changing
        attributes of this model changes the model used by the data
        set.

    See Also
    --------
    delete_bkg_model : Delete the background model expression for a data set.
    get_bkg_model : Return the model expression for the background of a PHA data set.
    list_model_ids : List of all the data sets with a source expression.
    set_bkg_model : Set the background model expression for a PHA data set.
    show_bkg_model : Display the background model expression for a data set.

    Examples
    --------
    Return the background model expression for the default data
    set:

    >>> bkg = get_bkg_source()
    >>> len(bkg.pars)
    2
    """
    idval = self._fix_id(id)
    bkgid = self._fix_background_id(idval, bkg_id)

    # Look up the un-convolved background source expression; a
    # missing id or background component means no model was set.
    per_bkg = self._background_sources.get(idval, {})
    src = per_bkg.get(bkgid)
    if src is None:
        raise ModelErr('nobkg', bkgid, idval)

    return src
def get_bkg_model(self, id=None, bkg_id=None):
    """Return the model expression for the background of a PHA data set.

    This returns the model expression for the background of a data
    set, including the instrument response (e.g. ARF and RMF),
    whether created automatically or explicitly, with
    ``set_bkg_full_model``.

    Parameters
    ----------
    id : int or str, optional
        The data set to use. If not given then the default
        identifier is used, as returned by ``get_default_id``.
    bkg_id : int or str, optional
        Identify the background component to use, if there are
        multiple ones associated with the data set.

    Returns
    -------
    instance
        This can contain multiple model components and any
        instrument response. Changing attributes of this model
        changes the model used by the data set.

    See Also
    --------
    delete_bkg_model : Delete the background model expression for a data set.
    get_bkg_source : Return the model expression for the background of a PHA data set.
    list_model_ids : List of all the data sets with a source expression.
    set_bkg_model : Set the background model expression for a PHA data set.
    set_bkg_full_model : Define the convolved background model expression for a PHA data set.
    show_bkg_model : Display the background model expression for a data set.

    Examples
    --------
    Return the background model expression for the default data
    set, including any instrument response:

    >>> bkg = get_bkg_model()
    """
    id = self._fix_id(id)
    bkg_id = self._fix_background_id(id, bkg_id)
    # Prefer a "full" model (set_bkg_full_model), which already
    # contains the response; otherwise fall back to the source
    # expression (set_bkg_model), which needs the response added.
    mdl = self._background_models.get(id, {}).get(bkg_id)
    if mdl is None:
        is_source = True
        src = self._background_sources.get(id, {}).get(bkg_id)
    else:
        is_source = False
        src = mdl
    if src is None:
        raise ModelErr('nobkg', bkg_id, id)
    if not is_source:
        return src
    # The background response is set by the DataPHA.set_background
    # method (copying one over, if it does not exist), which means
    # that the only way to get to this point is if the user has
    # explicitly deleted the background response. In this case
    # we error out.
    #
    bkg = self.get_bkg(id, bkg_id)
    if len(bkg.response_ids) == 0:
        raise DataErr('nobrsp', str(id), str(bkg_id))
    # Wrap the source expression with the background's response.
    resp = sherpa.astro.instrument.Response1D(bkg)
    return resp(src)
def set_bkg_full_model(self, id, model=None, bkg_id=None):
    """Define the convolved background model expression for a PHA data set.

    Set a model expression for a background data set in the same
    way that `set_full_model` does for a source. This is for when
    the background is being fitted simultaneously to the source,
    rather than subtracted from it.

    Parameters
    ----------
    id : int or str, optional
        The data set containing the source expression. If not given
        then the default identifier is used, as returned by
        `get_default_id`.
    model : str or sherpa.models.Model object
        This defines the model used to fit the data. It can be a
        Python expression or a string version of it.
    bkg_id : int or str, optional
        The identifier for the background of the data set, in
        cases where multiple backgrounds are provided.

    See Also
    --------
    fit : Fit one or more data sets.
    set_full_model : Define the convolved model expression for a data set.
    set_pileup_model : Include a model of the Chandra ACIS pile up when fitting PHA data.
    set_psf : Add a PSF model to a data set.
    set_model : Set the source model expression for a data set.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `model` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `model` parameters,
    respectively.

    Some functions - such as `plot_bkg_source` - may not work for
    model expressions created by `set_bkg_full_model`.

    Examples
    --------
    The background is fit by two power laws - one that is passed
    through the instrument response (``gbgnd``) and one that is not
    (``pbgnd``). The source is modelled by ``xsphabs * galabs``,
    together with the background model, scaled by the ratio of
    area and time. Note that the background component in the
    source expression uses the source response rather than
    background response.

    >>> rsp = get_response()
    >>> bresp = get_response(bkg_id=1)
    >>> bscale = get_bkg_scale()
    >>> smodel = xsphabs.galabs * xsapec.emiss
    >>> bmdl = bresp(powlaw1d.gbgnd) + powlaw1d.pbgnd
    >>> smdl = rsp(smodel) + bscale*(rsp(gbgnd) + pbgnd)
    >>> set_full_model(smdl)
    >>> set_bkg_full_model(bmdl)
    """
    # Support set_bkg_full_model(model): a single argument is the
    # model, not the id (see the Notes section above).
    if model is None:
        id, model = model, id
    id = self._fix_id(id)
    bkg_id = self._fix_background_id(id, bkg_id)
    # A string argument is evaluated as a model expression.
    if isinstance(model, string_types):
        model = self._eval_model_expression(model)
    _check_type(model, sherpa.models.Model, 'model',
                'a model object or model expression string')
    self._background_models.setdefault(id, {})[bkg_id] = model
    data = self.get_bkg(id, bkg_id)
    # When the data is analysed in energy/wavelength units and has
    # a response, the model expression is expected to include an
    # instrument component; otherwise the fit would be ill-defined.
    if data.units != 'channel' and data._responses:
        instruments = (sherpa.astro.instrument.RSPModel,
                       sherpa.astro.instrument.RMFModel,
                       sherpa.astro.instrument.ARFModel,
                       sherpa.astro.instrument.MultiResponseSumModel,
                       sherpa.astro.instrument.PileupRMFModel)
        do_warning = True
        # A subclass check is used (rather than isinstance) to look
        # at the model itself and then each of its components.
        if sherpa.ui.utils._is_subclass(type(model), instruments):
            do_warning = False
        for part in model:
            if sherpa.ui.utils._is_subclass(type(part), instruments):
                do_warning = False
        if do_warning:
            # Remove the just-stored model before raising, so the
            # session is left unchanged.
            self.delete_bkg_model(id, bkg_id)
            raise TypeError("PHA background source model '%s' \n" % model.name +
                            " does not have an associated instrument model;" +
                            " consider using\n set_bkg_source() instead of" +
                            " set_bkg_model() to include associated\n instrument" +
                            " automatically")
    self._runparamprompt(model.pars)
# DOC-TODO: should probably explain more about how backgrounds are fit?
def set_bkg_model(self, id, model=None, bkg_id=None):
    """Set the background model expression for a PHA data set.

    The background emission can be fit by a model, defined by the
    `set_bkg_model` call, rather than subtracted from the data.
    If the background is subtracted then the background model is
    ignored when fitting the data.

    Parameters
    ----------
    id : int or str, optional
        The data set containing the source expression. If not given
        then the default identifier is used, as returned by
        `get_default_id`.
    model : str or sherpa.models.Model object
        This defines the model used to fit the data. It can be a
        Python expression or a string version of it.
    bkg_id : int or str, optional
        The identifier for the background of the data set, in
        cases where multiple backgrounds are provided.

    See Also
    --------
    delete_model : Delete the model expression from a data set.
    fit : Fit one or more data sets.
    integrate1d : Integrate 1D source expressions.
    set_model : Set the model expression for a data set.
    set_bkg_full_model : Define the convolved background model expression for a PHA data set.
    show_bkg_model : Display the background model expression for a data set.

    Notes
    -----
    The function does not follow the normal Python standards for
    parameter use, since it is designed for easy interactive use.
    When called with a single un-named argument, it is taken to be
    the `model` parameter. If given two un-named arguments, then
    they are interpreted as the `id` and `model` parameters,
    respectively.

    The emission defined by the background model expression is
    included in the fit to the source dataset, scaling by exposure
    time and area size (given by the ratio of the background to
    source BACKSCAL values). That is, if ``src_model`` and
    ``bkg_model`` represent the source and background model
    expressions set by calls to `set_model` and `set_bkg_model`
    respectively, the source data is fit by::

        src_model + scale * bkg_model

    where ``scale`` is the scaling factor.

    PHA data sets will automatically apply the instrumental
    response (ARF and RMF) to the background expression. For some
    cases this is not useful - for example, when different
    responses should be applied to different model components - in
    which case `set_bkg_full_model` should be used instead.

    Examples
    --------
    The background is model by a gaussian line (``gauss1d`` model
    component called ``bline``) together with an absorbed polynomial
    (the ``bgnd`` component). The absorbing component (``gal``) is
    also used in the source expression.

    >>> set_model(xsphabs.gal*powlaw1d.pl)
    >>> set_bkg_model(gauss1d.bline + gal*polynom1d.bgnd)

    In this example, the default data set has two background
    estimates, so models are set for both components. The same
    model is applied to both, except that the relative
    normalisations are allowed to vary (by inclusion of the
    ``scale`` component).

    >>> bmodel = xsphabs.gabs * powlaw1d.pl
    >>> set_bkg_model(2, bmodel)
    >>> set_bkg_model(2, bmodel * const1d.scale, bkg_id=2)
    """
    # Support set_bkg_model(model): a single argument is the model,
    # not the id (see the Notes section above).
    if model is None:
        id, model = model, id
    id = self._fix_id(id)
    bkg_id = self._fix_background_id(id, bkg_id)
    # A string argument is evaluated as a model expression.
    if isinstance(model, string_types):
        model = self._eval_model_expression(model)
    _check_type(model, sherpa.models.Model, 'model',
                'a model object or model expression string')
    self._background_sources.setdefault(id, {})[bkg_id] = model
    self._runparamprompt(model.pars)
    # Delete any previous model set with set_bkg_full_model(),
    # since the source expression now takes precedence.
    bkg_mdl = self._background_models.get(id, {}).pop(bkg_id, None)
    if bkg_mdl is not None:
        warning("Clearing background convolved model\n'%s'\n" %
                (bkg_mdl.name) + "for dataset %s background %s" %
                (str(id), str(bkg_id)))
# set_bkg_source is an alias for set_bkg_model (mirroring the
# set_source/set_model pairing for the source expression).
set_bkg_source = set_bkg_model
def delete_bkg_model(self, id=None, bkg_id=None):
    """Delete the background model expression for a data set.

    This removes the model expression, created by `set_bkg_model`,
    for the background component of a data set. It does not delete
    the components of the expression, or remove the models for any
    other background components or the source of the data set.

    Parameters
    ----------
    id : int or str, optional
        The data set containing the source expression. If not given
        then the default identifier is used, as returned by
        `get_default_id`.
    bkg_id : int or string, optional
        The identifier for the background component to use.

    See Also
    --------
    clean : Clear all stored session data.
    delete_model : Delete the model expression for a data set.
    get_default_id : Return the default data set identifier.
    list_bkg_ids : List all the background identifiers for a data set.
    set_model : Set the source model expression for a data set.
    show_model : Display the source model expression for a data set.

    Examples
    --------
    Remove the background model expression for the default data set:

    >>> delete_bkg_model()

    Remove the model expression for the background component
    labelled 'down' for the data set with the identifier 'src':

    >>> delete_bkg_model('src', 'down')
    """
    idval = self._fix_id(id)
    bkgid = self._fix_background_id(idval, bkg_id)
    # Drop both the convolved (set_bkg_full_model) and the source
    # (set_bkg_model) expressions; missing entries are ignored, and
    # no loaded PHA dataset is required at this point.
    for store in (self._background_models, self._background_sources):
        store.get(idval, {}).pop(bkgid, None)
def _read_user_model(self, filename, *args, **kwargs):
    """Read in (x, y) data for a user or table model.

    Tries, in order: ASCII table, FITS table, then image. For the
    single-column ASCII/FITS cases only the y values are available,
    so x is returned as None.

    Parameters
    ----------
    filename : str
        The file to read.
    args, kwargs
        Passed on to the unpack routines.

    Returns
    -------
    (x, y)
        The independent axis (or None) and the dependent values.
    """
    x = None
    y = None
    try:
        data = self.unpack_ascii(filename, *args, **kwargs)
        x = data.get_x()
        y = data.get_y()

    # we have to check for the case of a *single* column in an ascii file
    # extract the single array from the read and bypass the dataset
    except TypeError:
        y = sherpa.astro.io.backend.get_ascii_data(filename, *args,
                                                   **kwargs)[1].pop()
    # Was a bare "except:", which would also swallow SystemExit and
    # KeyboardInterrupt; only I/O-style failures should fall through
    # to the FITS-table attempt.
    except Exception:
        try:
            data = self.unpack_table(filename, *args, **kwargs)
            x = data.get_x()
            y = data.get_y()

        # we have to check for the case of a *single* column in a
        # fits table
        # extract the single array from the read and bypass the dataset
        except TypeError:
            y = sherpa.astro.io.backend.get_table_data(filename, *args,
                                                       **kwargs)[1].pop()
        except Exception:
            # unpack_data doesn't include a call to try
            # getting data from image, so try that here.
            data = self.unpack_image(filename, *args, **kwargs)
            # Images supply only the dependent axis here.
            y = data.get_y()
    return (x, y)
def load_xstable_model(self, modelname, filename, etable=False):
    """Load a XSPEC table model.

    Create an additive ('atable', [1]_), multiplicative
    ('mtable', [2]_), or exponential ('etable', [3]_) XSPEC
    table model component. These models may have multiple model
    parameters.

    .. versionchanged:: 4.14.0
       The etable argument has been added to allow exponential table
       models to be used.

    Parameters
    ----------
    modelname : str
        The identifier for this model component.
    filename : str
        The name of the FITS file containing the data, which should
        match the XSPEC table model definition [4]_.
    etable : bool, optional
        Set if this is an etable (as there's no way to determine this
        from the file itself). Defaults to False.

    Raises
    ------
    sherpa.utils.err.ImportErr
        If XSPEC support is not enabled.

    See Also
    --------
    load_conv : Load a 1D convolution model.
    load_psf : Create a PSF model
    load_template_model : Load a set of templates and use it as a model component.
    load_table_model : Load tabular or image data and use it as a model component.
    set_model : Set the source model expression for a data set.
    set_full_model : Define the convolved model expression for a data set.

    Notes
    -----
    NASA's HEASARC site contains a link to community-provided
    XSPEC table models [5]_.

    References
    ----------
    .. [1] http://heasarc.gsfc.nasa.gov/docs/xanadu/xspec/manual/XSmodelAtable.html

    .. [2] http://heasarc.gsfc.nasa.gov/docs/xanadu/xspec/manual/XSmodelMtable.html

    .. [3] http://heasarc.gsfc.nasa.gov/docs/xanadu/xspec/manual/XSmodelEtable.html

    .. [4] http://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_92_009/ogip_92_009.html

    .. [5] https://heasarc.gsfc.nasa.gov/xanadu/xspec/newmodels.html

    Examples
    --------
    Load in the XSPEC table model from the file 'bbrefl_1xsolar.fits'
    and create a model component labelled 'xtbl', which is then
    used in a source expression:

    >>> load_xstable_model('xtbl', 'bbrefl_1xsolar.fits')
    >>> set_source(xsphabs.gal * xtbl)
    >>> print(xtbl)

    Load in an XSPEC etable model:

    >>> load_xstable_model('etbl', 'etable.mod', etable=True)
    """
    # XSPEC support is optional, so only import it when needed.
    try:
        from sherpa.astro import xspec
    except ImportError as exc:
        # TODO: what is the best error to raise here?
        raise ImportErr('notsupported', 'XSPEC') from exc
    tablemodel = xspec.read_xstable_model(modelname, filename, etable=etable)
    # Track the table model and expose it as a session component.
    self._tbl_models.append(tablemodel)
    self._add_model_component(tablemodel)
# also in sherpa.utils
# DOC-NOTE: can filename be a crate/hdulist?
# DOC-TODO: how to describe the supported args/kwargs (not just for this function)?
def load_table_model(self, modelname, filename,
                     method=sherpa.utils.linear_interp, *args, **kwargs):
    # pylint: disable=W1113
    """Load tabular or image data and use it as a model component.

    .. note:: Deprecated in Sherpa 4.9
              The new `load_xstable_model` routine should be used for
              loading XSPEC table model files. Support for these files
              will be removed from `load_table_model` in the next
              release.

    A table model is defined on a grid of points which is
    interpolated onto the independent axis of the data set. The
    model will have at least one parameter (the amplitude, or
    scaling factor to multiply the data by), but may have more
    (if X-Spec table models are used).

    Parameters
    ----------
    modelname : str
        The identifier for this table model.
    filename : str
        The name of the file containing the data, which should
        contain two columns, which are the x and y values for
        the data, or be an image.
    method : func
        The interpolation method to use to map the input data onto
        the coordinate grid of the data set. Linear,
        nearest-neighbor, and polynomial schemes are provided in
        the sherpa.utils module.
    args
        Arguments for reading in the data.
    kwargs
        Keyword arguments for reading in the data.

    See Also
    --------
    load_conv : Load a 1D convolution model.
    load_psf : Create a PSF model
    load_template_model : Load a set of templates and use it as a model component.
    load_xstable_model : Load a XSPEC table model.
    set_model : Set the source model expression for a data set.
    set_full_model : Define the convolved model expression for a data set.

    Notes
    -----
    Examples of interpolation schemes provided by `sherpa.utils`
    are: `linear_interp`, `nearest_interp`, `neville`, and
    `neville2d`.

    Examples
    --------
    Load in the data from filt.fits and use it to multiply
    the source model (a power law and a gaussian). Allow
    the amplitude for the table model to vary between 1
    and 1e6, starting at 1e3.

    >>> load_table_model('filt', 'filt.fits')
    >>> set_source(filt * (powlaw1d.pl + gauss1d.gline))
    >>> set_par(filt.ampl, 1e3, min=1, max=1e6)

    Load in an image ("broad.img") and use the pixel values as a
    model component for data set "img":

    >>> load_table_model('emap', 'broad.img')
    >>> set_source('img', emap * gauss2d)
    """
    tablemodel = TableModel(modelname)
    # interpolation method
    tablemodel.method = method
    tablemodel.filename = filename

    # First see whether this is an XSPEC table model (deprecated
    # path); any failure falls through to the generic readers.
    try:
        if not sherpa.utils.is_binary_file(filename):
            # TODO: use a Sherpa exception
            raise Exception("Not a FITS file")

        self.load_xstable_model(modelname, filename)
        warnings.warn('Use load_xstable_model to load XSPEC table models',
                      DeprecationWarning)
        return

    except Exception:
        x = None
        y = None
        try:
            x, y = self._read_user_model(filename, *args, **kwargs)
        # Was a bare "except:", which would also swallow SystemExit
        # and KeyboardInterrupt; only read failures should trigger
        # the plain-ASCII fallback.
        except Exception:
            # Fall back to reading plain ASCII, if no other
            # more sophisticated I/O backend loaded (such as
            # pyfits or crates) SMD 05/29/13
            data = sherpa.io.read_data(filename, ncols=2)
            x = data.x
            y = data.y

    tablemodel.load(x, y)
    self._tbl_models.append(tablemodel)
    self._add_model_component(tablemodel)
# ## also in sherpa.utils
# DOC-TODO: how to describe *args/**kwargs
# DOC-TODO: how is the _y value used if set
def load_user_model(self, func, modelname, filename=None, *args, **kwargs):
    # pylint: disable=W1113
    """Create a user-defined model.

    Assign a name to a function; this name can then be used as any
    other name of a model component, either in a source expression
    - such as with `set_model` - or to change a parameter
    value. The `add_user_pars` function should be called after
    `load_user_model` to set up the parameter names and
    defaults.

    Parameters
    ----------
    func : func
        The function that evaluates the model.
    modelname : str
        The name to use to refer to the model component.
    filename : str, optional
        Set this to include data from this file in the model. The
        file should contain two columns, and the second column is
        stored in the ``_y`` attribute of the model.
    args
        Arguments for reading in the data from `filename`, if set.
        See `load_table` and `load_image` for more information.
    kwargs
        Keyword arguments for reading in the data from `filename`,
        if set. See `load_table` and `load_image` for more information.

    See Also
    --------
    add_model : Create a user-defined model class.
    add_user_pars : Add parameter information to a user model.
    load_image : Load an image as a data set.
    load_table : Load a FITS binary file as a data set.
    load_table_model : Load tabular data and use it as a model component.
    load_template_model : Load a set of templates and use it as a model component.
    set_model : Set the source model expression for a data set.

    Notes
    -----
    The `load_user_model` function is designed to make it easy to
    add a model, but the interface is not the same as the existing
    models (such as having to call both `load_user_model` and
    `add_user_pars` for each new instance). The `add_model`
    function is used to add a model as a Python class, which is
    more work to set up, but then acts the same way as the
    existing models.

    The function used for the model depends on the dimensions of
    the data. For a 1D model, the signature is::

        def func1d(pars, x, xhi=None):

    where, if xhi is not None, then the dataset is binned and the
    x argument is the low edge of each bin. The pars argument is
    the parameter array - the names, defaults, and limits can be
    set with `add_user_pars` - and should not be changed. The
    return value is an array the same size as x.

    For 2D models, the signature is::

        def func2d(pars, x0, x1, x0hi=None, x1hi=None):

    There is no way using this interface to indicate that the
    model is for 1D or 2D data.

    Examples
    --------
    Create a two-parameter model of the form "y = mx + c",
    where the intercept is the first parameter and the slope the
    second, set the parameter names and default values, then
    use it in a source expression:

    >>> def func1d(pars, x, xhi=None):
    ...     if xhi is not None:
    ...         x = (x + xhi)/2
    ...     return x * pars[1] + pars[0]
    ...
    >>> load_user_model(func1d, "myfunc")
    >>> add_user_pars(myfunc, ["c", "m"], [0, 1])
    >>> set_source(myfunc + gauss1d.gline)
    """
    mdl = sherpa.models.UserModel(modelname)
    # Attach the user's evaluation function and remember where any
    # optional data came from.
    mdl.calc = func
    mdl._file = filename
    if filename is not None:
        # Only the dependent axis of the file is kept on the model.
        _, mdl._y = self._read_user_model(filename, *args, **kwargs)
    self._add_model_component(mdl)
###########################################################################
# Fitting
###########################################################################
# TODO: change bkg_ids default to None or some other "less-dangerous" value
def _add_extra_data_and_models(self, ids, datasets, models, bkg_ids=None):
    """Append background datasets/models for PHA data to the fit lists.

    For each PHA dataset that is not background-subtracted and has
    background models set, the background data and its (possibly
    response-wrapped) model are appended to ``datasets`` and
    ``models`` in place, and the background ids used are recorded
    in ``bkg_ids``.

    Parameters
    ----------
    ids : sequence
        The dataset identifiers, matching ``datasets``.
    datasets : list
        The data objects to fit; extended in place.
    models : list
        The models to fit; extended in place.
    bkg_ids : dict or None, optional
        Mapping of dataset id to the list of background ids added.
        Fixed from a mutable default argument (``{}``): the shared
        default dict would accumulate entries across calls.
    """
    if bkg_ids is None:
        bkg_ids = {}
    for id, d in zip(ids, datasets):
        if isinstance(d, sherpa.astro.data.DataPHA):
            bkg_models = self._background_models.get(id, {})
            bkg_srcs = self._background_sources.get(id, {})
            if d.subtracted:
                # Subtracted data cannot also have fitted backgrounds.
                if (bkg_models or bkg_srcs):
                    warning(('data set %r is background-subtracted; ' +
                             'background models will be ignored') % id)
            elif not (bkg_models or bkg_srcs):
                # wstat handles backgrounds itself, so only warn for
                # other statistics.
                if d.background_ids and self._current_stat.name != 'wstat':
                    warning(('data set %r has associated backgrounds, ' +
                             'but they have not been subtracted, ' +
                             'nor have background models been set') % id)
            else:
                bkg_ids[id] = []
                for bkg_id in d.background_ids:
                    if not (bkg_id in bkg_models or bkg_id in bkg_srcs):
                        raise ModelErr('nobkg', bkg_id, id)
                    bkg = d.get_background(bkg_id)
                    datasets.append(bkg)
                    # Use the background's own response if it has one,
                    # otherwise fall back to the source's.
                    bkg_data = d
                    if len(bkg.response_ids) != 0:
                        bkg_data = bkg
                    bkg_model = bkg_models.get(bkg_id, None)
                    bkg_src = bkg_srcs.get(bkg_id, None)
                    if bkg_model is None and bkg_src is not None:
                        resp = sherpa.astro.instrument.Response1D(bkg_data)
                        bkg_model = resp(bkg_src)
                    models.append(bkg_model)
                    bkg_ids[id].append(bkg_id)
def _prepare_bkg_fit(self, id, otherids=()):
    """Collect the background datasets and models to fit.

    Only backgrounds with an associated model are included: if data
    sets 1 and 2 exist but only 1 has a background model, "fit all"
    means "fit 1"; if both have models it means "fit 1 and 2
    together".

    Returns
    -------
    (fit_to_ids, datasets, models)

    Raises
    ------
    sherpa.utils.err.IdentifierErr
        If no background has a model assigned.
    """
    datasets = []
    models = []
    fit_to_ids = []

    for idval in self._get_fit_ids(id, otherids):
        # PHA data and any background models registered for it.
        data = self._get_pha_data(idval)
        bkg_models = self._background_models.get(idval, {})
        bkg_sources = self._background_sources.get(idval, {})
        for bkg_id in data.background_ids:
            bset = self.get_bkg(idval, bkg_id)
            # Skip background components with no model set.
            if bkg_id not in bkg_models and bkg_id not in bkg_sources:
                continue
            datasets.append(bset)
            models.append(self.get_bkg_model(idval, bkg_id))
            fit_to_ids.append(idval)

    if not models:
        raise IdentifierErr("nomodels")

    return fit_to_ids, datasets, models
def _get_bkg_fit(self, id, otherids=(), estmethod=None, numcores=1):
    """Build the Fit object for the background datasets.

    Returns the tuple of ids actually fit and the fit object.
    """
    fit_to_ids, datasets, models = self._prepare_bkg_fit(id, otherids)
    # Backgrounds do not themselves have backgrounds, so there is
    # no _add_extra_data_and_models step here.
    fitobj = self._get_fit_obj(datasets, models, estmethod, numcores)
    return tuple(fit_to_ids), fitobj
# also in sherpa.utils
# DOC-TODO: existing docs suggest that bkg_only can be set, but looking
# at the code it is always set to False.
#
def fit(self, id=None, *otherids, **kwargs):
# pylint: disable=W1113
"""Fit a model to one or more data sets.
Use forward fitting to find the best-fit model to one or more
data sets, given the chosen statistic and optimization
method. The fit proceeds until the results converge or the
number of iterations exceeds the maximum value (these values
can be changed with `set_method_opt`). An iterative scheme can
be added using `set_iter_method` to try and improve the
fit. The final fit results are displayed to the screen and can
be retrieved with `get_fit_results`.
Parameters
----------
id : int or str, optional
The data set that provides the data. If not given then
all data sets with an associated model are fit simultaneously.
*otherids : sequence of int or str, optional
Other data sets to use in the calculation.
outfile : str, optional
If set, then the fit results will be written to a file with
this name. The file contains the per-iteration fit results.
clobber : bool, optional
This flag controls whether an existing file can be
overwritten (``True``) or if it raises an exception (``False``,
the default setting).
Raises
------
sherpa.utils.err.FitErr
If `filename` already exists and `clobber` is ``False``.
See Also
--------
conf : Estimate the confidence intervals using the confidence method.
contour_fit : Contour the fit to a data set.
covar : Estimate the confidence intervals using the confidence method.
fit_bkg : Fit a model to one or more background PHA data sets.
freeze : Fix model parameters so they are not changed by a fit.
get_fit_results : Return the results of the last fit.
plot_fit : Plot the fit results (data, model) for a data set.
image_fit : Display the data, model, and residuals for a data set in the image viewer.
set_stat : Set the statistical method.
set_method : Change the optimization method.
set_method_opt : Change an option of the current optimization method.
set_bkg_full_model : Define the convolved background model expression for a PHA data set.
set_bkg_model : Set the background model expression for a PHA data set.
set_full_model : Define the convolved model expression for a data set.
set_iter_method : Set the iterative-fitting scheme used in the fit.
set_model : Set the model expression for a data set.
show_fit : Summarize the fit results.
thaw : Allow model parameters to be varied during a fit.
Notes
-----
For PHA data sets with background components, the function
will fit any background components for which a background
model has been created (rather than being subtracted). The
`fit_bkg` function can be used to fit models to just the
background data.
Examples
--------
Simultaneously fit all data sets with models and then
store the results in the variable fres:
>>> fit()
>>> fres = get_fit_results()
Fit just the data set 'img':
>>> fit('img')
Simultaneously fit data sets 1, 2, and 3:
>>> fit(1, 2, 3)
Fit data set 'jet' and write the fit results to the text file
'jet.fit', over-writing it if it already exists:
>>> fit('jet', outfile='jet.fit', clobber=True)
"""
kwargs['bkg_only'] = False
self._fit(id, *otherids, **kwargs)
def fit_bkg(self, id=None, *otherids, **kwargs):
# pylint: disable=W1113
"""Fit a model to one or more background PHA data sets.
Fit only the backgound components of PHA data sets. This can
be used to find the best-fit background parameters, which can
then be frozen before fitting the data, or to ensure that
these parameters are well defined before performing a
simultaneous source and background fit.
Parameters
----------
id : int or str, optional
The data set that provides the background data. If not
given then all data sets with an associated background
model are fit simultaneously.
*otherids : sequence of int or str, optional
Other data sets to use in the calculation.
outfile : str, optional
If set, then the fit results will be written to a file with
this name. The file contains the per-iteration fit results.
clobber : bool, optional
This flag controls whether an existing file can be
overwritten (``True``) or if it raises an exception (``False``,
the default setting).
Raises
------
sherpa.utils.err.FitErr
If `filename` already exists and `clobber` is ``False``.
See Also
--------
conf : Estimate the confidence intervals using the confidence method.
contour_fit : Contour the fit to a data set.
covar : Estimate the confidence intervals using the confidence method.
fit : Fit a model to one or more data sets.
freeze : Fix model parameters so they are not changed by a fit.
get_fit_results : Return the results of the last fit.
plot_fit : Plot the fit results (data, model) for a data set.
image_fit : Display the data, model, and residuals for a data set in the image viewer.
set_stat : Set the statistical method.
set_method : Change the optimization method.
set_method_opt : Change an option of the current optimization method.
set_bkg_full_model : Define the convolved background model expression for a PHA data set.
set_bkg_model : Set the background model expression for a PHA data set.
set_full_model : Define the convolved model expression for a data set.
set_iter_method : Set the iterative-fitting scheme used in the fit.
set_model : Set the model expression for a data set.
show_bkg_source : Display the background model expression for a data set.
show_bkg_model : Display the background model expression used to fit a data set.
show_fit : Summarize the fit results.
thaw : Allow model parameters to be varied during a fit.
Notes
-----
This is only for PHA data sets where the background is being
modelled, rather than subtracted from the data.
Examples
--------
Simultaneously fit all background data sets with models and
then store the results in the variable fres:
>>> fit_bkg()
>>> fres = get_fit_results()
Fit the background for data sets 1 and 2, then do a
simultaneous fit to the source and background data sets:
>>> fit_bkg(1,2)
>>> fit(1,2)
"""
kwargs['bkg_only'] = True
self._fit(id, *otherids, **kwargs)
def _fit(self, id=None, *otherids, **kwargs):
# pylint: disable=W1113
ids = f = None
fit_bkg = False
if 'bkg_only' in kwargs and kwargs.pop('bkg_only'):
fit_bkg = True
# validate the kwds to f.fit() so user typos do not
# result in regular fit
# valid_keys = sherpa.utils.get_keyword_names(sherpa.fit.Fit.fit)
valid_keys = ('outfile', 'clobber', 'filter_nan', 'cache', 'numcores')
for key in kwargs.keys():
if key not in valid_keys:
raise TypeError("unknown keyword argument: '%s'" % key)
numcores = kwargs.get('numcores', 1)
if fit_bkg:
ids, f = self._get_bkg_fit(id, otherids, numcores=numcores)
else:
ids, f = self._get_fit(id, otherids, numcores=numcores)
if 'filter_nan' in kwargs and kwargs.pop('filter_nan'):
for i in ids:
self.get_data(i).mask = self.get_data(
i).mask & numpy.isfinite(self.get_data(i).get_x())
res = f.fit(**kwargs)
res.datasets = ids
self._fit_results = res
info(res.format())
def _get_stat_info(self):
ids, datasets, models = self._prepare_fit(None)
extra_ids = {}
self._add_extra_data_and_models(ids, datasets, models, extra_ids)
output = []
nids = len(ids)
if len(datasets) > 1:
bkg_datasets = datasets[nids:]
bkg_models = models[nids:]
jj = 0
for id, d, m in zip(ids, datasets[:nids], models[:nids]):
f = sherpa.fit.Fit(d, m, self._current_stat)
statinfo = f.calc_stat_info()
statinfo.name = 'Dataset %s' % (str(id))
statinfo.ids = (id,)
output.append(statinfo)
bkg_ids = extra_ids.get(id, ())
nbkg_ids = len(bkg_ids)
idx_lo = jj * nbkg_ids
idx_hi = idx_lo + nbkg_ids
for bkg_id, bkg, bkg_mdl in zip(bkg_ids,
bkg_datasets[idx_lo:idx_hi],
bkg_models[idx_lo:idx_hi]):
bkg_f = sherpa.fit.Fit(bkg, bkg_mdl, self._current_stat)
statinfo = bkg_f.calc_stat_info()
statinfo.name = ("Background %s for Dataset %s" %
(str(bkg_id), str(id)))
statinfo.ids = (id,)
statinfo.bkg_ids = (bkg_id,)
output.append(statinfo)
jj += 1
f = self._get_fit_obj(datasets, models, None)
statinfo = f.calc_stat_info()
if len(ids) == 1:
statinfo.name = 'Dataset %s' % str(ids)
else:
statinfo.name = 'Datasets %s' % str(ids).strip("()")
statinfo.ids = ids
output.append(statinfo)
return output
###########################################################################
# Plotting
###########################################################################
def get_data_plot(self, id=None, recalc=True):
try:
d = self.get_data(id)
except IdentifierErr:
return super().get_data_plot(id, recalc=recalc)
if isinstance(d, sherpa.astro.data.DataPHA):
plotobj = self._dataphaplot
if recalc:
plotobj.prepare(d, self.get_stat())
return plotobj
return super().get_data_plot(id, recalc=recalc)
get_data_plot.__doc__ = sherpa.ui.utils.Session.get_data_plot.__doc__
def get_model_plot(self, id=None, recalc=True):
try:
d = self.get_data(id)
except IdentifierErr:
return super().get_model_plot(id, recalc=recalc)
if isinstance(d, sherpa.astro.data.DataPHA):
plotobj = self._modelhisto
if recalc:
plotobj.prepare(d, self.get_model(id), self.get_stat())
return plotobj
return super().get_model_plot(id, recalc=recalc)
get_model_plot.__doc__ = sherpa.ui.utils.Session.get_model_plot.__doc__
# also in sherpa.utils, but without the lo/hi arguments
def get_source_plot(self, id=None, lo=None, hi=None, recalc=True):
"""Return the data used by plot_source.
Parameters
----------
id : int or str, optional
The data set that provides the data. If not given then the
default identifier is used, as returned by `get_default_id`.
lo : number, optional
The low value to plot (only used for PHA data sets).
hi : number, optional
The high value to plot (only use for PHA data sets).
recalc : bool, optional
If ``False`` then the results from the last call to
`plot_source` (or `get_source_plot`) are returned,
otherwise the data is re-generated.
Returns
-------
instance
An object representing the data used to create the plot by
`plot_source`. The return value depends on the data
set (e.g. PHA, 1D binned, 1D un-binned). If ``lo`` or ``hi``
were set then the ``mask`` attribute of the object can be
used to apply the filter to the ``xlo``, ``xhi``, and ``y``
attributes.
See Also
--------
get_model_plot : Return the data used by plot_model.
plot_model : Plot the model for a data set.
plot_source : Plot the source expression for a data set.
Examples
--------
Retrieve the source plot information for the default data
set and then display it:
>>> splot = get_source_plot()
>>> print(splot)
Return the plot data for data set 2, and then use it to create
a plot:
>>> s2 = get_source_plot(2)
>>> s2.plot()
Retrieve the source plots for the 0.5 to 7 range of the
'jet' and 'core' data sets and display them on the same plot:
>>> splot1 = get_source_plot(id='jet', lo=0.5, hi=7)
>>> splot2 = get_source_plot(id='core', lo=0.5, hi=7)
>>> splot1.plot()
>>> splot2.overplot()
Access the plot data (for a PHA data set) and select only the
bins corresponding to the 2-7 keV range defined in the call:
>>> splot = get_source_plot(lo=2, hi=7)
>>> xlo = splot.xlo[splot.mask]
>>> xhi = splot.xhi[splot.mask]
>>> y = splot.y[splot.mask]
For a PHA data set, the units on both the X and Y axes of the
plot are controlled by the `set_analysis` command. In this
case the Y axis will be in units of photon/s/cm^2/keV x Energy
and the X axis in keV:
>>> set_analysis('energy', factor=1)
>>> splot = get_source_plot()
>>> print(splot)
"""
try:
d = self.get_data(id)
except IdentifierErr as ie:
if recalc:
raise ie
d = None
if isinstance(d, sherpa.astro.data.DataPHA):
plotobj = self._astrosourceplot
if recalc:
plotobj.prepare(d, self.get_source(id), lo=lo, hi=hi)
return plotobj
return super().get_source_plot(id, recalc=recalc)
def get_fit_plot(self, id=None, recalc=True):
plotobj = self._fitplot
if not recalc:
return plotobj
d = self.get_data(id)
if isinstance(d, sherpa.astro.data.DataPHA):
dataobj = self.get_data_plot(id, recalc=recalc)
# We don't use get_model_plot as that uses the ungrouped data
# modelobj = self.get_model_plot(id)
# but we do want to use a histogram plot, not _modelplot.
# modelobj = self._modelplot
# Should this object be stored in self? There's
# no way to get it by API (apart from get_fit_plot).
#
modelobj = sherpa.astro.plot.ModelPHAHistogram()
modelobj.prepare(d, self.get_model(id),
self.get_stat())
plotobj.prepare(dataobj, modelobj)
return plotobj
return super().get_fit_plot(id, recalc=recalc)
get_fit_plot.__doc__ = sherpa.ui.utils.Session.get_fit_plot.__doc__
def get_model_component_plot(self, id, model=None, recalc=True):
"""Return the data used to create the model-component plot.
For PHA data, the response model is automatically added by the
routine unless the model contains a response.
Parameters
----------
id : int or str, optional
The data set that provides the data. If not given then the
default identifier is used, as returned by `get_default_id`.
model : str or sherpa.models.model.Model instance
The component to use (the name, if a string).
recalc : bool, optional
If ``False`` then the results from the last call to
`plot_model_component` (or `get_model_component_plot`)
are returned, otherwise the data is re-generated.
Returns
-------
instance
An object representing the data used to create the plot by
`plot_model_component`. The return value depends on the
data set (e.g. PHA, 1D binned, or 1D un-binned).
See Also
--------
get_model_plot : Return the data used to create the model plot.
plot_model : Plot the model for a data set.
plot_model_component : Plot a component of the model for a data set.
Notes
-----
The function does not follow the normal Python standards for
parameter use, since it is designed for easy interactive use.
When called with a single un-named argument, it is taken to be
the `model` parameter. If given two un-named arguments, then
they are interpreted as the `id` and `model` parameters,
respectively.
Examples
--------
Return the plot data for the ``pl`` component used in the
default data set:
>>> cplot = get_model_component_plot(pl)
Return the full source model (``fplot``) and then for the
components ``gal * pl`` and ``gal * gline``, for the data set
'jet':
>>> fmodel = xsphabs.gal * (powlaw1d.pl + gauss1d.gline)
>>> set_source('jet', fmodel)
>>> fit('jet')
>>> fplot = get_model_plot('jet')
>>> plot1 = get_model_component_plot('jet', pl*gal)
>>> plot2 = get_model_component_plot('jet', gline*gal)
For PHA data sets the response is automatically added, but it
can also be manually specified. In the following plot1 and
plot2 contain the same data:
>>> plot1 = get_model_component_plot(pl)
>>> rsp = get_response()
>>> plot2 = get_model_component_plot(rsp(pl))
"""
if model is None:
id, model = model, id
model = self._check_model(model)
try:
d = self.get_data(id)
except IdentifierErr as ie:
if recalc:
raise ie
d = None
if isinstance(d, sherpa.astro.data.DataPHA):
plotobj = self._astrocompmdlplot
if recalc:
if not has_pha_response(model):
try:
rsp = self.get_response(id) # TODO: bkg_id?
model = rsp(model)
except DataErr:
# no response
pass
plotobj.prepare(d, model, self.get_stat())
return plotobj
return super().get_model_component_plot(id, model=model, recalc=recalc)
# copy doc string from sherpa.utils
def get_source_component_plot(self, id, model=None, recalc=True):
if model is None:
id, model = model, id
model = self._check_model(model)
try:
d = self.get_data(id)
except IdentifierErr as ie:
if recalc:
raise ie
d = None
if isinstance(d, sherpa.astro.data.DataPHA):
plotobj = self._astrocompsrcplot
if recalc:
plotobj.prepare(d, model, self.get_stat())
return plotobj
return super().get_source_component_plot(id, model=model, recalc=recalc)
get_source_component_plot.__doc__ = sherpa.ui.utils.Session.get_source_component_plot.__doc__
def get_pvalue_plot(self, null_model=None, alt_model=None, conv_model=None,
id=1, otherids=(), num=500, bins=25, numcores=None,
recalc=False):
if recalc and conv_model is None and \
isinstance(self.get_data(id), sherpa.astro.data.DataPHA):
conv_model = self.get_response(id)
return super().get_pvalue_plot(null_model=null_model, alt_model=alt_model,
conv_model=conv_model, id=id, otherids=otherids,
num=num, bins=bins, numcores=numcores,
recalc=recalc)
get_pvalue_plot.__doc__ = sherpa.ui.utils.Session.get_pvalue_plot.__doc__
def get_order_plot(self, id=None, orders=None, recalc=True):
"""Return the data used by plot_order.
Parameters
----------
id : int or str, optional
The data set that provides the data. If not given then the
default identifier is used, as returned by `get_default_id`.
orders : optional
Which response to use. The argument can be a scalar or
array, in which case multiple curves will be displayed.
The default is to use all orders.
recalc : bool, optional
If ``False`` then the results from the last call to
`plot_order` (or `get_order_plot`) are returned, otherwise
the data is re-generated.
Returns
-------
data : a `sherpa.astro.plot.OrderPlot` instance
An object representing the data used to create the plot by
`plot_order`.
See Also
--------
get_default_id : Return the default data set identifier.
plot_order : Plot the model for a data set convolved by the given response.
Examples
--------
Retrieve the plot information for order 1 of the default data set
and then display it:
>>> oplot = get_order_plot(orders=1)
>>> print(oplot)
Return the plot data for orders 1 and 2 of data set 'jet', plot the
first and then overplot the second:
>>> plots = get_order_plot('jet', orders=[1, 2])
>>> plots[0].plot()
>>> plots[1].overplot()
"""
plotobj = self._orderplot
if recalc:
plotobj.prepare(self._get_pha_data(id),
self.get_model(id), orders=orders)
return plotobj
def get_arf_plot(self, id=None, resp_id=None, recalc=True):
"""Return the data used by plot_arf.
Parameters
----------
id : int or str, optional
The data set with an ARF. If not given then the default
identifier is used, as returned by `get_default_id`.
resp_id : int or str, optional
Which ARF to use in the case that multiple ARFs are
associated with a data set. The default is ``None``,
which means the first one.
recalc : bool, optional
If ``False`` then the results from the last call to
`plot_arf` (or `get_arf_plot`) are returned, otherwise
the data is re-generated.
Returns
-------
arf_plot : a `sherpa.astro.plot.ARFPlot` instance
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain PHA data.
See Also
--------
plot : Create one or more plot types.
plot_arf : Plot the ARF associated with a data set.
Examples
--------
Return the ARF plot data for the default data set:
>>> aplot = get_arf_plot()
>>> aplot.y.max()
676.95794677734375
Return the ARF data for the second response of the
data set labelled 'histate', and then plot it:
>>> aplot = get_arf_plot('histate', 2)
>>> aplot.plot()
"""
plotobj = self._arfplot
if not recalc:
return plotobj
id = self._fix_id(id)
arf = self._get_pha_data(id).get_arf(resp_id)
if arf is None:
raise DataErr('noarf', id)
plotobj.prepare(arf, self._get_pha_data(id))
return plotobj
def get_bkg_fit_plot(self, id=None, bkg_id=None, recalc=True):
"""Return the data used by plot_bkg_fit.
Parameters
----------
id : int or str, optional
The data set that provides the data. If not given then the
default identifier is used, as returned by `get_default_id`.
bkg_id : int or str, optional
Identify the background component to use, if there are
multiple ones associated with the data set.
recalc : bool, optional
If ``False`` then the results from the last call to
`plot_bkg_fit` (or `get_bkg_fit_plot`) are returned,
otherwise the data is re-generated.
Returns
-------
model : a `sherpa.astro.plot.BkgFitPlot` instance
An object representing the data used to create the plot by
`plot_bkg_fit`.
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain PHA data.
sherpa.utils.err.IdentifierErr
If the ``bkg_id`` parameter is invalid.
sherpa.utils.err.ModelErr
If no model expression has been created for the background
data.
See Also
--------
get_bkg_plot : Return the data used by plot_bkg.
get_bkg_model_plot : Return the data used by plot_bkg_model.
plot_bkg_fit : Plot the fit results (data, model) for the background of a PHA data set.
Examples
--------
Create the data needed to create the "fit plot" for the background
of the default data set and display it:
>>> bplot = get_bkg_fit_plot()
>>> print(bplot)
Return the plot data for data set 2, and then use it to create
a plot:
>>> b2 = get_bkg_fit_plot(2)
>>> b2.plot()
The fit plot consists of a combination of a data plot and a
model plot, which are captured in the `dataplot` and `modelplot`
attributes of the return value. These can be used to display
the plots individually, such as:
>>> b2.dataplot.plot()
>>> b2.modelplot.plot()
or, to combine the two:
>>> b2.dataplot.plot()
>>> b2.modelplot.overplot()
Return the plot data for the second background component to the
"jet" data set:
>>> bplot = get_bkg_fit_plot('jet', bkg_id=2)
"""
plotobj = self._bkgfitplot
if not recalc:
return plotobj
dataobj = self.get_bkg_plot(id, bkg_id, recalc=recalc)
# We don't use get_bkg_model_plot as that uses the ungrouped data
# modelobj = self.get_bkg_model_plot(id, bkg_id, recalc=recalc)
modelobj = self._bkgmodelplot
modelobj.prepare(self.get_bkg(id, bkg_id),
self.get_bkg_model(id, bkg_id),
self.get_stat())
plotobj.prepare(dataobj, modelobj)
return plotobj
def get_bkg_model_plot(self, id=None, bkg_id=None, recalc=True):
"""Return the data used by plot_bkg_model.
Parameters
----------
id : int or str, optional
The data set that provides the data. If not given then the
default identifier is used, as returned by `get_default_id`.
bkg_id : int or str, optional
Identify the background component to use, if there are
multiple ones associated with the data set.
recalc : bool, optional
If ``False`` then the results from the last call to
`plot_bkg_model` (or `get_bkg_model_plot`) are returned,
otherwise the data is re-generated.
Returns
-------
model : a `sherpa.astro.plot.BkgModelHistogram` instance
An object representing the data used to create the plot by
`plot_bkg_model`.
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain PHA data.
sherpa.utils.err.IdentifierErr
If the ``bkg_id`` parameter is invalid.
sherpa.utils.err.ModelErr
If no model expression has been created for the background
data.
See Also
--------
get_bkg_source_plot : Return the data used by plot_bkg_source.
plot_bkg_model : Plot the model for the background of a PHA data set.
plot_bkg_source : Plot the model expression for the background of a PHA data set.
Examples
--------
>>> bplot = get_bkg_model_plot()
>>> print(bplot)
>>> get_bkg_model_plot('jet', bkg_id=1).plot()
>>> get_bkg_model_plot('jet', bkg_id=2).overplot()
"""
plotobj = self._bkgmodelhisto
if recalc:
plotobj.prepare(self.get_bkg(id, bkg_id),
self.get_bkg_model(id, bkg_id),
self.get_stat())
return plotobj
def get_bkg_plot(self, id=None, bkg_id=None, recalc=True):
"""Return the data used by plot_bkg.
Parameters
----------
id : int or str, optional
The data set that provides the data. If not given then the
default identifier is used, as returned by `get_default_id`.
bkg_id : int or str, optional
Identify the background component to use, if there are
multiple ones associated with the data set.
recalc : bool, optional
If ``False`` then the results from the last call to
`plot_bkg` (or `get_bkg_plot`) are returned, otherwise
the data is re-generated.
Returns
-------
data : a `sherpa.astro.plot.BkgDataPlot` instance
An object representing the data used to create the plot by
`plot_data`. The relationship between the returned values
and the values in the data set depend on the analysis,
filtering, and grouping settings of the data set.
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain PHA data.
sherpa.utils.err.IdentifierErr
If the ``bkg_id`` parameter is invalid.
See Also
--------
get_default_id : Return the default data set identifier.
plot_bkg : Plot the background values for a PHA data set.
Examples
--------
Create the data needed to create the "data plot" for the background
of the default data set and display it:
>>> bplot = get_bkg_plot()
>>> print(bplot)
Return the plot data for data set 2, and then use it to create
a plot:
>>> b2 = get_bkg_plot(2)
>>> b2.plot()
Return the plot data for the second background component to the
"jet" data set:
>>> bplot = get_bkg_plot('jet', bkg_id=2)
"""
plotobj = self._bkgdataplot
if recalc:
plotobj.prepare(self.get_bkg(id, bkg_id),
self.get_stat())
return plotobj
def get_bkg_source_plot(self, id=None, lo=None, hi=None,
bkg_id=None, recalc=True):
"""Return the data used by plot_bkg_source.
Parameters
----------
id : int or str, optional
The data set that provides the data. If not given then the
default identifier is used, as returned by `get_default_id`.
lo : number, optional
The low value to plot.
hi : number, optional
The high value to plot.
bkg_id : int or str, optional
Identify the background component to use, if there are
multiple ones associated with the data set.
recalc : bool, optional
If ``False`` then the results from the last call to
`plot_bkg_source` (or `get_bkg_source_plot`) are returned,
otherwise the data is re-generated.
Returns
-------
source : a `sherpa.astro.plot.BkgSourcePlot` instance
An object representing the data used to create the plot by
`plot_bkg_source`.
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain PHA data.
sherpa.utils.err.IdentifierErr
If the ``bkg_id`` parameter is invalid.
sherpa.utils.err.ModelErr
If no model expression has been created for the background
data.
See Also
--------
get_bkg_model_plot : Return the data used by plot_bkg_model.
plot_bkg_model : Plot the model for the background of a PHA data set.
plot_bkg_source : Plot the model expression for the background of a PHA data set.
Examples
--------
Retrieve the source plot information for the background of
the default data set and display it:
>>> splot = get_bkg_source_plot()
>>> print(splot)
Return the background plot data for data set 2, and then use it
to create a plot:
>>> s2 = get_bkg_source_plot(2)
>>> s2.plot()
Create a plot of the first two background components of the
'histate' data set, overplotting the second on the first:
>>> b1 = get_bkg_source_plot('histate', bkg_id=1)
>>> b2 = get_bkg_source_plot('histate', bkg_id=2)
>>> b1.plot()
>>> b2.overplot()
Retrieve the background source plots for the 0.5 to 7 range of the
'jet' and 'core' data sets and display them on the same plot:
>>> splot1 = get_bkg_source_plot(id='jet', lo=0.5, hi=7)
>>> splot2 = get_bkg_source_plot(id='core', lo=0.5, hi=7)
>>> splot1.plot()
>>> splot2.overplot()
For a PHA data set, the units on both the X and Y axes of the
plot are controlled by the `set_analysis` command. In this
case the Y axis will be in units of photons/s/cm^2 and the X
axis in keV:
>>> set_analysis('energy', factor=1)
>>> splot = get_bkg_source_plot()
>>> print(splot)
"""
plotobj = self._bkgsourceplot
if recalc:
plotobj.prepare(self.get_bkg(id, bkg_id),
self.get_bkg_source(id, bkg_id),
lo=lo, hi=hi)
return plotobj
def get_bkg_resid_plot(self, id=None, bkg_id=None, recalc=True):
"""Return the data used by plot_bkg_resid.
Parameters
----------
id : int or str, optional
The data set that provides the data. If not given then the
default identifier is used, as returned by `get_default_id`.
bkg_id : int or str, optional
Identify the background component to use, if there are
multiple ones associated with the data set.
recalc : bool, optional
If ``False`` then the results from the last call to
`plot_bkg_resid` (or `get_bkg_resid_plot`) are returned,
otherwise the data is re-generated.
Returns
-------
resid : a `sherpa.astro.plot.BkgResidPlot` instance
An object representing the data used to create the plot by
`plot_bkg_resid`.
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain PHA data.
sherpa.utils.err.IdentifierErr
If the ``bkg_id`` parameter is invalid.
sherpa.utils.err.ModelErr
If no model expression has been created for the background
data.
See Also
--------
get_bkg_chisqr_plot : Return the data used by plot_bkg_chisqr.
get_bkg_delchi_plot : Return the data used by plot_bkg_delchi.
get_bkg_ratio_plot : Return the data used by plot_bkg_ratio.
plot_bkg_resid : Plot the residual (data-model) values for the background of a PHA data set.
Examples
--------
>>> bplot = get_bkg_resid_plot()
>>> print(bplot)
>>> get_bkg_resid_plot('jet', bkg_id=1).plot()
>>> get_bkg_resid_plot('jet', bkg_id=2).overplot()
"""
plotobj = self._bkgresidplot
if recalc:
plotobj.prepare(self.get_bkg(id, bkg_id),
self.get_bkg_model(id, bkg_id),
self.get_stat())
return plotobj
def get_bkg_ratio_plot(self, id=None, bkg_id=None, recalc=True):
"""Return the data used by plot_bkg_ratio.
Parameters
----------
id : int or str, optional
The data set that provides the data. If not given then the
default identifier is used, as returned by `get_default_id`.
bkg_id : int or str, optional
Identify the background component to use, if there are
multiple ones associated with the data set.
recalc : bool, optional
If ``False`` then the results from the last call to
`plot_bkg_ratio` (or `get_bkg_ratio_plot`) are returned,
otherwise the data is re-generated.
Returns
-------
ratio : a `sherpa.astro.plot.BkgRatioPlot` instance
An object representing the data used to create the plot by
`plot_bkg_ratio`.
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain PHA data.
sherpa.utils.err.IdentifierErr
If the ``bkg_id`` parameter is invalid.
sherpa.utils.err.ModelErr
If no model expression has been created for the background
data.
See Also
--------
get_bkg_chisqr_plot : Return the data used by plot_bkg_chisqr.
get_bkg_delchi_plot : Return the data used by plot_bkg_delchi.
get_bkg_resid_plot : Return the data used by plot_bkg_resid.
plot_bkg_ratio : Plot the ratio of data to model values for the background of a PHA data set.
Examples
--------
>>> bplot = get_bkg_ratio_plot()
>>> print(bplot)
>>> get_bkg_ratio_plot('jet', bkg_id=1).plot()
>>> get_bkg_ratio_plot('jet', bkg_id=2).overplot()
"""
plotobj = self._bkgratioplot
if recalc:
plotobj.prepare(self.get_bkg(id, bkg_id),
self.get_bkg_model(id, bkg_id),
self.get_stat())
return plotobj
def get_bkg_delchi_plot(self, id=None, bkg_id=None, recalc=True):
"""Return the data used by plot_bkg_delchi.
Parameters
----------
id : int or str, optional
The data set that provides the data. If not given then the
default identifier is used, as returned by `get_default_id`.
bkg_id : int or str, optional
Identify the background component to use, if there are
multiple ones associated with the data set.
recalc : bool, optional
If ``False`` then the results from the last call to
`plot_bkg_delchi` (or `get_bkg_delchi_plot`) are returned,
otherwise the data is re-generated.
Returns
-------
delchi : a `sherpa.astro.plot.BkgDelchiPlot` instance
An object representing the data used to create the plot by
`plot_bkg_delchi`.
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain PHA data.
sherpa.utils.err.IdentifierErr
If the ``bkg_id`` parameter is invalid.
sherpa.utils.err.ModelErr
If no model expression has been created for the background
data.
See Also
--------
get_bkg_chisqr_plot : Return the data used by plot_bkg_chisqr.
get_bkg_ratio_plot : Return the data used by plot_bkg_ratio.
get_bkg_resid_plot : Return the data used by plot_bkg_resid.
plot_bkg_delchi : Plot the ratio of residuals to error for the background of a PHA data set.
Examples
--------
>>> bplot = get_bkg_delchi_plot()
>>> print(bplot)
>>> get_bkg_delchi_plot('jet', bkg_id=1).plot()
>>> get_bkg_delchi_plot('jet', bkg_id=2).overplot()
"""
plotobj = self._bkgdelchiplot
if recalc:
plotobj.prepare(self.get_bkg(id, bkg_id),
self.get_bkg_model(id, bkg_id),
self.get_stat())
return plotobj
def get_bkg_chisqr_plot(self, id=None, bkg_id=None, recalc=True):
    """Return the data used by plot_bkg_chisqr.

    Parameters
    ----------
    id : int or str, optional
        The identifier of the data set to use. When not set, the
        default identifier (see `get_default_id`) is used.
    bkg_id : int or str, optional
        Which background component of the data set to use, for
        data sets with multiple backgrounds.
    recalc : bool, optional
        When ``True`` (the default) the plot data is re-generated;
        otherwise the values from the last `plot_bkg_chisqr` (or
        `get_bkg_chisqr_plot`) call are returned.

    Returns
    -------
    chisqr : a `sherpa.astro.plot.BkgChisqrPlot` instance
        The data that `plot_bkg_chisqr` would display.

    Raises
    ------
    sherpa.utils.err.ArgumentErr
        If the data set does not contain PHA data.
    sherpa.utils.err.IdentifierErr
        If the ``bkg_id`` parameter is invalid.
    sherpa.utils.err.ModelErr
        If no model expression has been created for the background
        data.

    See Also
    --------
    get_bkg_delchi_plot : Return the data used by plot_bkg_delchi.
    get_bkg_ratio_plot : Return the data used by plot_bkg_ratio.
    get_bkg_resid_plot : Return the data used by plot_bkg_resid.
    plot_bkg_chisqr : Plot the chi-squared value for each point of the background of a PHA data set.

    Examples
    --------
    >>> bplot = get_bkg_chisqr_plot()
    >>> print(bplot)

    >>> get_bkg_chisqr_plot('jet', bkg_id=1).plot()
    >>> get_bkg_chisqr_plot('jet', bkg_id=2).overplot()

    """
    chisqr = self._bkgchisqrplot
    if not recalc:
        return chisqr

    # Rebuild the plot data from the current background data,
    # background model, and statistic settings.
    chisqr.prepare(self.get_bkg(id, bkg_id),
                   self.get_bkg_model(id, bkg_id),
                   self.get_stat())
    return chisqr
def _prepare_energy_flux_plot(self, plot, lo, hi, id, num, bins,
                              correlated, numcores, bkg_id,
                              scales=None, model=None, otherids=(),
                              clip='hard'):
    """Fill the given plot object with a sample_energy_flux run.

    The flux samples are drawn with `sample_energy_flux` and then
    binned up into a histogram by the plot object, which is
    returned.
    """
    samples = self.sample_energy_flux(lo, hi, id=id,
                                      otherids=otherids, num=num,
                                      model=model, scales=scales,
                                      correlated=correlated,
                                      numcores=numcores,
                                      bkg_id=bkg_id, clip=clip)
    plot.prepare(samples, bins)
    return plot
def _prepare_photon_flux_plot(self, plot, lo, hi, id, num, bins,
                              correlated, numcores, bkg_id,
                              scales=None, model=None, otherids=(),
                              clip='hard'):
    """Fill the given plot object with a sample_photon_flux run.

    The flux samples are drawn with `sample_photon_flux` and then
    binned up into a histogram by the plot object, which is
    returned.
    """
    samples = self.sample_photon_flux(lo, hi, id=id,
                                      otherids=otherids, num=num,
                                      model=model, scales=scales,
                                      correlated=correlated,
                                      numcores=numcores,
                                      bkg_id=bkg_id, clip=clip)
    plot.prepare(samples, bins)
    return plot
def get_energy_flux_hist(self, lo=None, hi=None, id=None, num=7500, bins=75,
                         correlated=False, numcores=None, bkg_id=None,
                         scales=None, model=None, otherids=(), recalc=True,
                         clip='hard'):
    """Return the data displayed by plot_energy_flux.

    Simulate the energy flux of a model by drawing parameter
    values from their error distribution, then bin the resulting
    flux values into a histogram. This gives the energy-flux
    probability distribution for the model, accounting for the
    errors on the model parameters.

    .. versionchanged:: 4.12.2
       The scales parameter is no longer ignored when set and the
       model and otherids parameters have been added. The clip
       argument has been added.

    Parameters
    ----------
    lo : number, optional
        The lower limit of the flux integration; the lower edge
        of the data grid is used when not set.
    hi : optional
        The upper limit of the flux integration; the upper edge
        of the data grid is used when not set.
    id : int or string, optional
        The identifier of the data set to use. If `None`, the
        default value, then all datasets with associated models
        are used to calculate the errors and the model evaluation
        is done using the default dataset.
    num : int, optional
        How many flux samples to draw (defaults to 7500).
    bins : int, optional
        How many bins the histogram should use.
    correlated : bool, optional
        When ``True``, ``scales`` must be the full covariance
        matrix; when ``False`` (the default) it may instead be a
        1D array of variances (the diagonal elements of that
        matrix).
    numcores : optional
        How many CPU cores to use; all available cores by
        default.
    bkg_id : int or string, optional
        Set this only when the line to be measured belongs to the
        background model, to identify that background component.
    scales : array, optional
        The widths of the normal distributions the parameters are
        drawn from. With ``correlated=True`` this must be the
        n-by-n covariance matrix for the n free parameters (in
        the parameter ordering used by Sherpa). For un-correlated
        parameters either the covariance matrix or a length-n
        array of sigma values (e.g. the square root of the
        diagonal elements of the covariance matrix) can be given.
        When unset, the covariance matrix is evaluated for the
        current model and best-fit parameters.
    model : model, optional
        The model to integrate. The source model of the dataset
        is used when not set. Supplying a sub-expression - it
        must be part of the source expression - allows, for
        example, the unabsorbed flux to be calculated.
    otherids : sequence of integer and string ids, optional
        Additional datasets to include when calculating the
        errors to draw values from.
    recalc : bool, optional
        When ``True`` (the default) the values are re-calculated
        rather than re-using those from the previous call.
    clip : {'hard', 'soft', 'none'}, optional
        How sampled parameters that exceed their limits are
        treated: 'hard' (the default) fixes them at their hard
        limits, 'soft' uses the soft limits instead, and 'none'
        applies no clipping.

    Returns
    -------
    hist : a `sherpa.astro.plot.EnergyFluxHistogram` instance
        The data that `plot_energy_flux` would display.

    See Also
    --------
    get_photon_flux_hist : Return the data displayed by plot_photon_flux.
    plot_energy_flux : Display the energy flux distribution.
    plot_photon_flux : Display the photon flux distribution.
    sample_energy_flux : Return the energy flux distribution of a model.
    sample_flux : Return the flux distribution of a model.
    sample_photon_flux : Return the photon flux distribution of a model.

    Examples
    --------
    Get the energy flux distribution for the range 0.5 to 7 for
    the default data set:

    >>> ehist = get_energy_flux_hist(0.5, 7, num=1000)
    >>> print(ehist)

    Compare the 0.5 to 2 energy flux distribution from the "core"
    data set to the values from the "jet" data set:

    >>> ehist1 = get_energy_flux_hist(0.5, 2, id='jet', num=1000)
    >>> ehist2 = get_energy_flux_hist(0.5, 2, id='core', num=1000)

    Compare the flux distribution for the full source expression
    (aflux) to that for just the pl component (uflux); this can be
    useful to calculate the unabsorbed flux distribution if the
    full source model contains an absorption component:

    >>> aflux = get_energy_flux_hist(0.5, 2, num=1000, bins=20)
    >>> uflux = get_energy_flux_hist(0.5, 2, model=pl, num=1000, bins=20)

    When there are multiple datasets loaded,
    `get_energy_flux_hist` uses all datasets to evaluate the
    errors when the `id` parameter is left at its default value of
    `None`. The `otherids` parameter is used, along with `id`, to
    specify exactly what datasets are used:

    >>> x = get_energy_flux_hist(2, 10, num=1000, bins=20, model=src)
    >>> y = get_energy_flux_hist(2, 10, num=1000, bins=20, model=src,
    ...                          id=1, otherids=(2, 3, 4))

    """
    hist = self._energyfluxplot
    if recalc:
        self._prepare_energy_flux_plot(hist, lo, hi, id=id,
                                       num=num, bins=bins,
                                       correlated=correlated,
                                       scales=scales, model=model,
                                       otherids=otherids, clip=clip,
                                       numcores=numcores,
                                       bkg_id=bkg_id)
    return hist
def get_photon_flux_hist(self, lo=None, hi=None, id=None, num=7500, bins=75,
                         correlated=False, numcores=None, bkg_id=None,
                         scales=None, model=None, otherids=(), recalc=True,
                         clip='hard'):
    """Return the data displayed by plot_photon_flux.

    The get_photon_flux_hist() function calculates a histogram of
    simulated photon flux values representing the photon flux probability
    distribution for a model component, accounting for the errors on the
    model parameters.

    .. versionchanged:: 4.12.2
       The scales parameter is no longer ignored when set and the
       model and otherids parameters have been added. The clip
       argument has been added.

    Parameters
    ----------
    lo : number, optional
        The lower limit to use when summing up the signal. If not
        given then the lower value of the data grid is used.
    hi : optional
        The upper limit to use when summing up the signal. If not
        given then the upper value of the data grid is used.
    id : int or string, optional
        The identifier of the data set to use. If `None`, the
        default value, then all datasets with associated models are
        used to calculate the errors and the model evaluation is
        done using the default dataset.
    num : int, optional
        The number of samples to create. The default is 7500.
    bins : int, optional
        The number of bins to use for the histogram.
    correlated : bool, optional
        If ``True`` (the default is ``False``) then ``scales`` is the
        full covariance matrix, otherwise it is just a 1D array
        containing the variances of the parameters (the diagonal
        elements of the covariance matrix).
    numcores : optional
        The number of CPU cores to use. The default is to use all
        the cores on the machine.
    bkg_id : int or string, optional
        The identifier of the background component to use. This
        should only be set when the line to be measured is in the
        background model.
    scales : array, optional
        The scales used to define the normal distributions for the
        parameters. The size and shape of the array depends on the
        number of free parameters in the fit (n) and the value of
        the `correlated` parameter. When the parameter is `True`,
        scales must be given the covariance matrix for the free
        parameters (a n by n matrix that matches the parameter
        ordering used by Sherpa). For un-correlated parameters
        the covariance matrix can be used, or a one-dimensional
        array of n elements can be used, giving the width (specified
        as the sigma value of a normal distribution) for each
        parameter (e.g. the square root of the diagonal elements
        of the covariance matrix). If the scales parameter is not
        given then the covariance matrix is evaluated for the
        current model and best-fit parameters.
    model : model, optional
        The model to integrate. If left as `None` then the source
        model for the dataset will be used. This can be used to
        calculate the unabsorbed flux, as shown in the examples.
        The model must be part of the source expression.
    otherids : sequence of integer and string ids, optional
        The list of other datasets that should be included when
        calculating the errors to draw values from.
    recalc : bool, optional
        If ``True``, the default, then re-calculate the values rather
        than use the values from the last time the function was
        run.
    clip : {'hard', 'soft', 'none'}, optional
        What clipping strategy should be applied to the sampled
        parameters. The default ('hard') is to fix values at their
        hard limits if they exceed them. A value of 'soft' uses
        the soft limits instead, and 'none' applies no
        clipping.

    Returns
    -------
    hist : a `sherpa.astro.plot.PhotonFluxHistogram` instance
        An object representing the data used to create the plot by
        `plot_photon_flux`.

    See Also
    --------
    get_energy_flux_hist : Return the data displayed by plot_energy_flux.
    plot_energy_flux : Display the energy flux distribution.
    plot_photon_flux : Display the photon flux distribution.
    sample_energy_flux : Return the energy flux distribution of a model.
    sample_flux : Return the flux distribution of a model.
    sample_photon_flux : Return the photon flux distribution of a model.

    Examples
    --------
    Get the photon flux distribution for the range 0.5 to 7 for
    the default data set:

    >>> phist = get_photon_flux_hist(0.5, 7, num=1000)
    >>> print(phist)

    Compare the 0.5 to 2 photon flux distribution from the "core"
    data set to the values from the "jet" data set:

    >>> phist1 = get_photon_flux_hist(0.5, 2, id='jet', num=1000)
    >>> phist2 = get_photon_flux_hist(0.5, 2, id='core', num=1000)

    Compare the flux distribution for the full source expression
    (aflux) to that for just the pl component (uflux); this can be
    useful to calculate the unabsorbed flux distribution if the
    full source model contains an absorption component:

    >>> aflux = get_photon_flux_hist(0.5, 2, num=1000, bins=20)
    >>> uflux = get_photon_flux_hist(0.5, 2, model=pl, num=1000, bins=20)

    When there are multiple datasets loaded,
    `get_photon_flux_hist` uses all datasets to evaluate the
    errors when the `id` parameter is left at its default value of
    `None`. The `otherids` parameter is used, along with `id`, to
    specify exactly what datasets are used:

    >>> x = get_photon_flux_hist(2, 10, num=1000, bins=20, model=src)
    >>> y = get_photon_flux_hist(2, 10, num=1000, bins=20, model=src,
    ...                          id=1, otherids=(2, 3, 4))

    """
    if recalc:
        self._prepare_photon_flux_plot(self._photonfluxplot, lo, hi, id=id,
                                       num=num, bins=bins, correlated=correlated,
                                       scales=scales, model=model,
                                       otherids=otherids, clip=clip,
                                       numcores=numcores, bkg_id=bkg_id)
    return self._photonfluxplot
def plot_arf(self, id=None, resp_id=None, replot=False, overplot=False,
             clearwindow=True, **kwargs):
    """Plot the ARF associated with a data set.

    Display the effective-area curve from the ARF component of a
    PHA data set.

    Parameters
    ----------
    id : int or str, optional
        The data set with an ARF. When not set, the default
        identifier (see `get_default_id`) is used.
    resp_id : int or str, optional
        Which ARF to use when several are associated with the
        data set; the default of ``None`` selects the first one.
    replot : bool, optional
        When ``True``, re-use the values from the last `plot_data`
        call rather than re-calculating them. Defaults to ``False``.
    overplot : bool, optional
        When ``True``, add the curve to the existing plot rather
        than creating a new one. Defaults to ``False``.
    clearwindow : bool, optional
        Should the existing plot area be cleared before creating this
        new plot (e.g. for multi-panel plots)?

    Raises
    ------
    sherpa.utils.err.ArgumentErr
        If the data set does not contain PHA data.

    See Also
    --------
    get_arf_plot : Return the data used by plot_arf.
    plot : Create one or more plot types.

    Examples
    --------
    Plot the ARF for the default data set:

    >>> plot_arf()

    Plot the ARF from data set 1 and overplot
    the ARF from data set 2:

    >>> plot_arf(1)
    >>> plot_arf(2, overplot=True)

    Plot the ARFs labelled "arf1" and "arf2" for the
    "src" data set:

    >>> plot_arf("src", "arf1")
    >>> plot_arf("src", "arf2", overplot=True)

    The following example requires that the Matplotlib backend
    is selected, since this determines what extra keywords
    `plot_arf` accepts. The ARFs from the default and data set
    2 are drawn together, but the second curve is drawn with
    a dashed line.

    >>> plot_arf(ylog=True)
    >>> plot_arf(2, overplot=True, linestyle='dashed')

    """
    self._plot(self.get_arf_plot(id, resp_id, recalc=not replot),
               clearwindow=clearwindow, overplot=overplot,
               **kwargs)
# DOC-NOTE: also in sherpa.utils, but without the lo/hi arguments
def plot_source(self, id=None, lo=None, hi=None, replot=False,
                overplot=False, clearwindow=True, **kwargs):
    """Plot the source expression for a data set.

    This function plots the source model for a data set. This does
    not include any instrument response (e.g. a convolution
    created by `set_psf` or ARF and RMF automatically created for
    a PHA data set).

    Parameters
    ----------
    id : int or str, optional
        The identifier of the data set to use. When not set, the
        default identifier (see `get_default_id`) is used.
    lo : number, optional
        The low value to plot (only used for PHA data sets).
    hi : number, optional
        The high value to plot (only use for PHA data sets).
    replot : bool, optional
        When ``True``, re-use the values from the last
        `plot_source` call rather than re-calculating them.
        Defaults to ``False``.
    overplot : bool, optional
        When ``True``, add the curve to the existing plot rather
        than creating a new one. Defaults to ``False``.
    clearwindow : bool, optional
        Should the existing plot area be cleared before creating this
        new plot (e.g. for multi-panel plots)?

    See Also
    --------
    get_source_plot : Return the data used by plot_source.
    get_default_id : Return the default data set identifier.
    plot : Create one or more plot types.
    plot_model : Plot the model for a data set.
    set_analysis : Set the units used when fitting and displaying spectral data.
    set_xlinear : New plots will display a linear X axis.
    set_xlog : New plots will display a logarithmically-scaled X axis.
    set_ylinear : New plots will display a linear Y axis.
    set_ylog : New plots will display a logarithmically-scaled Y axis.

    Examples
    --------
    Plot the unconvolved source model for the default data set:

    >>> plot_source()

    Overplot the source model for data set 2 on data set 1:

    >>> plot_source(1)
    >>> plot_source(2, overplot=True)

    Restrict the plot to values between 0.5 and 7 for the
    independent axis:

    >>> plot_source(lo=0.5, hi=7)

    For a PHA data set, the units on both the X and Y axes of the
    plot are controlled by the `set_analysis` command. In this
    case the Y axis will be in units of photons/s/cm^2 and the X
    axis in keV:

    >>> set_analysis('energy', factor=1)
    >>> plot_source()

    """
    data = self.get_data(id)
    if not isinstance(data, sherpa.astro.data.DataPHA):
        # Non-PHA data has no lo/hi handling, so defer to the
        # superclass version.
        super().plot_source(id=id, replot=replot, overplot=overplot,
                            clearwindow=clearwindow, **kwargs)
        return

    # PHA data supports the lo/hi arguments, which the superclass
    # version does not, so handle the plot here.
    plotobj = self.get_source_plot(id, lo=lo, hi=hi,
                                   recalc=not replot)
    self._plot(plotobj, clearwindow=clearwindow, overplot=overplot,
               **kwargs)
# DOC-TODO: is orders the same as resp_id?
def plot_order(self, id=None, orders=None, replot=False, overplot=False,
               clearwindow=True, **kwargs):
    """Plot the model for a data set convolved by the given response.

    Some data sets - such as grating PHA data - can have several
    responses attached. This is the equivalent of `plot_model`,
    in that it displays the model after passing through a
    response, but it lets the caller pick which response is used.

    Parameters
    ----------
    id : int or str, optional
        The identifier of the data set to use. When not set, the
        default identifier (see `get_default_id`) is used.
    orders : optional
        The response, or responses, to use. A scalar selects a
        single curve; an array displays one curve per order. All
        orders are used when not set.
    replot : bool, optional
        When ``True``, re-use the values from the last `plot_model`
        call rather than re-calculating them. Defaults to ``False``.
    overplot : bool, optional
        When ``True``, add the curves to the existing plot rather
        than creating a new one. Defaults to ``False``.
    clearwindow : bool, optional
        Should the existing plot area be cleared before creating this
        new plot (e.g. for multi-panel plots)?

    See Also
    --------
    get_order_plot : Return the data used by plot_order.
    plot : Create one or more plot types.
    plot_model : Plot the model for a data set.

    Examples
    --------
    Display the source model convolved by the first response
    for the default data set:

    >>> plot_order(orders=1)

    Plot the source convolved through the first and second
    responses for the second data set (separate curves for
    each response):

    >>> plot_order(2, orders=[1, 2])

    Add the orders plot to a model plot:

    >>> plot_model()
    >>> plot_order(orders=[2, 3], overplot=True)

    """
    self._plot(self.get_order_plot(id, orders=orders,
                                   recalc=not replot),
               clearwindow=clearwindow, overplot=overplot,
               **kwargs)
def plot_bkg(self, id=None, bkg_id=None, replot=False, overplot=False,
             clearwindow=True, **kwargs):
    """Plot the background values for a PHA data set.

    Parameters
    ----------
    id : int or str, optional
        The identifier of the data set to use. When not set, the
        default identifier (see `get_default_id`) is used.
    bkg_id : int or str, optional
        Which background component of the data set to use, for
        data sets with multiple backgrounds.
    replot : bool, optional
        When ``True``, re-use the values from the last `plot_bkg`
        call rather than re-calculating them. Defaults to ``False``.
    overplot : bool, optional
        When ``True``, add the data to the existing plot rather
        than creating a new one. Defaults to ``False``.
    clearwindow : bool, optional
        Should the existing plot area be cleared before creating this
        new plot (e.g. for multi-panel plots)?

    Raises
    ------
    sherpa.utils.err.ArgumentErr
        If the data set does not contain PHA data.
    sherpa.utils.err.IdentifierErr
        If the ``bkg_id`` parameter is invalid.

    See Also
    --------
    get_bkg_plot : Return the data used by plot_bkg.
    get_default_id : Return the default data set identifier.
    plot : Create one or more plot types.
    set_analysis : Set the units used when fitting and displaying spectral data.
    set_xlinear : New plots will display a linear X axis.
    set_xlog : New plots will display a logarithmically-scaled X axis.
    set_ylinear : New plots will display a linear Y axis.
    set_ylog : New plots will display a logarithmically-scaled Y axis.

    Examples
    --------
    Plot the background from the default data set:

    >>> plot_bkg()

    Overplot the background from the 'jet' data set on the
    data. There is no scaling for differences in aperture or
    exposure time:

    >>> plot_data('jet')
    >>> plot_bkg('jet', overplot=True)

    Compare the first two background components of data set 1:

    >>> plot_bkg(1, 1)
    >>> plot_bkg(1, 2, overplot=True)

    """
    recalc = not replot
    bkgplot = self.get_bkg_plot(id, bkg_id, recalc=recalc)
    self._plot(bkgplot, clearwindow=clearwindow, overplot=overplot,
               **kwargs)
def plot_bkg_model(self, id=None, bkg_id=None, replot=False,
                   overplot=False, clearwindow=True, **kwargs):
    """Plot the model for the background of a PHA data set.

    Display the model for the background of a PHA data set,
    including any instrument response (the ARF and RMF).

    Parameters
    ----------
    id : int or str, optional
        The identifier of the data set to use. When not set, the
        default identifier (see `get_default_id`) is used.
    bkg_id : int or str, optional
        Which background component of the data set to use, for
        data sets with multiple backgrounds.
    replot : bool, optional
        When ``True``, re-use the values from the last
        `plot_bkg_model` call rather than re-calculating them.
        Defaults to ``False``.
    overplot : bool, optional
        When ``True``, add the curve to the existing plot rather
        than creating a new one. Defaults to ``False``.
    clearwindow : bool, optional
        Should the existing plot area be cleared before creating this
        new plot (e.g. for multi-panel plots)?

    Raises
    ------
    sherpa.utils.err.ArgumentErr
        If the data set does not contain PHA data.
    sherpa.utils.err.IdentifierErr
        If the ``bkg_id`` parameter is invalid.
    sherpa.utils.err.ModelErr
        If no model expression has been created for the background
        data.

    See Also
    --------
    get_bkg_model_plot : Return the data used by plot_bkg_model.
    plot_bkg_source : Plot the model expression for the background of a PHA data set.
    set_bkg_model : Set the background model expression for a PHA data set.

    Examples
    --------
    >>> plot_bkg_model()

    >>> plot_bkg('jet')
    >>> plot_bkg_model('jet', bkg_id=1, overplot=True)
    >>> plot_bkg_model('jet', bkg_id=2, overplot=True)

    """
    recalc = not replot
    mdlplot = self.get_bkg_model_plot(id, bkg_id, recalc=recalc)
    self._plot(mdlplot, clearwindow=clearwindow, overplot=overplot,
               **kwargs)
def plot_bkg_resid(self, id=None, bkg_id=None, replot=False,
                   overplot=False, clearwindow=True, **kwargs):
    """Plot the residual (data-model) values for the background of a PHA data set.

    Display the residuals for the background of a PHA data set
    when it is being fit, rather than subtracted from the source.

    .. versionchanged:: 4.12.0
       The Y axis is now always drawn using a linear scale.

    Parameters
    ----------
    id : int or str, optional
        The identifier of the data set to use. When not set, the
        default identifier (see `get_default_id`) is used.
    bkg_id : int or str, optional
        Which background component of the data set to use, for
        data sets with multiple backgrounds.
    replot : bool, optional
        When ``True``, re-use the values from the last
        `plot_bkg_resid` call rather than re-calculating them.
        Defaults to ``False``.
    overplot : bool, optional
        When ``True``, add the data to the existing plot rather
        than creating a new one. Defaults to ``False``.
    clearwindow : bool, optional
        Should the existing plot area be cleared before creating this
        new plot (e.g. for multi-panel plots)?

    Raises
    ------
    sherpa.utils.err.ArgumentErr
        If the data set does not contain PHA data.
    sherpa.utils.err.IdentifierErr
        If the ``bkg_id`` parameter is invalid.
    sherpa.utils.err.ModelErr
        If no model expression has been created for the background
        data.

    See Also
    --------
    get_bkg_resid_plot : Return the data used by plot_bkg_resid.
    plot_bkg_chisqr : Plot the chi-squared value for each point of the background of a PHA data set.
    plot_bkg_delchi : Plot the ratio of residuals to error for the background of a PHA data set.
    plot_bkg_ratio : Plot the ratio of data to model values for the background of a PHA data set.
    set_bkg_model : Set the background model expression for a PHA data set.

    Notes
    -----
    The ylog setting is ignored, and the Y axis is drawn using a
    linear scale.

    Examples
    --------
    >>> plot_bkg_resid()

    >>> plot_bkg('jet')
    >>> plot_bkg_resid('jet', bkg_id=1, overplot=True)
    >>> plot_bkg_resid('jet', bkg_id=2, overplot=True)

    """
    self._plot(self.get_bkg_resid_plot(id, bkg_id,
                                       recalc=not replot),
               clearwindow=clearwindow, overplot=overplot,
               **kwargs)
def plot_bkg_ratio(self, id=None, bkg_id=None, replot=False,
                   overplot=False, clearwindow=True, **kwargs):
    """Plot the ratio of data to model values for the background of a PHA data set.

    Display the ratio of data to model values for the background
    of a PHA data set when it is being fit, rather than subtracted
    from the source.

    .. versionchanged:: 4.12.0
       The Y axis is now always drawn using a linear scale.

    Parameters
    ----------
    id : int or str, optional
        The identifier of the data set to use. When not set, the
        default identifier (see `get_default_id`) is used.
    bkg_id : int or str, optional
        Which background component of the data set to use, for
        data sets with multiple backgrounds.
    replot : bool, optional
        When ``True``, re-use the values from the last
        `plot_bkg_ratio` call rather than re-calculating them.
        Defaults to ``False``.
    overplot : bool, optional
        When ``True``, add the data to the existing plot rather
        than creating a new one. Defaults to ``False``.
    clearwindow : bool, optional
        Should the existing plot area be cleared before creating this
        new plot (e.g. for multi-panel plots)?

    Raises
    ------
    sherpa.utils.err.ArgumentErr
        If the data set does not contain PHA data.
    sherpa.utils.err.IdentifierErr
        If the ``bkg_id`` parameter is invalid.
    sherpa.utils.err.ModelErr
        If no model expression has been created for the background
        data.

    See Also
    --------
    get_bkg_ratio_plot : Return the data used by plot_bkg_ratio.
    plot_bkg_chisqr : Plot the chi-squared value for each point of the background of a PHA data set.
    plot_bkg_delchi : Plot the ratio of residuals to error for the background of a PHA data set.
    plot_bkg_resid : Plot the residual (data-model) values for the background of a PHA data set.
    set_bkg_model : Set the background model expression for a PHA data set.

    Notes
    -----
    The ylog setting is ignored, and the Y axis is drawn using a
    linear scale.

    Examples
    --------
    >>> plot_bkg_ratio()

    >>> plot_bkg_ratio('jet', bkg_id=1)
    >>> plot_bkg_ratio('jet', bkg_id=2, overplot=True)

    """
    self._plot(self.get_bkg_ratio_plot(id, bkg_id,
                                       recalc=not replot),
               clearwindow=clearwindow, overplot=overplot,
               **kwargs)
def plot_bkg_delchi(self, id=None, bkg_id=None, replot=False,
                    overplot=False, clearwindow=True, **kwargs):
    """Plot the ratio of residuals to error for the background of a PHA data set.

    Display the ratio of the residuals (data-model) to the error
    values for the background of a PHA data set when it is being
    fit, rather than subtracted from the source.

    .. versionchanged:: 4.12.0
       The Y axis is now always drawn using a linear scale.

    Parameters
    ----------
    id : int or str, optional
        The data set that provides the data. If not given then the
        default identifier is used, as returned by `get_default_id`.
    bkg_id : int or str, optional
        Identify the background component to use, if there are
        multiple ones associated with the data set.
    replot : bool, optional
        Set to ``True`` to use the values calculated by the last
        call to `plot_bkg_delchi`. The default is ``False``.
    overplot : bool, optional
        If ``True`` then add the data to an existing plot, otherwise
        create a new plot. The default is ``False``.
    clearwindow : bool, optional
        Should the existing plot area be cleared before creating this
        new plot (e.g. for multi-panel plots)?

    Raises
    ------
    sherpa.utils.err.ArgumentErr
        If the data set does not contain PHA data.
    sherpa.utils.err.IdentifierErr
        If the ``bkg_id`` parameter is invalid.
    sherpa.utils.err.ModelErr
        If no model expression has been created for the background
        data.

    See Also
    --------
    get_bkg_delchi_plot : Return the data used by plot_bkg_delchi.
    plot_bkg_chisqr : Plot the chi-squared value for each point of the background of a PHA data set.
    plot_bkg_ratio : Plot the ratio of data to model values for the background of a PHA data set.
    plot_bkg_resid : Plot the residual (data-model) values for the background of a PHA data set.
    set_bkg_model : Set the background model expression for a PHA data set.

    Notes
    -----
    The ylog setting is ignored, and the Y axis is drawn using a
    linear scale.

    Examples
    --------
    >>> plot_bkg_delchi()

    >>> plot_bkg_delchi('jet', bkg_id=1)
    >>> plot_bkg_delchi('jet', bkg_id=2, overplot=True)

    """
    plotobj = self.get_bkg_delchi_plot(id, bkg_id, recalc=not replot)
    self._plot(plotobj, overplot=overplot, clearwindow=clearwindow,
               **kwargs)
def plot_bkg_chisqr(self, id=None, bkg_id=None, replot=False,
                    overplot=False, clearwindow=True, **kwargs):
    """Plot the chi-squared value for each point of the background of a PHA data set.

    Display the square of the residuals (data-model) divided by
    the error values for the background of a PHA data set when it
    is being fit, rather than subtracted from the source.

    Parameters
    ----------
    id : int or str, optional
        The identifier of the data set to use. When not set, the
        default identifier (see `get_default_id`) is used.
    bkg_id : int or str, optional
        Which background component of the data set to use, for
        data sets with multiple backgrounds.
    replot : bool, optional
        When ``True``, re-use the values from the last
        `plot_bkg_chisqr` call rather than re-calculating them.
        Defaults to ``False``.
    overplot : bool, optional
        When ``True``, add the data to the existing plot rather
        than creating a new one. Defaults to ``False``.
    clearwindow : bool, optional
        Should the existing plot area be cleared before creating this
        new plot (e.g. for multi-panel plots)?

    Raises
    ------
    sherpa.utils.err.ArgumentErr
        If the data set does not contain PHA data.
    sherpa.utils.err.IdentifierErr
        If the ``bkg_id`` parameter is invalid.
    sherpa.utils.err.ModelErr
        If no model expression has been created for the background
        data.

    See Also
    --------
    get_bkg_chisqr_plot : Return the data used by plot_bkg_chisqr.
    plot_bkg_delchi : Plot the ratio of residuals to error for the background of a PHA data set.
    plot_bkg_ratio : Plot the ratio of data to model values for the background of a PHA data set.
    plot_bkg_resid : Plot the residual (data-model) values for the background of a PHA data set.
    set_bkg_model : Set the background model expression for a PHA data set.

    Examples
    --------
    >>> plot_bkg_chisqr()

    >>> plot_bkg_chisqr('jet', bkg_id=1)
    >>> plot_bkg_chisqr('jet', bkg_id=2, overplot=True)

    """
    self._plot(self.get_bkg_chisqr_plot(id, bkg_id,
                                        recalc=not replot),
               clearwindow=clearwindow, overplot=overplot,
               **kwargs)
def plot_bkg_fit(self, id=None, bkg_id=None, replot=False,
overplot=False, clearwindow=True, **kwargs):
"""Plot the fit results (data, model) for the background of a PHA data set.
Parameters
----------
id : int or str, optional
The data set that provides the data. If not given then the
default identifier is used, as returned by `get_default_id`.
bkg_id : int or str, optional
Identify the background component to use, if there are
multiple ones associated with the data set.
replot : bool, optional
Set to ``True`` to use the values calculated by the last
call to `plot_bkg_fit`. The default is ``False``.
overplot : bool, optional
If ``True`` then add the data to an existing plot, otherwise
create a new plot. The default is ``False``.
clearwindow : bool, optional
Should the existing plot area be cleared before creating this
new plot (e.g. for multi-panel plots)?
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain PHA data.
sherpa.utils.err.IdentifierErr
If the ``bkg_id`` parameter is invalid.
sherpa.utils.err.ModelErr
If no model expression has been created for the background
data.
See Also
--------
get_bkg_fit_plot : Return the data used by plot_bkg_fit.
plot : Create one or more plot types.
plot_bkg : Plot the background values for a PHA data set.
plot_bkg_model : Plot the model for the background of a PHA data set.
plot_bkg_fit_delchi : Plot the fit results, and the residuals, for the background of a PHA data set.
plot_bkg_fit_ratio : Plot the fit results, and the data/model ratio, for the background of a PHA data set.
plot_bkg_fit_resid : Plot the fit results, and the residuals, for the background of a PHA data set.
plot_fit : Plot the fit results (data, model) for a data set.
set_analysis : Set the units used when fitting and displaying spectral data.
Examples
--------
Plot the background fit to the default data set:
>>> plot_bkg_fit()
"""
plotobj = self.get_bkg_fit_plot(id, bkg_id, recalc=not replot)
self._plot(plotobj, overplot=overplot, clearwindow=clearwindow,
**kwargs)
def plot_bkg_source(self, id=None, lo=None, hi=None, bkg_id=None,
replot=False, overplot=False, clearwindow=True,
**kwargs):
"""Plot the model expression for the background of a PHA data set.
This function plots the model for the background of a PHA data
set. It does not include the instrument response (the ARF and
RMF).
Parameters
----------
id : int or str, optional
The data set that provides the data. If not given then the
default identifier is used, as returned by `get_default_id`.
lo : number, optional
The low value to plot.
hi : number, optional
The high value to plot.
bkg_id : int or str, optional
Identify the background component to use, if there are
multiple ones associated with the data set.
replot : bool, optional
Set to ``True`` to use the values calculated by the last
call to `plot_bkg_model`. The default is ``False``.
overplot : bool, optional
If ``True`` then add the data to an existing plot, otherwise
create a new plot. The default is ``False``.
clearwindow : bool, optional
Should the existing plot area be cleared before creating this
new plot (e.g. for multi-panel plots)?
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain PHA data.
sherpa.utils.err.IdentifierErr
If the ``bkg_id`` parameter is invalid.
sherpa.utils.err.ModelErr
If no model expression has been created for the background
data.
See Also
--------
get_bkg_source_plot : Return the data used by plot_bkg_source.
plot_bkg_model : Plot the model for the background of a PHA data set.
set_bkg_model : Set the background model expression for a PHA data set.
Examples
--------
>>> plot_bkg_source()
>>> plot_bkg_source('jet', bkg_id=1)
>>> plot_bkg_source('jet', bkg_id=2, overplot=True)
"""
plotobj = self.get_bkg_source_plot(id, bkg_id=bkg_id, lo=lo, hi=hi,
recalc=not replot)
self._plot(plotobj, overplot=overplot, clearwindow=clearwindow,
**kwargs)
def plot_energy_flux(self, lo=None, hi=None, id=None, num=7500, bins=75,
correlated=False, numcores=None, bkg_id=None,
scales=None, model=None, otherids=(),
recalc=True, clip='hard',
overplot=False, clearwindow=True,
**kwargs):
"""Display the energy flux distribution.
For each iteration, draw the parameter values of the model
from a normal distribution, evaluate the model, and sum the
model over the given range (the flux). Plot up the
distribution of this flux. The units for the flux are as
returned by `calc_energy_flux`. The `sample_energy_flux` and
`get_energy_flux_hist` functions return the data used to
create this plot.
.. versionchanged:: 4.12.2
The scales parameter is no longer ignored when set and the
model and otherids parameters have been added. The clip
argument has been added.
Parameters
----------
lo : number, optional
The lower limit to use when summing up the signal. If not
given then the lower value of the data grid is used.
hi : optional
The upper limit to use when summing up the signal. If not
given then the upper value of the data grid is used.
id : int or string, optional
The identifier of the data set to use. If `None`, the
default value, then all datasets with associated models are
used to calculate the errors and the model evaluation is
done using the default dataset.
num : int, optional
The number of samples to create. The default is 7500.
bins : int, optional
The number of bins to use for the histogram.
correlated : bool, optional
If ``True`` (the default is ``False``) then ``scales`` is the
full covariance matrix, otherwise it is just a 1D array
containing the variances of the parameters (the diagonal
elements of the covariance matrix).
numcores : optional
The number of CPU cores to use. The default is to use all
the cores on the machine.
bkg_id : int or string, optional
The identifier of the background component to use. This
should only be set when the line to be measured is in the
background model.
scales : array, optional
The scales used to define the normal distributions for the
parameters. The size and shape of the array depends on the
number of free parameters in the fit (n) and the value of
the `correlated` parameter. When the parameter is `True`,
scales must be given the covariance matrix for the free
parameters (a n by n matrix that matches the parameter
ordering used by Sherpa). For un-correlated parameters
the covariance matrix can be used, or a one-dimensional
array of n elements can be used, giving the width (specified
as the sigma value of a normal distribution) for each
parameter (e.g. the square root of the diagonal elements
of the covariance matrix). If the scales parameter is not
given then the covariance matrix is evaluated for the
current model and best-fit parameters.
model : model, optional
The model to integrate. If left as `None` then the source
model for the dataset will be used. This can be used to
calculate the unabsorbed flux, as shown in the examples.
The model must be part of the source expression.
otherids : sequence of integer and string ids, optional
The list of other datasets that should be included when
calculating the errors to draw values from.
recalc : bool, optional
If ``True``, the default, then re-calculate the values rather
than use the values from the last time the function was
run.
clip : {'hard', 'soft', 'none'}, optional
What clipping strategy should be applied to the sampled
parameters. The default ('hard') is to fix values at their
hard limits if they exceed them. A value of 'soft' uses
the soft limits instead, and 'none' applies no
clipping.
overplot : bool, optional
If ``True`` then add the data to an existing plot, otherwise
create a new plot. The default is ``False``.
clearwindow : bool, optional
Should the existing plot area be cleared before creating this
new plot (e.g. for multi-panel plots)?
See Also
--------
calc_photon_flux : Integrate the unconvolved source model over a pass band.
calc_energy_flux : Integrate the unconvolved source model over a pass band.
covar : Estimate the confidence intervals using the confidence method.
get_energy_flux_hist : Return the data displayed by plot_energy_flux.
get_photon_flux_hist : Return the data displayed by plot_photon_flux.
plot_cdf : Plot the cumulative density function of an array.
plot_pdf : Plot the probability density function of an array.
plot_photon_flux : Display the photon flux distribution.
plot_trace : Create a trace plot of row number versus value.
sample_energy_flux : Return the energy flux distribution of a model.
sample_flux : Return the flux distribution of a model.
sample_photon_flux : Return the photon flux distribution of a model.
Examples
--------
Plot the energy flux distribution for the range 0.5 to 7 for
the default data set:
>>> plot_energy_flux(0.5, 7, num=1000)
Overplot the 0.5 to 2 energy flux distribution from the "core"
data set on top of the values from the "jet" data set:
>>> plot_energy_flux(0.5, 2, id="jet", num=1000)
>>> plot_energy_flux(0.5, 2, id="core", num=1000, overplot=True)
Overplot the flux distribution for just the pl component (which
must be part of the source expression) on top of the full model.
If the full model was xsphabs.gal * powlaw1d.pl then this will
compare the unabsorbed to absorbed flux distributions:
>>> plot_energy_flux(0.5, 2, num=1000, bins=20)
>>> plot_energy_flux(0.5, 2, model=pl, num=1000, bins=20)
If you have multiple datasets loaded, each with a model, then
all datasets will be used to calculate the errors when the
id parameter is not set. A single dataset can be used by
specifying a dataset (in this example the overplot is just with
dataset 1):
>>> mdl = xsphabs.gal * xsapec.src
>>> set_source(1, mdl)
>>> set_source(2, mdl)
...
>>> plot_energy_flux(0.5, 2, model=src num=1000, bins=20)
>>> plot_energy_flux(0.5, 2, model=src num=1000, bins=20,
... id=1, overplot=True)
If you have multiple datasets then you can use the otherids
argument to specify exactly what set of data is used:
>>> plot_energy_flux(0.5, 2, model=src num=1000, bins=20,
... id=1, otherids=(2, 3, 4))
"""
efplot = self.get_energy_flux_hist(lo=lo, hi=hi, id=id, num=num, bins=bins,
correlated=correlated, numcores=numcores,
bkg_id=bkg_id, scales=scales, model=model,
otherids=otherids, clip=clip, recalc=recalc)
self._plot(efplot, overplot=overplot, clearwindow=clearwindow,
**kwargs)
def plot_photon_flux(self, lo=None, hi=None, id=None, num=7500, bins=75,
correlated=False, numcores=None, bkg_id=None,
scales=None, model=None, otherids=(),
recalc=True, clip='hard',
overplot=False, clearwindow=True,
**kwargs):
"""Display the photon flux distribution.
For each iteration, draw the parameter values of the model
from a normal distribution, evaluate the model, and sum the
model over the given range (the flux). Plot up the
distribution of this flux. The units for the flux are as
returned by `calc_photon_flux`. The `sample_photon_flux` and
`get_photon_flux_hist` functions return the data used to
create this plot.
.. versionchanged:: 4.12.2
The scales parameter is no longer ignored when set and the
model and otherids parameters have been added. The clip
argument has been added.
Parameters
----------
lo : number, optional
The lower limit to use when summing up the signal. If not
given then the lower value of the data grid is used.
hi : optional
The upper limit to use when summing up the signal. If not
given then the upper value of the data grid is used.
id : int or string, optional
The identifier of the data set to use. If `None`, the
default value, then all datasets with associated models are
used to calculate the errors and the model evaluation is
done using the default dataset.
num : int, optional
The number of samples to create. The default is 7500.
bins : int, optional
The number of bins to use for the histogram.
correlated : bool, optional
If ``True`` (the default is ``False``) then ``scales`` is the
full covariance matrix, otherwise it is just a 1D array
containing the variances of the parameters (the diagonal
elements of the covariance matrix).
numcores : optional
The number of CPU cores to use. The default is to use all
the cores on the machine.
bkg_id : int or string, optional
The identifier of the background component to use. This
should only be set when the line to be measured is in the
background model.
scales : array, optional
The scales used to define the normal distributions for the
parameters. The size and shape of the array depends on the
number of free parameters in the fit (n) and the value of
the `correlated` parameter. When the parameter is `True`,
scales must be given the covariance matrix for the free
parameters (a n by n matrix that matches the parameter
ordering used by Sherpa). For un-correlated parameters
the covariance matrix can be used, or a one-dimensional
array of n elements can be used, giving the width (specified
as the sigma value of a normal distribution) for each
parameter (e.g. the square root of the diagonal elements
of the covariance matrix). If the scales parameter is not
given then the covariance matrix is evaluated for the
current model and best-fit parameters.
model : model, optional
The model to integrate. If left as `None` then the source
model for the dataset will be used. This can be used to
calculate the unabsorbed flux, as shown in the examples.
The model must be part of the source expression.
otherids : sequence of integer and string ids, optional
The list of other datasets that should be included when
calculating the errors to draw values from.
recalc : bool, optional
If ``True``, the default, then re-calculate the values rather
than use the values from the last time the function was
run.
clip : {'hard', 'soft', 'none'}, optional
What clipping strategy should be applied to the sampled
parameters. The default ('hard') is to fix values at their
hard limits if they exceed them. A value of 'soft' uses
the soft limits instead, and 'none' applies no
clipping.
overplot : bool, optional
If ``True`` then add the data to an existing plot, otherwise
create a new plot. The default is ``False``.
clearwindow : bool, optional
Should the existing plot area be cleared before creating this
new plot (e.g. for multi-panel plots)?
See Also
--------
calc_photon_flux : Integrate the unconvolved source model over a pass band.
calc_energy_flux : Integrate the unconvolved source model over a pass band.
covar : Estimate the confidence intervals using the confidence method.
get_energy_flux_hist : Return the data displayed by plot_energy_flux.
get_photon_flux_hist : Return the data displayed by plot_photon_flux.
plot_cdf : Plot the cumulative density function of an array.
plot_pdf : Plot the probability density function of an array.
plot_energy_flux : Display the energy flux distribution.
plot_trace : Create a trace plot of row number versus value.
sample_energy_flux : Return the energy flux distribution of a model.
sample_flux : Return the flux distribution of a model.
sample_photon_flux : Return the photon flux distribution of a model.
Examples
--------
Plot the photon flux distribution for the range 0.5 to 7 for
the default data set:
>>> plot_photon_flux(0.5, 7, num=1000)
Overplot the 0.5 to 2 photon flux distribution from the "core"
data set on top of the values from the "jet" data set:
>>> plot_photon_flux(0.5, 2, id="jet", num=1000)
>>> plot_photon_flux(0.5, 2, id="core", num=1000, overplot=True)
Overplot the flux distribution for just the pl component (which
must be part of the source expression) on top of the full model.
If the full model was xsphabs.gal * powlaw1d.pl then this will
compare the unabsorbed to absorbed flux distributions:
>>> plot_photon_flux(0.5, 2, num=1000, bins=20)
>>> plot_photon_flux(0.5, 2, model=pl, num=1000, bins=20)
If you have multiple datasets loaded, each with a model, then
all datasets will be used to calculate the errors when the
id parameter is not set. A single dataset can be used by
specifying a dataset (in this example the overplot is just with
dataset 1):
>>> mdl = xsphabs.gal * xsapec.src
>>> set_source(1, mdl)
>>> set_source(2, mdl)
...
>>> plot_photon_flux(0.5, 2, model=src num=1000, bins=20)
>>> plot_photon_flux(0.5, 2, model=src num=1000, bins=20,
... id=1, overplot=True)
If you have multiple datasets then you can use the otherids
argument to specify exactly what set of data is used:
>>> plot_photon_flux(0.5, 2, model=src num=1000, bins=20,
... id=1, otherids=(2, 3, 4))
"""
pfplot = self.get_photon_flux_hist(lo=lo, hi=hi, id=id, num=num, bins=bins,
correlated=correlated, numcores=numcores,
bkg_id=bkg_id, scales=scales, model=model,
otherids=otherids, clip=clip, recalc=recalc)
self._plot(pfplot, overplot=overplot, clearwindow=clearwindow,
**kwargs)
def _bkg_jointplot2(self, plot1, plot2, overplot=False,
clearwindow=True, **kwargs):
"""Create a joint plot for bkg, vertically aligned, fit data on the top.
Parameters
----------
plot1 : sherpa.plot.Plot instance
The plot to appear in the top panel.
plot2 : sherpa.plot.Plot instance
The plot to appear in the bottom panel.
overplot : bool, optional
If ``True`` then add the data to an existing plot, otherwise
create a new plot. The default is ``False``.
clearwindow : bool, optional
Should the existing plot area be cleared before creating this
new plot (e.g. for multi-panel plots)?
"""
self._jointplot.reset()
try:
sherpa.plot.begin()
self._jointplot.plottop(plot1, overplot=overplot,
clearwindow=clearwindow, **kwargs)
# Unlike the plot version we can assume we are dealing
# with histogram plots here.
#
oldval = plot2.plot_prefs['xlog']
dprefs = plot1.dataplot.histo_prefs
mprefs = plot1.modelplot.histo_prefs
if dprefs['xlog'] or mprefs['xlog']:
plot2.plot_prefs['xlog'] = True
self._jointplot.plotbot(plot2, overplot=overplot, **kwargs)
plot2.plot_prefs['xlog'] = oldval
except:
sherpa.plot.exceptions()
raise
else:
sherpa.plot.end()
def plot_bkg_fit_ratio(self, id=None, bkg_id=None, replot=False,
overplot=False, clearwindow=True, **kwargs):
"""Plot the fit results, and the data/model ratio, for the background of
a PHA data set.
This creates two plots - the first from `plot_bkg_fit` and the
second from `plot_bkg_ratio` - for a data set.
.. versionchanged:: 4.12.2
The ``overplot`` option now works.
.. versionadded:: 4.12.0
Parameters
----------
id : int or str, optional
The data set that provides the data. If not given then the
default identifier is used, as returned by `get_default_id`.
bkg_id : int or str, optional
Identify the background component to use, if there are
multiple ones associated with the data set.
replot : bool, optional
Set to ``True`` to use the values calculated by the last
call to `plot_bkg_fit_ratio`. The default is ``False``.
overplot : bool, optional
If ``True`` then add the data to an existing plot, otherwise
create a new plot. The default is ``False``.
clearwindow : bool, optional
Should the existing plot area be cleared before creating this
new plot (e.g. for multi-panel plots)?
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain PHA data.
sherpa.utils.err.IdentifierErr
If the ``bkg_id`` parameter is invalid.
sherpa.utils.err.ModelErr
If no model expression has been created for the background
data.
See Also
--------
get_bkg_fit_plot : Return the data used by plot_bkg_fit.
get_bkg_resid_plot : Return the data used by plot_bkg_resid.
plot : Create one or more plot types.
plot_bkg : Plot the background values for a PHA data set.
plot_bkg_model : Plot the model for the background of a PHA data set.
plot_bkg_fit : Plot the fit results (data, model) for the background of a PHA data set.
plot_bkg_fit_delchi : Plot the fit results, and the residuals, for the background of a PHA data set.
plot_bkg_fit_resid : Plot the fit results, and the residuals, for the background of a PHA data set.
plot_fit : Plot the fit results (data, model) for a data set.
plot_fit_resid : Plot the fit results, and the residuals, for a data set.
set_analysis : Set the units used when fitting and displaying spectral data.
Notes
-----
For the residual plot, the ylog setting is ignored, and the Y axis
is drawn using a linear scale.
Examples
--------
Plot the background fit and the ratio of the background to
this fit for the default data set:
>>> plot_bkg_fit_ratio()
"""
plot1obj = self.get_bkg_fit_plot(id, bkg_id, recalc=not replot)
plot2obj = self.get_bkg_ratio_plot(id, bkg_id, recalc=not replot)
self._bkg_jointplot2(plot1obj, plot2obj,
overplot=overplot, clearwindow=clearwindow,
**kwargs)
def plot_bkg_fit_resid(self, id=None, bkg_id=None, replot=False,
overplot=False, clearwindow=True, **kwargs):
"""Plot the fit results, and the residuals, for the background of
a PHA data set.
This creates two plots - the first from `plot_bkg_fit` and the
second from `plot_bkg_resid` - for a data set.
.. versionchanged:: 4.12.2
The ``overplot`` option now works.
.. versionchanged:: 4.12.0
The Y axis of the residual plot is now always drawn using a
linear scale.
Parameters
----------
id : int or str, optional
The data set that provides the data. If not given then the
default identifier is used, as returned by `get_default_id`.
bkg_id : int or str, optional
Identify the background component to use, if there are
multiple ones associated with the data set.
replot : bool, optional
Set to ``True`` to use the values calculated by the last
call to `plot_bkg_fit_resid`. The default is ``False``.
overplot : bool, optional
If ``True`` then add the data to an existing plot, otherwise
create a new plot. The default is ``False``.
clearwindow : bool, optional
Should the existing plot area be cleared before creating this
new plot (e.g. for multi-panel plots)?
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain PHA data.
sherpa.utils.err.IdentifierErr
If the ``bkg_id`` parameter is invalid.
sherpa.utils.err.ModelErr
If no model expression has been created for the background
data.
See Also
--------
get_bkg_fit_plot : Return the data used by plot_bkg_fit.
get_bkg_resid_plot : Return the data used by plot_bkg_resid.
plot : Create one or more plot types.
plot_bkg : Plot the background values for a PHA data set.
plot_bkg_model : Plot the model for the background of a PHA data set.
plot_bkg_fit : Plot the fit results (data, model) for the background of a PHA data set.
plot_bkg_fit_ratio : Plot the fit results, and the data/model ratio, for the background of a PHA data set.
plot_bkg_fit_delchi : Plot the fit results, and the residuals, for the background of a PHA data set.
plot_fit : Plot the fit results (data, model) for a data set.
plot_fit_resid : Plot the fit results, and the residuals, for a data set.
set_analysis : Set the units used when fitting and displaying spectral data.
Notes
-----
For the residual plot, the ylog setting is ignored, and the Y axis
is drawn using a linear scale.
Examples
--------
Plot the background fit and residuals to the default data set:
>>> plot_bkg_fit_resid()
"""
plot1obj = self.get_bkg_fit_plot(id, bkg_id, recalc=not replot)
plot2obj = self.get_bkg_resid_plot(id, bkg_id, recalc=not replot)
self._bkg_jointplot2(plot1obj, plot2obj,
overplot=overplot, clearwindow=clearwindow,
**kwargs)
def plot_bkg_fit_delchi(self, id=None, bkg_id=None, replot=False,
overplot=False, clearwindow=True, **kwargs):
"""Plot the fit results, and the residuals, for the background of
a PHA data set.
This creates two plots - the first from `plot_bkg_fit` and the
second from `plot_bkg_delchi` - for a data set.
.. versionchanged:: 4.12.2
The ``overplot`` option now works.
.. versionchanged:: 4.12.0
The Y axis of the residual plot is now always drawn using a
linear scale.
Parameters
----------
id : int or str, optional
The data set that provides the data. If not given then the
default identifier is used, as returned by `get_default_id`.
bkg_id : int or str, optional
Identify the background component to use, if there are
multiple ones associated with the data set.
replot : bool, optional
Set to ``True`` to use the values calculated by the last
call to `plot_bkg_fit_delchi`. The default is ``False``.
overplot : bool, optional
If ``True`` then add the data to an existing plot, otherwise
create a new plot. The default is ``False``.
clearwindow : bool, optional
Should the existing plot area be cleared before creating this
new plot (e.g. for multi-panel plots)?
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain PHA data.
sherpa.utils.err.IdentifierErr
If the ``bkg_id`` parameter is invalid.
sherpa.utils.err.ModelErr
If no model expression has been created for the background
data.
See Also
--------
get_bkg_fit_plot : Return the data used by plot_bkg_fit.
get_bkg_delchi_plot : Return the data used by plot_bkg_delchi.
plot : Create one or more plot types.
plot_bkg : Plot the background values for a PHA data set.
plot_bkg_model : Plot the model for the background of a PHA data set.
plot_bkg_fit : Plot the fit results (data, model) for the background of a PHA data set.
plot_bkg_fit_ratio : Plot the fit results, and the data/model ratio, for the background of a PHA data set.
plot_bkg_fit_resid : Plot the fit results, and the residuals, for the background of a PHA data set.
plot_fit : Plot the fit results (data, model) for a data set.
plot_fit_delchi : Plot the fit results, and the residuals, for a data set.
set_analysis : Set the units used when fitting and displaying spectral data.
Notes
-----
For the residual plot, the ylog setting is ignored, and the Y axis
is drawn using a linear scale.
Examples
--------
Plot the background fit and residuals (normalised by the
error) to the default data set:
>>> plot_bkg_fit_delchi()
"""
plot1obj = self.get_bkg_fit_plot(id, bkg_id, recalc=not replot)
plot2obj = self.get_bkg_delchi_plot(id, bkg_id, recalc=not replot)
self._bkg_jointplot2(plot1obj, plot2obj,
overplot=overplot, clearwindow=clearwindow,
**kwargs)
###########################################################################
# Analysis Functions
###########################################################################
def resample_data(self, id=None, niter=1000, seed=None):
"""Resample data with asymmetric error bars.
The function performs a parametric bootstrap assuming a skewed
normal distribution centered on the observed data point with
the variance given by the low and high measurement errors. The
function simulates niter realizations of the data and fits
each realization with the assumed model to obtain the best fit
parameters. The function returns the best fit parameters for
each realization, and displays the average and standard
deviation for each parameter.
.. versionadded:: 4.12.2
The samples and statistic keys were added to the return
value and the parameter values are returned as NumPy arrays
rather than as lists.
Parameters
----------
id : int or str, optional
The identifier of the data set to use.
niter : int, optional
The number of iterations to use. The default is ``1000``.
seed : int, optional
The seed for the random number generator. The default is ```None```.
Returns
-------
sampled : dict
The keys are statistic, which contains the best-fit
statistic value for each iteration, samples, which contains
the resampled data used in the fits as a niter by ndata
array, and the free parameters in the fit, containing a
NumPy array containing the fit parameter for each iteration
(of size niter).
See Also
--------
load_ascii_with_errors : Load an ASCII file with asymmetric errors as a data set.
Examples
--------
Account for of asymmetric errors when calculating parameter
uncertainties:
>>> load_ascii_with_errors(1, 'test.dat')
>>> set_model(polynom1d.p0)
>>> thaw(p0.c1)
>>> fit()
Dataset = 1
Method = levmar
Statistic = leastsq
Initial fit statistic = 4322.56
Final fit statistic = 247.768 at function evaluation 6
Data points = 61
Degrees of freedom = 59
Change in statistic = 4074.79
p0.c0 3.2661 +/- 0.193009
p0.c1 2162.19 +/- 65.8445
>>> result = resample_data(1, niter=10)
p0.c0 : avg = 4.159973865314249 , std = 1.0575403309799554
p0.c1 : avg = 1943.5489865678633 , std = 268.64478808013547
>>> print(result['p0.c0'])
[5.856479033432613, 3.8252624107243465, ... 2.8704270612985345]
>>> print(result['p0.c1'])
[1510.049972062868, 1995.4742750432902, ... 2235.9753113309894]
Display the PDF of the parameter values of the p0.c0 component
from a run with 5000 iterations:
>>> sample = resample_data(1, 5000)
p0.c0 : avg = 3.966543284267264 , std = 0.9104639711036427
p0.c1 : avg = 1988.8417667057342 , std = 220.21903089622705
>>> plot_pdf(sample['p0.c0'], bins=40)
The samples used for the analysis are returned as the samples
key (as a 2D NumPy array of size number of iterations by
number of data points), that can be used if further analysis
is desired. In this case, the distribution of the first bin
is shown as a CDF:
>>> sample = resample_data(1, 5000)
>>> samples = sample['samples']
>>> plot_cdf(samples[:, 0])
"""
data = self.get_data(id)
model = self.get_model(id)
resampledata = sherpa.sim.ReSampleData(data, model)
return resampledata(niter=niter, seed=seed)
def sample_photon_flux(self, lo=None, hi=None, id=None, num=1,
scales=None, correlated=False,
numcores=None, bkg_id=None, model=None,
otherids=(), clip='hard'):
"""Return the photon flux distribution of a model.
For each iteration, draw the parameter values of the model
from a normal distribution, evaluate the model, and sum the
model over the given range (the flux). The return array
contains the flux and parameter values for each iteration.
The units for the flux are as returned by `calc_photon_flux`.
.. versionchanged:: 4.12.2
The model, otherids, and clip parameters were added and
the return value has an extra column.
Parameters
----------
lo : number, optional
The lower limit to use when summing up the signal. If not
given then the lower value of the data grid is used.
hi : optional
The upper limit to use when summing up the signal. If not
given then the upper value of the data grid is used.
id : int or string, optional
The identifier of the data set to use. If `None`, the
default value, then all datasets with associated models are
used to calculate the errors and the model evaluation is
done using the default dataset.
num : int, optional
The number of samples to create. The default is 1.
scales : array, optional
The scales used to define the normal distributions for the
parameters. The size and shape of the array depends on the
number of free parameters in the fit (n) and the value of
the `correlated` parameter. When the parameter is `True`,
scales must be given the covariance matrix for the free
parameters (a n by n matrix that matches the parameter
ordering used by Sherpa). For un-correlated parameters
the covariance matrix can be used, or a one-dimensional
array of n elements can be used, giving the width (specified
as the sigma value of a normal distribution) for each
parameter (e.g. the square root of the diagonal elements
of the covariance matrix). If the scales parameter is not
given then the covariance matrix is evaluated for the
current model and best-fit parameters.
correlated : bool, optional
Should the correlation between the parameters be included
when sampling the parameters? If not, then each parameter
is sampled from independent distributions. In both cases
a normal distribution is used.
numcores : optional
The number of CPU cores to use. The default is to use all
the cores on the machine.
bkg_id : int or string, optional
The identifier of the background component to use. This
should only be set when the line to be measured is in the
background model.
model : model, optional
The model to integrate. If left as `None` then the source
model for the dataset will be used. This can be used to
calculate the unabsorbed flux, as shown in the examples.
The model must be part of the source expression.
otherids : sequence of integer and string ids, optional
The list of other datasets that should be included when
calculating the errors to draw values from.
clip : {'hard', 'soft', 'none'}, optional
What clipping strategy should be applied to the sampled
parameters. The default ('hard') is to fix values at their
hard limits if they exceed them. A value of 'soft' uses
the soft limits instead, and 'none' applies no
clipping. The last column in the returned arrays indicates
if the row had any clipped parameters (even when clip is
set to 'none').
Returns
-------
vals
The return array has the shape ``(num, N+2)``, where ``N``
is the number of free parameters in the fit and num is the
`num` parameter. The rows of this array contain the flux
value, as calculated by `calc_photon_flux`, followed by the
values of the thawed parameters used for that iteration,
and then a flag column indicating if the parameters were
clipped (1) or not (0). The order of the parameters
matches the data returned by `get_fit_results`.
See Also
--------
calc_photon_flux : Integrate the unconvolved source model over a pass band.
calc_energy_flux : Integrate the unconvolved source model over a pass band.
covar : Estimate the confidence intervals using the confidence method.
plot_cdf : Plot the cumulative density function of an array.
plot_pdf : Plot the probability density function of an array.
plot_energy_flux : Display the energy flux distribution.
plot_photon_flux : Display the photon flux distribution.
plot_trace : Create a trace plot of row number versus value.
sample_energy_flux : Return the energy flux distribution of a model.
sample_flux : Return the flux distribution of a model.
Notes
-----
There are two ways to use this function to calculate fluxes
from multiple sources. The first is to leave the `id` argument
as `None`, in which case all available datasets will be used.
Alternatively, the `id` and `otherids` arguments can be set to
list the exact datasets to use, such as `id=1,
otherids=(2,3,4)`.
The returned value contains all free parameters in the fit,
even if they are not included in the model argument (e.g.
when calculating an unabsorbed flux).
Examples
--------
Calculate the photon flux distribution for the range 0.5 to 7,
and plot up the resulting flux distribution (as a cumulative
distribution):
>>> vals = sample_photon_flux(0.5, 7, num=1000)
>>> plot_cdf(vals[:, 0], name='flux')
Repeat the above, but allowing the parameters to be
correlated, and then calculate the 5, 50, and 95 percent
quantiles of the photon flux distribution:
>>> cvals = sample_photon_flux(0.5, 7, num=1000, correlated=True)
>>> np.percentile(cvals[:, 0], [5, 50, 95])
The photon flux of a component (or sub-set of components) can be
calculated using the model argument. For the following case,
an absorbed power-law was used to fit the data -
`xsphabs.gal * powerlaw.pl` - and then the flux of just the
power-law component is calculated. Note that the returned
array has columns 'flux', 'gal.nh', 'pl.gamma', and 'pl.ampl'
(that is flux and then the free parameters in the full model).
>>> vals = sample_photon_flux(0.5, 7, model=pl, num=1000, correlated=True)
Calculate the 2-10 keV flux for the pl model using a joint fit
to the datasets 1, 2, 3, and 4:
>>> vals = sample_photon_flux(2, 10, model=pl, id=1, otherids=(2,3,4),
... num=1000)
Use the given parameter errors for sampling the parameter distribution.
The fit must have three free parameters, and each parameter is
sampled independently (in this case parerrs gives the sigma
values for each parameter):
>>> parerrs = [0.25, 1.22, 1.04e-4]
>>> vals = sample_photon_flux(2, 10, num=5000, scales=parerrs)
In this case the parameter errors are taken from the covariance
analysis, using the `parmaxes` field since these are positive.
>>> covar()
>>> parerrs = get_covar_results().parmaxes
>>> vals = sample_photon_flux(0.5, 2, num=1000, scales=parerrs)
Run covariance to estimate the parameter errors and then
extract the covariance matrix from the results (as the `cmat`
variable). This matrix is then used to define the parameter
widths - including correlated terms - in the flux sampling,
after being increased by ten percent. This is used to
calculate both the absorbed (`vals1`) and unabsorbed (`vals2`)
fluxes. Both arrays have columns: flux, gal.nh, pl.gamma, and
pl.ampl.
>>> set_source(xsphabs.gal * powlaw1d.pl)
>>> fit()
>>> covar()
>>> cmat = get_covar_results().extra_output
>>> vals1 = sample_photon_flux(2, 10, num=5000, correlated=True,
... scales=1.1 * cmat)
>>> vals2 = sample_photon_flux(2, 10, num=5000, correlated=True,
... model=pl, scales=1.1 * cmat)
Calculate the flux and error distribution using fits
to all datasets:
>>> set_source(xsphabs.gal * xsapec.clus)
>>> set_source(2, gal * clus)
>>> set_source(3, gal * clus)
... fit the data
>>> vals = sample_photon_flux(0.5, 10, model=clus, num=10000)
Calculate the flux and error distribution using fits
to an explicit set of datasets (in this case datasets
1 and 2):
>>> vals = sample_photon_flux(0.5, 10, id=1, otherids=[2],
... model=clus, num=10000)
Generate two sets of parameter values, where the parameter
values in v1 are generated from a random distribution and then
clipped to the hard limits of the parameters, and the values
in v2 use the soft limits of the parameters. The last column
in both v1 and v2 indicates whether the row had any clipped
parameters. The flux1_filt and flux2_filt arrays indicate the
photon-flux distribution after it has been filtered to remove
any row with clipped parameters:
>>> v1 = sample_photon_flux(0.5, 2, num=1000)
>>> v2 = sample_photon_flux(0.5, 2, num=1000, clip='soft')
>>> flux1 = v1[:, 0]
>>> flux2 = v2[:, 0]
>>> flux1_filt = flux1[v1[:, -1] == 0]
>>> flux2_filt = flux2[v2[:, -1] == 0]
"""
_, fit = self._get_fit(id, otherids=otherids)
if bkg_id is None:
data = self.get_data(id)
if model is None:
model = self.get_source(id)
else:
data = self.get_bkg(id, bkg_id)
if model is None:
model = self.get_bkg_source(id, bkg_id)
correlated = sherpa.utils.bool_cast(correlated)
return sherpa.astro.flux.sample_flux(fit, data, model,
method=sherpa.astro.utils.calc_photon_flux,
correlated=correlated,
num=num, lo=lo, hi=hi,
numcores=numcores,
samples=scales, clip=clip)
def sample_energy_flux(self, lo=None, hi=None, id=None, num=1,
                       scales=None, correlated=False,
                       numcores=None, bkg_id=None, model=None,
                       otherids=(), clip='hard'):
    """Return the energy flux distribution of a model.

    For each iteration, draw the parameter values of the model
    from a normal distribution, evaluate the model, and sum the
    model over the given range (the flux). The return array
    contains the flux and parameter values for each iteration.
    The units for the flux are as returned by `calc_energy_flux`.

    .. versionchanged:: 4.12.2
       The model, otherids, and clip parameters were added and
       the return value has an extra column.

    Parameters
    ----------
    lo : number, optional
        The lower limit to use when summing up the signal. If not
        given then the lower value of the data grid is used.
    hi : optional
        The upper limit to use when summing up the signal. If not
        given then the upper value of the data grid is used.
    id : int or string, optional
        The identifier of the data set to use. If `None`, the
        default value, then all datasets with associated models are
        used to calculate the errors and the model evaluation is
        done using the default dataset.
    num : int, optional
        The number of samples to create. The default is 1.
    scales : array, optional
        The scales used to define the normal distributions for the
        parameters. The size and shape of the array depends on the
        number of free parameters in the fit (n) and the value of
        the `correlated` parameter. When the parameter is `True`,
        scales must be given the covariance matrix for the free
        parameters (a n by n matrix that matches the parameter
        ordering used by Sherpa). For un-correlated parameters
        the covariance matrix can be used, or a one-dimensional
        array of n elements can be used, giving the width (specified
        as the sigma value of a normal distribution) for each
        parameter (e.g. the square root of the diagonal elements
        of the covariance matrix). If the scales parameter is not
        given then the covariance matrix is evaluated for the
        current model and best-fit parameters.
    correlated : bool, optional
        Should the correlation between the parameters be included
        when sampling the parameters? If not, then each parameter
        is sampled from independent distributions. In both cases
        a normal distribution is used.
    numcores : optional
        The number of CPU cores to use. The default is to use all
        the cores on the machine.
    bkg_id : int or string, optional
        The identifier of the background component to use. This
        should only be set when the line to be measured is in the
        background model.
    model : model, optional
        The model to integrate. If left as `None` then the source
        model for the dataset will be used. This can be used to
        calculate the unabsorbed flux, as shown in the examples.
        The model must be part of the source expression.
    otherids : sequence of integer and string ids, optional
        The list of other datasets that should be included when
        calculating the errors to draw values from.
    clip : {'hard', 'soft', 'none'}, optional
        What clipping strategy should be applied to the sampled
        parameters. The default ('hard') is to fix values at their
        hard limits if they exceed them. A value of 'soft' uses
        the soft limits instead, and 'none' applies no
        clipping. The last column in the returned arrays indicates
        if the row had any clipped parameters (even when clip is
        set to 'none').

    Returns
    -------
    vals
        The return array has the shape ``(num, N+2)``, where ``N``
        is the number of free parameters in the fit and num is the
        `num` parameter. The rows of this array contain the flux
        value, as calculated by `calc_energy_flux`, followed by the
        values of the thawed parameters used for that iteration,
        and then a flag column indicating if the parameters were
        clipped (1) or not (0). The order of the parameters
        matches the data returned by `get_fit_results`.

    See Also
    --------
    calc_photon_flux : Integrate the unconvolved source model over a pass band.
    calc_energy_flux : Integrate the unconvolved source model over a pass band.
    covar : Estimate the confidence intervals using the confidence method.
    plot_cdf : Plot the cumulative density function of an array.
    plot_pdf : Plot the probability density function of an array.
    plot_energy_flux : Display the energy flux distribution.
    plot_photon_flux : Display the photon flux distribution.
    plot_trace : Create a trace plot of row number versus value.
    sample_photon_flux : Return the photon flux distribution of a model.
    sample_flux : Return the flux distribution of a model.

    Notes
    -----
    There are two ways to use this function to calculate fluxes
    from multiple sources. The first is to leave the `id` argument
    as `None`, in which case all available datasets will be used.
    Alternatively, the `id` and `otherids` arguments can be set to
    list the exact datasets to use, such as `id=1,
    otherids=(2,3,4)`.

    The returned value contains all free parameters in the fit,
    even if they are not included in the model argument (e.g.
    when calculating an unabsorbed flux).

    Examples
    --------
    Calculate the energy flux distribution for the range 0.5 to 7,
    and plot up the resulting flux distribution (as a cumulative
    distribution):

    >>> vals = sample_energy_flux(0.5, 7, num=1000)
    >>> plot_cdf(vals[:, 0], name='flux')

    Repeat the above, but allowing the parameters to be
    correlated, and then calculate the 5, 50, and 95 percent
    quantiles of the energy flux distribution:

    >>> cvals = sample_energy_flux(0.5, 7, num=1000, correlated=True)
    >>> np.percentile(cvals[:, 0], [5, 50, 95])

    The energy flux of a component (or sub-set of components) can be
    calculated using the model argument. For the following case,
    an absorbed power-law was used to fit the data -
    `xsphabs.gal * powerlaw.pl` - and then the flux of just the
    power-law component is calculated. Note that the returned
    array has columns 'flux', 'gal.nh', 'pl.gamma', and 'pl.ampl'
    (that is flux and then the free parameters in the full model).

    >>> vals = sample_energy_flux(0.5, 7, model=pl, num=1000, correlated=True)

    Calculate the 2-10 keV flux for the pl model using a joint fit
    to the datasets 1, 2, 3, and 4:

    >>> vals = sample_energy_flux(2, 10, model=pl, id=1, otherids=(2,3,4),
    ...                           num=1000)

    Use the given parameter errors for sampling the parameter
    distribution (each parameter is sampled independently, with
    parerrs giving the sigma value for each free parameter):

    >>> parerrs = [0.25, 1.22, 1.04e-4]
    >>> vals = sample_energy_flux(2, 10, num=5000, scales=parerrs)

    Run covariance, extract the covariance matrix from the results,
    and use it - scaled up by ten percent - to define correlated
    parameter widths when calculating both the absorbed (`vals1`)
    and unabsorbed (`vals2`) fluxes:

    >>> set_source(xsphabs.gal * powlaw1d.pl)
    >>> fit()
    >>> covar()
    >>> cmat = get_covar_results().extra_output
    >>> vals1 = sample_energy_flux(2, 10, num=5000, correlated=True,
    ...                            scales=1.1 * cmat)
    >>> vals2 = sample_energy_flux(2, 10, num=5000, correlated=True,
    ...                            model=pl, scales=1.1 * cmat)

    Generate two sets of parameter values, where v1 clips the
    sampled values to the hard limits of the parameters and v2
    uses the soft limits. The last column in both arrays flags
    the rows that had any clipped parameter, so the flux
    distribution can be filtered to remove them:

    >>> v1 = sample_energy_flux(0.5, 2, num=1000)
    >>> v2 = sample_energy_flux(0.5, 2, num=1000, clip='soft')
    >>> flux1_filt = v1[:, 0][v1[:, -1] == 0]
    >>> flux2_filt = v2[:, 0][v2[:, -1] == 0]

    """
    _, fit = self._get_fit(id, otherids=otherids)

    # Select the data and - when no explicit model was given - the
    # model expression from either the source or the background,
    # depending on whether bkg_id is set.
    if bkg_id is None:
        data = self.get_data(id)
        if model is None:
            model = self.get_source(id)
    else:
        data = self.get_bkg(id, bkg_id)
        if model is None:
            model = self.get_bkg_source(id, bkg_id)

    correlated = sherpa.utils.bool_cast(correlated)

    # The actual sampling is done by the flux module; the only
    # difference from sample_photon_flux is the method argument.
    return sherpa.astro.flux.sample_flux(fit, data, model,
                                         method=sherpa.astro.utils.calc_energy_flux,
                                         correlated=correlated,
                                         num=num, lo=lo, hi=hi,
                                         numcores=numcores,
                                         samples=scales, clip=clip)
def sample_flux(self, modelcomponent=None, lo=None, hi=None, id=None,
                num=1, scales=None, correlated=False,
                numcores=None, bkg_id=None, Xrays=True, confidence=68):
    """Return the flux distribution of a model.

    For each iteration, draw the parameter values of the model
    from a normal distribution, filter out samples that lie
    outside the soft limits of the parameters, evaluate the model,
    and sum the model over the given range (the flux). Return the
    parameter values used, together with the median, upper, and
    lower quantiles of the flux distribution.

    .. versionchanged:: 4.13.1
       The `id` parameter is now used if set (previously the
       default dataset was always used). The screen output is now
       controlled by the Sherpa logging setup. The flux
       calculation no-longer excludes samples at the parameter
       soft limits, as this could cause an over-estimation of the
       flux when a parameter is only an upper limit. The statistic
       value is now returned for each row, even those that were
       excluded from the flux calculation. The last-but-one column
       of the returned `vals` array now records the rows that were
       excluded from the flux calculation.

    Parameters
    ----------
    modelcomponent : optional
        The model to use. It can be a single component or
        a combination. If not given, then the full source
        expression for the data set is used.
    lo : number, optional
        The lower limit to use when summing up the signal. If not
        given then the lower value of the data grid is used.
    hi : optional
        The upper limit to use when summing up the signal. If not
        given then the upper value of the data grid is used.
    id : int or string, optional
        The identifier of the data set to use. The default value
        (``None``) means that the default identifier, as returned by
        `get_default_id`, is used.
    num : int, optional
        The number of samples to create. The default is 1.
    scales : array, optional
        The scales used to define the normal distributions for the
        parameters. The form depends on the `correlated`
        parameter: when ``True``, the array should be a symmetric
        positive semi-definite (N, N) array, otherwise a 1D array
        of length N, where N is the number of free parameters.
    correlated : bool, optional
        If ``True`` (the default is ``False``) then `scales` is the
        full covariance matrix, otherwise it is just a 1D array
        containing the variances of the parameters (the diagonal
        elements of the covariance matrix).
    numcores : optional
        The number of CPU cores to use. The default is to use all
        the cores on the machine.
    bkg_id : int or string, optional
        The identifier of the background component to use. This
        should only be set when the line to be measured is in the
        background model.
    Xrays : bool, optional
        When ``True`` (the default), assume that the model has
        units of photon/cm^2/s, and use `calc_energy_flux`
        to convert to erg/cm^2/s. This should not be changed from
        the default value.
    confidence : number, optional
        The confidence level for the upper and lower values, as a
        percentage (0 to 100). The default is 68, so as to return
        the one-sigma range.

    Returns
    -------
    (fullflux, cptflux, vals)
        The fullflux and cptflux arrays contain the results for the
        full source model and the flux of the `modelcomponent`
        argument (they can be the same). They have three elements
        and give the median value, the value containing 100 -
        confidence/2 of the data, and the fraction containing
        confidence/2 of the flux distribution. For the default
        confidence argument of 68 this means the last two give the
        one-sigma upper and lower bounds. The vals array has a
        shape of ``(num+1, N+3)``, where ``N`` is the number of
        free parameters and num is the `num` parameter. The rows of
        this array contain the flux value for the iteration (for
        the full source model), the parameter values, a flag
        indicating whether any parameter in that row was clipped
        (and so was excluded from the flux calculation), and the
        statistic value for this set of parameters.

    Raises
    ------
    sherpa.utils.err.ArgumentErr
        If `confidence` is outside the range (0, 100].
    NotImplementedError
        If `Xrays` is set to ``False``.

    See Also
    --------
    calc_photon_flux : Integrate the unconvolved source model over a pass band.
    calc_energy_flux : Integrate the unconvolved source model over a pass band.
    covar : Estimate the confidence intervals using the confidence method.
    plot_energy_flux : Display the energy flux distribution.
    plot_photon_flux : Display the photon flux distribution.
    sample_energy_flux : Return the energy flux distribution of a model.
    sample_photon_flux : Return the photon flux distribution of a model.

    Notes
    -----
    Setting the Xrays parameter to False is currently unsupported.

    The summary output displayed by this routine - giving the
    median and confidence ranges - is controlled by the standard
    Sherpa logging instance, and can be hidden by changing the
    logging to a level greater than "INFO" (e.g. with
    `sherpa.utils.logging.SherpaVerbosity`).

    This routine can not be used if you have used set_full_model:
    the calc_energy_flux routine should be used instead.

    Examples
    --------
    Estimate the flux distribution for the "src" component using
    the default data set. The parameters are assumed to be
    uncorrelated.

    >>> set_source(xsphabs.gal * xsapec.src)
    >>> fit()
    >>> fflux, cflux, vals = sample_flux(src, 0.5, 2, num=1000)
    original model flux = 2.88993e-14, + 1.92575e-15, - 1.81963e-15
    model component flux = 7.96865e-14, + 4.65144e-15, - 4.41222e-15
    >>> f0, fhi, flo = cflux
    >>> print("Flux: {:.2e} {:+.2e} {:+.2e}".format(f0, fhi-f0, flo-f0))
    Flux: 7.97e-14 +4.65e-15 -4.41e-15

    This time the parameters are assumed to be correlated, using
    the covariance matrix for the fit:

    >>> ans = sample_flux(src, 0.5, 2, num=1000, correlated=True)

    Explicitly send in the parameter widths (sigma values), using
    the estimates generated by `covar`:

    >>> covar()
    >>> errs = get_covar_results().parmaxes
    >>> ans = sample_flux(correlated=False, scales=errs, num=500)

    Explicitly send in a covariance matrix:

    >>> cmatrix = get_covar_results().extra_output
    >>> ans = sample_flux(correlated=True, scales=cmatrix, num=500)

    Run sample_flux after changing the logging level, so that the
    screen output from sample_flux is not displayed. We use the
    SherpaVerbosity function from `sherpa.utils.logging` to
    only change the logging level while runnng sample_flux:

    >>> from sherpa.utils.logging import SherpaVerbosity
    >>> with SherpaVerbosity('WARN'):
    ...     ans = sample_flux(num=1000, lo=0.5, hi=7)

    """
    if (confidence <= 0) or (confidence > 100):
        raise ArgumentErr('bad', 'confidence', 'must be > 0 and <= 100')

    # The Xrays=False code path was never completed (calc_energy_flux
    # returns a scalar, which calc_sample_flux cannot process), so it
    # is explicitly rejected here.
    if not Xrays:
        raise NotImplementedError("sample_flux(Xrays=False) is currently unsupported")

    _, fit = self._get_fit(id)
    if bkg_id is not None:
        data = self.get_bkg(id, bkg_id)
    else:
        data = self.get_data(id)

    if (modelcomponent is not None) and \
       not isinstance(modelcomponent, sherpa.models.model.Model):
        raise ArgumentTypeErr('badarg', 'modelcomponent', 'a model')

    # We can not have a "full model" expression so error-out nicely here.
    # Thanks to _get_fit we know we have a model so any error below can
    # not be because no model is set but must be a "need full model"
    # error. TODO: should we have a nicer way to determine this?
    # Also note that it appears we have different ways the two code
    # paths can error out
    #
    if bkg_id is not None:
        try:
            self.get_bkg_source(id, bkg_id)
        except (IdentifierErr, ModelErr):
            # At present ModelErr is thrown but keep in IdentifierErr
            # just in case
            raise IdentifierErr('Please use calc_energy_flux as set_bkg_full_model was used') from None
    else:
        try:
            self.get_source(id)
        except IdentifierErr:
            raise IdentifierErr('Please use calc_energy_flux as set_full_model was used') from None

    correlated = sherpa.utils.bool_cast(correlated)

    # Why is this +1? The original comment was
    # "num+1 cause sample energy flux is under-reporting its result?"
    #
    niter = num + 1

    # NOTE: the samples are drawn from the full model expression
    # as this is how it was originally written. (The guard above
    # has already rejected Xrays=False, so there is only one path.)
    #
    samples = self.sample_energy_flux(lo=lo, hi=hi, id=id, num=niter,
                                      scales=scales, clip='soft',
                                      correlated=correlated,
                                      numcores=numcores,
                                      bkg_id=bkg_id)

    return sherpa.astro.flux.calc_sample_flux(lo=lo, hi=hi,
                                              fit=fit, data=data, samples=samples,
                                              modelcomponent=modelcomponent,
                                              confidence=confidence)
def eqwidth(self, src, combo, id=None, lo=None, hi=None, bkg_id=None,
            error=False, params=None, otherids=(), niter=1000,
            covar_matrix=None):
    """Calculate the equivalent width of an emission or absorption line.

    The equivalent width [1]_ is calculated in the selected units
    for the data set (which can be retrieved with `get_analysis`).

    .. versionchanged:: 4.10.1
       The `error` parameter was added which controls whether the
       return value is a scalar (the calculated equivalent width),
       when set to `False`, or the median value, error limits, and
       ancillary values.

    Parameters
    ----------
    src
        The continuum model (this may contain multiple components).
    combo
        The continuum plus line (absorption or emission) model.
    lo : optional
        The lower limit for the calculation (the units are set by
        `set_analysis` for the data set). The default value (``None``)
        means that the lower range of the data set is used.
    hi : optional
        The upper limit for the calculation (the units are set by
        `set_analysis` for the data set). The default value (``None``)
        means that the upper range of the data set is used.
    id : int or string, optional
        The data set that provides the data. If not given then
        all data sets with an associated model are used simultaneously.
    bkg_id : int or string, optional
        The identifier of the background component to use. This
        should only be set when the line to be measured is in the
        background model.
    error : bool, optional
        The parameter indicates whether the errors are to be calculated
        or not. The default value is False
    params : 2D array, optional
        The default is None, in which case get_draws shall be called.
        The user can input the parameter array (e.g. from running
        `sample_flux`).
    otherids : sequence of integer or strings, optional
        Other data sets to use in the calculation.
    niter : int, optional
        The number of draws to use. The default is ``1000``.
    covar_matrix : 2D array, optional
        The covariance matrix to use. If ``None`` then the
        result from `get_covar_results().extra_output` is used.

    Returns
    -------
    retval
        If ``error`` is ``False``, then returns the equivalent width,
        otherwise the median, 1 sigma lower bound, 1 sigma upper
        bound, the parameters array, and the array of the equivalent
        width values used to determine the errors.

    See Also
    --------
    calc_model_sum : Sum up the fitted model over a pass band.
    calc_source_sum : Calculate the un-convolved model signal.
    get_default_id : Return the default data set identifier.
    set_model : Set the source model expression.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Equivalent_width

    Examples
    --------
    Set a source model (a powerlaw for the continuum and a
    gaussian for the line), fit it, and then evaluate the
    equivalent width of the line. The example assumes that
    this is a PHA data set, with an associated response,
    so that the analysis can be done in wavelength units.

    >>> set_source(powlaw1d.cont + gauss1d.line)
    >>> set_analysis('wavelength')
    >>> fit()
    >>> eqwidth(cont, cont+line)
    2.1001988282497308

    The calculation is restricted to the range 20 to 20
    Angstroms.

    >>> eqwidth(cont, cont+line, lo=20, hi=24)
    1.9882824973082310

    The calculation is done for the background model of
    data set 2, over the range 0.5 to 2 (the units of this
    are whatever the analysis setting for this data set id).

    >>> set_bkg_source(2, const1d.flat + gauss1d.bline)
    >>> eqwidth(flat, flat+bline, id=2, bkg_id=1, lo=0.5, hi=2)
    0.45494599793003426

    With the `error` flag set to `True`, the return value is
    enhanced with extra information, such as the median and
    one-sigma ranges on the equivalent width::

    >>> res = eqwidth(p1, p1 + g1, error=True)
    >>> ewidth = res[0]  # the median equivalent width
    >>> errlo = res[1]   # the one-sigma lower limit
    >>> errhi = res[2]   # the one-sigma upper limit
    >>> pars = res[3]    # the parameter values used
    >>> ews = res[4]     # array of eq. width values

    which can be used to display the probability density or
    cumulative distribution function of the equivalent widths::

    >>> plot_pdf(ews)
    >>> plot_cdf(ews)

    """
    data = self.get_data(id)
    if bkg_id is not None:
        data = self.get_bkg(id, bkg_id)

    ####################################################
    if error:

        def is_numpy_ndarray(arg, name, npars, dim1=None):
            # Validate that arg is a 2D ndarray whose first axis
            # has npars elements and - when dim1 is given - whose
            # second axis has dim1 elements.
            if not isinstance(arg, numpy.ndarray):
                msg = name + ' must be of type numpy.ndarray'
                raise IOErr(msg)
            shape = arg.shape
            if len(shape) != 2:
                msg = name + ' must be 2d numpy.ndarray'
                raise IOErr(msg)
            if shape[0] != npars:
                msg = name + ' must be of dimension (%d, x)' % npars
                raise IOErr(msg)
            if dim1 is not None:
                # Compare against dim1 (previously this incorrectly
                # checked against npars, which only worked because
                # all callers pass dim1 == npars).
                if shape[1] != dim1:
                    msg = name + ' must be of dimension (%d, %d)' % \
                        (npars, dim1)
                    raise IOErr(msg)

        _, fit = self._get_fit(id)
        fit_results = self.get_fit_results()
        parnames = fit_results.parnames
        npar = len(parnames)
        orig_par_vals = numpy.array(fit_results.parvals)
        if params is None:
            # run get_draws or normal distribution depending on fit stat
            if covar_matrix is None:
                try:
                    # check just in case usr has run covar()
                    covar_results = self.get_covar_results()
                    covar_matrix = covar_results.extra_output
                except sherpa.utils.err.SessionErr:
                    # usr has not run covar, will have to run it
                    covar_matrix = fit.est_errors().extra_output
            is_numpy_ndarray(covar_matrix, 'covar_matrix', npar, npar)
            # Have enough stuff to generate samples
            if isinstance(self._current_stat, (Cash, CStat, WStat)):
                _, _, params = \
                    self.get_draws(id, otherids=otherids, niter=niter,
                                   covar_matrix=covar_matrix)
            else:
                sampler = NormalParameterSampleFromScaleMatrix()
                tmp = sampler.get_sample(fit, covar_matrix, niter + 1)
                params = tmp.transpose()
        else:
            is_numpy_ndarray(params, 'params', npar)

        mins = fit.model._get_thawed_par_mins()
        maxs = fit.model._get_thawed_par_maxes()
        eqw = numpy.zeros_like(params[0, :])
        for params_index in range(len(params[0, :])):
            for parnames_index, parname in enumerate(parnames):
                val = params[parnames_index, params_index]
                # Note: the normal dist does not respect the soft limits
                mymin = mins[parnames_index]
                mymax = maxs[parnames_index]
                val = max(mymin, min(val, mymax))
                self.set_par(parname, val)

            eqw[params_index] = \
                sherpa.astro.utils.eqwidth(data, src, combo, lo, hi)

        median, lower, upper = sherpa.utils.get_error_estimates(eqw)
        fit.model.thawedpars = orig_par_vals
        return median, lower, upper, params, eqw
    ####################################################

    return sherpa.astro.utils.eqwidth(data, src, combo, lo, hi)
def calc_photon_flux(self, lo=None, hi=None, id=None, bkg_id=None,
                     model=None):
    """Integrate the unconvolved source model over a pass band.

    Compute the integral of S(E) over a pass band, where S(E) is
    the spectral model evaluated for each bin without any
    instrumental responses applied.

    .. versionchanged:: 4.12.1
       The model parameter was added.

    Parameters
    ----------
    lo, hi : number, optional
        If both are None or both are set then the flux over the
        given band is calculated. If only one is set then the
        flux density at that point is returned. The units for
        `lo` and `hi` are given by the current analysis setting.
    id : int or str, optional
        Use the source expression associated with this data set. If
        not given then the default identifier is used, as returned
        by `get_default_id`.
    bkg_id : int or str, optional
        If set, use the model associated with the given background
        component rather than the source model.
    model : model, optional
        The model to integrate. If left as `None` then the source
        model for the dataset will be used. This can be used to
        calculate the unabsorbed flux, as shown in the examples.

    Returns
    -------
    flux : number
        The flux or flux density. For X-Spec style models the
        flux units will be photon/cm^2/s and the flux density units
        will be either photon/cm^2/s/keV or photon/cm^2/s/Angstrom,
        depending on the analysis setting.

    See Also
    --------
    calc_data_sum : Sum up the observed counts over a pass band.
    calc_model_sum : Sum up the fitted model over a pass band.
    calc_energy_flux : Integrate the unconvolved source model over a pass band.
    calc_source_sum: Sum up the source model over a pass band.
    set_analysis : Set the units used when fitting and displaying spectral data
    set_model : Set the source model expression for a data set.

    Notes
    -----
    The units of `lo` and `hi` are determined by the analysis
    setting for the data set (e.g. `get_analysis`). Any existing
    filter on the data set - e.g. as created by `ignore` or
    `notice` - is ignored by this function.

    The units of the answer depend on the model components used in
    the source expression and the axis or axes of the data set.
    It is unlikely to give sensible results for 2D data sets.

    Examples
    --------
    Calculate the integral of the unconvolved model over the
    full range of the default data set:

    >>> calc_photon_flux()

    Return the flux for the data set labelled "core":

    >>> calc_photon_flux(id='core')

    Calculate the photon flux over the ranges 0.5 to 2 and 0.5 to
    7 keV, and compared to the energy fluxes for the same bands:

    >>> set_analysis('energy')
    >>> calc_photon_flux(0.5, 2)
    0.35190275
    >>> calc_photon_flux(0.5, 7)
    0.49050927
    >>> calc_energy_flux(0.5, 2)
    5.7224906878061796e-10
    >>> calc_energy_flux(0.5, 7)
    1.3758131915063825e-09

    Calculate the photon flux density at 0.5 keV for the source
    "core":

    >>> calc_photon_flux(0.5, id="core")
    0.64978176

    Calculate the flux for the model applied to the second background
    component of the 'jet' data set, for the wavelength range 20 to 22
    Angstroms:

    >>> set_analysis('jet', 'wave')
    >>> calc_photon_flux(20, 22, id='jet', bkg_id=2)

    For the following example, the source model is an absorbed
    powerlaw - `xsphabs.gal * powerlaw.pl` - so that the `fabs`
    value represents the absorbed flux, and `funabs` the unabsorbed
    flux (i.e. just the power-law component):

    >>> fabs = calc_photon_flux(0.5, 7)
    >>> funabs = calc_photon_flux(0.5, 7, model=pl)

    """
    # Pick the data first: the background when bkg_id is set,
    # otherwise the source data set.
    if bkg_id is not None:
        data = self.get_bkg(id, bkg_id)
    else:
        data = self.get_data(id)

    # A user-supplied model must be a model object; otherwise fall
    # back to the matching (source or background) model expression.
    if model is not None:
        _check_type(model, sherpa.models.Model, 'model',
                    'a model object')
    elif bkg_id is not None:
        model = self.get_bkg_source(id, bkg_id)
    else:
        model = self.get_source(id)

    return sherpa.astro.utils.calc_photon_flux(data, model, lo, hi)
def calc_energy_flux(self, lo=None, hi=None, id=None, bkg_id=None,
                     model=None):
    """Integrate the unconvolved source model over a pass band.

    Calculate the integral of E * S(E) over a pass band, where E is
    the energy of the bin and S(E) the spectral model evaluated for
    that bin (that is, the model without any instrumental responses
    applied to it).

    .. versionchanged:: 4.12.1
       The model parameter was added.

    Parameters
    ----------
    lo, hi : number, optional
        If both are None or both are set then calculate the flux
        over the given band. If only one is set then calculate
        the flux density at that point. The units for `lo` and `hi`
        are given by the current analysis setting.
    id : int or str, optional
        Use the source expression associated with this data set. If
        not given then the default identifier is used, as returned
        by ``get_default_id``.
    bkg_id : int or str, optional
        If set, use the model associated with the given background
        component rather than the source model.
    model : model, optional
        The model to integrate. If left as `None` then the source
        model for the dataset will be used. This can be used to
        calculate the unabsorbed flux, as shown in the examples.

    Returns
    -------
    flux : number
        The flux or flux density. For X-Spec style models the
        flux units will be erg/cm^2/s and the flux density units
        will be either erg/cm^2/s/keV or erg/cm^2/s/Angstrom,
        depending on the analysis setting.

    See Also
    --------
    calc_data_sum : Sum up the data values over a pass band.
    calc_model_sum : Sum up the fitted model over a pass band.
    calc_source_sum : Sum up the source model over a pass band.
    calc_photon_flux : Integrate the unconvolved source model over a pass band.
    set_analysis : Set the units used when fitting and displaying spectral data.
    set_model : Set the source model expression for a data set.

    Notes
    -----
    The units of ``lo`` and ``hi`` are determined by the analysis
    setting for the data set (e.g. ``get_analysis``).

    Any existing filter on the data set - e.g. as created by
    ``ignore`` or ``notice`` - is ignored by this function.

    The units of the answer depend on the model components used in
    the source expression and the axis or axes of the data set.
    It is unlikely to give sensible results for 2D data sets.

    Examples
    --------
    Calculate the integral of the unconvolved model over the
    full range of the default data set:

    >>> calc_energy_flux()

    Return the flux for the data set labelled "core":

    >>> calc_energy_flux(id='core')

    Calculate the energy flux over the ranges 0.5 to 2 and 0.5 to
    7 keV:

    >>> set_analysis('energy')
    >>> calc_energy_flux(0.5, 2)
    5.7224906878061796e-10
    >>> calc_energy_flux(0.5, 7)
    1.3758131915063825e-09

    Calculate the energy flux density at 0.5 keV for the source
    "core":

    >>> calc_energy_flux(0.5, id="core")
    5.2573786652855304e-10

    Calculate the flux for the model applied to the second background
    component of the 'jet' data set, for the wavelength range 20 to 22
    Angstroms:

    >>> set_analysis('jet', 'wave')
    >>> calc_energy_flux(20, 22, id='jet', bkg_id=2)

    For the following example, the source model is an absorbed
    powerlaw - `xsphabs.gal * powerlaw.pl` - so that the `fabs`
    value represents the absorbed flux, and `funabs` the unabsorbed
    flux (i.e. just the power-law component):

    >>> fabs = calc_energy_flux(0.5, 7)
    >>> funabs = calc_energy_flux(0.5, 7, model=pl)
    """
    # Work against the background component when bkg_id is set,
    # otherwise against the source data set.
    use_bkg = bkg_id is not None
    data = self.get_bkg(id, bkg_id) if use_bkg else self.get_data(id)

    if model is None:
        # No explicit model: integrate the (background) source
        # expression associated with the data set.
        model = (self.get_bkg_source(id, bkg_id) if use_bkg
                 else self.get_source(id))
    else:
        _check_type(model, sherpa.models.Model, 'model',
                    'a model object')

    return sherpa.astro.utils.calc_energy_flux(data, model, lo, hi)
# DOC-TODO: how do lo/hi limits interact with bin edges;
# is it all in or partially in or ...
def calc_data_sum(self, lo=None, hi=None, id=None, bkg_id=None):
    """Sum up the data values over a pass band.

    This function is for one-dimensional data sets: use
    `calc_data_sum2d` for two-dimensional data sets.

    Parameters
    ----------
    lo, hi : number, optional
        If both are None or both are set then sum up the data
        over the given band. If only one is set then return
        the data count in the given bin.
    id : int or str, optional
        Use the source expression associated with this data set. If
        not given then the default identifier is used, as returned
        by `get_default_id`.
    bkg_id : int or str, optional
        If set, use the model associated with the given background
        component rather than the source model.

    Returns
    -------
    dsum : number
        If a background estimate has been subtracted from the data
        set then the calculation will use the background-subtracted
        values.

    See Also
    --------
    calc_data_sum2d : Sum up the data values of a 2D data set.
    calc_model_sum : Sum up the fitted model over a pass band.
    calc_energy_flux : Integrate the unconvolved source model over a pass band.
    calc_photon_flux : Integrate the unconvolved source model over a pass band.
    calc_source_sum : Sum up the source model over a pass band.
    set_model : Set the source model expression for a data set.

    Notes
    -----
    The units of ``lo`` and ``hi`` are determined by the analysis
    setting for the data set (e.g. `get_analysis`). The summation
    occurs over those points in the data set that lie within this
    range, not the range itself.

    Any existing filter on the data set - e.g. as created by
    `ignore` or `notice` - is ignored by this function.

    If a grouping scheme has been applied to the data set then it
    will be used. This can change the results, since the first and
    last bins of the selected range may extend outside the
    requested range.

    Examples
    --------
    Sum up the data values (the dependent axis) for all points or
    bins in the default data set:

    >>> dsum = calc_data_sum()

    Calculate the number of counts over the ranges 0.5 to 2 and 0.5 to
    7 keV for the default data set, first using the observed signal
    and then, for the 0.5 to 2 keV band, the background-subtracted
    estimate:

    >>> set_analysis('energy')
    >>> calc_data_sum(0.5, 2)
    745.0
    >>> calc_data_sum(0.5, 7)
    60.0
    >>> subtract()
    >>> calc_data_sum(0.5, 2)
    730.9179738207356

    Calculate the data value in the bin containing 0.5 keV for the
    source "core":

    >>> calc_data_sum(0.5, id="core")
    0.0

    Calculate the sum of the second background component for data
    set 3 over the independent axis range 12 to 45:

    >>> calc_data_sum(12, 45, id=3, bkg_id=2)
    """
    # Default to the source data, switching to the requested
    # background component when bkg_id is given.
    data = self.get_data(id)
    if bkg_id is not None:
        data = self.get_bkg(id, bkg_id)
    return sherpa.astro.utils.calc_data_sum(data, lo, hi)
# DOC-TODO: better comparison of calc_source_sum and calc_model_sum
# needed (e.g. integration or results in PHA case?)
#
# DOC-TODO: add some form of convolution to the last example
# to show the difference between calc_model_sum and
# calc_source_sum
#
def calc_model_sum(self, lo=None, hi=None, id=None, bkg_id=None):
    """Sum up the fitted model over a pass band.

    Sum up M(E) over a range of bins, where M(E) is the per-bin model
    value after it has been convolved with any instrumental response
    (e.g. RMF and ARF or PSF). This is intended for one-dimensional
    data sets: use `calc_model_sum2d` for two-dimensional data sets.
    The `calc_source_sum` function is used to calculate the sum of the
    model before any instrumental response is applied.

    Parameters
    ----------
    lo, hi : number, optional
        If both are None or both are set then sum up over the given
        band. If only one is set then use the model value in the
        selected bin. The units for `lo` and `hi` are given by the
        current analysis setting.
    id : int or str, optional
        Use the source expression associated with this data set. If
        not given then the default identifier is used, as returned
        by `get_default_id`.
    bkg_id : int or str, optional
        If set, use the model associated with the given background
        component rather than the source model.

    Returns
    -------
    signal : number
        The model value (sum or individual bin).

    See Also
    --------
    calc_data_sum : Sum up the observed counts over a pass band.
    calc_energy_flux : Integrate the unconvolved source model over a pass band.
    calc_photon_flux : Integrate the unconvolved source model over a pass band.
    calc_source_sum : Sum up the source model over a pass band.
    set_model : Set the source model expression for a data set.

    Notes
    -----
    The units of ``lo`` and ``hi`` are determined by the analysis
    setting for the data set (e.g. `get_analysis`). The summation
    occurs over those points in the data set that lie within this
    range, not the range itself.

    Any existing filter on the data set - e.g. as created by
    `ignore` or `notice` - is ignored by this function.

    The units of the answer depend on the model components used in
    the source expression and the axis or axes of the data set.

    Examples
    --------
    Calculate the model evaluated over the full data set (all points
    or pixels of the independent axis) for the default data set,
    and compare it to the sum for the first background component:

    >>> tsrc = calc_model_sum()
    >>> tbkg = calc_model_sum(bkg_id=1)

    Sum up the model over the data range 0.5 to 2 for the default
    data set, and compared to the data over the same range:

    >>> calc_model_sum(0.5, 2)
    404.97796489631639
    >>> calc_data_sum(0.5, 2)
    745.0

    Calculate the model sum, evaluated over the range 20 to 22
    Angstroms, for the first background component of the "histate"
    data set:

    >>> set_analysis("histate", "wavelength")
    >>> calc_model_sum(20, 22, "histate", bkg_id=1)

    In the following example, a small data set is created, covering
    the axis range of -5 to 5, and an off-center gaussian model
    created (centered at 1). The model is evaluated over the full
    data grid and then a subset of pixels. As the summation is done
    over those points in the data set that lie within the requested
    range, the sum for lo=-2 to hi=1 is the same as that for
    lo=-1.5 to hi=1.5:

    >>> load_arrays('test', [-5, -2.5, 0, 2.5, 5], [2, 5, 12, 7, 3])
    >>> set_source('test', gauss1d.gmdl)
    >>> gmdl.pos = 1
    >>> gmdl.fwhm = 2.4
    >>> gmdl.ampl = 10
    >>> calc_model_sum(id='test')
    9.597121089731253
    >>> calc_model_sum(-2, 1, id='test')
    6.179472329646446
    >>> calc_model_sum(-1.5, 1.5, id='test')
    6.179472329646446
    """
    # get_model/get_bkg_model return the source expression with the
    # instrument response folded in, matching the "fitted model"
    # contract above.
    data = self.get_data(id)
    if bkg_id is not None:
        data = self.get_bkg(id, bkg_id)
        model = self.get_bkg_model(id, bkg_id)
    else:
        model = self.get_model(id)
    return sherpa.astro.utils.calc_model_sum(data, model, lo, hi)
def calc_data_sum2d(self, reg=None, id=None):
    """Sum up the data values of a 2D data set.

    This function is for two-dimensional data sets: use
    `calc_data_sum` for one-dimensional data sets.

    Parameters
    ----------
    reg : str, optional
        The spatial filter to use. The default, ``None``, is to
        use the whole data set.
    id : int or str, optional
        Use the source expression associated with this data set. If
        not given then the default identifier is used, as returned
        by `get_default_id`.

    Returns
    -------
    dsum : number
        The sum of the data values that lie within the given
        region.

    See Also
    --------
    calc_data_sum : Sum up the data values of a data set.
    calc_model_sum2d : Sum up the convolved model for a 2D data set.
    calc_source_sum2d : Sum up the unconvolved model for a 2D data set.
    set_model : Set the source model expression for a data set.

    Notes
    -----
    The coordinate system of the region filter is determined by
    the coordinate setting for the data set (e.g. `get_coord`).

    Any existing filter on the data set - e.g. as created by
    `ignore2d` or `notice2d` - is ignored by this function.

    Examples
    --------
    The following examples use the data in the default data set
    created with the following calls, which sets the y (data)
    values to be 0 to 11 in a 3 row by 4 column image:

    >>> ivals = np.arange(12)
    >>> y, x = np.mgrid[10:13, 20:24]
    >>> y = y.flatten()
    >>> x = x.flatten()
    >>> load_arrays(1, x, y, ivals, (3, 4), DataIMG)

    With no argument, the full data set is used:

    >>> calc_data_sum2d()
    66
    >>> ivals.sum()
    66

    A spatial filter can be used to restrict the region used for
    the summation:

    >>> calc_data_sum2d('circle(22,12,1)')
    36

    Apply the spatial filter to the data set labelled "a2142":

    >>> calc_data_sum2d('rotbox(4232.3,3876,300,200,43)', 'a2142')
    """
    # Delegate directly to the low-level routine; the optional
    # region string restricts the summation spatially.
    return sherpa.astro.utils.calc_data_sum2d(self.get_data(id), reg)
# DOC-TODO: show an example with psf
# and change the model (to a non-flat distribution, otherwise
# the PSF doesn't really help)
# DOC-TODO: this needs testing as doesn't seem to be working for me
def calc_model_sum2d(self, reg=None, id=None):
    """Sum up the convolved model for a 2D data set.

    This function is for two-dimensional data sets: use
    `calc_model_sum` for one-dimensional data sets.

    Parameters
    ----------
    reg : str, optional
        The spatial filter to use. The default, ``None``, is to
        use the whole data set.
    id : int or str, optional
        Use the source expression associated with this data set. If
        not given then the default identifier is used, as returned
        by `get_default_id`.

    Returns
    -------
    msum : number
        The sum of the model values, as fitted to the data, that
        lie within the given region. This includes any PSF
        included by `set_psf`.

    See Also
    --------
    calc_model_sum : Sum up the fitted model over a pass band.
    calc_source_sum2d : Sum up the unconvolved model for a 2D data set.
    set_psf : Add a PSF model to a data set.
    set_model : Set the source model expression for a data set.

    Notes
    -----
    The coordinate system of the region filter is determined by
    the coordinate setting for the data set (e.g. `get_coord`).

    Any existing filter on the data set - e.g. as created by
    `ignore2d` or `notice2d` - is ignored by this function.

    Examples
    --------
    The following examples use the data in the default data set
    created with the following calls, which sets the y (data)
    values to be 0 to 11 in a 3 row by 4 column image:

    >>> ivals = np.arange(12)
    >>> y, x = np.mgrid[10:13, 20:24]
    >>> y = y.flatten()
    >>> x = x.flatten()
    >>> load_arrays(1, x, y, ivals, (3, 4), DataIMG)
    >>> set_source(const2d.bgnd)
    >>> bgnd.c0 = 2

    With no argument, the full data set is used. Since the model
    evaluates to 2 per pixel, and there are 12 pixels in the
    data set, the result is 24:

    >>> calc_model_sum2d()
    24.0

    A spatial filter can be used to restrict the region used for
    the summation:

    >>> calc_model_sum2d('circle(22,12,1)')
    8.0

    Apply the spatial filter to the model for the data set
    labelled "a2142":

    >>> calc_model_sum2d('rotbox(4232.3,3876,300,200,43)', 'a2142')
    """
    # get_model returns the source expression with any instrument
    # model (e.g. PSF set via set_psf) applied, which is what the
    # "convolved model" contract requires.
    data = self.get_data(id)
    fitted_model = self.get_model(id)
    return sherpa.astro.utils.calc_model_sum2d(data, fitted_model, reg)
def calc_source_sum2d(self, reg=None, id=None):
    """Sum up the unconvolved model for a 2D data set.

    This function is for two-dimensional data sets: use
    `calc_source_sum` for one-dimensional data sets.

    Parameters
    ----------
    reg : str, optional
        The spatial filter to use. The default, ``None``, is to
        use the whole data set.
    id : int or str, optional
        Use the source expression associated with this data set. If
        not given then the default identifier is used, as returned
        by `get_default_id`.

    Returns
    -------
    msum : number
        The sum of the model values that lie within the given
        region. This does not include any PSF included by
        `set_psf`.

    See Also
    --------
    calc_model_sum2d : Sum up the convolved model for a 2D data set.
    calc_source_sum : Sum up the model over a pass band.
    set_psf : Add a PSF model to a data set.
    set_model : Set the source model expression for a data set.

    Notes
    -----
    The coordinate system of the region filter is determined by
    the coordinate setting for the data set (e.g. `get_coord`).

    Any existing filter on the data set - e.g. as created by
    `ignore2d` or `notice2d` - is ignored by this function.

    Examples
    --------
    The following examples use the data in the default data set
    created with the following calls, which sets the y (data)
    values to be 0 to 11 in a 3 row by 4 column image:

    >>> ivals = np.arange(12)
    >>> y, x = np.mgrid[10:13, 20:24]
    >>> y = y.flatten()
    >>> x = x.flatten()
    >>> load_arrays(1, x, y, ivals, (3, 4), DataIMG)
    >>> set_source(const2d.bgnd)
    >>> bgnd.c0 = 2

    with no argument, the full data set is used. Since the model
    evaluates to 2 per pixel, and there are 12 pixels in the
    data set, the result is 24:

    >>> calc_source_sum2d()
    24.0

    and a spatial filter can be used to restrict the region
    used for the summation:

    >>> calc_source_sum2d('circle(22,12,1)')
    8.0
    >>> calc_source_sum2d('field()-circle(22,12,1)')
    16.0

    Apply the spatial filter to the model for the data set
    labelled "a2142":

    >>> calc_source_sum2d('rotbox(4232.3,3876,300,200,43)', 'a2142')
    """
    data = self.get_data(id)
    src = self.get_source(id)
    # NOTE: the calc_model_sum2d utility is deliberately reused here
    # with the *unconvolved* source expression (get_source, no PSF
    # applied), which gives the "source sum" — presumably there is no
    # separate calc_source_sum2d routine in sherpa.astro.utils; verify
    # before "fixing" this call.
    return sherpa.astro.utils.calc_model_sum2d(data, src, reg)
def calc_source_sum(self, lo=None, hi=None, id=None, bkg_id=None):
    """Sum up the source model over a pass band.

    Sum up S(E) over a range of bins, where S(E) is the per-bin model
    value before it has been convolved with any instrumental response
    (e.g. RMF and ARF or PSF). This is intended for one-dimensional
    data sets: use `calc_source_sum2d` for two-dimensional data sets.
    The `calc_model_sum` function is used to calculate the sum of the
    model after any instrumental response is applied.

    Parameters
    ----------
    lo, hi : number, optional
        If both are None or both are set then sum up over the given
        band. If only one is set then use the model value in the
        selected bin. The units for `lo` and `hi` are given by the
        current analysis setting.
    id : int or str, optional
        Use the source expression associated with this data set. If
        not given then the default identifier is used, as returned
        by `get_default_id`.
    bkg_id : int or str, optional
        If set, use the model associated with the given background
        component rather than the source model.

    Returns
    -------
    signal : number
        The model value (sum or individual bin).

    See Also
    --------
    calc_data_sum : Sum up the observed counts over a pass band.
    calc_model_sum : Sum up the fitted model over a pass band.
    calc_energy_flux : Integrate the unconvolved source model over a pass band.
    calc_photon_flux : Integrate the unconvolved source model over a pass band.
    set_model : Set the source model expression for a data set.

    Notes
    -----
    The units of ``lo`` and ``hi`` are determined by the analysis
    setting for the data set (e.g. `get_analysis`). The summation
    occurs over those points in the data set that lie within this
    range, not the range itself.

    Any existing filter on the data set - e.g. as created by
    `ignore` or `notice` - is ignored by this function.

    The units of the answer depend on the model components used in
    the source expression and the axis or axes of the data set.

    Examples
    --------
    Calculate the model evaluated over the full data set (all points
    or pixels of the independent axis) for the default data set,
    and compare it to the sum for the first background component:

    >>> tsrc = calc_source_sum()
    >>> tbkg = calc_source_sum(bkg_id=1)

    Sum up the model over the data range 0.5 to 2 for the default
    data set:

    >>> calc_source_sum(0.5, 2)
    139.12819041922018

    Compare the output of the `calc_source_sum` and `calc_photon_flux`
    routines. A 1099-bin data space is created, with a model which has
    a value of 1 for each bin. As the bin width is constant, at 0.01,
    the integrated value, calculated by `calc_photon_flux`, is one
    hundredth the value returned by `calc_source_sum`:

    >>> dataspace1d(0.01, 11, 0.01, id="test")
    >>> set_source("test", const1d.bflat)
    >>> bflat.c0 = 1
    >>> calc_source_sum(id="test")
    1099.0
    >>> calc_photon_flux(id="test")
    10.99

    In the following example, a small data set is created, covering
    the axis range of -5 to 5, and an off-center gaussian model
    created (centered at 1). The model is evaluated over the full
    data grid and then a subset of pixels. As the summation is done
    over those points in the data set that lie within the requested
    range, the sum for lo=-2 to hi=1 is the same as that for
    lo=-1.5 to hi=1.5:

    >>> load_arrays('test', [-5, -2.5, 0, 2.5, 5], [2, 5, 12, 7, 3])
    >>> set_source('test', gauss1d.gmdl)
    >>> gmdl.pos = 1
    >>> gmdl.fwhm = 2.4
    >>> gmdl.ampl = 10
    >>> calc_source_sum(id='test')
    9.597121089731253
    >>> calc_source_sum(-2, 1, id='test')
    6.179472329646446
    >>> calc_source_sum(-1.5, 1.5, id='test')
    6.179472329646446
    """
    # get_source/get_bkg_source return the model expression without
    # the instrument response, matching the "unconvolved" contract.
    data = self.get_data(id)
    if bkg_id is not None:
        data = self.get_bkg(id, bkg_id)
        model = self.get_bkg_source(id, bkg_id)
    else:
        model = self.get_source(id)
    return sherpa.astro.utils.calc_source_sum(data, model, lo, hi)
# DOC-TODO: no reason can't k-correct wavelength range,
# but need to work out how to identify the units
def calc_kcorr(self, z, obslo, obshi, restlo=None, resthi=None,
               id=None, bkg_id=None):
    """Calculate the K correction for a model.

    The K correction ([1]_, [2]_, [3]_, [4]_) is the numeric
    factor applied to measured energy fluxes in an observed
    energy band to estimate the flux in a given rest-frame
    energy band. It accounts for the change in spectral energy
    distribution between the desired rest-frame band and the
    rest-frame band corresponding to the observed band. This is
    often used when converting a flux into a luminosity.

    Parameters
    ----------
    z : number or array, >= 0
        The redshift, or redshifts, of the source.
    obslo : number
        The minimum energy of the observed band.
    obshi : number
        The maximum energy of the observed band, which must
        be larger than ``obslo``.
    restlo : number or ``None``
        The minimum energy of the rest-frame band. If ``None`` then
        use ``obslo``.
    resthi : number or ``None``
        The maximum energy of the rest-frame band. It must be
        larger than ``restlo``. If ``None`` then use ``obshi``.
    id : int or str, optional
        Use the source expression associated with this data set. If
        not given then the default identifier is used, as returned
        by `get_default_id`.
    bkg_id : int or str, optional
        If set, use the model associated with the given background
        component rather than the source model.

    Returns
    -------
    kz : number or array of numbers

    See Also
    --------
    calc_energy_flux : Integrate the unconvolved source model over a pass band.
    dataspace1d : Create the independent axis for a 1D data set.

    Notes
    -----
    This is only defined when the analysis is in 'energy' units.

    If the model contains a redshift parameter then it should
    be set to 0, rather than the source redshift.

    If the source model is at zero redshift, the observed energy
    band is olo to ohi, and the rest frame band is rlo to rhi
    (which need not match the observed band), then the K
    correction at a redshift z can be calculated as::

        frest = calc_energy_flux(rlo, rhi)
        fobs = calc_energy_flux(olo*(1+z), ohi*(1+z))
        kz = frest / fobs

    The energy ranges used - rlo to rhi and olo*(1+z) to ohi*(1+z)
    - should be fully covered by the data grid, otherwise the flux
    calculation will be truncated at the grid boundaries, leading
    to incorrect results.

    References
    ----------
    .. [1] "The K correction", Hogg, D.W., et al.
           http://arxiv.org/abs/astro-ph/0210394

    .. [2] Appendix B of Jones et al. 1998, ApJ, vol 495,
           p. 100-114.
           http://adsabs.harvard.edu/abs/1998ApJ...495..100J

    .. [3] "K and evolutionary corrections from UV to IR",
           Poggianti, B.M., A&AS, 1997, vol 122, p. 399-407.
           http://adsabs.harvard.edu/abs/1997A%26AS..122..399P

    .. [4] "Galactic evolution and cosmology - Probing the
           cosmological deceleration parameter", Yoshii, Y. &
           Takahara, F., ApJ, 1988, vol 326, p. 1-18.
           http://adsabs.harvard.edu/abs/1988ApJ...326....1Y

    Examples
    --------
    Calculate the K correction for an X-Spec apec model, with a
    source temperature of 6 keV and abundance of 0.3 solar, for
    the energy band of 0.5 to 2 keV:

    >>> dataspace1d(0.01, 10, 0.01)
    >>> set_source(xsapec.clus)
    >>> clus.kt = 6
    >>> clus.abundanc = 0.3
    >>> calc_kcorr(0.5, 0.5, 2)
    0.82799195070436793

    Calculate the K correction for a range of redshifts (0 to 2)
    using an observed frame of 0.5 to 2 keV and a rest frame of 0.1
    to 10 keV:

    >>> dataspace1d(0.01, 11, 0.01)
    >>> zs = np.linspace(0, 2, 21)
    >>> ks = calc_kcorr(zs, 0.5, 2, restlo=0.1, resthi=10)

    Calculate the k correction for the background dataset
    bkg_id=2 for a redshift of 0.5 over the energy range
    0.5 to 2 keV with rest-frame energy limits of 2 to 10 keV:

    >>> calc_kcorr(0.5, 0.5, 2, 2, 10, bkg_id=2)
    """
    # Keep the unconditional get_data call so an invalid id fails in
    # the same way whether or not a background component is requested.
    data = self.get_data(id)
    if bkg_id is None:
        model = self.get_source(id)
    else:
        data = self.get_bkg(id, bkg_id)
        model = self.get_bkg_source(id, bkg_id)
    return sherpa.astro.utils.calc_kcorr(data, model, z, obslo, obshi,
                                         restlo, resthi)
###########################################################################
# Session Text Save Function
###########################################################################
def save_all(self, outfile=None, clobber=False):
    """Save the information about the current session to a text file.

    This differs to the `save` command in that the output is human
    readable. Three consequences are:

    1. numeric values may not be recorded to their full precision
    2. data sets are not included in the file
    3. some settings and values may not be recorded.

    Parameters
    ----------
    outfile : str or file-like, optional
        If given, the output is written to this file, and the
        `clobber` parameter controls what happens if the
        file already exists. `outfile` can be a filename string
        or a file handle (or file-like object, such as
        ``StringIO``) to write to. If not set then the standard
        output is used.
    clobber : bool, optional
        If `outfile` is a filename, then this flag controls
        whether an existing file can be overwritten (``True``)
        or if it raises an exception (``False``, the default
        setting).

    Raises
    ------
    sherpa.utils.err.IOErr
        If `outfile` already exists and `clobber` is ``False``.

    See Also
    --------
    save : Save the current Sherpa session to a file.
    restore : Load in a Sherpa session from a file.

    Notes
    -----
    This command will create a series of commands that restores
    the current Sherpa set up. It does not save the set of commands
    used. Not all Sherpa settings are saved. Items not fully restored
    include:

    - data created by calls to `load_arrays`, or changed from the
      version on disk - e.g. by calls to `sherpa.astro.ui.set_counts`.
    - any optional keywords to commands such as `load_data`
      or `load_pha`
    - user models may not be restored correctly
    - only a subset of Sherpa commands are saved.

    Examples
    --------
    Write the current Sherpa session to the screen:

    >>> save_all()

    Save the session to the file 'fit.sherpa', overwriting
    it if it already exists:

    >>> save_all('fit.sherpa', clobber=True)

    Write the contents to a StringIO object:

    >>> from io import StringIO
    >>> store = StringIO()
    >>> save_all(store)
    """
    if not isinstance(outfile, string_types):
        # outfile is either an open file-like object or None
        # (meaning standard output); clobber does not apply.
        serialize.save_all(self, sys.stdout if outfile is None
                           else outfile)
        return

    # outfile names a file on disk: honour the clobber flag before
    # (re)creating it.
    if os.path.isfile(outfile):
        if not sherpa.utils.bool_cast(clobber):
            raise IOErr('filefound', outfile)
        os.remove(outfile)
    with open(outfile, 'w') as fh:
        serialize.save_all(self, fh)
|
anetasie/sherpa
|
sherpa/astro/ui/utils.py
|
Python
|
gpl-3.0
| 574,542
|
[
"Gaussian"
] |
f4d2687b9668c344f9383da939e84b0eb482a7658be13c718dfb5e53c0beadec
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Crawler implementation for gcp resources."""
# pylint: disable=too-many-lines, no-self-use, bad-docstring-quotes
from builtins import str
from builtins import object
import ctypes
from functools import partial
import hashlib
import json
import os
from google.cloud.forseti.common.gcp_api import errors as api_errors
from google.cloud.forseti.common.util import date_time
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util import string_formats
from google.cloud.forseti.services import utils
from google.cloud.forseti.services.inventory.base.gcp import (
ResourceNotSupported)
from google.cloud.forseti.services.inventory.base import iam_helpers
LOGGER = logger.get_logger(__name__)
def size_t_hash(key):
    """Hash the key using size_t.

    Args:
        key (str): The key to hash.

    Returns:
        str: The hashed key, as an unsigned decimal string.
    """
    # Interpret the blake2b hex digest as an integer and truncate it
    # to the platform's size_t width, formatting it unsigned.
    digest = hashlib.blake2b(key.encode()).hexdigest()  # pylint: disable=no-member
    truncated = ctypes.c_size_t(int(digest, 16)).value
    return '%u' % truncated
def from_root_id(client, root_id, root=True):
    """Start the crawling from root if the root type is supported.

    Args:
        client (object): GCP API client.
        root_id (str): id of the root.
        root (bool): Set this as the root resource in the hierarchy.

    Returns:
        Resource: the root resource instance.

    Raises:
        Exception: Unsupported root id.
    """
    # Map the root id prefix to the fetcher for that resource type.
    root_map = {
        'organizations': ResourceManagerOrganization.fetch,
        'projects': ResourceManagerProject.fetch,
        'folders': ResourceManagerFolder.fetch,
    }
    for prefix, fetcher in root_map.items():
        if root_id.startswith(prefix):
            return fetcher(client, root_id, root=root)
    supported = ','.join(list(root_map.keys()))
    raise Exception(
        'Unsupported root id, must be one of {}'.format(supported))
def cached(field_name):
    """Decorator to perform per-instance caching of a method result.

    The first call stores the result on the first positional argument
    (the instance) under a private attribute; subsequent calls return
    the stored value without re-running the wrapped function.

    Args:
        field_name (str): The name of the attribute to cache.

    Returns:
        wrapper: Function wrapper to perform caching.
    """
    field_name = '__cached_{}'.format(field_name)

    def _cached(f):
        """Cache wrapper.

        Args:
            f (func): function to be decorated.

        Returns:
            wrapper: Function wrapper to perform caching.
        """
        # Preserve the wrapped function's name and docstring so the
        # decorated method remains introspectable.
        from functools import wraps

        @wraps(f)
        def wrapper(*args, **kwargs):
            """Function wrapper to perform caching.

            Args:
                *args: args to be passed to the function.
                **kwargs: kwargs to be passed to the function.

            Returns:
                object: Results of executing f.
            """
            if hasattr(args[0], field_name):
                return getattr(args[0], field_name)
            result = f(*args, **kwargs)
            setattr(args[0], field_name, result)
            return result
        return wrapper
    return _cached
class ResourceFactory(object):
    """ResourceFactory for visitor pattern."""

    def __init__(self, attributes):
        """Initialize.

        Args:
            attributes (dict): attributes for a specific type of resource.
        """
        self.attributes = attributes

    def create_new(self, data, root=False, metadata=None):
        """Create a new instance of a Resource type.

        Args:
            data (str): raw data.
            root (Resource): root of this resource.
            metadata (AssetMetadata): asset metadata.

        Returns:
            Resource: Resource instance.
        """
        # The 'cls' entry names the Resource subclass to build; every
        # attribute (including 'cls' itself) is forwarded as a keyword
        # argument, which Resource.__init__ accepts and discards via
        # **kwargs.
        resource_cls = self.attributes['cls']
        return resource_cls(data, root=root, metadata=metadata,
                            **self.attributes)
# pylint: disable=too-many-instance-attributes, too-many-public-methods
class Resource(object):
"""The base Resource class."""
def __init__(self, data, root=False,
             contains=None, metadata=None, **kwargs):
    """Initialize.

    Args:
        data (dict): raw data.
        root (Resource): the root of this crawling.
        contains (list): child types to crawl.
        metadata (AssetMetadata): Asset metadata.
        **kwargs (dict): arguments.
    """
    del kwargs  # Unused.
    self._data = data
    self._metadata = metadata
    self._root = root
    # Crawl bookkeeping, filled in during visiting.
    self._stack = None
    self._visitor = None
    self._contains = contains if contains is not None else []
    self._warning = []
    # Record when this resource was crawled.
    self._timestamp = self._utcnow()
    # Set lazily: storage id and cached full resource name.
    self._inventory_key = None
    self._full_resource_name = None
@staticmethod
def _utcnow():
    """Wrapper for datetime.datetime.now() injection.

    A seam for overriding the timestamp source (e.g. in tests).

    Returns:
        datetime: the current UTC time from the shared date_time util.
    """
    return date_time.get_utc_now_datetime()
def __delitem__(self, key):
"""Delete item.
Args:
key (str): key of this resource.
"""
self._data.pop(key, None)
def __getitem__(self, key):
"""Get Item.
Args:
key (str): key of this resource.
Returns:
str: data of this resource.
Raises:
KeyError: 'key: {}, data: {}'
"""
try:
return self._data[key]
except KeyError:
raise KeyError('key: {}, data: {}'.format(key, self._data))
def __setitem__(self, key, value):
    """Set the value of an item.

    Writes straight through to the underlying raw data dict.

    Args:
        key (str): key of this resource.
        value (str): value to set on this resource.
    """
    self._data[key] = value
def set_inventory_key(self, key):
"""Set the inventory unique id for the resource.
Args:
key (int): The unique id for the resource from the storage.
"""
self._inventory_key = key
def metadata(self):
"""Gets the asset metadata.
Returns:
AssetMetadata: Asset metadata.
"""
return self._metadata
def inventory_key(self):
"""Gets the inventory key for this resource, if set.
Returns:
int: The unique id for the resource in storage.
"""
return self._inventory_key
def get_full_resource_name(self):
"""Gets the full unique resource name for this resource.
Builds the full name on first call and caches it.
Returns:
str: The full unique name for this resource.
"""
if not self._full_resource_name:
type_name = utils.to_type_name(self.type(), self.key())
if self._root or not self.parent():
parent_full_res_name = ''
else:
parent_full_res_name = self.parent().get_full_resource_name()
self._full_resource_name = utils.to_full_resource_name(
parent_full_res_name, type_name)
return self._full_resource_name
@staticmethod
def type():
"""Get type of this resource.
Raises:
NotImplementedError: method not implemented.
"""
raise NotImplementedError()
def data(self):
"""Get data on this resource.
Returns:
dict: raw data.
"""
return self._data
def parent(self):
"""Get parent of this resource.
Returns:
Resource: parent of this resource.
"""
if self._root:
return self
try:
return self._stack[-1]
except IndexError:
return None
def key(self):
"""Get key of this resource.
Raises:
NotImplementedError: key method not implemented.
"""
raise NotImplementedError('Class: {}'.format(self.__class__.__name__))
def add_warning(self, warning):
"""Add warning on this resource.
Args:
warning (str): warning to be added.
"""
self._warning.append(str(warning))
def get_warning(self):
"""Get warning on this resource.
Returns:
str: warning message.
"""
return '\n'.join(self._warning)
# pylint: disable=broad-except
def try_accept(self, visitor, stack=None):
"""Handle exceptions on the call the accept.
Args:
visitor (object): The class implementing the visitor pattern.
stack (list): The resource stack from the root to immediate parent
of this resource.
"""
try:
self.accept(visitor, stack)
except Exception as e:
err_msg = 'Exception raised processing %s: %s' % (self, e)
LOGGER.exception(err_msg)
visitor.on_child_error(self.get_full_resource_name(), e)
def accept(self, visitor, stack=None):
"""Accept of resource in visitor pattern.
Args:
visitor (Crawler): visitor instance.
stack (list): resource hierarchy stack.
"""
skip_errors = ['Not found',
'Unknown project id',
'scheduled for deletion']
stack = [] if not stack else stack
self._stack = stack
# Skip the current resource if it's in the excluded_resources list.
excluded_resources = visitor.config.variables.get(
'excluded_resources', {})
cur_resource_repr = set()
resource_name = '{}/{}'.format(self.type(), self.key())
cur_resource_repr.add(resource_name)
if self.type() == 'project':
# Supports matching on projectNumber.
project_number = '{}/{}'.format(self.type(), self['projectNumber'])
cur_resource_repr.add(project_number)
if cur_resource_repr.intersection(excluded_resources):
return
self._visitor = visitor
visitor.visit(self)
for yielder_cls in self._contains:
yielder = yielder_cls(self, visitor.get_client())
try:
for resource in yielder.iter():
new_stack = stack + [self]
# Parallelization for resource subtrees.
if resource.should_dispatch():
callback = partial(resource.try_accept,
visitor,
new_stack)
visitor.dispatch(callback)
else:
resource.try_accept(visitor, new_stack)
except Exception as e:
# Use string phrases and not error codes since error codes
# can mean multiple things.
if (isinstance(e, api_errors.ApiExecutionError) and
any(error_str in str(e) for error_str
in skip_errors)):
pass
else:
err_msg = 'Exception raised processing %s: %s' % (self, e)
LOGGER.exception(err_msg)
self.add_warning(err_msg)
if self._warning:
visitor.on_child_error(self.get_full_resource_name(),
self.get_warning())
# pylint: enable=broad-except
@cached('iam_policy')
def get_iam_policy(self, client=None):
"""Get iam policy template.
Args:
client (object): GCP API client.
"""
del client # Unused.
return None
@cached('org_policy')
def get_org_policy(self, client=None):
"""Gets org policy template.
Args:
client (object): GCP API client.
"""
del client # Unused.
return None
@cached('access_policy')
def get_access_policy(self, client=None):
"""Gets access policy template.
Args:
client (object): GCP API client.
"""
del client # Unused.
return None
@cached('gcs_policy')
def get_gcs_policy(self, client=None):
"""Get gcs policy template.
Args:
client (object): GCP API client.
"""
del client # Unused.
return None
@cached('sql_policy')
def get_cloudsql_policy(self, client=None):
"""Get cloudsql policy template.
Args:
client (object): GCP API client.
"""
del client # Unused.
return None
@cached('dataset_policy')
def get_dataset_policy(self, client=None):
"""Get dataset policy template.
Args:
client (object): GCP API client.
"""
del client # Unused.
return None
@cached('group_members')
def get_group_members(self, client=None):
"""Get group member template.
Args:
client (object): GCP API client.
"""
del client # Unused.
return None
@cached('billing_info')
def get_billing_info(self, client=None):
"""Get billing info template.
Args:
client (object): GCP API client.
"""
del client # Unused.
return None
@cached('enabled_apis')
def get_enabled_apis(self, client=None):
"""Get enabled apis template.
Args:
client (object): GCP API client.
"""
del client # Unused.
return None
@cached('service_config')
def get_kubernetes_service_config(self, client=None):
"""Get kubernetes service config method template.
Args:
client (object): GCP API client.
"""
del client # Unused.
return None
def get_timestamp(self):
"""Template for timestamp when the resource object.
Returns:
str: a string timestamp when the resource object was created.
"""
return self._timestamp.strftime(string_formats.TIMESTAMP_UTC_OFFSET)
def stack(self):
"""Get resource hierarchy stack of this resource.
Returns:
list: resource hierarchy stack of this resource.
Raises:
Exception: 'Stack not initialized yet'.
"""
if self._stack is None:
raise Exception('Stack not initialized yet')
return self._stack
def visitor(self):
"""Get visitor on this resource.
Returns:
Crawler: visitor on this resource.
Raises:
Exception: 'Visitor not initialized yet'.
"""
if self._visitor is None:
raise Exception('Visitor not initialized yet')
return self._visitor
def should_dispatch(self):
"""Whether resources should run in parallel threads.
Returns:
bool: whether this resource should run in parallel threads.
"""
return False
def __repr__(self):
"""String Representation.
Returns:
str: Resource representation.
"""
return ('{}<data="{}", parent_resource_type="{}", '
'parent_resource_id="{}">').format(
self.__class__.__name__,
json.dumps(self._data, sort_keys=True),
self.parent().type(),
self.parent().key())
# pylint: enable=too-many-instance-attributes, too-many-public-methods
def resource_class_factory(resource_type, key_field, hash_key=False):
    """Factory function to generate Resource subclasses.

    Args:
        resource_type (str): The static resource type for this subclass.
        key_field (str): The field in the resource data to use as the
            resource unique key.
        hash_key (bool): If true, use a hash of the key field data instead
            of the value of the key field.

    Returns:
        class: A new class object.
    """

    class ResourceSubclass(Resource):
        """Subclass of Resource."""

        @staticmethod
        def type():
            """Get type of this resource.

            Returns:
                str: The static resource type for this subclass.
            """
            return resource_type

        def key(self):
            """Get key of this resource.

            Returns:
                str: key of this resource.
            """
            value = self[key_field]
            # Hash when the field alone is not globally unique so the key
            # stays compact and collision-resistant.
            return size_t_hash(value) if hash_key else value

    return ResourceSubclass
def k8_resource_class_factory(resource_type):
    """Factory function to generate Kubernetes Resource subclasses.

    Args:
        resource_type (str): The static Kubernetes resource type for this
            subclass.

    Returns:
        class: A new class object.
    """

    class ResourceSubclass(Resource):
        """Subclass of Resource."""

        @staticmethod
        def type():
            """Get type of this resource.

            Returns:
                str: The static resource type for this subclass.
            """
            return resource_type

        def key(self):
            """Get key of this resource.

            Returns:
                str: key of this resource.
            """
            # Kubernetes resources lack a globally unique ID; derive the
            # key from a size_t hash of the uid under the metadata key.
            uid = self['metadata']['uid']
            return size_t_hash(uid)

    return ResourceSubclass
# Fake composite resource class
class CompositeRootResource(resource_class_factory('composite_root', None)):
    """The Composite Root fake resource."""

    @classmethod
    def create(cls, composite_root_resources):
        """Creates a new composite root.

        Args:
            composite_root_resources (list): The list of resources to crawl
                using a composite root.

        Returns:
            CompositeRootResource: A new instance of the
                CompositeRootResource class.
        """
        # The composite root is synthetic: it only exists so that several
        # real root resources share a common parent for a single crawl.
        data = {
            'name': 'Composite Root',
            'composite_children': composite_root_resources,
        }
        return FACTORIES['composite_root'].create_new(data, root=True)

    def key(self):
        """Get key of this resource.

        Returns:
            str: key of this resource.
        """
        return 'root'
# Resource Manager resource classes
class ResourceManagerOrganization(resource_class_factory('organization', None)):
    """The Resource implementation for Organization."""

    @classmethod
    def fetch(cls, client, resource_key, root=True):
        """Get Organization.

        Saves ApiExecutionErrors as warnings.

        Args:
            client (object): GCP API client.
            resource_key (str): resource key to fetch.
            root (bool): Set this as the root resource in the hierarchy.

        Returns:
            Organization: Organization resource.
        """
        try:
            data, metadata = client.fetch_crm_organization(resource_key)
            return FACTORIES['organization'].create_new(
                data, metadata=metadata, root=root)
        except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
            # Fall back to a placeholder so one bad organization does not
            # abort the crawl; the warning is kept on the resource.
            err_msg = ('Unable to fetch Organization from API %s: %s, creating '
                       'fake resource.' % (resource_key, e))
            LOGGER.warning(err_msg)
            fake = FACTORIES['organization'].create_new(
                {'name': resource_key}, root=root)
            fake.add_warning(err_msg)
            return fake

    @cached('iam_policy')
    def get_iam_policy(self, client=None):
        """Get iam policy for this organization.

        Args:
            client (object): GCP API client.

        Returns:
            dict: organization IAM Policy.
        """
        try:
            policy, _ = client.fetch_crm_organization_iam_policy(self['name'])
            return policy
        except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
            err_msg = ('Could not get IAM policy for organization %s: %s' %
                       (self.key(), e))
            LOGGER.warning(err_msg)
            self.add_warning(err_msg)
            return None

    @cached('org_policy')
    def get_org_policy(self, client=None):
        """Gets Organization policy for this organization.

        Args:
            client (object): GCP API client.

        Returns:
            dict: Organization Policy.
        """
        try:
            # Drain the iterator so the cached value is a concrete list.
            return list(
                client.iter_crm_organization_org_policies(self['name']))
        except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
            LOGGER.warning('Could not get Org policy: %s', e)
            self.add_warning(e)
            return None

    @cached('access_policy')
    def get_access_policy(self, client=None):
        """Gets access policy for this organization.

        Args:
            client (object): GCP API client.

        Returns:
            dict: Access Policy.
        """
        try:
            # Drain the iterator so the cached value is a concrete list.
            return list(
                client.iter_crm_org_access_policies(self['name']))
        except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
            LOGGER.warning('Could not get Access Policy: %s', e)
            self.add_warning(e)
            return None

    def has_directory_resource_id(self):
        """Whether this organization has a directoryCustomerId.

        Returns:
            bool: True if the data exists, else False.
        """
        data = self._data
        return 'owner' in data and 'directoryCustomerId' in data['owner']

    def key(self):
        """Get key of this resource.

        Returns:
            str: key of this resource.
        """
        # 'organizations/<id>' -> '<id>'
        return self['name'].split('/', 1)[-1]
class ResourceManagerAccessPolicy(resource_class_factory('crm_access_policy',
                                                         None)):
    """The Resource implementation for Resource Manager Access Policy."""

    def key(self):
        """Gets key of this resource.

        Returns:
            str: key of this resource.
        """
        # The access policy 'name' is already a unique resource path.
        return self['name']
# Both classes below are keyed by their full 'name' resource path, which is
# unique across the organization, so the generated key() needs no hashing.
class ResourceManagerAccessLevel(resource_class_factory('crm_access_level',
                                                        'name')):
    """The Resource implementation for Access Level."""
class ResourceManagerServicePerimeter(resource_class_factory(
        'crm_service_perimeter', 'name')):
    """The Resource implementation for Service Perimeter."""
class ResourceManagerOrgPolicy(resource_class_factory('crm_org_policy', None)):
    """The Resource implementation for Resource Manager Organization Policy."""
    def key(self):
        """Get key of this resource.
        Returns:
            str: key of this resource
        """
        # NOTE(review): when 'constraint' is absent at the top level the raw
        # data appears to be a sequence whose first element holds the
        # constraint dict (hence self[0]) -- confirm against the producer.
        if 'constraint' not in self._data:
            # A row is retrieved for each constraint on a resource.
            unique_key = '/'.join([self.parent().type(),
                                   self.parent().key(),
                                   self[0]['constraint']])
        else:
            unique_key = '/'.join([self.parent().type(),
                                   self.parent().key(),
                                   self['constraint']])
        # Format as an unsigned size_t so the key is a stable decimal string.
        return '%u' % ctypes.c_size_t(hash(unique_key)).value
class ResourceManagerFolder(resource_class_factory('folder', None)):
    """The Resource implementation for Folder."""

    @classmethod
    def fetch(cls, client, resource_key, root=True):
        """Get Folder.

        Args:
            client (object): GCP API client.
            resource_key (str): resource key to fetch.
            root (bool): Set this as the root resource in the hierarchy.

        Returns:
            Folder: Folder resource.
        """
        try:
            data, metadata = client.fetch_crm_folder(resource_key)
            return FACTORIES['folder'].create_new(
                data, metadata=metadata, root=root)
        except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
            # Fall back to a placeholder resource so the crawl continues.
            err_msg = ('Unable to fetch Folder from API %s: %s, creating '
                       'fake resource.' % (resource_key, e))
            LOGGER.warning(err_msg)
            fake = FACTORIES['folder'].create_new({'name': resource_key},
                                                  root=root)
            fake.add_warning(err_msg)
            return fake

    def key(self):
        """Get key of this resource.

        Returns:
            str: key of this resource.
        """
        # 'folders/<id>' -> '<id>'
        return self['name'].split('/', 1)[-1]

    def should_dispatch(self):
        """Folder resources should run in parallel threads.

        Returns:
            bool: whether folder resources should run in parallel threads.
        """
        return True

    @cached('iam_policy')
    def get_iam_policy(self, client=None):
        """Get iam policy for this folder.

        Args:
            client (object): GCP API client.

        Returns:
            dict: Folder IAM Policy.
        """
        try:
            policy, _ = client.fetch_crm_folder_iam_policy(self['name'])
            return policy
        except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
            err_msg = ('Could not get IAM policy for folder %s: %s' %
                       (self.key(), e))
            LOGGER.warning(err_msg)
            self.add_warning(err_msg)
            return None

    @cached('org_policy')
    def get_org_policy(self, client=None):
        """Gets Organization policy for this folder.

        Args:
            client (object): GCP API client.

        Returns:
            dict: Folder Organization Policy.
        """
        try:
            # Drain the iterator so the cached value is a concrete list.
            return list(client.iter_crm_folder_org_policies(self['name']))
        except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
            LOGGER.warning('Could not get Org policy: %s', e)
            self.add_warning(e)
            return None
class ResourceManagerProject(resource_class_factory('project', 'projectId')):
    """The Resource implementation for Project."""
    def __init__(self, data, root=False, contains=None, **kwargs):
        """Initialize.
        Args:
            data (str): raw data.
            root (Resource): the root of this crawling.
            contains (list): child types to crawl.
            **kwargs (dict): arguments.
        """
        super(ResourceManagerProject, self).__init__(data, root, contains,
                                                     **kwargs)
        # Populated by get_enabled_apis(); None means "not yet fetched".
        self._enabled_service_names = None
    @classmethod
    def fetch(cls, client, resource_key, root=True):
        """Get Project.
        Args:
            client (object): GCP API client.
            resource_key (str): resource key to fetch.
            root (bool): Set this as the root resource in the hierarchy.
        Returns:
            Project: created project.
        """
        try:
            # resource_key looks like 'projects/<number>'; keep the number.
            project_number = resource_key.split('/', 1)[-1]
            data, metadata = client.fetch_crm_project(project_number)
            return FACTORIES['project'].create_new(
                data, metadata=metadata, root=root)
        except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
            # Fall back to a placeholder resource so the crawl continues.
            err_msg = ('Unable to fetch Project from API %s: %s, creating '
                       'fake resource.' % (resource_key, e))
            LOGGER.warning(err_msg)
            data = {'name': resource_key}
            resource = FACTORIES['project'].create_new(data, root=root)
            resource.add_warning(err_msg)
            return resource
    @cached('iam_policy')
    def get_iam_policy(self, client=None):
        """Get iam policy for this project.
        Args:
            client (object): GCP API client.
        Returns:
            dict: Project IAM Policy.
        """
        # Non-ACTIVE projects cannot be queried; cache an empty policy.
        if self.enumerable():
            try:
                data, _ = client.fetch_crm_project_iam_policy(
                    project_number=self['projectNumber'])
                return data
            except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
                err_msg = ('Could not get IAM policy for project %s: %s' %
                           (self.key(), e))
                LOGGER.warning(err_msg)
                self.add_warning(err_msg)
                return None
        return {}
    @cached('org_policy')
    def get_org_policy(self, client=None):
        """Gets Organization policy for this project.
        Args:
            client (object): GCP API client.
        Returns:
            dict: Project Organization Policy.
        """
        try:
            org_policies = []
            org_policies_iter = (
                client.iter_crm_project_org_policies(self['name']))
            for org_policy in org_policies_iter:
                org_policies.append(org_policy)
            return org_policies
        except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
            LOGGER.warning('Could not get Org policy: %s', e)
            self.add_warning(e)
            return None
    @cached('billing_info')
    def get_billing_info(self, client=None):
        """Get billing info.
        Args:
            client (object): GCP API client.
        Returns:
            dict: Project Billing Info resource.
        """
        # Non-ACTIVE projects cannot be queried; cache an empty result.
        if self.enumerable():
            try:
                data, _ = client.fetch_billing_project_info(
                    project_number=self['projectNumber'])
                return data
            except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
                err_msg = ('Could not get Billing Info for project %s: %s' %
                           (self.key(), e))
                LOGGER.warning(err_msg)
                self.add_warning(err_msg)
                return None
        return {}
    @cached('enabled_apis')
    def get_enabled_apis(self, client=None):
        """Get project enabled API services.
        Args:
            client (object): GCP API client.
        Returns:
            list: A list of ManagedService resource dicts.
        """
        enabled_apis = []
        if self.enumerable():
            try:
                enabled_apis, _ = client.fetch_services_enabled_apis(
                    project_number=self['projectNumber'])
            except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
                err_msg = ('Could not get Enabled APIs for project %s: %s' %
                           (self.key(), e))
                LOGGER.warning(err_msg)
                self.add_warning(err_msg)
        # Record the service names for fast is_api_enabled() lookups.
        self._enabled_service_names = frozenset(
            (api.get('config', {}).get('name') for api in enabled_apis))
        return enabled_apis
    def should_dispatch(self):
        """Project resources should run in parallel threads.
        Returns:
            bool: whether project resources should run in parallel threads.
        """
        return True
    def enumerable(self):
        """Check if this project is enumerable.
        Returns:
            bool: if this project is enumerable.
        """
        return self['lifecycleState'] == 'ACTIVE'
    def billing_enabled(self):
        """Check if billing is configured.
        Returns:
            bool: if billing is enabled on the project.
        """
        # The repeated get_billing_info() call is cheap: @cached stores the
        # first result on the instance.
        if self.get_billing_info():
            return self.get_billing_info().get('billingEnabled', False)
        # If status is unknown, always return True so other APIs aren't blocked.
        return True
    def is_api_enabled(self, service_name):
        """Returns True if the API service is enabled on the project.
        Args:
            service_name (str): The API service name to check.
        Returns:
            bool: whether a service api is enabled
        """
        # NOTE: an empty frozenset (nothing fetched yet, fetch failed, or no
        # APIs enabled) is falsy and falls through to the optimistic True.
        if self._enabled_service_names:
            return service_name in self._enabled_service_names
        # If status is unknown, always return True so other APIs aren't blocked.
        return True
    def bigquery_api_enabled(self):
        """Check if the bigquery api is enabled.
        Returns:
            bool: if this API service is enabled on the project.
        """
        # Bigquery API depends on billing being enabled
        return (self.billing_enabled() and
                self.is_api_enabled('bigquery-json.googleapis.com'))
    def compute_api_enabled(self):
        """Check if the compute api is enabled.
        Returns:
            bool: if this API service is enabled on the project.
        """
        # Compute API depends on billing being enabled
        return (self.billing_enabled() and
                self.is_api_enabled('compute.googleapis.com'))
    def container_api_enabled(self):
        """Check if the container api is enabled.
        Returns:
            bool: if this API service is enabled on the project.
        """
        # Container API depends on billing being enabled
        return (self.billing_enabled() and
                self.is_api_enabled('container.googleapis.com'))
    def storage_api_enabled(self):
        """whether storage api is enabled.
        Returns:
            bool: if this API service is enabled on the project.
        """
        return self.is_api_enabled('storage-component.googleapis.com')
class ResourceManagerLien(resource_class_factory('lien', None)):
    """The Resource implementation for Resource Manager Lien."""

    def key(self):
        """Get key of this resource.

        Returns:
            str: key of this resource.
        """
        # Use the trailing path segment of 'liens/<id>' as the key.
        return self['name'].rsplit('/', 1)[-1]
# AppEngine resource classes
# AppEngine 'name' fields are resource paths rather than globally unique
# IDs, so each class keys on a size_t hash of the name (hash_key=True).
class AppEngineApp(resource_class_factory('appengine_app', 'name',
                                          hash_key=True)):
    """The Resource implementation for AppEngine App."""
class AppEngineService(resource_class_factory('appengine_service', 'name',
                                              hash_key=True)):
    """The Resource implementation for AppEngine Service."""
class AppEngineVersion(resource_class_factory('appengine_version', 'name',
                                              hash_key=True)):
    """The Resource implementation for AppEngine Version."""
class AppEngineInstance(resource_class_factory('appengine_instance', 'name',
                                               hash_key=True)):
    """The Resource implementation for AppEngine Instance."""
# Bigquery resource classes
class BigqueryDataSet(resource_class_factory('dataset', 'id')):
    """The Resource implementation for Bigquery DataSet.

    The IAM policy and the legacy dataset policy are two representations of
    the same ACL, so fetching either one also derives and caches the other,
    avoiding a second API round trip.
    """
    def _set_cache(self, field_name, value):
        """Manually set a cache value if it isn't already set.
        Args:
            field_name (str): The name of the attribute to cache.
            value (str): The value to cache.
        """
        # NOTE(review): the attribute name must match the one used by the
        # @cached decorator defined elsewhere in this module -- keep in sync.
        field_name = '__cached_{}'.format(field_name)
        if not hasattr(self, field_name) or getattr(self, field_name) is None:
            setattr(self, field_name, value)
    @cached('iam_policy')
    def get_iam_policy(self, client=None):
        """IAM policy for this Dataset.
        Args:
            client (object): GCP API client.
        Returns:
            dict: Dataset Policy.
        """
        try:
            iam_policy, _ = client.fetch_bigquery_iam_policy(
                self.parent()['projectId'],
                self.parent()['projectNumber'],
                self['datasetReference']['datasetId'])
            # Derive and pre-cache the equivalent legacy dataset policy.
            dataset_policy = iam_helpers.convert_iam_to_bigquery_policy(
                iam_policy)
            self._set_cache('dataset_policy', dataset_policy)
            return iam_policy
        except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
            err_msg = ('Could not get Dataset IAM Policy for %s in project %s: '
                       '%s' % (self.key(), self.parent().key(), e))
            LOGGER.warning(err_msg)
            self.add_warning(err_msg)
            return None
    @cached('dataset_policy')
    def get_dataset_policy(self, client=None):
        """Dataset policy for this Dataset.
        Args:
            client (object): GCP API client.
        Returns:
            dict: Dataset Policy.
        """
        try:
            dataset_policy, _ = client.fetch_bigquery_dataset_policy(
                self.parent()['projectId'],
                self.parent()['projectNumber'],
                self['datasetReference']['datasetId'])
            # Derive and pre-cache the equivalent IAM policy.
            iam_policy = iam_helpers.convert_bigquery_policy_to_iam(
                dataset_policy, self.parent()['projectId'])
            self._set_cache('iam_policy', iam_policy)
            return dataset_policy
        except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
            err_msg = ('Could not get Dataset Policy for %s in project %s: '
                       '%s' % (self.key(), self.parent().key(), e))
            LOGGER.warning(err_msg)
            self.add_warning(err_msg)
            return None
# BigqueryTable resource classes
class BigqueryTable(resource_class_factory('bigquery_table', 'id')):
    """The Resource implementation for bigquery table."""
# Bigtable resource classes
# Bigtable 'name' fields are resource paths, not unique IDs, so these
# classes key on a size_t hash of the name (hash_key=True).
class BigtableCluster(resource_class_factory('bigtable_cluster', 'name',
                                             hash_key=True)):
    """The Resource implementation for Bigtable Cluster."""
class BigtableInstance(resource_class_factory('bigtable_instance', 'name',
                                              hash_key=True)):
    """The Resource implementation for Bigtable Instance."""
    @property
    def instance_id(self):
        """Get instance id of the Bigtable Instance
        Returns:
            str: id of this resource.
        """
        # Trailing segment of '.../instances/<instance_id>'.
        return self['name'].split('/')[-1]
class BigtableTable(resource_class_factory('bigtable_table', 'name',
                                           hash_key=True)):
    """The Resource implementation for Bigtable Table."""
# Billing resource classes
class BillingAccount(resource_class_factory('billing_account', None)):
    """The Resource implementation for BillingAccount."""

    def key(self):
        """Get key of this resource.

        Returns:
            str: key of this resource.
        """
        # 'billingAccounts/<id>' -> '<id>'
        return self['name'].split('/', 1)[-1]

    @cached('iam_policy')
    def get_iam_policy(self, client=None):
        """Get iam policy for this billing account.

        Args:
            client (object): GCP API client.

        Returns:
            dict: Billing Account IAM Policy.
        """
        try:
            data, _ = client.fetch_billing_account_iam_policy(self['name'])
            return data
        except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
            err_msg = ('Could not get Billing Account IAM Policy for %s: '
                       '%s' % (self.key(), e))
            LOGGER.warning(err_msg)
            self.add_warning(err_msg)
            return None
# CloudSQL resource classes
# The selfLink URL is unique but long, so the key is a size_t hash of it.
class CloudSqlInstance(resource_class_factory('cloudsqlinstance', 'selfLink',
                                              hash_key=True)):
    """The Resource implementation for CloudSQL Instance."""
# Compute Engine resource classes
# Compute resources expose a numeric, globally unique 'id' field, which is
# used directly as the resource key (no hashing needed).
class ComputeAddress(resource_class_factory('compute_address', 'id')):
    """The Resource implementation for Compute Address."""
class ComputeAutoscaler(resource_class_factory('compute_autoscaler', 'id')):
    """The Resource implementation for Compute Autoscaler."""
class ComputeBackendBucket(resource_class_factory('compute_backendbucket',
                                                  'id')):
    """The Resource implementation for Compute Backend Bucket."""
class ComputeBackendService(resource_class_factory('backendservice', 'id')):
    """The Resource implementation for Compute Backend Service."""
class ComputeDisk(resource_class_factory('disk', 'id')):
    """The Resource implementation for Compute Disk."""
class ComputeFirewall(resource_class_factory('firewall', 'id')):
    """The Resource implementation for Compute Firewall."""
class ComputeForwardingRule(resource_class_factory('forwardingrule', 'id')):
    """The Resource implementation for Compute Forwarding Rule."""
class ComputeHealthCheck(resource_class_factory('compute_healthcheck', 'id')):
    """The Resource implementation for Compute HealthCheck."""
class ComputeHttpHealthCheck(resource_class_factory('compute_httphealthcheck',
                                                    'id')):
    """The Resource implementation for Compute HTTP HealthCheck."""
class ComputeHttpsHealthCheck(resource_class_factory('compute_httpshealthcheck',
                                                     'id')):
    """The Resource implementation for Compute HTTPS HealthCheck."""
class ComputeImage(resource_class_factory('image', 'id')):
    """The Resource implementation for Compute Image."""
class ComputeInstance(resource_class_factory('instance', 'id')):
    """The Resource implementation for Compute Instance."""
class ComputeInstanceGroup(resource_class_factory('instancegroup', 'id')):
    """The Resource implementation for Compute InstanceGroup."""
class ComputeInstanceGroupManager(resource_class_factory('instancegroupmanager',
                                                         'id')):
    """The Resource implementation for Compute InstanceGroupManager."""
class ComputeInstanceTemplate(resource_class_factory('instancetemplate', 'id')):
    """The Resource implementation for Compute InstanceTemplate."""
class ComputeInterconnect(resource_class_factory('compute_interconnect', 'id')):
    """The Resource implementation for Compute Interconnect."""
class ComputeInterconnectAttachment(resource_class_factory(
        'compute_interconnect_attachment', 'id')):
    """The Resource implementation for Compute Interconnect Attachment."""
class ComputeLicense(resource_class_factory('compute_license', 'id')):
    """The Resource implementation for Compute License."""
class ComputeNetwork(resource_class_factory('network', 'id')):
    """The Resource implementation for Compute Network."""
class ComputeProject(resource_class_factory('compute_project', 'id')):
    """The Resource implementation for Compute Project."""
class ComputeRouter(resource_class_factory('compute_router', 'id')):
    """The Resource implementation for Compute Router."""
class ComputeSecurityPolicy(resource_class_factory('compute_securitypolicy',
                                                   'id')):
    """The Resource implementation for Compute SecurityPolicy."""
class ComputeSnapshot(resource_class_factory('snapshot', 'id')):
    """The Resource implementation for Compute Snapshot."""
class ComputeSslCertificate(resource_class_factory('compute_sslcertificate',
                                                   'id')):
    """The Resource implementation for Compute SSL Certificate."""
class ComputeSubnetwork(resource_class_factory('subnetwork', 'id')):
    """The Resource implementation for Compute Subnetwork."""
class ComputeTargetHttpProxy(resource_class_factory('compute_targethttpproxy',
                                                    'id')):
    """The Resource implementation for Compute TargetHttpProxy."""
class ComputeTargetHttpsProxy(resource_class_factory('compute_targethttpsproxy',
                                                     'id')):
    """The Resource implementation for Compute TargetHttpsProxy."""
class ComputeTargetInstance(resource_class_factory('compute_targetinstance',
                                                   'id')):
    """The Resource implementation for Compute TargetInstance."""
class ComputeTargetPool(resource_class_factory('compute_targetpool', 'id')):
    """The Resource implementation for Compute TargetPool."""
class ComputeTargetSslProxy(resource_class_factory('compute_targetsslproxy',
                                                   'id')):
    """The Resource implementation for Compute TargetSslProxy."""
class ComputeTargetTcpProxy(resource_class_factory('compute_targettcpproxy',
                                                   'id')):
    """The Resource implementation for Compute TargetTcpProxy."""
class ComputeTargetVpnGateway(resource_class_factory('compute_targetvpngateway',
                                                     'id')):
    """The Resource implementation for Compute TargetVpnGateway."""
class ComputeUrlMap(resource_class_factory('compute_urlmap', 'id')):
    """The Resource implementation for Compute UrlMap."""
class ComputeVpnTunnel(resource_class_factory('compute_vpntunnel', 'id')):
    """The Resource implementation for Compute VpnTunnel."""
# Cloud Dataproc resource classes
class DataprocCluster(resource_class_factory('dataproc_cluster',
                                             'clusterUuid')):
    """The Resource implementation for Dataproc Cluster."""
    @cached('iam_policy')
    def get_iam_policy(self, client=None):
        """Dataproc Cluster IAM policy.
        Args:
            client (object): GCP API client.
        Returns:
            dict: Dataproc Cluster IAM policy.
        """
        try:
            # Dataproc resource does not contain a direct reference to the
            # region name except in an embedded label.
            region = self['labels']['goog-dataproc-location']
            cluster = 'projects/{}/regions/{}/clusters/{}'.format(
                self['projectId'], region, self['clusterName'])
            data, _ = client.fetch_dataproc_cluster_iam_policy(cluster)
            return data
        except (api_errors.ApiExecutionError,
                ResourceNotSupported,
                KeyError,
                TypeError) as e:
            # KeyError: the location label is missing; TypeError: 'labels'
            # itself is absent/None -- remap to a readable message.
            if isinstance(e, TypeError):
                e = 'Cluster has no labels.'
            err_msg = ('Could not get Dataproc cluster IAM Policy for %s in '
                       'project %s: %s' % (self.key(), self.parent().key(), e))
            LOGGER.warning(err_msg)
            self.add_warning(err_msg)
            return None
# Cloud DNS resource classes
class DnsManagedZone(resource_class_factory('dns_managedzone', 'id')):
    """The Resource implementation for Cloud DNS ManagedZone."""
class DnsPolicy(resource_class_factory('dns_policy', 'id')):
    """The Resource implementation for Cloud DNS Policy."""
# IAM resource classes
class IamCuratedRole(resource_class_factory('role', 'name')):
    """The Resource implementation for IAM Curated Roles."""
    def parent(self):
        """Curated roles have no parent.
        Returns:
            NoneType: always None; curated (predefined) roles are global
                and not owned by any resource in the hierarchy.
        """
        return None
class IamRole(resource_class_factory('role', 'name')):
    """The Resource implementation for IAM Roles."""
class IamServiceAccount(resource_class_factory('serviceaccount', 'uniqueId')):
    """The Resource implementation for IAM ServiceAccount."""
    @cached('iam_policy')
    def get_iam_policy(self, client=None):
        """Service Account IAM policy for this service account.
        Args:
            client (object): GCP API client.
        Returns:
            dict: Service Account IAM policy.
        """
        try:
            data, _ = client.fetch_iam_serviceaccount_iam_policy(
                self['name'], self['uniqueId'])
            return data
        except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
            err_msg = ('Could not get Service Account IAM Policy for %s in '
                       'project %s: %s' % (self.key(), self.parent().key(), e))
            LOGGER.warning(err_msg)
            self.add_warning(err_msg)
            return None
class IamServiceAccountKey(resource_class_factory('serviceaccount_key', 'name',
                                                  hash_key=True)):
    """The Resource implementation for IAM ServiceAccountKey.

    Keyed by 'name' with hash_key=True (key presumably stored hashed --
    see resource_class_factory).
    """
# Key Management Service resource classes
class KmsCryptoKey(resource_class_factory('kms_cryptokey', 'name',
                                          hash_key=True)):
    """The Resource implementation for KMS CryptoKey."""

    @cached('iam_policy')
    def get_iam_policy(self, client=None):
        """KMS CryptoKey IAM policy.

        Args:
            client (object): GCP API client.

        Returns:
            dict: CryptoKey IAM policy, or None when the policy could not be
                fetched (a warning is recorded on the resource).
        """
        try:
            policy, _ = client.fetch_kms_cryptokey_iam_policy(self['name'])
        except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
            err_msg = ('Could not get Crypto Key IAM Policy for %s in project '
                       '%s: %s' % (self['name'], self.parent().key(), e))
            LOGGER.warning(err_msg)
            self.add_warning(err_msg)
            return None
        return policy
class KmsCryptoKeyVersion(resource_class_factory('kms_cryptokeyversion', 'name',
                                                 hash_key=True)):
    """The Resource implementation for KMS CryptoKeyVersion.

    Keyed by 'name' with hash_key=True.
    """
class KmsKeyRing(resource_class_factory('kms_keyring', 'name',
                                        hash_key=True)):
    """The Resource implementation for KMS KeyRing."""

    @cached('iam_policy')
    def get_iam_policy(self, client=None):
        """KMS Keyring IAM policy.

        Args:
            client (object): GCP API client.

        Returns:
            dict: Keyring IAM policy, or None when the policy could not be
                fetched (a warning is recorded on the resource).
        """
        try:
            policy, _ = client.fetch_kms_keyring_iam_policy(self['name'])
        except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
            err_msg = ('Could not get Key Ring IAM Policy for %s in project '
                       '%s: %s' % (self['name'], self.parent().key(), e))
            LOGGER.warning(err_msg)
            self.add_warning(err_msg)
            return None
        return policy
# Kubernetes Engine resource classes
class KubernetesCluster(resource_class_factory('kubernetes_cluster',
                                               'selfLink',
                                               hash_key=True)):
    """The Resource implementation for Kubernetes Cluster."""

    @cached('service_config')
    def get_kubernetes_service_config(self, client=None):
        """Get service config for KubernetesCluster.

        Args:
            client (object): GCP API client.

        Returns:
            dict: The cluster service config; {} when the cluster has
                neither zone nor location; None on API error (a warning is
                recorded on the resource).
        """
        try:
            config, _ = client.fetch_container_serviceconfig(
                self.parent().key(), zone=self.zone(),
                location=self.location())
            return config
        except ValueError:
            LOGGER.exception('Cluster has no zone or location: %s',
                             self._data)
            return {}
        except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
            err_msg = ('Could not get Cluster service config for %s : %s' %
                       (self['selfLink'], e))
            LOGGER.warning(err_msg)
            self.add_warning(err_msg)
            return None

    def location(self):
        """Get KubernetesCluster location.

        Returns:
            str: KubernetesCluster location, parsed out of selfLink, or None
                when selfLink is absent or has no 'locations' segment.
        """
        try:
            segments = self['selfLink'].split('/')
        except KeyError:
            segments = []
        if 'locations' in segments:
            return segments[segments.index('locations') + 1]
        LOGGER.debug('selfLink not found or contains no locations: %s',
                     self._data)
        return None

    def zone(self):
        """Get KubernetesCluster zone.

        Returns:
            str: KubernetesCluster zone, parsed out of selfLink, or None
                when selfLink is absent or has no 'zones' segment.
        """
        try:
            segments = self['selfLink'].split('/')
        except KeyError:
            segments = []
        if 'zones' in segments:
            return segments[segments.index('zones') + 1]
        LOGGER.debug('selfLink not found or contains no zones: %s',
                     self._data)
        return None
# Kubernetes resource classes
class KubernetesNode(k8_resource_class_factory('kubernetes_node')):
    """The Resource implementation for a Kubernetes Node."""
class KubernetesPod(k8_resource_class_factory('kubernetes_pod')):
    """The Resource implementation for a Kubernetes Pod."""
class KubernetesService(k8_resource_class_factory('kubernetes_service')):
    """The Resource implementation for a Kubernetes Service."""
class KubernetesNamespace(k8_resource_class_factory('kubernetes_namespace')):
    """The Resource implementation for a Kubernetes Namespace."""
class KubernetesRole(k8_resource_class_factory('kubernetes_role')):
    """The Resource implementation for a Kubernetes Role."""
class KubernetesRoleBinding(k8_resource_class_factory(
        'kubernetes_rolebinding')):
    """The Resource implementation for a Kubernetes RoleBinding."""
class KubernetesClusterRole(k8_resource_class_factory(
        'kubernetes_clusterrole')):
    """The Resource implementation for a Kubernetes ClusterRole."""
class KubernetesClusterRoleBinding(k8_resource_class_factory(
        'kubernetes_clusterrolebinding')):
    """The Resource implementation for a Kubernetes ClusterRoleBinding."""
# Stackdriver Logging resource classes
class LoggingSink(resource_class_factory('sink', None)):
    """The Resource implementation for Stackdriver Logging sink."""

    def key(self):
        """Get key of this resource.

        Sinks have no globally-unique id of their own, so the key is built
        from the parent resource's type and key plus this sink's name.

        Returns:
            str: key of this resource
        """
        parent = self.parent()
        return '/'.join(
            [parent.type(), parent.key(), self.type(), self['name']])
# GSuite resource classes
class GsuiteUser(resource_class_factory('gsuite_user', 'id')):
    """The Resource implementation for GSuite User, keyed by 'id'."""
class GsuiteGroup(resource_class_factory('gsuite_group', 'id')):
    """The Resource implementation for GSuite Group, keyed by 'id'."""
    def should_dispatch(self):
        """GSuite Groups should always dispatch to another thread.

        Returns:
            bool: Always returns True.
        """
        return True
class GsuiteGroupsSettings(resource_class_factory(
        'gsuite_groups_settings', 'email')):
    """The Resource implementation for GSuite Groups Settings, keyed by
    'email'."""
class GsuiteUserMember(resource_class_factory('gsuite_user_member', 'id')):
    """The Resource implementation for a GSuite User group member."""
class GsuiteGroupMember(resource_class_factory('gsuite_group_member', 'id')):
    """The Resource implementation for a GSuite Group group member."""
# Cloud Pub/Sub resource classes
class PubsubSubscription(resource_class_factory('pubsub_subscription', 'name',
                                                hash_key=True)):
    """The Resource implementation for PubSub Subscription."""

    @cached('iam_policy')
    def get_iam_policy(self, client=None):
        """Get IAM policy for this Pubsub Subscription.

        Args:
            client (object): GCP API client.

        Returns:
            dict: Pubsub Subscription IAM policy, or None when the policy
                could not be fetched (a warning is recorded on the resource).
        """
        try:
            policy, _ = client.fetch_pubsub_subscription_iam_policy(
                self['name'])
        except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
            err_msg = ('Could not get PubSub Subscription IAM Policy for %s in '
                       'project %s: %s' %
                       (self['name'], self.parent().key(), e))
            LOGGER.warning(err_msg)
            self.add_warning(err_msg)
            return None
        return policy
class PubsubTopic(resource_class_factory('pubsub_topic', 'name',
                                         hash_key=True)):
    """The Resource implementation for PubSub Topic."""

    @cached('iam_policy')
    def get_iam_policy(self, client=None):
        """Get IAM policy for this Pubsub Topic.

        Args:
            client (object): GCP API client.

        Returns:
            dict: Pubsub Topic IAM policy, or None when the policy could not
                be fetched (a warning is recorded on the resource).
        """
        try:
            policy, _ = client.fetch_pubsub_topic_iam_policy(self['name'])
        except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
            err_msg = ('Could not get PubSub Topic IAM Policy for %s in '
                       'project %s: %s' %
                       (self['name'], self.parent().key(), e))
            LOGGER.warning(err_msg)
            self.add_warning(err_msg)
            return None
        return policy
# Service Usage resource classes
class ServiceUsageService(resource_class_factory('service', 'name',
                                                 hash_key=True)):
    """The Resource implementation for Service Usage Service.

    Keyed by 'name' with hash_key=True.
    """
# Cloud Spanner resource classes
class SpannerDatabase(resource_class_factory('spanner_database', 'name',
                                             hash_key=True)):
    """The Resource implementation for Spanner Database.

    Keyed by 'name' with hash_key=True.
    """
class SpannerInstance(resource_class_factory('spanner_instance', 'name',
                                             hash_key=True)):
    """The Resource implementation for Spanner Instance.

    Keyed by 'name' with hash_key=True.
    """
# Cloud storage resource classes
class StorageBucket(resource_class_factory('bucket', 'id')):
    """The Resource implementation for Storage Bucket."""

    @cached('iam_policy')
    def get_iam_policy(self, client=None):
        """Get IAM policy for this Storage bucket.

        Args:
            client (object): GCP API client.

        Returns:
            dict: bucket IAM policy, or None when the policy could not be
                fetched (a warning is recorded on the resource).
        """
        try:
            policy, _ = client.fetch_storage_bucket_iam_policy(self.key())
        except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
            err_msg = ('Could not get Bucket IAM Policy for %s in project %s: '
                       '%s' % (self.key(), self.parent().key(), e))
            LOGGER.warning(err_msg)
            self.add_warning(err_msg)
            return None
        return policy

    @cached('gcs_policy')
    def get_gcs_policy(self, client=None):
        """Get Bucket Access Control policy for this storage bucket.

        Args:
            client (object): GCP API client.

        Returns:
            list: bucket access controls, or None when they could not be
                fetched (a warning is recorded on the resource).
        """
        # Full projection returns GCS policy with the resource; only fall
        # back to the API when no non-empty ACL is embedded.
        try:
            embedded_acl = self['acl']
        except KeyError:
            embedded_acl = None
        if embedded_acl:
            return embedded_acl
        try:
            acls, _ = client.fetch_storage_bucket_acls(
                self.key(),
                self.parent()['projectId'],
                self.parent()['projectNumber'])
        except (api_errors.ApiExecutionError, ResourceNotSupported) as e:
            err_msg = ('Could not get Bucket ACL Policy for %s in project %s: '
                       '%s' % (self.key(), self.parent().key(), e))
            LOGGER.warning(err_msg)
            self.add_warning(err_msg)
            return None
        return acls
class StorageObject(resource_class_factory('storage_object', 'id')):
    """The Resource implementation for Storage Object."""

    def get_gcs_policy(self, client=None):
        """Full projection returns GCS policy with the resource.

        Args:
            client (object): GCP API client.

        Returns:
            dict: Object acl, or [] when the resource carries none.
        """
        try:
            acl = self['acl']
        except KeyError:
            acl = []
        return acl
class ResourceIterator(object):
    """Template for resource iterators.

    Subclasses implement iter() to yield child resources of ``resource``
    discovered through ``client``.
    """

    def __init__(self, resource, client):
        """Initialize.

        Args:
            resource (Resource): The parent resource.
            client (object): GCP API Client.
        """
        self.client = client
        self.resource = resource

    def iter(self):
        """Resource iterator.

        Raises:
            NotImplementedError: Abstract class method not implemented.
        """
        raise NotImplementedError()
class CompositeRootIterator(ResourceIterator):
    """The resource iterator for the fake composite root resource."""

    def iter(self):
        """Creates a new resource child resource for each configured resource.

        Yields:
            Resource: resource returned from client.
        """
        for child_root_id in self.resource['composite_children']:
            yield from_root_id(self.client, child_root_id, root=False)
def resource_iter_class_factory(api_method_name,
                                resource_name,
                                api_method_arg_key=None,
                                additional_arg_keys=None,
                                resource_validation_method_name=None,
                                **kwargs):
    """Factory function to generate ResourceIterator subclasses.

    Args:
        api_method_name (str): The method to call on the API client class to
            iterate resources.
        resource_name (str): The name of the resource to create from the
            resource factory.
        api_method_arg_key (str): An optional key from the resource dict to
            lookup for the value to send to the api method.
        additional_arg_keys (list): An optional list of additional keys from the
            resource dict to lookup for the values to send to the api method.
        resource_validation_method_name (str): An optional method name to call
            to validate that the resource supports iterating resources of this
            type.
        **kwargs (dict): Additional keyword args to send to the api method.

    Returns:
        class: A new class object.
    """

    class ResourceIteratorSubclass(ResourceIterator):
        """Subclass of ResourceIterator."""

        def _passes_validation(self):
            """Run the optional resource validation check.

            Returns:
                bool: True when no check is configured, otherwise the
                    (truthy) outcome of the configured check.
            """
            if not resource_validation_method_name:
                return True
            check = getattr(self.resource, resource_validation_method_name)
            return check()

        def iter(self):
            """Resource iterator.

            Yields:
                Resource: resource returned from client.
            """
            if not self._passes_validation():
                return
            gcp = self.client
            try:
                iter_method = getattr(gcp, api_method_name)
                call_args = []
                if api_method_arg_key:
                    call_args.append(self.resource[api_method_arg_key])
                if additional_arg_keys:
                    call_args.extend(
                        self.resource[key] for key in additional_arg_keys)
                for data, metadata in iter_method(*call_args, **kwargs):
                    yield FACTORIES[resource_name].create_new(
                        data, metadata=metadata)
            except ResourceNotSupported as e:
                # API client doesn't support this resource, ignore.
                LOGGER.debug(e)

    return ResourceIteratorSubclass
class AccessLevelIterator(resource_iter_class_factory(
        api_method_name='iter_crm_organization_access_levels',
        resource_name='crm_access_level',
        api_method_arg_key='name')):
    """The Resource iterator implementation for Access Level."""
class ServicePerimeterIterator(resource_iter_class_factory(
        api_method_name='fetch_crm_organization_service_perimeter',
        resource_name='crm_service_perimeter',
        api_method_arg_key='name')):
    """The Resource iterator implementation for Service Perimeter."""
    # NOTE(review): api_method_name is a fetch_* method while the factory
    # iterates over its result -- confirm the client method yields
    # (data, metadata) pairs like the iter_* methods do.
class ResourceManagerFolderIterator(resource_iter_class_factory(
        api_method_name='iter_crm_folders',
        resource_name='folder',
        api_method_arg_key='name')):
    """The Resource iterator implementation for Resource Manager Folder.

    Yields child folders of the parent resource's 'name'.
    """
class ResourceManagerFolderOrgPolicyIterator(resource_iter_class_factory(
        api_method_name='iter_crm_folder_org_policies',
        resource_name='crm_org_policy',
        api_method_arg_key='name')):
    """The Resource iterator implementation for CRM Folder Org Policies."""
class ResourceManagerOrganizationOrgPolicyIterator(resource_iter_class_factory(
        api_method_name='iter_crm_organization_org_policies',
        resource_name='crm_org_policy',
        api_method_arg_key='name')):
    """The Resource iterator for CRM Organization Org Policies."""
# Project iterator requires looking up parent type, so cannot use class factory.
class ResourceManagerProjectIterator(ResourceIterator):
    """The Resource iterator implementation for Resource Manager Project.

    Projects need both the parent's type and id, so this iterator cannot be
    produced by the iterator class factory.
    """

    def iter(self):
        """Resource iterator.

        Yields:
            Resource: Project created
        """
        try:
            results = self.client.iter_crm_projects(
                parent_type=self.resource.type(),
                parent_id=self.resource.key())
            for data, metadata in results:
                yield FACTORIES['project'].create_new(data, metadata=metadata)
        except ResourceNotSupported as e:
            # API client doesn't support this resource, ignore.
            LOGGER.debug(e)
class ResourceManagerProjectOrgPolicyIterator(resource_iter_class_factory(
        api_method_name='iter_crm_project_org_policies',
        resource_name='crm_org_policy',
        api_method_arg_key='projectNumber')):
    """The Resource iterator implementation for CRM Project Org Policies."""
# AppEngine iterators do not support using the class factory.
class AppEngineAppIterator(ResourceIterator):
    """The Resource iterator implementation for AppEngineApp"""

    def iter(self):
        """Resource iterator.

        A project has at most one AppEngine app, so this fetches rather than
        lists; nothing is yielded when the project is not enumerable or has
        no app.

        Yields:
            Resource: AppEngineApp created
        """
        if not self.resource.enumerable():
            return
        try:
            data, metadata = self.client.fetch_gae_app(
                project_id=self.resource['projectId'])
            if data:
                yield FACTORIES['appengine_app'].create_new(
                    data, metadata=metadata)
        except ResourceNotSupported as e:
            # API client doesn't support this resource, ignore.
            LOGGER.debug(e)
class AppEngineServiceIterator(ResourceIterator):
    """The Resource iterator implementation for AppEngineService"""

    def iter(self):
        """Resource iterator.

        Yields:
            Resource: AppEngineService created
        """
        factory = FACTORIES['appengine_service']
        try:
            for data, metadata in self.client.iter_gae_services(
                    project_id=self.resource['id']):
                yield factory.create_new(data, metadata=metadata)
        except ResourceNotSupported as e:
            # API client doesn't support this resource, ignore.
            LOGGER.debug(e)
class AppEngineVersionIterator(ResourceIterator):
    """The Resource iterator implementation for AppEngineVersion"""

    def iter(self):
        """Resource iterator.

        Yields:
            Resource: AppEngineVersion created
        """
        service = self.resource
        try:
            for data, metadata in self.client.iter_gae_versions(
                    project_id=service.parent()['id'],
                    service_id=service['id']):
                yield FACTORIES['appengine_version'].create_new(
                    data, metadata=metadata)
        except ResourceNotSupported as e:
            # API client doesn't support this resource, ignore.
            LOGGER.debug(e)
class AppEngineInstanceIterator(ResourceIterator):
    """The Resource iterator implementation for AppEngineInstance"""

    def iter(self):
        """Resource iterator.

        Yields:
            Resource: AppEngineInstance created
        """
        version = self.resource
        service = version.parent()
        try:
            for data, metadata in self.client.iter_gae_instances(
                    project_id=service.parent()['id'],
                    service_id=service['id'],
                    version_id=version['id']):
                yield FACTORIES['appengine_instance'].create_new(
                    data, metadata=metadata)
        except ResourceNotSupported as e:
            # API client doesn't support this resource, ignore.
            LOGGER.debug(e)
class BigqueryDataSetIterator(resource_iter_class_factory(
        api_method_name='iter_bigquery_datasets',
        resource_name='bigquery_dataset',
        api_method_arg_key='projectNumber')):
    """The Resource iterator implementation for Bigquery Dataset."""
class BigqueryTableIterator(resource_iter_class_factory(
        api_method_name='iter_bigquery_tables',
        resource_name='bigquery_table',
        api_method_arg_key='datasetReference')):
    """The Resource iterator implementation for Bigquery Table."""
class BigtableClusterIterator(ResourceIterator):
    """The Resource iterator implementation for Bigtable Cluster"""

    def iter(self):
        """Resource iterator.

        Yields:
            Resource: BigtableCluster created
        """
        instance_id = getattr(self.resource, 'instance_id', '')
        if not instance_id:
            # The parent instance resource did not expose an instance id.
            return
        try:
            for data, metadata in self.client.iter_bigtable_clusters(
                    project_id=self.resource.parent()['projectId'],
                    instance_id=instance_id):
                yield FACTORIES['bigtable_cluster'].create_new(
                    data, metadata=metadata)
        except ResourceNotSupported as e:
            # API client doesn't support this resource, ignore.
            LOGGER.debug(e)
class BigtableInstanceIterator(resource_iter_class_factory(
        api_method_name='iter_bigtable_instances',
        resource_name='bigtable_instance',
        api_method_arg_key='projectNumber')):
    """The Resource iterator implementation for Bigtable Instance."""
class BigtableTableIterator(ResourceIterator):
    """The Resource iterator implementation for Bigtable Table"""

    def iter(self):
        """Resource iterator.

        Yields:
            Resource: BigtableTable created
        """
        instance_id = getattr(self.resource, 'instance_id', '')
        if not instance_id:
            # The parent instance resource did not expose an instance id.
            return
        try:
            for data, metadata in self.client.iter_bigtable_tables(
                    project_id=self.resource.parent()['projectId'],
                    instance_id=instance_id):
                yield FACTORIES['bigtable_table'].create_new(
                    data, metadata=metadata)
        except ResourceNotSupported as e:
            # API client doesn't support this resource, ignore.
            LOGGER.debug(e)
class BillingAccountIterator(resource_iter_class_factory(
        api_method_name='iter_billing_accounts',
        resource_name='billing_account')):
    """The Resource iterator implementation for Billing Account.

    Takes no argument from the parent resource.
    """
class ResourceManagerOrganizationAccessPolicyIterator(
        resource_iter_class_factory(
            api_method_name='iter_crm_org_access_policies',
            resource_name='crm_access_policy',
            api_method_arg_key='name')):
    """The Resource iterator implementation for Access Policy."""
class CloudSqlInstanceIterator(resource_iter_class_factory(
        api_method_name='iter_cloudsql_instances',
        resource_name='cloudsql_instance',
        api_method_arg_key='projectId',
        additional_arg_keys=['projectNumber'],
        resource_validation_method_name='enumerable')):
    """The Resource iterator implementation for CloudSQL Instance.

    Skipped when the parent project is not enumerable.
    """
def compute_iter_class_factory(api_method_name, resource_name):
    """Factory function to generate ResourceIterator subclasses for Compute.

    Thin wrapper over resource_iter_class_factory that wires in the
    arguments shared by all Compute iterators: the parent project's
    projectNumber is passed to the API method, and iteration is skipped
    unless the project reports compute_api_enabled().

    Args:
        api_method_name (str): The method to call on the API client class to
            iterate resources.
        resource_name (str): The name of the resource to create from the
            resource factory.

    Returns:
        class: A new class object.
    """
    return resource_iter_class_factory(
        api_method_name=api_method_name,
        resource_name=resource_name,
        api_method_arg_key='projectNumber',
        resource_validation_method_name='compute_api_enabled')
class ComputeAddressIterator(compute_iter_class_factory(
        api_method_name='iter_compute_address',
        resource_name='compute_address')):
    """The Resource iterator implementation for Compute Address."""
    # Skipped unless the parent project reports compute_api_enabled().
class ComputeAutoscalerIterator(compute_iter_class_factory(
        api_method_name='iter_compute_autoscalers',
        resource_name='compute_autoscaler')):
    """The Resource iterator implementation for Compute Autoscaler."""
class ComputeBackendBucketIterator(compute_iter_class_factory(
        api_method_name='iter_compute_backendbuckets',
        resource_name='compute_backendbucket')):
    """The Resource iterator implementation for Compute BackendBucket."""
class ComputeBackendServiceIterator(compute_iter_class_factory(
        api_method_name='iter_compute_backendservices',
        resource_name='compute_backendservice')):
    """The Resource iterator implementation for Compute BackendService."""
class ComputeDiskIterator(compute_iter_class_factory(
        api_method_name='iter_compute_disks',
        resource_name='compute_disk')):
    """The Resource iterator implementation for Compute Disk."""
class ComputeFirewallIterator(compute_iter_class_factory(
        api_method_name='iter_compute_firewalls',
        resource_name='compute_firewall')):
    """The Resource iterator implementation for Compute Firewall."""
class ComputeForwardingRuleIterator(compute_iter_class_factory(
        api_method_name='iter_compute_forwardingrules',
        resource_name='compute_forwardingrule')):
    """The Resource iterator implementation for Compute ForwardingRule."""
class ComputeHealthCheckIterator(compute_iter_class_factory(
        api_method_name='iter_compute_healthchecks',
        resource_name='compute_healthcheck')):
    """The Resource iterator implementation for Compute HealthCheck."""
class ComputeHttpHealthCheckIterator(compute_iter_class_factory(
        api_method_name='iter_compute_httphealthchecks',
        resource_name='compute_httphealthcheck')):
    """The Resource iterator implementation for Compute HttpHealthCheck."""
class ComputeHttpsHealthCheckIterator(compute_iter_class_factory(
        api_method_name='iter_compute_httpshealthchecks',
        resource_name='compute_httpshealthcheck')):
    """The Resource iterator implementation for Compute HttpsHealthCheck."""
class ComputeImageIterator(compute_iter_class_factory(
        api_method_name='iter_compute_images',
        resource_name='compute_image')):
    """The Resource iterator implementation for Compute Image."""
# TODO: Refactor IAP scanner to not expect additional data to be included
# with the instancegroup resource.
class ComputeInstanceGroupIterator(ResourceIterator):
    """The Resource iterator implementation for Compute InstanceGroup."""

    def iter(self):
        """Compute InstanceGroup iterator.

        Yields:
            Resource: Compute InstanceGroup resource.
        """
        if not self.resource.compute_api_enabled():
            return
        gcp = self.client
        project_number = self.resource['projectNumber']
        try:
            for data, metadata in gcp.iter_compute_instancegroups(
                    project_number):
                # IAP Scanner expects instance URLs to be included with the
                # instance groups, so fetch and attach them here.
                try:
                    instance_urls, _ = gcp.fetch_compute_ig_instances(
                        project_number,
                        data['name'],
                        zone=os.path.basename(data.get('zone', '')),
                        region=os.path.basename(data.get('region', ''))
                    )
                    data['instance_urls'] = instance_urls
                except ResourceNotSupported as e:
                    # API client doesn't support this resource, ignore.
                    LOGGER.debug(e)
                yield FACTORIES['compute_instancegroup'].create_new(
                    data, metadata=metadata)
        except ResourceNotSupported as e:
            # API client doesn't support this resource, ignore.
            LOGGER.debug(e)
class ComputeInstanceGroupManagerIterator(compute_iter_class_factory(
        api_method_name='iter_compute_ig_managers',
        resource_name='compute_instancegroupmanager')):
    """The Resource iterator implementation for Compute InstanceGroupManager."""
class ComputeInstanceIterator(compute_iter_class_factory(
        api_method_name='iter_compute_instances',
        resource_name='compute_instance')):
    """The Resource iterator implementation for Compute Instance."""
class ComputeInstanceTemplateIterator(compute_iter_class_factory(
        api_method_name='iter_compute_instancetemplates',
        resource_name='compute_instancetemplate')):
    """The Resource iterator implementation for Compute InstanceTemplate."""
class ComputeInterconnectIterator(compute_iter_class_factory(
        api_method_name='iter_compute_interconnects',
        resource_name='compute_interconnect')):
    """The Resource iterator implementation for Interconnect."""
class ComputeInterconnectAttachmentIterator(compute_iter_class_factory(
        api_method_name='iter_compute_interconnect_attachments',
        resource_name='compute_interconnect_attachment')):
    """The Resource iterator implementation for InterconnectAttachment."""
class ComputeLicenseIterator(compute_iter_class_factory(
        api_method_name='iter_compute_licenses',
        resource_name='compute_license')):
    """The Resource iterator implementation for Compute License."""
class ComputeNetworkIterator(compute_iter_class_factory(
        api_method_name='iter_compute_networks',
        resource_name='compute_network')):
    """The Resource iterator implementation for Compute Network."""
class ComputeProjectIterator(compute_iter_class_factory(
        api_method_name='iter_compute_project',
        resource_name='compute_project')):
    """The Resource iterator implementation for Compute Project."""
class ComputeRouterIterator(compute_iter_class_factory(
        api_method_name='iter_compute_routers',
        resource_name='compute_router')):
    """The Resource iterator implementation for Compute Router."""
class ComputeSecurityPolicyIterator(compute_iter_class_factory(
        api_method_name='iter_compute_securitypolicies',
        resource_name='compute_securitypolicy')):
    """The Resource iterator implementation for Compute SecurityPolicy."""
class ComputeSnapshotIterator(compute_iter_class_factory(
        api_method_name='iter_compute_snapshots',
        resource_name='compute_snapshot')):
    """The Resource iterator implementation for Compute Snapshot."""
class ComputeSslCertificateIterator(compute_iter_class_factory(
        api_method_name='iter_compute_sslcertificates',
        resource_name='compute_sslcertificate')):
    """The Resource iterator implementation for Compute SSL Certificate."""
class ComputeSubnetworkIterator(compute_iter_class_factory(
        api_method_name='iter_compute_subnetworks',
        resource_name='compute_subnetwork')):
    """The Resource iterator implementation for Compute Subnetwork."""
class ComputeTargetHttpProxyIterator(compute_iter_class_factory(
        api_method_name='iter_compute_targethttpproxies',
        resource_name='compute_targethttpproxy')):
    """The Resource iterator implementation for Compute TargetHttpProxy."""
class ComputeTargetHttpsProxyIterator(compute_iter_class_factory(
        api_method_name='iter_compute_targethttpsproxies',
        resource_name='compute_targethttpsproxy')):
    """The Resource iterator implementation for Compute TargetHttpsProxy."""
class ComputeTargetInstanceIterator(compute_iter_class_factory(
        api_method_name='iter_compute_targetinstances',
        resource_name='compute_targetinstance')):
    """The Resource iterator implementation for Compute TargetInstance."""
class ComputeTargetPoolIterator(compute_iter_class_factory(
        api_method_name='iter_compute_targetpools',
        resource_name='compute_targetpool')):
    """The Resource iterator implementation for Compute TargetPool."""
class ComputeTargetSslProxyIterator(compute_iter_class_factory(
        api_method_name='iter_compute_targetsslproxies',
        resource_name='compute_targetsslproxy')):
    """The Resource iterator implementation for Compute TargetSslProxy."""
class ComputeTargetTcpProxyIterator(compute_iter_class_factory(
        api_method_name='iter_compute_targettcpproxies',
        resource_name='compute_targettcpproxy')):
    """The Resource iterator implementation for Compute TargetTcpProxy."""
class ComputeTargetVpnGatewayIterator(compute_iter_class_factory(
        api_method_name='iter_compute_targetvpngateways',
        resource_name='compute_targetvpngateway')):
    """The Resource iterator implementation for Compute TargetVpnGateway."""
class ComputeUrlMapIterator(compute_iter_class_factory(
        api_method_name='iter_compute_urlmaps',
        resource_name='compute_urlmap')):
    """The Resource iterator implementation for Compute UrlMap."""
class ComputeVpnTunnelIterator(compute_iter_class_factory(
        api_method_name='iter_compute_vpntunnels',
        resource_name='compute_vpntunnel')):
    """The Resource iterator implementation for Compute VpnTunnel."""
class DataprocClusterIterator(resource_iter_class_factory(
        api_method_name='iter_dataproc_clusters',
        resource_name='dataproc_cluster',
        api_method_arg_key='projectId',
        resource_validation_method_name='enumerable')):
    """The Resource iterator implementation for Cloud Dataproc Cluster.

    Skipped when the parent project is not enumerable.
    """
class DnsManagedZoneIterator(resource_iter_class_factory(
        api_method_name='iter_dns_managedzones',
        resource_name='dns_managedzone',
        api_method_arg_key='projectNumber',
        resource_validation_method_name='enumerable')):
    """The Resource iterator implementation for Cloud DNS ManagedZone.

    Skipped when the parent project is not enumerable.
    """
class DnsPolicyIterator(resource_iter_class_factory(
        api_method_name='iter_dns_policies',
        resource_name='dns_policy',
        api_method_arg_key='projectNumber',
        resource_validation_method_name='enumerable')):
    """The Resource iterator implementation for Cloud DNS Policy.

    Skipped when the parent project is not enumerable.
    """
# GSuite iterators do not support using the class factory.
class GsuiteGroupIterator(ResourceIterator):
    """The Resource iterator implementation for Gsuite Group"""

    def iter(self):
        """Resource iterator.

        Yields:
            Resource: GsuiteGroup created
        """
        if not self.resource.has_directory_resource_id():
            return
        customer_id = self.resource['owner']['directoryCustomerId']
        try:
            for data, _ in self.client.iter_gsuite_groups(customer_id):
                yield FACTORIES['gsuite_group'].create_new(data)
        except ResourceNotSupported as e:
            # API client doesn't support this resource, ignore.
            LOGGER.debug(e)
class GsuiteMemberIterator(ResourceIterator):
    """The Resource iterator implementation for Gsuite Member"""

    def iter(self):
        """Resource iterator.

        Members whose type is neither USER nor GROUP are skipped.

        Yields:
            Resource: GsuiteUserMember or GsuiteGroupMember created
        """
        member_factories = {
            'USER': FACTORIES['gsuite_user_member'],
            'GROUP': FACTORIES['gsuite_group_member'],
        }
        try:
            for data, _ in self.client.iter_gsuite_group_members(
                    self.resource['id']):
                factory = member_factories.get(data['type'])
                if factory:
                    yield factory.create_new(data)
        except ResourceNotSupported as e:
            # API client doesn't support this resource, ignore.
            LOGGER.debug(e)
class GsuiteUserIterator(ResourceIterator):
    """The Resource iterator implementation for Gsuite User"""

    def iter(self):
        """Resource iterator.

        Yields:
            Resource: GsuiteUser created
        """
        if not self.resource.has_directory_resource_id():
            return
        customer_id = self.resource['owner']['directoryCustomerId']
        try:
            for data, _ in self.client.iter_gsuite_users(customer_id):
                yield FACTORIES['gsuite_user'].create_new(data)
        except ResourceNotSupported as e:
            # API client doesn't support this resource, ignore.
            LOGGER.debug(e)
class GsuiteGroupsSettingsIterator(ResourceIterator):
    """The Resource iterator implementation for Gsuite Group Settings"""
    def iter(self):
        """Resource iterator.

        Yields at most one settings resource, for this group's email.

        Yields:
            Resource: GsuiteGroupsSettings created
        """
        gsuite = self.client
        try:
            # NOTE(review): unlike the other client calls, this fetch returns
            # a single value rather than a (data, metadata) pair -- confirm
            # against the client implementation.
            data = gsuite.fetch_gsuite_groups_settings(self.resource['email'])
            yield FACTORIES['gsuite_groups_settings'].create_new(data)
        except ResourceNotSupported as e:
            # API client doesn't support this resource, ignore.
            LOGGER.debug(e)
class IamOrganizationCuratedRoleIterator(resource_iter_class_factory(
        api_method_name='iter_iam_curated_roles',
        resource_name='iam_curated_role')):
    """The Resource iterator implementation for Organization Curated Role.

    Takes no argument from the parent resource.
    """
class IamOrganizationRoleIterator(resource_iter_class_factory(
        api_method_name='iter_iam_organization_roles',
        resource_name='iam_role',
        api_method_arg_key='name')):
    """The Resource iterator implementation for IAM Organization Role."""
class IamProjectRoleIterator(resource_iter_class_factory(
        api_method_name='iter_iam_project_roles',
        resource_name='iam_role',
        api_method_arg_key='projectId',
        additional_arg_keys=['projectNumber'],
        resource_validation_method_name='enumerable')):
    """The Resource iterator implementation for IAM Project Role.

    Skipped when the parent project is not enumerable.
    """
class IamServiceAccountIterator(resource_iter_class_factory(
        api_method_name='iter_iam_serviceaccounts',
        resource_name='iam_serviceaccount',
        api_method_arg_key='projectId',
        additional_arg_keys=['projectNumber'],
        resource_validation_method_name='enumerable')):
    """The Resource iterator implementation for IAM ServiceAccount.

    Skipped when the parent project is not enumerable.
    """
class IamServiceAccountKeyIterator(resource_iter_class_factory(
        api_method_name='iter_iam_serviceaccount_keys',
        resource_name='iam_serviceaccount_key',
        api_method_arg_key='projectId',
        additional_arg_keys=['uniqueId'])):
    """The Resource iterator implementation for IAM ServiceAccount Key."""
class KmsKeyRingIterator(resource_iter_class_factory(
        api_method_name='iter_kms_keyrings',
        resource_name='kms_keyring',
        api_method_arg_key='projectId',
        resource_validation_method_name='enumerable')):
    """The Resource iterator implementation for KMS KeyRing.

    Skipped when the parent project is not enumerable.
    """
class KmsCryptoKeyIterator(resource_iter_class_factory(
        api_method_name='iter_kms_cryptokeys',
        resource_name='kms_cryptokey',
        api_method_arg_key='name')):
    """The Resource iterator implementation for KMS CryptoKey."""
# Factory-generated iterator: versions of a crypto key (parent 'name').
class KmsCryptoKeyVersionIterator(resource_iter_class_factory(
        api_method_name='iter_kms_cryptokeyversions',
        resource_name='kms_cryptokeyversion',
        api_method_arg_key='name')):
    """The Resource iterator implementation for KMS CryptoKeyVersion."""
# Factory-generated iterator: GKE clusters of a project; gated on the
# project having the Container API enabled.
class KubernetesClusterIterator(resource_iter_class_factory(
        api_method_name='iter_container_clusters',
        resource_name='kubernetes_cluster',
        api_method_arg_key='projectNumber',
        resource_validation_method_name='container_api_enabled')):
    """The Resource iterator implementation for Kubernetes Cluster."""
class KubernetesNodeIterator(ResourceIterator):
    """The Resource iterator implementation for KubernetesNode"""

    def iter(self):
        """Iterate the nodes of the parent Kubernetes cluster.

        Yields:
            Resource: KubernetesNode resources created from the API data.
        """
        gcp = self.client
        try:
            # self.resource is the kubernetes_cluster; its parent (per
            # FACTORIES) is the project, which supplies the projectId.
            for data, metadata in gcp.iter_kubernetes_nodes(
                    project_id=self.resource.parent()['projectId'],
                    zone=self.resource['zone'],
                    cluster=self.resource['name']):
                yield FACTORIES['kubernetes_node'].create_new(
                    data, metadata=metadata)
        except ResourceNotSupported as e:
            # API client doesn't support this resource, ignore.
            LOGGER.debug(e)
class KubernetesPodIterator(ResourceIterator):
    """The Resource iterator implementation for KubernetesPod"""

    def iter(self):
        """Iterate the pods of the parent Kubernetes namespace.

        Yields:
            Resource: KubernetesPod resources created from the API data.
        """
        gcp = self.client
        try:
            # self.resource is the kubernetes_namespace; walk up two
            # levels (namespace -> cluster -> project) for the projectId.
            for data, metadata in gcp.iter_kubernetes_pods(
                    project_id=self.resource.parent().parent()['projectId'],
                    zone=self.resource.parent()['zone'],
                    cluster=self.resource.parent()['name'],
                    namespace=self.resource['metadata']['name']):
                yield FACTORIES['kubernetes_pod'].create_new(
                    data, metadata=metadata)
        except ResourceNotSupported as e:
            # API client doesn't support this resource, ignore.
            LOGGER.debug(e)
class KubernetesServiceIterator(ResourceIterator):
    """The Resource iterator implementation for Kubernetes Service"""

    def iter(self):
        """Iterate the services of the parent Kubernetes namespace.

        Yields:
            Resource: KubernetesService resources created from the API data.
        """
        gcp = self.client
        try:
            # self.resource is the kubernetes_namespace; walk up two
            # levels (namespace -> cluster -> project) for the projectId.
            for data, metadata in gcp.iter_kubernetes_services(
                    project_id=self.resource.parent().parent()['projectId'],
                    zone=self.resource.parent()['zone'],
                    cluster=self.resource.parent()['name'],
                    namespace=self.resource['metadata']['name']):
                yield FACTORIES['kubernetes_service'].create_new(
                    data, metadata=metadata)
        except ResourceNotSupported as e:
            # API client doesn't support this resource, ignore.
            LOGGER.debug(e)
class KubernetesNamespaceIterator(ResourceIterator):
    """The Resource iterator implementation for KubernetesNamespace"""

    def iter(self):
        """Iterate the namespaces of the parent Kubernetes cluster.

        Yields:
            Resource: KubernetesNamespace resources created from the API data.
        """
        gcp = self.client
        try:
            # self.resource is the kubernetes_cluster; its parent is the
            # project, which supplies the projectId.
            for data, metadata in gcp.iter_kubernetes_namespaces(
                    project_id=self.resource.parent()['projectId'],
                    zone=self.resource['zone'],
                    cluster=self.resource['name']):
                yield FACTORIES['kubernetes_namespace'].create_new(
                    data, metadata=metadata)
        except ResourceNotSupported as e:
            # API client doesn't support this resource, ignore.
            LOGGER.debug(e)
class KubernetesRoleIterator(ResourceIterator):
    """The Resource iterator implementation for KubernetesRole"""

    def iter(self):
        """Iterate the RBAC roles of the parent Kubernetes namespace.

        Yields:
            Resource: KubernetesRole resources created from the API data.
        """
        gcp = self.client
        try:
            # self.resource is the kubernetes_namespace; walk up two
            # levels (namespace -> cluster -> project) for the projectId.
            for data, metadata in gcp.iter_kubernetes_roles(
                    project_id=self.resource.parent().parent()['projectId'],
                    zone=self.resource.parent()['zone'],
                    cluster=self.resource.parent()['name'],
                    namespace=self.resource['metadata']['name']):
                yield FACTORIES['kubernetes_role'].create_new(
                    data, metadata=metadata)
        except ResourceNotSupported as e:
            # API client doesn't support this resource, ignore.
            LOGGER.debug(e)
class KubernetesRoleBindingIterator(ResourceIterator):
    """The Resource iterator implementation for KubernetesRoleBinding"""

    def iter(self):
        """Iterate the RBAC role bindings of the parent namespace.

        Yields:
            Resource: KubernetesRoleBinding resources created from the
                API data.
        """
        gcp = self.client
        try:
            # self.resource is the kubernetes_namespace; walk up two
            # levels (namespace -> cluster -> project) for the projectId.
            for data, metadata in gcp.iter_kubernetes_rolebindings(
                    project_id=self.resource.parent().parent()['projectId'],
                    zone=self.resource.parent()['zone'],
                    cluster=self.resource.parent()['name'],
                    namespace=self.resource['metadata']['name']):
                yield FACTORIES['kubernetes_rolebinding'].create_new(
                    data, metadata=metadata)
        except ResourceNotSupported as e:
            # API client doesn't support this resource, ignore.
            LOGGER.debug(e)
class KubernetesClusterRoleIterator(ResourceIterator):
    """The Resource iterator implementation for KubernetesClusterRole"""

    def iter(self):
        """Iterate the cluster-scoped RBAC roles of the parent cluster.

        Yields:
            Resource: KubernetesClusterRole resources created from the
                API data.
        """
        gcp = self.client
        try:
            # self.resource is the kubernetes_cluster; its parent is the
            # project, which supplies the projectId.
            for data, metadata in gcp.iter_kubernetes_clusterroles(
                    project_id=self.resource.parent()['projectId'],
                    zone=self.resource['zone'],
                    cluster=self.resource['name']):
                yield FACTORIES['kubernetes_clusterrole'].create_new(
                    data, metadata=metadata)
        except ResourceNotSupported as e:
            # API client doesn't support this resource, ignore.
            LOGGER.debug(e)
class KubernetesClusterRoleBindingIterator(ResourceIterator):
    """The Resource iterator implementation for KubernetesClusterRoleBinding"""

    def iter(self):
        """Iterate the cluster-scoped RBAC role bindings of the parent
        cluster.

        Yields:
            Resource: KubernetesClusterRoleBinding resources created from
                the API data.
        """
        gcp = self.client
        try:
            # self.resource is the kubernetes_cluster; its parent is the
            # project, which supplies the projectId.
            for data, metadata in gcp.iter_kubernetes_clusterrolebindings(
                    project_id=self.resource.parent()['projectId'],
                    zone=self.resource['zone'],
                    cluster=self.resource['name']):
                yield FACTORIES['kubernetes_clusterrolebinding'].create_new(
                    data, metadata=metadata)
        except ResourceNotSupported as e:
            # API client doesn't support this resource, ignore.
            LOGGER.debug(e)
# Factory-generated iterator: Stackdriver sinks of a billing account.
class LoggingBillingAccountSinkIterator(resource_iter_class_factory(
        api_method_name='iter_stackdriver_billing_account_sinks',
        resource_name='logging_sink',
        api_method_arg_key='name')):
    """The Resource iterator implementation for Logging Billing Account Sink."""
# Factory-generated iterator: Stackdriver sinks of a folder.
class LoggingFolderSinkIterator(resource_iter_class_factory(
        api_method_name='iter_stackdriver_folder_sinks',
        resource_name='logging_sink',
        api_method_arg_key='name')):
    """The Resource iterator implementation for Logging Folder Sink."""
# Factory-generated iterator: Stackdriver sinks of an organization.
class LoggingOrganizationSinkIterator(resource_iter_class_factory(
        api_method_name='iter_stackdriver_organization_sinks',
        resource_name='logging_sink',
        api_method_arg_key='name')):
    """The Resource iterator implementation for Logging Organization Sink"""
# Factory-generated iterator: Stackdriver sinks of an enumerable project.
class LoggingProjectSinkIterator(resource_iter_class_factory(
        api_method_name='iter_stackdriver_project_sinks',
        resource_name='logging_sink',
        api_method_arg_key='projectNumber',
        resource_validation_method_name='enumerable')):
    """The Resource iterator implementation for Logging Project Sink."""
# Factory-generated iterator: Pub/Sub subscriptions of an enumerable project.
class PubsubSubscriptionIterator(resource_iter_class_factory(
        api_method_name='iter_pubsub_subscriptions',
        resource_name='pubsub_subscription',
        api_method_arg_key='projectId',
        additional_arg_keys=['projectNumber'],
        resource_validation_method_name='enumerable')):
    """The Resource iterator implementation for PubSub Subscription."""
# Factory-generated iterator: Pub/Sub topics of an enumerable project.
class PubsubTopicIterator(resource_iter_class_factory(
        api_method_name='iter_pubsub_topics',
        resource_name='pubsub_topic',
        api_method_arg_key='projectId',
        additional_arg_keys=['projectNumber'],
        resource_validation_method_name='enumerable')):
    """The Resource iterator implementation for PubSub Topic."""
# Factory-generated iterator: CRM liens of an enumerable project.
class ResourceManagerProjectLienIterator(resource_iter_class_factory(
        api_method_name='iter_crm_project_liens',
        resource_name='crm_lien',
        api_method_arg_key='projectNumber',
        resource_validation_method_name='enumerable')):
    """The Resource iterator implementation for Resource Manager Lien."""
# Factory-generated iterator: enabled Service Usage services of a project.
class ServiceUsageServiceIterator(resource_iter_class_factory(
        api_method_name='iter_serviceusage_services',
        resource_name='service',
        api_method_arg_key='projectNumber')):
    """The Resource Iterator implementation for Service Usage Services."""
# Factory-generated iterator: Spanner databases under an instance.
class SpannerDatabaseIterator(resource_iter_class_factory(
        api_method_name='iter_spanner_databases',
        resource_name='spanner_database',
        api_method_arg_key='name')):
    """The Resource iterator implementation for Spanner Database."""
# Factory-generated iterator: Spanner instances of an enumerable project.
class SpannerInstanceIterator(resource_iter_class_factory(
        api_method_name='iter_spanner_instances',
        resource_name='spanner_instance',
        api_method_arg_key='projectNumber',
        resource_validation_method_name='enumerable')):
    """The Resource iterator implementation for Spanner Instance."""
# Factory-generated iterator: Cloud Storage buckets of a project.
class StorageBucketIterator(resource_iter_class_factory(
        api_method_name='iter_storage_buckets',
        resource_name='storage_bucket',
        api_method_arg_key='projectNumber')):
    """The Resource iterator implementation for Storage Bucket."""
# Factory-generated iterator: objects in a storage bucket (parent 'id').
# Currently unused: it is commented out of the storage_bucket factory below.
class StorageObjectIterator(resource_iter_class_factory(
        api_method_name='iter_storage_objects',
        resource_name='storage_object',
        api_method_arg_key='id')):
    """The Resource iterator implementation for Storage Object."""
FACTORIES = {
'composite_root': ResourceFactory({
'dependsOn': [],
'cls': CompositeRootResource,
'contains': [
CompositeRootIterator,
]}),
'organization': ResourceFactory({
'dependsOn': [],
'cls': ResourceManagerOrganization,
'contains': [
BillingAccountIterator,
GsuiteGroupIterator,
GsuiteUserIterator,
IamOrganizationCuratedRoleIterator,
IamOrganizationRoleIterator,
LoggingOrganizationSinkIterator,
ResourceManagerOrganizationOrgPolicyIterator,
ResourceManagerFolderIterator,
ResourceManagerProjectIterator,
ResourceManagerOrganizationAccessPolicyIterator,
]}),
'folder': ResourceFactory({
'dependsOn': ['organization'],
'cls': ResourceManagerFolder,
'contains': [
LoggingFolderSinkIterator,
ResourceManagerFolderOrgPolicyIterator,
ResourceManagerFolderIterator,
ResourceManagerProjectIterator,
]}),
'project': ResourceFactory({
'dependsOn': ['organization', 'folder'],
'cls': ResourceManagerProject,
'contains': [
AppEngineAppIterator,
BigqueryDataSetIterator,
BigtableInstanceIterator,
CloudSqlInstanceIterator,
ComputeAddressIterator,
ComputeAutoscalerIterator,
ComputeBackendBucketIterator,
ComputeBackendServiceIterator,
ComputeDiskIterator,
ComputeFirewallIterator,
ComputeForwardingRuleIterator,
ComputeHealthCheckIterator,
ComputeHttpHealthCheckIterator,
ComputeHttpsHealthCheckIterator,
ComputeImageIterator,
ComputeInstanceGroupIterator,
ComputeInstanceGroupManagerIterator,
ComputeInstanceIterator,
ComputeInstanceTemplateIterator,
ComputeInterconnectIterator,
ComputeInterconnectAttachmentIterator,
ComputeLicenseIterator,
ComputeNetworkIterator,
ComputeProjectIterator,
ComputeRouterIterator,
ComputeSecurityPolicyIterator,
ComputeSnapshotIterator,
ComputeSslCertificateIterator,
ComputeSubnetworkIterator,
ComputeTargetHttpProxyIterator,
ComputeTargetHttpsProxyIterator,
ComputeTargetInstanceIterator,
ComputeTargetPoolIterator,
ComputeTargetSslProxyIterator,
ComputeTargetTcpProxyIterator,
ComputeTargetVpnGatewayIterator,
ComputeUrlMapIterator,
ComputeVpnTunnelIterator,
DataprocClusterIterator,
DnsManagedZoneIterator,
DnsPolicyIterator,
IamProjectRoleIterator,
IamServiceAccountIterator,
KmsKeyRingIterator,
KubernetesClusterIterator,
LoggingProjectSinkIterator,
PubsubSubscriptionIterator,
PubsubTopicIterator,
ResourceManagerProjectLienIterator,
ResourceManagerProjectOrgPolicyIterator,
ServiceUsageServiceIterator,
SpannerInstanceIterator,
StorageBucketIterator,
]}),
'appengine_app': ResourceFactory({
'dependsOn': ['project'],
'cls': AppEngineApp,
'contains': [
AppEngineServiceIterator,
]}),
'appengine_service': ResourceFactory({
'dependsOn': ['appengine_app'],
'cls': AppEngineService,
'contains': [
AppEngineVersionIterator,
]}),
'appengine_version': ResourceFactory({
'dependsOn': ['appengine_service'],
'cls': AppEngineVersion,
'contains': [
AppEngineInstanceIterator,
]}),
'appengine_instance': ResourceFactory({
'dependsOn': ['appengine_version'],
'cls': AppEngineInstance,
'contains': []}),
'billing_account': ResourceFactory({
'dependsOn': ['organization'],
'cls': BillingAccount,
'contains': [
LoggingBillingAccountSinkIterator,
]}),
'bigquery_dataset': ResourceFactory({
'dependsOn': ['project'],
'cls': BigqueryDataSet,
'contains': [
BigqueryTableIterator
]}),
'bigquery_table': ResourceFactory({
'dependsOn': ['bigquery_dataset'],
'cls': BigqueryTable,
'contains': []}),
'bigtable_cluster': ResourceFactory({
'dependsOn': ['bigtable_instance'],
'cls': BigtableCluster,
'contains': []}),
'bigtable_instance': ResourceFactory({
'dependsOn': ['project'],
'cls': BigtableInstance,
'contains': [
BigtableClusterIterator,
BigtableTableIterator
]}),
'bigtable_table': ResourceFactory({
'dependsOn': ['bigtable_instance'],
'cls': BigtableTable,
'contains': []}),
'cloudsql_instance': ResourceFactory({
'dependsOn': ['project'],
'cls': CloudSqlInstance,
'contains': []}),
'compute_address': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeAddress,
'contains': []}),
'compute_autoscaler': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeAutoscaler,
'contains': []}),
'compute_backendservice': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeBackendService,
'contains': []}),
'compute_backendbucket': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeBackendBucket,
'contains': []}),
'compute_disk': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeDisk,
'contains': []}),
'compute_firewall': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeFirewall,
'contains': []}),
'compute_forwardingrule': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeForwardingRule,
'contains': []}),
'compute_healthcheck': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeHealthCheck,
'contains': []}),
'compute_httphealthcheck': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeHttpHealthCheck,
'contains': []}),
'compute_httpshealthcheck': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeHttpsHealthCheck,
'contains': []}),
'compute_image': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeImage,
'contains': []}),
'compute_instancegroup': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeInstanceGroup,
'contains': []}),
'compute_instancegroupmanager': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeInstanceGroupManager,
'contains': []}),
'compute_instance': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeInstance,
'contains': []}),
'compute_instancetemplate': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeInstanceTemplate,
'contains': []}),
'compute_interconnect': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeInterconnect,
'contains': []}),
'compute_interconnect_attachment': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeInterconnectAttachment,
'contains': []}),
'compute_license': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeLicense,
'contains': []}),
'compute_network': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeNetwork,
'contains': []}),
'compute_project': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeProject,
'contains': []}),
'compute_router': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeRouter,
'contains': []}),
'compute_securitypolicy': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeSecurityPolicy,
'contains': []}),
'compute_snapshot': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeSnapshot,
'contains': []}),
'compute_sslcertificate': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeSslCertificate,
'contains': []}),
'compute_subnetwork': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeSubnetwork,
'contains': []}),
'compute_targethttpproxy': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeTargetHttpProxy,
'contains': []}),
'compute_targethttpsproxy': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeTargetHttpsProxy,
'contains': []}),
'compute_targetinstance': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeTargetInstance,
'contains': []}),
'compute_targetpool': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeTargetPool,
'contains': []}),
'compute_targetsslproxy': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeTargetSslProxy,
'contains': []}),
'compute_targettcpproxy': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeTargetTcpProxy,
'contains': []}),
'compute_targetvpngateway': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeTargetVpnGateway,
'contains': []}),
'compute_urlmap': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeUrlMap,
'contains': []}),
'compute_vpntunnel': ResourceFactory({
'dependsOn': ['project'],
'cls': ComputeVpnTunnel,
'contains': []}),
'crm_lien': ResourceFactory({
'dependsOn': ['project'],
'cls': ResourceManagerLien,
'contains': []}),
'crm_org_policy': ResourceFactory({
'dependsOn': ['folder', 'organization', 'project'],
'cls': ResourceManagerOrgPolicy,
'contains': []}),
'crm_access_policy': ResourceFactory({
'dependsOn': ['organization'],
'cls': ResourceManagerAccessPolicy,
'contains': [
AccessLevelIterator,
ServicePerimeterIterator,
]}),
'crm_access_level': ResourceFactory({
'dependsOn': ['crm_access_policy'],
'cls': ResourceManagerAccessLevel,
'contains': []}),
'crm_service_perimeter': ResourceFactory({
'dependsOn': ['crm_access_policy'],
'cls': ResourceManagerServicePerimeter,
'contains': []}),
'dataproc_cluster': ResourceFactory({
'dependsOn': ['project'],
'cls': DataprocCluster,
'contains': []}),
'dns_managedzone': ResourceFactory({
'dependsOn': ['project'],
'cls': DnsManagedZone,
'contains': []}),
'dns_policy': ResourceFactory({
'dependsOn': ['project'],
'cls': DnsPolicy,
'contains': []}),
'service': ResourceFactory({
'dependsOn': ['project'],
'cls': ServiceUsageService,
'contains': []}),
'gsuite_group': ResourceFactory({
'dependsOn': ['organization'],
'cls': GsuiteGroup,
'contains': [
GsuiteMemberIterator,
GsuiteGroupsSettingsIterator,
]}),
'gsuite_groups_settings': ResourceFactory({
'dependsOn': ['gsuite_group'],
'cls': GsuiteGroupsSettings,
'contains': []}),
'gsuite_group_member': ResourceFactory({
'dependsOn': ['gsuite_group'],
'cls': GsuiteGroupMember,
'contains': []}),
'gsuite_user': ResourceFactory({
'dependsOn': ['organization'],
'cls': GsuiteUser,
'contains': []}),
'gsuite_user_member': ResourceFactory({
'dependsOn': ['gsuite_group'],
'cls': GsuiteUserMember,
'contains': []}),
'iam_curated_role': ResourceFactory({
'dependsOn': [],
'cls': IamCuratedRole,
'contains': []}),
'iam_role': ResourceFactory({
'dependsOn': ['organization', 'project'],
'cls': IamRole,
'contains': []}),
'iam_serviceaccount': ResourceFactory({
'dependsOn': ['project'],
'cls': IamServiceAccount,
'contains': [
IamServiceAccountKeyIterator
]}),
'iam_serviceaccount_key': ResourceFactory({
'dependsOn': ['iam_serviceaccount'],
'cls': IamServiceAccountKey,
'contains': []}),
'kms_keyring': ResourceFactory({
'dependsOn': ['project'],
'cls': KmsKeyRing,
'contains': [
KmsCryptoKeyIterator
]}),
'kms_cryptokey': ResourceFactory({
'dependsOn': ['kms_keyring'],
'cls': KmsCryptoKey,
'contains': [
KmsCryptoKeyVersionIterator
]}),
'kms_cryptokeyversion': ResourceFactory({
'dependsOn': ['kms_cryptokey'],
'cls': KmsCryptoKeyVersion,
'contains': []}),
'kubernetes_cluster': ResourceFactory({
'dependsOn': ['project'],
'cls': KubernetesCluster,
'contains': [
KubernetesNodeIterator,
KubernetesNamespaceIterator,
KubernetesClusterRoleIterator,
KubernetesClusterRoleBindingIterator,
]}),
'kubernetes_namespace': ResourceFactory({
'dependsOn': ['kubernetes_cluster'],
'cls': KubernetesNamespace,
'contains': [
KubernetesPodIterator,
KubernetesRoleIterator,
KubernetesRoleBindingIterator,
KubernetesServiceIterator,
]}),
'kubernetes_node': ResourceFactory({
'dependsOn': ['kubernetes_cluster'],
'cls': KubernetesNode,
'contains': []}),
'kubernetes_pod': ResourceFactory({
'dependsOn': ['kubernetes_namespace'],
'cls': KubernetesPod,
'contains': []}),
'kubernetes_service': ResourceFactory({
'dependsOn': ['kubernetes_namespace'],
'cls': KubernetesService,
'contains': []}),
'kubernetes_role': ResourceFactory({
'dependsOn': ['kubernetes_namespace'],
'cls': KubernetesRole,
'contains': []}),
'kubernetes_rolebinding': ResourceFactory({
'dependsOn': ['kubernetes_namespace'],
'cls': KubernetesRoleBinding,
'contains': []}),
'kubernetes_clusterrole': ResourceFactory({
'dependsOn': ['kubernetes_cluster'],
'cls': KubernetesClusterRole,
'contains': []}),
'kubernetes_clusterrolebinding': ResourceFactory({
'dependsOn': ['kubernetes_cluster'],
'cls': KubernetesClusterRoleBinding,
'contains': []}),
'logging_sink': ResourceFactory({
'dependsOn': ['organization', 'folder', 'project'],
'cls': LoggingSink,
'contains': []}),
'pubsub_subscription': ResourceFactory({
'dependsOn': ['project'],
'cls': PubsubSubscription,
'contains': []}),
'pubsub_topic': ResourceFactory({
'dependsOn': ['project'],
'cls': PubsubTopic,
'contains': []}),
'spanner_database': ResourceFactory({
'dependsOn': ['project'],
'cls': SpannerDatabase,
'contains': []}),
'spanner_instance': ResourceFactory({
'dependsOn': ['project'],
'cls': SpannerInstance,
'contains': [
SpannerDatabaseIterator
]}),
'storage_bucket': ResourceFactory({
'dependsOn': ['project'],
'cls': StorageBucket,
'contains': [
# StorageObjectIterator
]}),
'storage_object': ResourceFactory({
'dependsOn': ['bucket'],
'cls': StorageObject,
'contains': []}),
}
|
forseti-security/forseti-security
|
google/cloud/forseti/services/inventory/base/resources.py
|
Python
|
apache-2.0
| 115,431
|
[
"VisIt"
] |
c74bd69f0762fca1082b9f7f8139eb8d37fde9c7c56608d66dc3d014a679a70b
|
import pysal as ps
import json
from six.moves import urllib as urllib
import copy
import numpy as np
"""
Weights Meta Data
Prototyping meta data functions and classes for weights provenance
Based on Anselin, L., S.J. Rey and W. Li (2014) "Metadata and provenance for
spatial analysis: the case of spatial weights." International Journal of
Geographical Information Science. DOI:10.1080/13658816.2014.917313
TODO
----
- Document each public function with working doctest
- Abstract the test files as they currently assume location is source
directory
- have wmd_reader take either a wmd file or a wmd dictionary/object
"""
__author__ = "Sergio J. Rey <srey@asu.edu>, Wenwen Li <wenwen@asu.edu>"
def wmd_reader(fileName):
    """Read a weights meta data (wmd) file and build the weights object.

    The name is first tried as a remote URI, then as a local path
    (matching the original lookup order). The full meta data of the root
    document is recorded in the module-level ``fullmeta`` dict.

    Parameters
    ----------
    fileName : str
        URI or local path of a JSON-formatted wmd file.

    Returns
    -------
    WMD or None
        The weights object described by the meta data, or None when the
        file cannot be read or parsed.

    Examples
    --------
    >>> import wmd
    >>> wr = wmd.wmd_reader('w1rook.wmd')
    wmd_reader failed: w1rook.wmd
    >>> wr = wmd.wmd_reader('wrook1.wmd')
    >>> wr.neighbors[2]
    [0, 1, 3, 4]
    """
    global fullmeta
    try:
        meta_data = _uri_reader(fileName)
    except Exception:
        try:
            with open(fileName, 'r') as fp:
                meta_data = json.load(fp)
        except Exception:
            print('wmd_reader failed: ' + str(fileName))
            return None
    # Bug fix: previously meta data fetched from a URI was read but never
    # parsed (the parse-and-return step only ran in the local-file fallback
    # branch), so remote reads silently returned None.
    try:
        fullmeta = {'root': copy.deepcopy(meta_data)}
        return _wmd_parser(meta_data)
    except Exception:
        print('wmd_reader failed: ' + str(fileName))
        return None
class WMD(ps.W):
    """Weights Meta Data Class

    A pysal W subclass carrying a ``meta_data`` dict that records the
    provenance of the weights (inputs, weight type, parameters,
    transform), per Anselin, Rey and Li (2014).
    """

    def __init__(self, neighbors=None, weights=None, id_order=None):
        # meta_data must exist before W.__init__ runs so the transform
        # setter override below can record into it during construction.
        self.meta_data = {}
        super(WMD, self).__init__(neighbors, weights, id_order)

    # override transform property to record any post-instantiation
    # transformations in meta data
    @ps.W.transform.setter
    def transform(self, value):
        super(WMD, WMD).transform.__set__(self, value)
        self.meta_data['transform'] = self._transform

    def write(self, fileName, data=False):
        """Write this object's meta data (and, when ``data`` is True, its
        weights and neighbors dictionaries) to a JSON wmd file.

        Examples
        --------
        >>> import wmd
        >>> wr = wmd.wmd_reader('w1rook.wmd')
        wmd_reader failed: w1rook.wmd
        >>> wr = wmd.wmd_reader('wrook1.wmd')
        >>> wr.write('wr1.wmd')
        >>> wr1 = wmd.wmd_reader('wr1.wmd')
        >>> wr.neighbors[2]
        [0, 1, 3, 4]
        >>> wr1.neighbors[2]
        [0, 1, 3, 4]
        >>>
        """
        _wmd_writer(self, fileName, data=data)
######################### Private functions #########################
def _wmd_writer(wmd_object, fileName, data=False):
    """Serialize a WMD object's meta data to a JSON file.

    Parameters
    ----------
    wmd_object : WMD
        Weights object whose ``meta_data`` attribute is written out.
    fileName : str
        Path of the output JSON file.
    data : bool
        When True, embed the weights and neighbors dictionaries in the
        output under the 'data' key.
    """
    try:
        if data:
            # Record the actual weights alongside the provenance info.
            wmd_object.meta_data['data'] = {
                'weights': wmd_object.weights,
                'neighbors': wmd_object.neighbors,
            }
        with open(fileName, 'w') as f:
            json.dump(wmd_object.meta_data,
                      f,
                      indent=4,
                      separators=(',', ': '))
    except (IOError, OSError, TypeError, ValueError, AttributeError):
        # Narrowed from a bare except: I/O failures or unserializable
        # meta data; keep the original best-effort behavior of printing
        # rather than raising.
        print('wmd_writer failed.')
def _block(arg_dict):
    """
    General handler for block weights

    Observations sharing the same value of a categorical dbf variable are
    made mutual neighbors.

    Examples
    --------
    >>> w = wmd_reader('taz_block.wmd')
    >>> w.n
    4109
    >>> w.meta_data
    {'root': {u'input1': {u'data1': {u'type': u'dbf',
    u'uri': u'http://toae.org/pub/taz.dbf'}},
    u'weight_type': u'block', u'transform': u'O',
    u'parameters': {u'block_variable': u'CNTY'}}}
    """
    # input1 maps a single label (e.g. 'data1') to the data description;
    # unwrap the first (only) entry.
    input1 = arg_dict['input1']
    for key in input1:
        input1 = input1[key]
        break
    uri = input1['uri']
    weight_type = arg_dict['weight_type'].lower()
    var_name = arg_dict['parameters']['block_variable']
    dbf = ps.open(uri)
    block = np.array(dbf.by_col(var_name))
    dbf.close()
    w = ps.weights.util.block_weights(block)
    # Rewrap as WMD and record provenance.
    w = WMD(w.neighbors, w.weights)
    w.meta_data = {}
    w.meta_data['input1'] = {"type": 'dbf', 'uri': uri}
    w.meta_data['transform'] = w.transform
    w.meta_data['weight_type'] = weight_type
    w.meta_data['parameters'] = {'block_variable': var_name}
    return w
def _contiguity(arg_dict):
    """
    General handler for building contiguity weights from shapefiles

    Supports 'rook' and 'queen' criteria, with optional higher-order
    contiguity (and optional inclusion of lower orders) via parameters.

    Examples
    --------
    >>> w = wmd_reader('wrook1.wmd')
    >>> w.n
    49
    >>> w.meta_data
    {'root': {u'input1': {u'data1': {u'type': u'shp',
    u'uri': u'http://toae.org/pub/columbus.shp'}},
    u'weight_type': u'rook', u'transform': u'O'}}
    """
    # input1 maps a single label (e.g. 'data1') to the data description.
    input1 = arg_dict['input1']
    for key in input1:
        input1 = input1[key]
        break
    uri = input1['uri']
    weight_type = arg_dict['weight_type']
    weight_type = weight_type.lower()
    if weight_type == 'rook':
        w = ps.rook_from_shapefile(uri)
    elif weight_type == 'queen':
        w = ps.queen_from_shapefile(uri)
    else:
        print "Unsupported contiguity criterion: ", weight_type
        return None
    if 'parameters' in arg_dict:
        order = arg_dict['parameters'].get('order', 1)  # default to 1st order
        lower = arg_dict['parameters'].get('lower', 0)  # default to exclude lower orders
        if order > 1:
            w_orig = w
            w = ps.higher_order(w, order)
            if lower:
                # Union in every order below `order`, down to and
                # including first-order contiguity.
                # NOTE(review): xrange (and the print statement above) are
                # Python 2 only — confirm the target interpreter.
                for o in xrange(order-1, 1, -1):
                    w = ps.weights.w_union(ps.higher_order(w_orig, o), w)
                w = ps.weights.w_union(w, w_orig)
        parameters = arg_dict['parameters']
    else:
        parameters = {'lower': 0, 'order': 1}
    # Rewrap as WMD and record provenance.
    w = WMD(w.neighbors, w.weights)
    w.meta_data = {}
    w.meta_data["input1"] = {"type": 'shp', 'uri': uri}
    w.meta_data["transform"] = w.transform
    w.meta_data["weight_type"] = weight_type
    w.meta_data['parameters'] = parameters
    return w
def _kernel(arg_dict):
    """
    General handler for building kernel based weights from shapefiles

    Handles fixed ('kernel') and adaptive ('akernel') bandwidth kernels.

    Examples
    --------
    >>> w = wmd_reader('kernel.wmd')
    >>> w.n
    49
    >>> w.meta_data
    {'root': {u'input1': {u'data1': {u'type': u'shp',
    u'uri': u'../examples/columbus.shp'}},
    u'weight_type': u'kernel', u'transform': u'O',
    u'parameters': {u'function': u'triangular',
    u'bandwidths': None, u'k': 2}}}
    """
    input1 = arg_dict['input1']['data1']
    uri = input1['uri']
    weight_type = arg_dict['weight_type']
    weight_type = weight_type.lower()
    # Defaults, overridden by any supplied parameters.
    k = 2
    bandwidths = None
    function = 'triangular'
    if 'parameters' in arg_dict:
        # NOTE(review): when parameters are supplied but incomplete, the
        # defaulted values are NOT written back, so the meta data recorded
        # below may omit them — confirm whether that is intended.
        k = arg_dict['parameters'].get('k', k)  # set default to 2
        bandwidths = arg_dict['parameters'].get('bandwidths', bandwidths)
        function = arg_dict['parameters'].get('function', function)
    else:
        parameters = {}
        parameters['k'] = k
        parameters['bandwidths'] = bandwidths
        parameters['function'] = function
        arg_dict['parameters'] = parameters
    if weight_type == 'akernel':
        # adaptive kernel
        w = ps.adaptive_kernelW_from_shapefile(uri, bandwidths = bandwidths,
                                               k=k, function = function)
    elif weight_type == 'kernel':
        w = ps.kernelW_from_shapefile(uri, k=k, function = function)
    else:
        print "Unsupported kernel: ",weight_type
        return None
    # Rewrap as WMD and record provenance.
    w = WMD(w.neighbors, w.weights)
    w.meta_data = {}
    w.meta_data["input1"] = {"type": 'shp', 'uri':uri}
    w.meta_data["transform"] = w.transform
    w.meta_data["weight_type"] = weight_type
    w.meta_data['parameters'] = arg_dict['parameters']
    return w
def _distance(arg_dict):
    """
    General handler for distance based weights obtained from shapefiles

    Currently only the 'knn' weight type is supported.

    Parameters
    ----------
    arg_dict : dict
        Parsed wmd document with 'input1' (data description with 'uri'),
        'weight_type', and optional 'parameters' (k, id_variable, p,
        radius).

    Returns
    -------
    WMD or None
        The k-nearest-neighbor weights, or None for an unsupported type.
    """
    input1 = arg_dict['input1']
    uri = input1['uri']
    weight_type = arg_dict['weight_type']
    weight_type = weight_type.lower()
    # Defaults, overridden by any supplied parameters.
    k = 2
    id_variable = None
    p = 2
    radius = None
    if 'parameters' in arg_dict:
        k = arg_dict['parameters'].get('k', k)  # set default to 2
        id_variable = arg_dict['parameters'].get('id_variable', id_variable)
        p = arg_dict['parameters'].get('p', p)
        radius = arg_dict['parameters'].get('radius', radius)
    else:
        arg_dict['parameters'] = {'k': k,
                                  'id_variable': id_variable,
                                  'radius': radius,
                                  'p': p}
    if weight_type != 'knn':
        # Bug fix: an unsupported type previously fell through to the
        # WMD(...) call with `w` undefined and raised NameError; report
        # and return None, consistent with _contiguity and _kernel.
        print('Unsupported distance weight type: ' + str(weight_type))
        return None
    w = ps.knnW_from_shapefile(uri, k=k, p=p, idVariable=id_variable,
                               radius=radius)
    # Rewrap as WMD and record provenance.
    w = WMD(w.neighbors, w.weights)
    w.meta_data = {}
    w.meta_data["input1"] = {"type": 'shp', 'uri': uri}
    w.meta_data["weight_type"] = 'knn'
    w.meta_data["transform"] = w.transform
    w.meta_data['parameters'] = arg_dict['parameters']
    return w
def _higher_order(arg_dict):
    """Handler for higher-order contiguity weights derived from an
    existing weights object passed in under the 'wmd' key.

    Defaults to order 2 when no parameters are supplied.
    """
    wmd = arg_dict['wmd']
    order = 2
    if 'parameters' in arg_dict:
        order = arg_dict['parameters'].get('order', order)
    else:
        parameters = {}
        parameters['order'] = order
        arg_dict['parameters'] = parameters
    w = ps.higher_order(wmd, order)
    # Rewrap as WMD and record provenance.
    w = WMD(w.neighbors, w.weights)
    w.meta_data = {}
    w.meta_data['input1'] = arg_dict['input1']
    w.meta_data['parameters'] = arg_dict['parameters']
    return w
def _intersection(arg_dict):
    """Handler for the intersection of two weights objects.

    NOTE(review): the 'uri' entries are assumed to already hold weights
    objects (for 'prov' inputs, _wmd_parser replaces the uri string with
    the parsed W before dispatching here) — confirm; a raw string uri
    would fail inside ps.w_intersection.
    """
    #wmd = arg_dict['wmd']
    w1 = arg_dict['input1']['data1']['uri']
    w2 = arg_dict['input1']['data2']['uri']
    w = ps.w_intersection(w1,w2)
    w = WMD(w.neighbors, w.weights)
    return w
def _geojsonf(arg_dict):
    """
    Handler for local geojson files

    Currently only the 'queen_geojsonf' weight type is supported.

    Returns
    -------
    W or None
        Queen contiguity weights built from the geojson file, or None for
        an unsupported weight type.
    """
    input1 = arg_dict['input1']
    uri = input1['uri']
    weight_type = arg_dict['weight_type']
    weight_type = weight_type.lower()
    id_variable = None
    if weight_type != 'queen_geojsonf':
        # Bug fix: an unmatched type previously reached `return w` with
        # `w` undefined and raised NameError; report and return None,
        # consistent with the other handlers.
        print('Unsupported geojson weight type: ' + str(weight_type))
        return None
    w = ps.weights.user.queen_from_geojsonf(uri)
    w.meta_data = {}
    w.meta_data["input1"] = {"type": 'geojsonf', 'uri': uri}
    w.meta_data["weight_type"] = 'queen'
    w.meta_data["transform"] = w.transform
    return w
# wrapper dict that maps specific weights types to a handler function that
# builds the specific weights instance
WEIGHT_TYPES = {
    'rook': _contiguity,
    'queen': _contiguity,
    'akernel': _kernel,
    'kernel': _kernel,
    'knn': _distance,
    'higher_order': _higher_order,
    'block': _block,
    'intersection': _intersection,
    # 'queen_geojsonf': geojsonf,
    # 'geojsons': geojsons,
}
def _uri_reader(uri):
    """Fetch a JSON document from a URI and return the parsed object."""
    response = urllib.urlopen(uri)
    return json.load(response)
def _wmd_read_only(fileName):
    """Return the parsed meta data from a wmd file without building weights.

    The name is tried as a remote URI first, then as a local path (same
    lookup order as wmd_reader). Returns None when neither can be read.
    """
    try:
        # Bug fix: the URI result was previously discarded (the return
        # statement only existed on the local-file path), so successful
        # remote reads returned None.
        return _uri_reader(fileName)
    except Exception:
        try:
            with open(fileName, 'r') as fp:
                return json.load(fp)
        except Exception:
            print('_wmd_read_only failed: ' + str(fileName))
            return None
def _wmd_parser(wmd_object):
    """Recursively resolve a meta data dict into a weights object.

    Inputs of type 'prov' are themselves wmd documents: they are fetched,
    recorded in the global ``fullmeta``, parsed recursively, and their
    'uri' entry is replaced by the resulting weights object. Other inputs
    are data files, downloaded to the working directory when not local.
    Finally the matching WEIGHT_TYPES handler builds the weights.
    """
    if 'root' in wmd_object:
        wmd_object = wmd_object['root']
    weight_type = wmd_object['weight_type'].lower()
    for key in wmd_object['input1']:
        #print key
        if wmd_object['input1'][key]['type'] == 'prov':
            # call wmd_reader
            uri = wmd_object['input1'][key]['uri']
            meta_data = _wmd_read_only(uri)
            fullmeta[uri] = copy.deepcopy(meta_data) #add full metadata
            wmd = _wmd_parser(meta_data)
            # Handlers (e.g. _intersection) receive the parsed weights
            # object in place of the uri string.
            wmd_object['input1'][key]['uri'] = wmd
        else:
            # handle distributed files
            uri = wmd_object['input1'][key]['uri']
            try:
                # Probe for a local copy.
                # NOTE(review): the probe handle `tmp` is never closed,
                # and the bare except also hides non-IO errors — consider
                # os.path.exists instead.
                tmp = open(uri)
                #print ' tmp: ', tmp
                wmd_object['input1'][key]['uri'] = uri
            except:
                _download_shapefiles(uri)
                uri = uri.split("/")[-1]
                wmd_object['input1'][key]['uri'] = uri # use local copy
    if weight_type in WEIGHT_TYPES:
        #print weight_type
        wmd = WEIGHT_TYPES[weight_type](wmd_object)
        wmd.meta_data = fullmeta
    else:
        print 'Unsupported weight type: ', weight_type
    return wmd
def _download_shapefiles(file_name):
    """Download the .shp/.dbf/.shx components of a remote shapefile into
    the current working directory.

    Parameters
    ----------
    file_name : str
        URL of any one component of the shapefile; the three standard
        extensions are fetched from the same remote directory.
    """
    file_parts = file_name.split("/")
    file_prefix = file_parts[-1].split(".")[0]
    exts = [".shp", ".dbf", ".shx"]
    for ext in exts:
        # rebuild url for this component
        file_name = file_prefix + ext
        file_parts[-1] = file_name
        new_url = "/".join(file_parts)
        u = urllib.urlopen(new_url)
        try:
            # Bug fix: the output file was never closed (the close call
            # sat inside a commented-out line), leaking handles and
            # risking unflushed writes; the with-block guarantees close.
            with open(file_name, 'wb') as f:
                block_sz = 8192
                while True:
                    bf = u.read(block_sz)
                    if not bf:
                        break
                    f.write(bf)
        finally:
            u.close()
if __name__ == '__main__':
    # Ad-hoc smoke tests; they assume the referenced .wmd files are in the
    # working directory (see the module TODO about abstracting test files).
    # distributed file
    w1 = wmd_reader("wrook1.wmd")

    ## # order
    ## w1o = wmd_reader('wrooko1.wmd')
    ## w2o = wmd_reader('wrooko2.wmd')
    ## w2ol = wmd_reader('wrooko2l.wmd')
    ##
    ## # kernels
    ak1 = wmd_reader('akern1.wmd')
    kern = wmd_reader('kernel.wmd')
    ##
    ## # knn
    ## knn = wmd_reader('knn.wmd')
    ##
    ##
    ##
    ## # moran workflow
    ## import pysal as ps

    # geojson
    #wj = wmd_reader("wgeojson.wmd")

    # here we test chaining
    # r1 = wmd_reader('chain2inputs.wmd')
    # print "full metadata is listed below: \n", fullmeta
    # r2 = wmd_reader('chain2.wmd')
    taz_int = wmd_reader("taz_intersection.wmd")

    ## intersection between queen and block weights
    #import numpy as np
    #w = ps.lat2W(4,4)
    #block_variable = np.ones((w.n,1))
    #block_variable[:8] = 0
    #w_block = ps.weights.util.block_weights(block_variable)
    #w_intersection = ps.w_intersection(w, w_block)

    ## with Columbus example using EW as the block and queen
    #dbf = ps.open("columbus.dbf")
    #ew = np.array(dbf.by_col("EW"))
    #dbf.close()
    #w_ew = ps.weights.util.block_weights(ew)
    #wr = ps.rook_from_shapefile("columbus.shp")
    #w_int = ps.w_intersection(w_ew, wr)
    #blk = wmd_reader('block2.wmd')
    #taz_int = wmd_reader("http://spatial.csf.asu.edu/taz_intersection.wmd")
|
schmidtc/pysal
|
pysal/meta/wmd.py
|
Python
|
bsd-3-clause
| 14,443
|
[
"COLUMBUS"
] |
cc9af17ca7d4f6b0511aa7aa4f4c1aff347e6f3b30d28531372a4e4a72a0b351
|
# See http://ipython.org/ipython-doc/1/interactive/public_server.html for more information.
# Configuration file for ipython-notebook.
import os
c = get_config()
# Listen on all interfaces inside the container; do not auto-open a browser.
c.NotebookApp.ip = '0.0.0.0'
c.NotebookApp.port = 6789
c.NotebookApp.open_browser = False
c.NotebookApp.profile = u'default'
c.IPKernelApp.matplotlib = 'inline'
# Extra HTTP headers; only attached further below when CORS_ORIGIN is set.
# NOTE(review): ALLOWALL permits framing from any site (needed when embedded
# in Galaxy) -- confirm this is acceptable for standalone deployments.
headers = {
    'X-Frame-Options': 'ALLOWALL',
}
c.NotebookApp.allow_origin = '*'
c.NotebookApp.allow_credentials = True
# In case this Notebook was launched from Galaxy a config file exists in /import/
# For standalone usage we fall back to a port-less URL
c.NotebookApp.base_url = '%s/ipython/' % os.environ.get('PROXY_PREFIX', '')
c.NotebookApp.webapp_settings = {
    'static_url_prefix': '%s/ipython/static/' % os.environ.get('PROXY_PREFIX', '')
}
# Optional password and CORS configuration injected via environment variables.
if os.environ.get('NOTEBOOK_PASSWORD', 'none') != 'none':
    c.NotebookApp.password = os.environ['NOTEBOOK_PASSWORD']
if os.environ.get('CORS_ORIGIN', 'none') != 'none':
    c.NotebookApp.allow_origin = os.environ['CORS_ORIGIN']
    c.NotebookApp.webapp_settings['headers'] = headers
|
bgruening/docker-ipython-notebook
|
ipython_notebook_config.py
|
Python
|
mit
| 1,063
|
[
"Galaxy"
] |
0151960767a4bf315d85464a99ec4241e2cbbc1410412c58a1bf0cbf93c6879b
|
# -*- coding: utf-8 -*-
"""
Module for functions for mito network normalization
@author: sweel_rafelski
"""
import os
import os.path as op
import vtk
import vtk.util.numpy_support as vnpy
from numpy import ceil, mean, percentile
# pylint: disable=C0103
datadir = op.join(os.getcwd())
def vtk_read(fpath, readertype='vtkPolyDataReader'):
    """Read a VTK file with the named reader class and return its output.

    Parameters
    ----------
    fpath : str
        Path to the VTK file.
    readertype : str
        Name of a reader class on the ``vtk`` module
        (default ``'vtkPolyDataReader'``).

    Returns
    -------
    The VTK data object produced by the reader.
    """
    reader_cls = getattr(vtk, readertype)
    vtk_reader = reader_cls()
    vtk_reader.SetFileName(fpath)
    vtk_reader.Update()
    return vtk_reader.GetOutput()
def point_cloud_scalars(skelpath, ch1path, ch2path, **kwargs):
    """
    Return scalar values from voxel data (e.g. *resampledVTK*) lying within
    a point cloud of a specified radius for each skeleton point.

    Parameters
    ----------
    skelpath, ch1path, ch2path : str
        Filepaths to the skeleton and to the volume/voxel VTK output of the
        respective channels to be normalized.
    kwargs :
        Radius value argument (float) forwarded to _pointcloud();
        defaults to 2.5 pixels there.

    Returns
    -------
    polydata : VTK poly
        Polydata object carrying the raw per-point scalar values and the
        skeleton 'Width' array (renamed "tubewidth").
    np_voxel1, np_voxel2 : Numpy Array
        Numpy conversions of the two channels' voxel intensities.
    """
    skel = vtk_read(skelpath)
    vox1 = vtk_read(ch1path, readertype='vtkStructuredPointsReader')
    vox2 = vtk_read(ch2path, readertype='vtkStructuredPointsReader')
    cloud1, cloud2 = _pointcloud(skel, vox1, vox2, **kwargs)
    # Assemble the output polydata: skeleton geometry plus the averaged
    # channel intensities and the skeleton width.
    poly = vtk.vtkPolyData()
    poly.SetPoints(skel.GetPoints())
    poly.SetLines(skel.GetLines())
    poly.GetPointData().AddArray(cloud1)
    poly.GetPointData().AddArray(cloud2)
    np_voxel1 = vnpy.vtk_to_numpy(vox1.GetPointData().GetScalars())
    np_voxel2 = vnpy.vtk_to_numpy(vox2.GetPointData().GetScalars())
    poly.GetPointData().AddArray(skel.GetPointData().GetScalars('Width'))
    # The width array is the third array added (index 2).
    poly.GetPointData().GetArray(2).SetName("tubewidth")
    return poly, np_voxel1, np_voxel2
def _pointcloud(skel, ch1, ch2, radius=2.5):
    """For every skeleton point, average each channel's voxel intensities
    over all voxels within `radius`, returning two vtkDoubleArrays named
    'vox_ch1' and 'vox_ch2' (one value per skeleton point)."""
    out1 = vtk.vtkDoubleArray()
    out1.SetName("vox_ch1")
    out2 = vtk.vtkDoubleArray()
    out2.SetName("vox_ch2")
    # A point locator over ch2's voxel grid yields the ids of all voxels
    # within `radius` of a query point.
    locator = vtk.vtkPointLocator()
    locator.SetDataSet(ch2)
    locator.BuildLocator()
    get1 = ch1.GetPointData().GetScalars().GetTuple1
    get2 = ch2.GetPointData().GetScalars().GetTuple1
    neighbours = vtk.vtkIdList()
    for idx in range(skel.GetNumberOfPoints()):
        # Skeleton coordinates are divided by .055 and rounded up --
        # presumably a physical-units to voxel-index conversion with a
        # 0.055 pixel size; confirm against the acquisition settings.
        query = tuple(ceil(c / .055) for c in skel.GetPoint(idx))
        locator.FindPointsWithinRadius(radius, query, neighbours)
        ids = [neighbours.GetId(j) for j in range(neighbours.GetNumberOfIds())]
        out1.InsertNextValue(mean([get1(v) for v in ids]))
        out2.InsertNextValue(mean([get2(v) for v in ids]))
    return out1, out2
def normalize_skel(polydata, raw_vox_ch1, raw_vox_ch2,
                   background_thresh=5., **kwargs):
    """
    Normalize channels to correct for focal plane intensity variations.

    Parameters
    ----------
    polydata : vtkPolyData
        vtk object returned from point_cloud_scalars(); must carry the
        'vox_ch1', 'vox_ch2' and 'tubewidth' point-data arrays
    raw_vox_ch1, raw_vox_ch2 : Numpy array
        Voxel intensity values in numpy array format
    background_thresh : float
        When no background values are supplied, the background is taken as
        this percentile (default: 5th) of the raw voxel intensities of the
        respective channel.  This might have to be changed depending on the
        experimental conditions.

    kwargs
    ------
    backgroundfile : dict, optional
        Per-channel background values provided by the user, with channel
        labels as keys, e.g. {'ch1': 3000.2, 'ch2': 2500.5}

    Returns
    -------
    dict with keys 'normalized_dy', 'unscaled_dy', 'ch1_bckgrnd',
    'ch2_bckgrnd', 'width_eqv' and 'tubewidth'
    """
    temp = polydata.GetPointData()
    vox_ch1 = vnpy.vtk_to_numpy(temp.GetArray('vox_ch1'))
    vox_ch2 = vnpy.vtk_to_numpy(temp.GetArray('vox_ch2'))
    # Check to see if background values were provided by the user.
    # Bugfix: pop with a default -- the original kwargs.pop('backgroundfile')
    # raised KeyError whenever the caller did not pass the kwarg, although
    # the docstring describes it as optional.
    background = kwargs.pop('backgroundfile', None)
    if background:
        if background['ch2'] > min(vox_ch2):
            min_ch2 = min(vox_ch2)-1.  # ensure no zero divisions
        else:
            min_ch2 = background['ch2']-1.  # USUAL CASE
        min_ch1 = min(background['ch1'], min(vox_ch1))
        print ("Background value of gfp={:4.0f} & rfp={:4.0f} was used".
               format(background['ch1'], background['ch2']))
    else:
        min_ch1 = percentile(raw_vox_ch1, background_thresh)
        min_ch2 = percentile(raw_vox_ch2, background_thresh)
    # background-subtracted rfp and gfp intensities
    ch2_bckgrnd = vox_ch2 - min_ch2
    ch1_bckgrnd = vox_ch1 - min_ch1
    # width equivalent: ch2 intensity relative to its dimmest point
    width_eqv = ch2_bckgrnd / min(ch2_bckgrnd)
    unscaled_dy = ch1_bckgrnd / width_eqv  # raw DY/W normalized values
    # rescale DY to [0, 1] via min-max normalization
    _min = min(unscaled_dy)
    _max = max(unscaled_dy)
    normalized_dy = ((unscaled_dy - _min)/(_max - _min))
    tubewidth = temp.GetArray('tubewidth')
    # Output results as a labelled dictionary
    results = {'normalized_dy': normalized_dy,
               'unscaled_dy': unscaled_dy,
               'ch1_bckgrnd': ch1_bckgrnd,
               'ch2_bckgrnd': ch2_bckgrnd,
               'width_eqv': width_eqv,
               'tubewidth': tubewidth}
    return results
def write_vtk(dat, fname, **kwargs):
    """Attach the arrays given in `kwargs` (keyed by output name) to the
    point data of polydata `dat` and write it to `fname`.

    kwargs
    ------
    Arbitrary name -> array pairs; numpy arrays are converted to VTK,
    VTK arrays are used as-is.  Conventional keys are:

    * normalized_dy
    * unscaled_dy
    * ch1_bckgrnd
    * ch2_bckgrnd
    * width_eqv
    """
    for name in sorted(kwargs):
        arr = kwargs[name]
        try:
            vtk_arr = vnpy.numpy_to_vtk(arr)
        except TypeError:
            # already a VTK array type; attach it unconverted
            vtk_arr = arr
        vtk_arr.SetName(name)
        dat.GetPointData().AddArray(vtk_arr)
    writer = vtk.vtkPolyDataWriter()
    writer.SetFileName(fname)
    writer.SetInputData(dat)
    writer.Update()
|
moosekaka/sweepython
|
pipeline/pipefuncs.py
|
Python
|
mit
| 6,347
|
[
"VTK"
] |
c73b3cddea4567036e7f76a2a8f51924d9a7046b6b4e1f5706ae36659a70980d
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import math
import os.path as op
import numpy as np
import argparse
orgs = """HM101
HM058 HM125 HM056 HM129 HM060
HM095 HM185 HM034 HM004 HM050
HM023 HM010 HM022 HM324 HM340
HM056.AC HM034.AC HM340.AC""".split()
dirw = "/home/youngn/zhoup/Data/misc2/gene.cluster"
if not op.exists(dirw): os.makedirs(dirw)
def make_blastp_db(orgs):
    """Build a blastp database under $data/db/blastp/<org> from each
    organism's protein fasta at $genome/<org>/51.fas."""
    genome_dir = os.environ['genome']
    db_dir = op.join(os.environ['data'], "db", "blastp")
    if not op.exists(db_dir):
        os.makedirs(db_dir)
    for org in orgs:
        fasta = op.join(genome_dir, org, "51.fas")
        out_db = op.join(db_dir, org)
        cmd = "makeblastdb -dbtype prot -in %s -out %s" % (fasta, out_db)
        os.system(cmd)
    return
def run_blast(org, fo):
    """Build (and only print) the blastp command that aligns one organism's
    proteins against its own blastp database, writing tabular output to fo.

    NOTE(review): there is no os.system call here -- the command is only
    echoed; confirm whether execution was disabled intentionally (the
    __main__ driver has the call site commented out too).
    """
    dirg = op.join(os.environ['genome'], org)
    ff = op.join(dirg, "51.fas")
    fdb = op.join(os.environ['data'], 'db', 'blastp', org)
    cmd = "blastp -db %s -query %s -out %s -num_threads %d -evalue 1e-5 -outfmt '7 qseqid qlen sseqid slen length bitscore evalue'" % (fdb, ff, fo, 24)
    print cmd
def blast2tbl(fi, fo):
    """Convert a tabular blastp report (outfmt 7: qseqid qlen sseqid slen
    length bitscore evalue, '#' comment lines skipped) into a 3-column
    ABC table for MCL: "query<TAB>target<TAB>score".

    A first pass over ALL hits records the smallest non-zero e-value
    (e_min, initialized to 1) so hits reported with e == 0 can be given a
    finite score.  A hit is kept when query != target, the alignment covers
    at least 50% of the shorter sequence, and e < 1e-10; its score is
    -log10(e).

    Fixes vs. original: files are opened with `with` so handles are closed
    even on error, and the Python-2-only `print >>fho` is replaced by an
    equivalent fho.write() (same output bytes, works on Python 2 and 3).
    """
    e_min = 1
    # pass 1: smallest non-zero e-value over all hits
    with open(fi, "r") as fhi:
        for line in fhi:
            if line[0] == "#":
                continue
            line = line.strip("\n")
            (qid, qlen, tid, tlen, alen, bit, e) = line.split("\t")
            e = float(e)
            if e != 0 and e < e_min:
                e_min = e
    # pass 2: filter hits and emit scores
    with open(fi, "r") as fhi, open(fo, "w") as fho:
        for line in fhi:
            if line[0] == "#":
                continue
            line = line.strip("\n")
            (qid, qlen, tid, tlen, alen, bit, e) = line.split("\t")
            (qlen, tlen, alen) = (float(qlen), float(tlen), float(alen))
            (e, bit) = (float(e), float(bit))
            if e == 0:
                e = e_min  # substitute the smallest observed non-zero e
            rlen = alen / min(qlen, tlen)  # coverage of the shorter seq
            if qid != tid and rlen >= 0.5 and e < 1e-10:
                score = -math.log10(e)
                fho.write("%s\t%s\t%g\n" % (qid, tid, score))
def run_blat(dirw, org):
    """Self-align an organism's proteins with blat (blast8 tabular output),
    writing <dirw>/01_pro_blat/<org>.tbl.  Creates the output directory on
    first use; runs blat synchronously via os.system."""
    dirg = os.environ['genome']
    diro = op.join(dirw, "01_pro_blat")
    if not op.exists(diro): os.makedirs(diro)
    ff = op.join(dirg, org, "51.fas")
    fo = op.join(diro, org + ".tbl")
    cmd = "blat -prot -out=blast8 %s %s %s" % (ff, ff, fo)
    print cmd
    os.system(cmd)
    return
def blat_filter(dirw, org):
    """Filter a blat blast8 self-hit table, keeping rows where
    query != target and e-value < 1e-5, from <dirw>/01_pro_blat/<org>.tbl
    into <dirw>/02_filtered/<org>.tbl (directory created on demand).

    NOTE(review): the `alen` term in the filter tests a *string* for
    truthiness, which is True for any non-empty field (even "0");
    presumably a coverage threshold was intended.  Kept as-is to preserve
    behavior.

    Fixes vs. original: files opened with `with` so handles are closed even
    on error; the Python-2-only `print >>fho` replaced by an equivalent
    fho.write() (same output bytes, works on Python 2 and 3).
    """
    diri = op.join(dirw, "01_pro_blat")
    diro = op.join(dirw, "02_filtered")
    if not op.exists(diro):
        os.makedirs(diro)
    fi = op.join(diri, org + ".tbl")
    fo = op.join(diro, org + ".tbl")
    with open(fi, "r") as fhi, open(fo, "w") as fho:
        for line in fhi:
            line = line.strip("\n")
            # blast8: 12 tab-separated columns
            (qid, tid, idt, alen, mis, gap, qbeg, qend, tbeg, tend,
             e, bit) = line.split("\t")
            (e, bit) = (float(e), float(bit))
            if qid != tid and e < 1e-5 and alen:
                fho.write(line + "\n")
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='JJJ')
    parser.add_argument('--version', action="version", version="%(prog)s 1.0")
    args = parser.parse_args()
    # Per-organism pipeline directories: blast output, MCL input, MCL
    # clusters, and final groups.
    d01 = op.join(dirw, "01.blast")
    d03 = op.join(dirw, "03.mcl.in")
    d04 = op.join(dirw, "04.mcl")
    d08 = op.join(dirw, "08.grp")
    for dirw in [d01, d03, d04, d08]:
        if not op.exists(dirw): os.makedirs(dirw)
    for org in orgs:
        print "working on " + org
        f01 = op.join(d01, org + ".tbl")
        #run_blast(org, f01)
        # convert blast hits to an ABC graph and cluster it with MCL
        f03 = op.join(d03, org + ".tbl")
        blast2tbl(f01, f03)
        f04 = op.join(d04, org + ".mcl")
        cmd = "$soft/mcl/bin/mcl %s -te 4 -I 2.0 --abc -o %s" % (f03, f04)
        os.system(cmd)
        # NOTE(review): f08 is computed but never used in the visible code.
        f08 = op.join(d08, org + ".tbl")
|
orionzhou/robin
|
old/gene.cluster.py
|
Python
|
gpl-2.0
| 3,707
|
[
"BLAST"
] |
494702ccef65322c6f1e1a96ea35fcdcd51366a93928ddb8fef5af020da5384a
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RMinfi(RPackage):
    """Tools to analyze & visualize Illumina Infinium methylation arrays."""
    homepage = "https://bioconductor.org/packages/minfi/"
    url = "https://git.bioconductor.org/packages/minfi"
    list_url = homepage
    # Bioconductor package fetched from the upstream git repository at a
    # pinned commit rather than from a release tarball.
    version('1.22.1', git='https://git.bioconductor.org/packages/minfi', commit='b2faf84bcbb291e32d470a0e029450093527545b')
    # R package dependencies, needed both to build and to run.
    depends_on('r-biocgenerics', type=('build', 'run'))
    depends_on('r-genomicranges', type=('build', 'run'))
    depends_on('r-summarizedexperiment', type=('build', 'run'))
    depends_on('r-biostrings', type=('build', 'run'))
    depends_on('r-bumphunter', type=('build', 'run'))
    depends_on('r-s4vectors', type=('build', 'run'))
    depends_on('r-genomeinfodb', type=('build', 'run'))
    depends_on('r-biobase', type=('build', 'run'))
    depends_on('r-iranges', type=('build', 'run'))
    depends_on('r-beanplot', type=('build', 'run'))
    depends_on('r-rcolorbrewer', type=('build', 'run'))
    depends_on('r-lattice', type=('build', 'run'))
    depends_on('r-nor1mix', type=('build', 'run'))
    depends_on('r-siggenes', type=('build', 'run'))
    depends_on('r-limma', type=('build', 'run'))
    depends_on('r-preprocesscore', type=('build', 'run'))
    depends_on('r-illuminaio', type=('build', 'run'))
    depends_on('r-matrixstats', type=('build', 'run'))
    depends_on('r-mclust', type=('build', 'run'))
    depends_on('r-genefilter', type=('build', 'run'))
    depends_on('r-nlme', type=('build', 'run'))
    depends_on('r-reshape', type=('build', 'run'))
    depends_on('r-mass', type=('build', 'run'))
    depends_on('r-quadprog', type=('build', 'run'))
    depends_on('r-data-table', type=('build', 'run'))
    depends_on('r-geoquery', type=('build', 'run'))
    # This minfi release requires an R from the 3.4.x series.
    depends_on('r@3.4.0:3.4.9', when='@1.22.1')
|
lgarren/spack
|
var/spack/repos/builtin/packages/r-minfi/package.py
|
Python
|
lgpl-2.1
| 3,058
|
[
"Bioconductor"
] |
e3ad9a226dc0ee839de183f9c3f1dbcf70bd119482e361c88a800dbc1246d935
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Example that trains a small multi-layer perceptron with fully connected layers on MNIST.
This example has some command line arguments that enable different neon features.
Examples:
python mnist_mlp.py -b gpu -e 10
Run the example for 10 epochs of mnist data using the nervana gpu backend
python mnist_mlp.py --validation_freq 1
After each training epoch the validation/test data set will be processed through the model
and the cost will be displayed.
python mnist_mlp.py --serialize 1 -s checkpoint.pkl
After every iteration of training the model will be dumped to a pickle file named
"checkpoint.pkl". Changing the serialize parameter changes the frequency at which the
model is saved.
python mnist_mlp.py --model_file checkpoint.pkl
Before starting to train the model, the model state is set to the values stored in the
checkpoint file named checkpoint.pkl.
"""
import logging
from neon.callbacks.callbacks import Callbacks
from neon.data import DataIterator, load_mnist
from neon.initializers import Gaussian
from neon.layers import GeneralizedCost, Affine
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, Logistic, CrossEntropyBinary, Misclassification
from neon.util.argparser import NeonArgparser
# Parse the command line arguments shared by all neon examples.
parser = NeonArgparser(__doc__)
args = parser.parse_args()

logger = logging.getLogger()
logger.setLevel(args.log_thresh)

# Load MNIST (pre-split into train and test sets) and wrap each split in a
# DataIterator presenting examples as 1x28x28 images.
(X_train, y_train), (X_test, y_test), nclass = load_mnist(path=args.data_dir)
train_set = DataIterator(X_train, y_train, nclass=nclass, lshape=(1, 28, 28))
test_set = DataIterator(X_test, y_test, nclass=nclass, lshape=(1, 28, 28))

# Weight initialization: zero-mean Gaussian with a small standard deviation.
weight_init = Gaussian(loc=0.0, scale=0.01)

# Two fully-connected layers: 100 ReLU units, then 10 logistic outputs.
layers = [Affine(nout=100, init=weight_init, activation=Rectlin()),
          Affine(nout=10, init=weight_init, activation=Logistic(shortcut=True))]

# Binary cross-entropy cost and SGD with momentum.
cost = GeneralizedCost(costfunc=CrossEntropyBinary())
optimizer = GradientDescentMomentum(0.1, momentum_coef=0.9, stochastic_round=args.rounding)

model = Model(layers=layers)
callbacks = Callbacks(model, train_set, eval_set=test_set, **args.callback_args)

# Train, then report the held-out misclassification rate.
model.fit(train_set, optimizer=optimizer, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
print('Misclassification error = %.1f%%' % (model.eval(test_set, metric=Misclassification())*100))
|
nhynes/neon
|
examples/mnist_mlp.py
|
Python
|
apache-2.0
| 3,473
|
[
"Gaussian"
] |
8eac18aeb7a5194addbb5712a2c0afbfcb83c0ae9a6cd7e4f12bdf4ffecaebf4
|
#!/usr/bin/env python
"""Copyright 2010 Phidgets Inc.
This work is licensed under the Creative Commons Attribution 2.5 Canada License.
To view a copy of this license, visit http://creativecommons.org/licenses/by/2.5/ca/
"""
__author__ = 'Adam Stelmack'
__version__ = '2.1.8'
__date__ = 'May 17 2010'
#Basic imports
from ctypes import *
import sys
import random
#Phidget specific imports
from Phidgets.PhidgetException import PhidgetErrorCodes, PhidgetException
from Phidgets.Events.Events import AttachEventArgs, DetachEventArgs, ErrorEventArgs, InputChangeEventArgs, OutputChangeEventArgs, SensorChangeEventArgs
from Phidgets.Devices.InterfaceKit import InterfaceKit
from Phidgets.Phidget import PhidgetLogLevel
#Create an interfacekit object
# Construction only allocates the handle; no hardware is touched until
# openPhidget() is called later.
try:
    interfaceKit = InterfaceKit()
except RuntimeError as e:
    print("Runtime Exception: %s" % e.details)
    print("Exiting....")
    exit(1)
#Information Display Function
def displayDeviceInfo():
    """Print an ASCII table describing the attached InterfaceKit (attach
    state, type, serial number, version) and its digital input, digital
    output and analog sensor counts.  Reads the module-level interfaceKit."""
    print("|------------|----------------------------------|--------------|------------|")
    print("|- Attached -|- Type -|- Serial No. -|- Version -|")
    print("|------------|----------------------------------|--------------|------------|")
    print("|- %8s -|- %30s -|- %10d -|- %8d -|" % (interfaceKit.isAttached(), interfaceKit.getDeviceName(), interfaceKit.getSerialNum(), interfaceKit.getDeviceVersion()))
    print("|------------|----------------------------------|--------------|------------|")
    print("Number of Digital Inputs: %i" % (interfaceKit.getInputCount()))
    print("Number of Digital Outputs: %i" % (interfaceKit.getOutputCount()))
    print("Number of Sensor Inputs: %i" % (interfaceKit.getSensorCount()))
#Event Handler Callback Functions
def interfaceKitAttached(e):
    """Attach event handler: report the serial number of the new device."""
    serial = e.device.getSerialNum()
    print("InterfaceKit %i Attached!" % (serial))
def interfaceKitDetached(e):
    """Detach event handler: report the serial number of the lost device."""
    serial = e.device.getSerialNum()
    print("InterfaceKit %i Detached!" % (serial))
def interfaceKitError(e):
    """Error event handler: report the device error, guarding against a
    Phidget API failure while reading the event's fields."""
    try:
        dev = e.device
        print("InterfaceKit %i: Phidget Error %i: %s" % (dev.getSerialNum(), e.eCode, e.description))
    except PhidgetException as e:
        print("Phidget Exception %i: %s" % (e.code, e.details))
def interfaceKitInputChanged(e):
    """Digital input change handler: report index and new state."""
    serial = e.device.getSerialNum()
    print("InterfaceKit %i: Input %i: %s" % (serial, e.index, e.state))
def interfaceKitSensorChanged(e):
    """Analog sensor change handler: report index and new value."""
    serial = e.device.getSerialNum()
    print("InterfaceKit %i: Sensor %i: %i" % (serial, e.index, e.value))
def interfaceKitOutputChanged(e):
    """Digital output change handler: report index and new state."""
    serial = e.device.getSerialNum()
    print("InterfaceKit %i: Output %i: %s" % (serial, e.index, e.state))
#Main Program Code
try:
    #logging example, uncomment to generate a log file
    #interfaceKit.enableLogging(PhidgetLogLevel.PHIDGET_LOG_VERBOSE, "phidgetlog.log")
    # Register every event callback BEFORE opening the device so no early
    # events are missed.
    interfaceKit.setOnAttachHandler(interfaceKitAttached)
    interfaceKit.setOnDetachHandler(interfaceKitDetached)
    interfaceKit.setOnErrorhandler(interfaceKitError)
    interfaceKit.setOnInputChangeHandler(interfaceKitInputChanged)
    interfaceKit.setOnOutputChangeHandler(interfaceKitOutputChanged)
    interfaceKit.setOnSensorChangeHandler(interfaceKitSensorChanged)
except PhidgetException as e:
    print("Phidget Exception %i: %s" % (e.code, e.details))
    print("Exiting....")
    exit(1)
print("Opening phidget object....")
try:
    interfaceKit.openPhidget()
except PhidgetException as e:
    print("Phidget Exception %i: %s" % (e.code, e.details))
    print("Exiting....")
    exit(1)
print("Waiting for attach....")
try:
    # Block for up to 10 seconds for the hardware to attach.
    interfaceKit.waitForAttach(10000)
except PhidgetException as e:
    print("Phidget Exception %i: %s" % (e.code, e.details))
    # Attach failed: best-effort close, then exit.
    try:
        interfaceKit.closePhidget()
    except PhidgetException as e:
        print("Phidget Exception %i: %s" % (e.code, e.details))
        print("Exiting....")
        exit(1)
    print("Exiting....")
    exit(1)
else:
    displayDeviceInfo()
print("Setting the data rate for each sensor index to 4ms....")
for i in range(interfaceKit.getSensorCount()):
    try:
        interfaceKit.setDataRate(i, 4)
    except PhidgetException as e:
        print("Phidget Exception %i: %s" % (e.code, e.details))
print("Press Enter to quit....")
# NOTE(review): `chr` shadows the builtin of the same name from here on.
chr = sys.stdin.read(1)
print("Closing...")
try:
    interfaceKit.closePhidget()
except PhidgetException as e:
    print("Phidget Exception %i: %s" % (e.code, e.details))
    print("Exiting....")
    exit(1)
print("Done.")
exit(0)
|
danielsuo/mobot
|
src/move/Python/InterfaceKit-simple.py
|
Python
|
mit
| 4,650
|
[
"VisIt"
] |
0e2af7f6c748dad8839486cca15987759e46847168ded07434762c012c51b88a
|
#!/usr/bin/env python
'''
Before running any steps in the pipeline, check your data to see if the fastq input is in
Illumina 1.3-1.7 format, specificially that
* the @ line follows the format @stuff:with:colons#BARCODE/1 (or /2 for reverse reads)
* the quality line uses Sanger encoding (ascii offset 64, high score h to low score B)
'''
import itertools, os, os.path, sys, argparse, shutil, re
from Bio import SeqIO
import util
def parse_fastq_record_id(record):
    """Strip the /1 or /2 read-direction suffix from a fastq record id.

    E.g. a BioPython record with id "lol/1" yields "lol".  Raises
    RuntimeError when the id does not carry a /1 or /2 suffix.
    """
    match = re.match(r'^(.+)/[12]', record.id)
    if match is None:
        raise RuntimeError("fastq record line did not parse: %s" % record.id)
    return match.group(1)
def check_illumina_format(inputs, targets):
    '''
    Raise an error if any of the input files is not in one of the target
    formats.

    inputs : filenames or filehandle, or iterable of strings/filehandles
        files to be checked
    targets : string, or iterable of strings
        'illumina13' or 'illumina18' or 'ambiguous', or some combination

    returns True when every input matches one of the targets; raises
    ValueError for unrecognized target names and RuntimeError listing the
    offending files otherwise.
    '''
    # convert to lists if required
    inputs = util.listify(inputs)
    targets = util.listify(targets)
    # check that input targets are in the acceptable set
    acceptable_targets = set(['illumina13', 'illumina18', 'ambiguous'])
    if not set(targets).issubset(acceptable_targets):
        # bugfix: the original subtracted a set from a list (TypeError) and
        # raised the undefined name `ArgumentError`
        bad_targets = set(targets) - acceptable_targets
        raise ValueError("unrecognized format type(s): %s" % bad_targets)
    # check all the formats
    formats = [file_format(i) for i in inputs]
    tests = [form in targets for form in formats]
    # bugfix: the original zipped over the comprehension variable `i`
    # instead of `inputs`, which crashed (or silently misbehaved) whenever
    # a file failed the check
    bad_files = [i for i, test in zip(inputs, tests) if test == False]
    bad_forms = [form for form, test in zip(formats, tests) if test == False]
    # complain if something went wrong
    if False in tests:
        bad_info = "\n".join([" ".join([i, form]) for i, form in zip(bad_files, bad_forms)])
        raise RuntimeError("files do not appear to be in %s format: \n%s" % (targets, bad_info))
    else:
        return True
def file_format(fastq, max_entries=10):
    '''
    what format is the file?

    fastq : filename or filehandle accepted by Bio.SeqIO.parse

    returns : string
        'illumina13', 'illumina18', or 'ambiguous'

    NOTE(review): the unconditional return inside the loop means only the
    FIRST record is ever inspected, so the max_entries guard can never
    fire -- presumably the intent was to keep scanning while the verdict
    is 'ambiguous'; confirm before relying on this.
    '''
    for i, record in enumerate(SeqIO.parse(fastq, 'fastq')):
        if i > max_entries:
            raise RuntimeError("could not verify format after %d entries" % max_entries)
        # make sure we can parse the at line
        rid = parse_fastq_record_id(record)
        # check the quality line's character content
        return fastq_record_format(record)
    # reached only when the file contains no records at all
    raise RuntimeError("fell off end")
def fastq_record_format(record):
    '''
    Guess the fastq quality encoding from one record's phred scores.

    record : BioPython Seq object with letter_annotations['phred_quality']

    returns : 'illumina13', 'illumina18', 'ambiguous'
        Illumina 1.3-1.7 (scores 32-71), Illumina 1.8 (scores 1-41), or
        ambiguous when the observed range fits both.  Raises RuntimeError
        for scores outside any known encoding.
    '''
    scores = record.letter_annotations['phred_quality']
    lo = min(scores)
    hi = max(scores)
    # Out-of-range scores first: neither encoding can produce them.
    # (Equivalent to the original's third branch -- the two classification
    # branches below both require 1 <= lo and hi <= 71.)
    if lo < 1 or hi > 71:
        raise RuntimeError("quality scores don't match known encoding: min=%s max=%s" % (lo, hi))
    if 1 <= lo < 32 and hi <= 41:
        return 'illumina18'
    if 32 <= lo and 41 < hi <= 71:
        return 'illumina13'
    return 'ambiguous'
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Use quality scores to check if fastq is in Illumina 1.3-1.7 format or Illumina 1.8 format', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('fastq', help='input fastq')
    args = parser.parse_args()
    # Inspect the file and report the guessed encoding (Python 2 print
    # statements).
    with open(args.fastq) as f:
        format_guess = file_format(f)
    if format_guess == 'illumina13':
        print "Illumina 1.3-1.7 format"
    elif format_guess == 'illumina18':
        print "Illumina 1.8 format"
    elif format_guess == 'ambiguous':
        print "Could be either 1.3-1.7 or 1.8 format. Ambiguous."
    else:
        # file_format() only returns the three strings above; defensive.
        raise RuntimeError
|
almlab/SmileTrain
|
check_fastq_format.py
|
Python
|
mit
| 4,174
|
[
"Biopython"
] |
30b1baf1d470acdcebbfcd4c4658a694b39ee96d4f3738a574092d391b92a0d7
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the Pipeline class."""
from __future__ import absolute_import
import copy
import platform
import unittest
from builtins import object
from builtins import range
import mock
from nose.plugins.attrib import attr
import apache_beam as beam
from apache_beam import typehints
from apache_beam.coders import BytesCoder
from apache_beam.io import Read
from apache_beam.metrics import Metrics
from apache_beam.pipeline import Pipeline
from apache_beam.pipeline import PipelineOptions
from apache_beam.pipeline import PipelineVisitor
from apache_beam.pipeline import PTransformOverride
from apache_beam.pvalue import AsSingleton
from apache_beam.pvalue import TaggedOutput
from apache_beam.runners.dataflow.native_io.iobase import NativeSource
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms import CombineGlobally
from apache_beam.transforms import Create
from apache_beam.transforms import DoFn
from apache_beam.transforms import FlatMap
from apache_beam.transforms import Map
from apache_beam.transforms import ParDo
from apache_beam.transforms import PTransform
from apache_beam.transforms import WindowInto
from apache_beam.transforms.userstate import BagStateSpec
from apache_beam.transforms.window import SlidingWindows
from apache_beam.transforms.window import TimestampedValue
from apache_beam.utils import windowed_value
from apache_beam.utils.timestamp import MIN_TIMESTAMP
# TODO(BEAM-1555): Test is failing on the service, with FakeSource.
# from nose.plugins.attrib import attr
class FakeSource(NativeSource):
  """Fake source returning a fixed list of values and counting every
  emitted element in a Metrics counter named ('main', 'outputs')."""

  class _Reader(object):
    """Context-manager/iterator over the fixed values."""

    def __init__(self, vals):
      self._vals = vals
      self._output_counter = Metrics.counter('main', 'outputs')

    def __enter__(self):
      return self

    def __exit__(self, exception_type, exception_value, traceback):
      pass

    def __iter__(self):
      for value in self._vals:
        # one counter increment per emitted element
        self._output_counter.inc()
        yield value

  def __init__(self, vals):
    self._vals = vals

  def reader(self):
    return FakeSource._Reader(self._vals)
class FakeUnboundedSource(NativeSource):
  """Fake unbounded source. Does not work at runtime."""

  def is_bounded(self):
    return False

  def reader(self):
    return None
class DoubleParDo(beam.PTransform):
  """Composite transform that multiplies every element by two."""

  def expand(self, input):
    doubler = lambda a: a * 2
    return input | 'Inner' >> beam.Map(doubler)

  def to_runner_api_parameter(self, context):
    return self.to_runner_api_pickled(context)
class TripleParDo(beam.PTransform):
  """Composite transform that multiplies every element by three."""

  def expand(self, input):
    # The 'Inner' label deliberately matches DoubleParDo's so that
    # transform replacement does not introduce a label conflict.
    tripler = lambda a: a * 3
    return input | 'Inner' >> beam.Map(tripler)
class ToStringParDo(beam.PTransform):
  """Composite transform that stringifies every element."""

  def expand(self, input):
    # copy.copy() keeps the typehint machinery from automatically
    # inferring that the output type is str.
    stringify = lambda a: copy.copy(str(a))
    return input | 'Inner' >> beam.Map(stringify)
class PipelineTest(unittest.TestCase):
  @staticmethod
  def custom_callable(pcoll):
    """Free-function form of CustomTransform: add 1 to every element."""
    return pcoll | '+1' >> FlatMap(lambda x: [x + 1])
# Some of these tests designate a runner by name, others supply a runner.
# This variation is just to verify that both means of runner specification
# work and is not related to other aspects of the tests.
  class CustomTransform(PTransform):
    """PTransform form of custom_callable: add 1 to every element."""
    def expand(self, pcoll):
      return pcoll | '+1' >> FlatMap(lambda x: [x + 1])
  class Visitor(PipelineVisitor):
    """Pipeline visitor recording visited values and entered/left
    composite transforms, for graph-traversal assertions."""
    def __init__(self, visited):
      # `visited` is a caller-owned list; values are appended in visit order.
      self.visited = visited
      self.enter_composite = []
      self.leave_composite = []
    def visit_value(self, value, _):
      self.visited.append(value)
    def enter_composite_transform(self, transform_node):
      self.enter_composite.append(transform_node)
    def leave_composite_transform(self, transform_node):
      self.leave_composite.append(transform_node)
def test_create(self):
pipeline = TestPipeline()
pcoll = pipeline | 'label1' >> Create([1, 2, 3])
assert_that(pcoll, equal_to([1, 2, 3]))
# Test if initial value is an iterator object.
pcoll2 = pipeline | 'label2' >> Create(iter((4, 5, 6)))
pcoll3 = pcoll2 | 'do' >> FlatMap(lambda x: [x + 10])
assert_that(pcoll3, equal_to([14, 15, 16]), label='pcoll3')
pipeline.run()
  def test_flatmap_builtin(self):
    """FlatMap/Map chains, ending with FlatMap over the builtin `set`."""
    pipeline = TestPipeline()
    pcoll = pipeline | 'label1' >> Create([1, 2, 3])
    assert_that(pcoll, equal_to([1, 2, 3]))
    pcoll2 = pcoll | 'do' >> FlatMap(lambda x: [x + 10])
    assert_that(pcoll2, equal_to([11, 12, 13]), label='pcoll2')
    pcoll3 = pcoll2 | 'm1' >> Map(lambda x: [x, 12])
    assert_that(pcoll3,
                equal_to([[11, 12], [12, 12], [13, 12]]), label='pcoll3')
    # FlatMap with a builtin callable: each [x, 12] pair is flattened via
    # set(), deduplicating within (but not across) elements.
    pcoll4 = pcoll3 | 'do2' >> FlatMap(set)
    assert_that(pcoll4, equal_to([11, 12, 12, 12, 13]), label='pcoll4')
    pipeline.run()
  def test_maptuple_builtin(self):
    """MapTuple unpacks each tuple element into positional args; DoFn
    params and side inputs are resolved via defaults and kwargs."""
    pipeline = TestPipeline()
    pcoll = pipeline | Create([('e1', 'e2')])
    side1 = beam.pvalue.AsSingleton(pipeline | 'side1' >> Create(['s1']))
    side2 = beam.pvalue.AsSingleton(pipeline | 'side2' >> Create(['s2']))
    # A test function with a tuple input, an auxiliary parameter,
    # and some side inputs.
    fn = lambda e1, e2, t=DoFn.TimestampParam, s1=None, s2=None: (
        e1, e2, t, s1, s2)
    assert_that(pcoll | 'NoSides' >> beam.core.MapTuple(fn),
                equal_to([('e1', 'e2', MIN_TIMESTAMP, None, None)]),
                label='NoSidesCheck')
    assert_that(pcoll | 'StaticSides' >> beam.core.MapTuple(fn, 's1', 's2'),
                equal_to([('e1', 'e2', MIN_TIMESTAMP, 's1', 's2')]),
                label='StaticSidesCheck')
    assert_that(pcoll | 'DynamicSides' >> beam.core.MapTuple(fn, side1, side2),
                equal_to([('e1', 'e2', MIN_TIMESTAMP, 's1', 's2')]),
                label='DynamicSidesCheck')
    assert_that(pcoll | 'MixedSides' >> beam.core.MapTuple(fn, s2=side2),
                equal_to([('e1', 'e2', MIN_TIMESTAMP, None, 's2')]),
                label='MixedSidesCheck')
    pipeline.run()
  def test_flatmaptuple_builtin(self):
    """FlatMapTuple flattens the returned tuple into individual elements,
    with static and deferred (side-input) extra arguments."""
    pipeline = TestPipeline()
    pcoll = pipeline | Create([('e1', 'e2')])
    side1 = beam.pvalue.AsSingleton(pipeline | 'side1' >> Create(['s1']))
    side2 = beam.pvalue.AsSingleton(pipeline | 'side2' >> Create(['s2']))

    # A test function with a tuple input, an auxiliary parameter,
    # and some side inputs.
    fn = lambda e1, e2, t=DoFn.TimestampParam, s1=None, s2=None: (
        e1, e2, t, s1, s2)
    assert_that(pcoll | 'NoSides' >> beam.core.FlatMapTuple(fn),
                equal_to(['e1', 'e2', MIN_TIMESTAMP, None, None]),
                label='NoSidesCheck')
    assert_that(pcoll | 'StaticSides' >> beam.core.FlatMapTuple(fn, 's1', 's2'),
                equal_to(['e1', 'e2', MIN_TIMESTAMP, 's1', 's2']),
                label='StaticSidesCheck')
    assert_that(pcoll
                | 'DynamicSides' >> beam.core.FlatMapTuple(fn, side1, side2),
                equal_to(['e1', 'e2', MIN_TIMESTAMP, 's1', 's2']),
                label='DynamicSidesCheck')
    assert_that(pcoll | 'MixedSides' >> beam.core.FlatMapTuple(fn, s2=side2),
                equal_to(['e1', 'e2', MIN_TIMESTAMP, None, 's2']),
                label='MixedSidesCheck')
    pipeline.run()
def test_create_singleton_pcollection(self):
pipeline = TestPipeline()
pcoll = pipeline | 'label' >> Create([[1, 2, 3]])
assert_that(pcoll, equal_to([[1, 2, 3]]))
pipeline.run()
  # TODO(BEAM-1555): Test is failing on the service, with FakeSource.
  # @attr('ValidatesRunner')
  def test_metrics_in_fake_source(self):
    """FakeSource reports an 'outputs' counter equal to the number of
    elements it emitted."""
    pipeline = TestPipeline()
    pcoll = pipeline | Read(FakeSource([1, 2, 3, 4, 5, 6]))
    assert_that(pcoll, equal_to([1, 2, 3, 4, 5, 6]))
    res = pipeline.run()
    metric_results = res.metrics().query()
    outputs_counter = metric_results['counters'][0]
    self.assertEqual(outputs_counter.key.step, 'Read')
    self.assertEqual(outputs_counter.key.metric.name, 'outputs')
    self.assertEqual(outputs_counter.committed, 6)
def test_fake_read(self):
pipeline = TestPipeline()
pcoll = pipeline | 'read' >> Read(FakeSource([1, 2, 3]))
assert_that(pcoll, equal_to([1, 2, 3]))
pipeline.run()
  def test_visit_entire_graph(self):
    """Pipeline.visit reaches every PCollection and enters/leaves every
    composite transform exactly once."""
    pipeline = Pipeline()
    pcoll1 = pipeline | 'pcoll' >> beam.Impulse()
    pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])
    pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])
    pcoll4 = pcoll2 | 'do3' >> FlatMap(lambda x: [x + 1])
    transform = PipelineTest.CustomTransform()
    pcoll5 = pcoll4 | transform

    visitor = PipelineTest.Visitor(visited=[])
    pipeline.visit(visitor)
    self.assertEqual(set([pcoll1, pcoll2, pcoll3, pcoll4, pcoll5]),
                     set(visitor.visited))
    # Every entered composite must also be left.
    self.assertEqual(set(visitor.enter_composite),
                     set(visitor.leave_composite))
    # Two composites: the pipeline root and the custom transform; the custom
    # transform is entered last and left first (nested traversal).
    self.assertEqual(2, len(visitor.enter_composite))
    self.assertEqual(visitor.enter_composite[1].transform, transform)
    self.assertEqual(visitor.leave_composite[0].transform, transform)
  def test_apply_custom_transform(self):
    """A user-defined composite PTransform can be applied with `|`."""
    pipeline = TestPipeline()
    pcoll = pipeline | 'pcoll' >> Create([1, 2, 3])
    result = pcoll | PipelineTest.CustomTransform()
    assert_that(result, equal_to([2, 3, 4]))
    pipeline.run()
  def test_reuse_custom_transform_instance(self):
    """Re-applying the same transform instance without a new label raises
    a RuntimeError about the duplicate label."""
    pipeline = Pipeline()
    pcoll1 = pipeline | 'pcoll1' >> Create([1, 2, 3])
    pcoll2 = pipeline | 'pcoll2' >> Create([4, 5, 6])
    transform = PipelineTest.CustomTransform()
    pcoll1 | transform
    with self.assertRaises(RuntimeError) as cm:
      pipeline.apply(transform, pcoll2)
    self.assertEqual(
        cm.exception.args[0],
        'A transform with label "CustomTransform" already exists in the '
        'pipeline. To apply a transform with a specified label write '
        'pvalue | "label" >> transform')
  def test_reuse_cloned_custom_transform_instance(self):
    """The same transform instance may be reused when given a new label."""
    pipeline = TestPipeline()
    pcoll1 = pipeline | 'pc1' >> Create([1, 2, 3])
    pcoll2 = pipeline | 'pc2' >> Create([4, 5, 6])
    transform = PipelineTest.CustomTransform()
    result1 = pcoll1 | transform
    result2 = pcoll2 | 'new_label' >> transform
    assert_that(result1, equal_to([2, 3, 4]), label='r1')
    assert_that(result2, equal_to([5, 6, 7]), label='r2')
    pipeline.run()
  def test_transform_no_super_init(self):
    """A PTransform subclass that skips super().__init__ still works."""
    class AddSuffix(PTransform):
      def __init__(self, suffix):
        # No call to super(...).__init__
        self.suffix = suffix

      def expand(self, pcoll):
        return pcoll | Map(lambda x: x + self.suffix)

    # Applying a transform to a plain list runs it eagerly.
    self.assertEqual(
        ['a-x', 'b-x', 'c-x'],
        sorted(['a', 'b', 'c'] | 'AddSuffix' >> AddSuffix('-x')))
  @unittest.skip("Fails on some platforms with new urllib3.")
  def test_memory_usage(self):
    """Peak memory of a long chain of Maps stays bounded (i.e. it is not
    proportional to the number of Map stages)."""
    try:
      import resource
    except ImportError:
      # Skip the test if resource module is not available (e.g. non-Unix os).
      self.skipTest('resource module not available.')
    if platform.mac_ver()[0]:
      # Skip the test on macos, depending on version it returns ru_maxrss in
      # different units.
      self.skipTest('ru_maxrss is not in standard units.')

    def get_memory_usage_in_bytes():
      # ru_maxrss is reported in KiB on Linux; convert to bytes.
      return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * (2 ** 10)

    def check_memory(value, memory_threshold):
      # Raises when current RSS exceeds the precomputed threshold.
      memory_usage = get_memory_usage_in_bytes()
      if memory_usage > memory_threshold:
        raise RuntimeError(
            'High memory usage: %d > %d' % (memory_usage, memory_threshold))
      return value

    len_elements = 1000000
    num_elements = 10
    num_maps = 100

    # TODO(robertwb): reduce memory usage of FnApiRunner so that this test
    # passes.
    pipeline = TestPipeline(runner='BundleBasedDirectRunner')

    # Consumed memory should not be proportional to the number of maps.
    memory_threshold = (
        get_memory_usage_in_bytes() + (5 * len_elements * num_elements))
    # Plus small additional slack for memory fluctuations during the test.
    memory_threshold += 10 * (2 ** 20)

    biglist = pipeline | 'oom:create' >> Create(
        ['x' * len_elements] * num_elements)
    for i in range(num_maps):
      biglist = biglist | ('oom:addone-%d' % i) >> Map(lambda x: x + 'y')
    result = biglist | 'oom:check' >> Map(check_memory, memory_threshold)
    assert_that(result, equal_to(
        ['x' * len_elements + 'y' * num_maps] * num_elements))
    pipeline.run()
def test_aggregator_empty_input(self):
actual = [] | CombineGlobally(max).without_defaults()
self.assertEqual(actual, [])
  def test_pipeline_as_context(self):
    """An exception raised inside `with Pipeline()` propagates to the caller."""
    def raise_exception(exn):
      raise exn

    with self.assertRaises(ValueError):
      with Pipeline() as p:
        # pylint: disable=expression-not-assigned
        p | Create([ValueError('msg')]) | Map(raise_exception)
# TODO(BEAM-1894).
# def test_eager_pipeline(self):
# p = Pipeline('EagerRunner')
# self.assertEqual([1, 4, 9], p | Create([1, 2, 3]) | Map(lambda x: x*x))
  @mock.patch(
      'apache_beam.runners.direct.direct_runner._get_transform_overrides')
  def test_ptransform_overrides(self, file_system_override_mock):
    """A runner-provided PTransformOverride replaces the matched transform
    before execution (DoubleParDo is swapped for TripleParDo)."""
    class MyParDoOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return isinstance(applied_ptransform.transform, DoubleParDo)

      def get_replacement_transform(self, ptransform):
        if isinstance(ptransform, DoubleParDo):
          return TripleParDo()
        raise ValueError('Unsupported type of transform: %r' % ptransform)

    def get_overrides(unused_pipeline_options):
      return [MyParDoOverride()]

    # NOTE(review): the mock argument name suggests a file-system override
    # but it actually patches _get_transform_overrides.
    file_system_override_mock.side_effect = get_overrides

    # Specify DirectRunner as it's the one patched above.
    with Pipeline(runner='BundleBasedDirectRunner') as p:
      pcoll = p | beam.Create([1, 2, 3]) | 'Multiply' >> DoubleParDo()
      # Elements are tripled, not doubled, proving the override was applied.
      assert_that(pcoll, equal_to([3, 6, 9]))
  def test_ptransform_override_type_hints(self):
    """Replacement transforms propagate their declared input type hints to
    the replaced producer's inputs (Any when none are declared)."""
    class NoTypeHintOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return isinstance(applied_ptransform.transform, DoubleParDo)

      def get_replacement_transform(self, ptransform):
        return ToStringParDo()

    class WithTypeHintOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return isinstance(applied_ptransform.transform, DoubleParDo)

      def get_replacement_transform(self, ptransform):
        return (ToStringParDo()
                .with_input_types(int)
                .with_output_types(str))

    for override, expected_type in [(NoTypeHintOverride(), typehints.Any),
                                    (WithTypeHintOverride(), str)]:
      p = TestPipeline()
      pcoll = (p
               | beam.Create([1, 2, 3])
               | 'Operate' >> DoubleParDo()
               | 'NoOp' >> beam.Map(lambda x: x))

      p.replace_all([override])
      self.assertEqual(pcoll.producer.inputs[0].element_type, expected_type)
  def test_ptransform_override_multiple_outputs(self):
    """Overriding a multi-output ParDo with a composite re-wires every
    tagged output PCollection and removes the replaced ones from the graph."""
    class MultiOutputComposite(PTransform):
      def __init__(self):
        self.output_tags = set()

      def expand(self, pcoll):
        def mux_input(x):
          # Doubles before routing, so downstream values differ from the
          # transform being replaced (which does not multiply).
          x = x * 2
          if isinstance(x, int):
            yield TaggedOutput('numbers', x)
          else:
            yield TaggedOutput('letters', x)

        multi = pcoll | 'MyReplacement' >> beam.ParDo(mux_input).with_outputs()
        letters = multi.letters | 'LettersComposite' >> beam.Map(lambda x: x*3)
        numbers = multi.numbers | 'NumbersComposite' >> beam.Map(lambda x: x*5)
        return {
            'letters': letters,
            'numbers': numbers,
        }

    class MultiOutputOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return applied_ptransform.full_label == 'MyMultiOutput'

      def get_replacement_transform(self, ptransform):
        return MultiOutputComposite()

    def mux_input(x):
      if isinstance(x, int):
        yield TaggedOutput('numbers', x)
      else:
        yield TaggedOutput('letters', x)

    p = TestPipeline()
    multi = (p
             | beam.Create([1, 2, 3, 'a', 'b', 'c'])
             | 'MyMultiOutput' >> beam.ParDo(mux_input).with_outputs())
    letters = multi.letters | 'MyLetters' >> beam.Map(lambda x: x)
    numbers = multi.numbers | 'MyNumbers' >> beam.Map(lambda x: x)

    # Assert that the PCollection replacement worked correctly and that elements
    # are flowing through. The replacement transform first multiples by 2 then
    # the leaf nodes inside the composite multiply by an additional 3 and 5. Use
    # prime numbers to ensure that each transform is getting executed once.
    assert_that(letters,
                equal_to(['a'*2*3, 'b'*2*3, 'c'*2*3]),
                label='assert letters')
    assert_that(numbers,
                equal_to([1*2*5, 2*2*5, 3*2*5]),
                label='assert numbers')

    # Do the replacement and run the element assertions.
    p.replace_all([MultiOutputOverride()])
    p.run()

    # The following checks the graph to make sure the replacement occurred.
    visitor = PipelineTest.Visitor(visited=[])
    p.visit(visitor)
    pcollections = visitor.visited
    composites = visitor.enter_composite

    # Assert the replacement is in the composite list and retrieve the
    # AppliedPTransform.
    self.assertIn(MultiOutputComposite,
                  [t.transform.__class__ for t in composites])
    multi_output_composite = list(
        filter(lambda t: t.transform.__class__ == MultiOutputComposite,
               composites))[0]

    # Assert that all of the replacement PCollections are in the graph.
    for output in multi_output_composite.outputs.values():
      self.assertIn(output, pcollections)

    # Assert that all of the "old"/replaced PCollections are not in the graph.
    self.assertNotIn(multi[None], visitor.visited)
    self.assertNotIn(multi.letters, visitor.visited)
    self.assertNotIn(multi.numbers, visitor.visited)
  def test_kv_ptransform_honor_type_hints(self):
    """Output type defaults to Any when inference fails, but an explicit
    with_output_types on a stateful ParDo is honored."""
    # The return type of this DoFn cannot be inferred by the default
    # Beam type inference
    class StatefulDoFn(DoFn):
      BYTES_STATE = BagStateSpec('bytes', BytesCoder())

      def return_recursive(self, count):
        if count == 0:
          return ["some string"]
        else:
          # NOTE(review): recursive result is discarded (no `return`), so
          # this yields None for count > 0 — presumably intentional to
          # defeat type inference; confirm before changing.
          self.return_recursive(count-1)

      def process(self, element, counter=DoFn.StateParam(BYTES_STATE)):
        return self.return_recursive(1)

    p = TestPipeline()
    pcoll = (p
             | beam.Create([(1, 1), (2, 2), (3, 3)])
             | beam.GroupByKey()
             | beam.ParDo(StatefulDoFn()))
    p.run()
    self.assertEqual(pcoll.element_type, typehints.Any)

    p = TestPipeline()
    pcoll = (p
             | beam.Create([(1, 1), (2, 2), (3, 3)])
             | beam.GroupByKey()
             | beam.ParDo(StatefulDoFn()).with_output_types(str))
    p.run()
    self.assertEqual(pcoll.element_type, str)
def test_track_pcoll_unbounded(self):
pipeline = TestPipeline()
pcoll1 = pipeline | 'read' >> Read(FakeUnboundedSource())
pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])
pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])
self.assertIs(pcoll1.is_bounded, False)
self.assertIs(pcoll1.is_bounded, False)
self.assertIs(pcoll3.is_bounded, False)
def test_track_pcoll_bounded(self):
pipeline = TestPipeline()
pcoll1 = pipeline | 'label1' >> Create([1, 2, 3])
pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])
pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])
self.assertIs(pcoll1.is_bounded, True)
self.assertIs(pcoll2.is_bounded, True)
self.assertIs(pcoll3.is_bounded, True)
  def test_track_pcoll_bounded_flatten(self):
    """Flattening two bounded PCollections yields a bounded PCollection."""
    pipeline = TestPipeline()
    pcoll1_a = pipeline | 'label_a' >> Create([1, 2, 3])
    pcoll2_a = pcoll1_a | 'do_a' >> FlatMap(lambda x: [x + 1])

    pcoll1_b = pipeline | 'label_b' >> Create([1, 2, 3])
    pcoll2_b = pcoll1_b | 'do_b' >> FlatMap(lambda x: [x + 1])

    merged = (pcoll2_a, pcoll2_b) | beam.Flatten()

    self.assertIs(pcoll1_a.is_bounded, True)
    self.assertIs(pcoll2_a.is_bounded, True)
    self.assertIs(pcoll1_b.is_bounded, True)
    self.assertIs(pcoll2_b.is_bounded, True)
    self.assertIs(merged.is_bounded, True)
  def test_track_pcoll_unbounded_flatten(self):
    """Flattening a bounded with an unbounded PCollection yields an
    unbounded result (unboundedness dominates)."""
    pipeline = TestPipeline()
    pcoll1_bounded = pipeline | 'label1' >> Create([1, 2, 3])
    pcoll2_bounded = pcoll1_bounded | 'do1' >> FlatMap(lambda x: [x + 1])

    pcoll1_unbounded = pipeline | 'read' >> Read(FakeUnboundedSource())
    pcoll2_unbounded = pcoll1_unbounded | 'do2' >> FlatMap(lambda x: [x + 1])

    merged = (pcoll2_bounded, pcoll2_unbounded) | beam.Flatten()

    self.assertIs(pcoll1_bounded.is_bounded, True)
    self.assertIs(pcoll2_bounded.is_bounded, True)
    self.assertIs(pcoll1_unbounded.is_bounded, False)
    self.assertIs(pcoll2_unbounded.is_bounded, False)
    self.assertIs(merged.is_bounded, False)
class DoFnTest(unittest.TestCase):
  """Tests for DoFn parameter binding (element, side inputs, window,
  timestamp, pane info, and default-argument handling)."""

  def test_element(self):
    """process() receives each element as its positional argument."""
    class TestDoFn(DoFn):
      def process(self, element):
        yield element + 10

    pipeline = TestPipeline()
    pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(TestDoFn())
    assert_that(pcoll, equal_to([11, 12]))
    pipeline.run()

  def test_side_input_no_tag(self):
    """Side inputs bind positionally and by keyword without explicit tags."""
    class TestDoFn(DoFn):
      def process(self, element, prefix, suffix):
        return ['%s-%s-%s' % (prefix, element, suffix)]

    pipeline = TestPipeline()
    words_list = ['aa', 'bb', 'cc']
    words = pipeline | 'SomeWords' >> Create(words_list)
    prefix = 'zyx'
    suffix = pipeline | 'SomeString' >> Create(['xyz'])  # side in
    result = words | 'DecorateWordsDoFnNoTag' >> ParDo(
        TestDoFn(), prefix, suffix=AsSingleton(suffix))
    assert_that(result, equal_to(['zyx-%s-xyz' % x for x in words_list]))
    pipeline.run()

  def test_side_input_tagged(self):
    """A side input declared with DoFn.SideInputParam as default binds the
    supplied AsSingleton value."""
    class TestDoFn(DoFn):
      def process(self, element, prefix, suffix=DoFn.SideInputParam):
        return ['%s-%s-%s' % (prefix, element, suffix)]

    pipeline = TestPipeline()
    words_list = ['aa', 'bb', 'cc']
    words = pipeline | 'SomeWords' >> Create(words_list)
    prefix = 'zyx'
    suffix = pipeline | 'SomeString' >> Create(['xyz'])  # side in
    # NOTE(review): label duplicates the one in test_side_input_no_tag —
    # harmless across separate pipelines, presumably copy/paste.
    result = words | 'DecorateWordsDoFnNoTag' >> ParDo(
        TestDoFn(), prefix, suffix=AsSingleton(suffix))
    assert_that(result, equal_to(['zyx-%s-xyz' % x for x in words_list]))
    pipeline.run()

  @attr('ValidatesRunner')
  def test_element_param(self):
    """DoFn.ElementParam as a lambda default binds the current element."""
    pipeline = TestPipeline()
    input = [1, 2]
    pcoll = (pipeline
             | 'Create' >> Create(input)
             | 'Ele param' >> Map(lambda element=DoFn.ElementParam: element))
    assert_that(pcoll, equal_to(input))
    pipeline.run()

  @attr('ValidatesRunner')
  def test_key_param(self):
    """DoFn.KeyParam binds the key of a KV element."""
    pipeline = TestPipeline()
    pcoll = (pipeline
             | 'Create' >> Create([('a', 1), ('b', 2)])
             | 'Key param' >> Map(lambda _, key=DoFn.KeyParam: key))
    assert_that(pcoll, equal_to(['a', 'b']))
    pipeline.run()

  def test_window_param(self):
    """DoFn.WindowParam binds the element's window; sliding windows place
    each element in multiple windows."""
    class TestDoFn(DoFn):
      def process(self, element, window=DoFn.WindowParam):
        yield (element, (float(window.start), float(window.end)))

    pipeline = TestPipeline()
    pcoll = (pipeline
             | Create([1, 7])
             | Map(lambda x: TimestampedValue(x, x))
             | WindowInto(windowfn=SlidingWindows(10, 5))
             | ParDo(TestDoFn()))
    assert_that(pcoll, equal_to([(1, (-5, 5)), (1, (0, 10)),
                                 (7, (0, 10)), (7, (5, 15))]))
    pcoll2 = pcoll | 'Again' >> ParDo(TestDoFn())
    assert_that(
        pcoll2,
        equal_to([
            ((1, (-5, 5)), (-5, 5)), ((1, (0, 10)), (0, 10)),
            ((7, (0, 10)), (0, 10)), ((7, (5, 15)), (5, 15))]),
        label='doubled windows')
    pipeline.run()

  def test_timestamp_param(self):
    """DoFn.TimestampParam defaults to MIN_TIMESTAMP for untimestamped input."""
    class TestDoFn(DoFn):
      def process(self, element, timestamp=DoFn.TimestampParam):
        yield timestamp

    pipeline = TestPipeline()
    pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(TestDoFn())
    assert_that(pcoll, equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP]))
    pipeline.run()

  def test_timestamp_param_map(self):
    """DoFn.TimestampParam also works as a Map lambda default."""
    with TestPipeline() as p:
      assert_that(
          p | Create([1, 2]) | beam.Map(lambda _, t=DoFn.TimestampParam: t),
          equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP]))

  def test_pane_info_param(self):
    """DoFn.PaneInfoParam is UNKNOWN before grouping and a concrete
    first/last ON_TIME pane after GroupByKey."""
    with TestPipeline() as p:
      pc = p | Create([(None, None)])
      assert_that(
          pc | beam.Map(lambda _, p=DoFn.PaneInfoParam: p),
          equal_to([windowed_value.PANE_INFO_UNKNOWN]),
          label='CheckUngrouped')
      assert_that(
          pc | beam.GroupByKey() | beam.Map(lambda _, p=DoFn.PaneInfoParam: p),
          equal_to([windowed_value.PaneInfo(
              is_first=True,
              is_last=True,
              timing=windowed_value.PaneInfoTiming.ON_TIME,
              index=0,
              nonspeculative_index=0)]),
          label='CheckGrouped')

  def test_incomparable_default(self):
    """Non-DoFn-param defaults are never compared/hashed by the framework."""
    class IncomparableType(object):

      def __eq__(self, other):
        raise RuntimeError()

      def __ne__(self, other):
        raise RuntimeError()

      def __hash__(self):
        raise RuntimeError()

    # Ensure that we don't use default values in a context where they must be
    # comparable (see BEAM-8301).
    pipeline = TestPipeline()
    pcoll = (pipeline
             | beam.Create([None])
             | Map(lambda e, x=IncomparableType(): (e, type(x).__name__)))
    assert_that(pcoll, equal_to([(None, 'IncomparableType')]))
    pipeline.run()
class Bacon(PipelineOptions):
  """Test options subclass contributing an integer --slices flag."""

  @classmethod
  def _add_argparse_args(cls, parser):
    parser.add_argument('--slices', type=int)
class Eggs(PipelineOptions):
  """Test options subclass contributing a --style flag with a default."""

  @classmethod
  def _add_argparse_args(cls, parser):
    parser.add_argument('--style', default='scrambled')
class Breakfast(Bacon, Eggs):
  """Combined options: inherits both --slices and --style."""
  pass
class PipelineOptionsTest(unittest.TestCase):
  """Tests for PipelineOptions flag parsing, keyword overrides, view_as,
  defaults, and introspection."""

  def test_flag_parsing(self):
    """Known flags parse; unknown flags are ignored."""
    options = Breakfast(['--slices=3', '--style=sunny side up', '--ignored'])
    self.assertEqual(3, options.slices)
    self.assertEqual('sunny side up', options.style)

  def test_keyword_parsing(self):
    """Keyword arguments take precedence over parsed flags."""
    options = Breakfast(
        ['--slices=3', '--style=sunny side up', '--ignored'],
        slices=10)
    self.assertEqual(10, options.slices)
    self.assertEqual('sunny side up', options.style)

  def test_attribute_setting(self):
    """Option attributes can be set after construction."""
    options = Breakfast(slices=10)
    self.assertEqual(10, options.slices)
    options.slices = 20
    self.assertEqual(20, options.slices)

  def test_view_as(self):
    """view_as shares underlying values across views; an attribute is only
    visible on views whose class declares it."""
    generic_options = PipelineOptions(['--slices=3'])
    self.assertEqual(3, generic_options.view_as(Bacon).slices)
    self.assertEqual(3, generic_options.view_as(Breakfast).slices)

    generic_options.view_as(Breakfast).slices = 10
    self.assertEqual(10, generic_options.view_as(Bacon).slices)

    with self.assertRaises(AttributeError):
      generic_options.slices  # pylint: disable=pointless-statement

    with self.assertRaises(AttributeError):
      generic_options.view_as(Eggs).slices  # pylint: disable=expression-not-assigned

  def test_defaults(self):
    """Unspecified flags fall back to their declared defaults."""
    options = Breakfast(['--slices=3'])
    self.assertEqual(3, options.slices)
    self.assertEqual('scrambled', options.style)

  def test_dir(self):
    """dir() exposes only the public API plus the view's declared flags."""
    options = Breakfast()
    self.assertEqual(
        set(['from_dictionary', 'get_all_options', 'slices', 'style',
             'view_as', 'display_data']),
        set([attr for attr in dir(options) if not attr.startswith('_') and
             attr != 'next']))
    self.assertEqual(
        set(['from_dictionary', 'get_all_options', 'style', 'view_as',
             'display_data']),
        set([attr for attr in dir(options.view_as(Eggs))
             if not attr.startswith('_') and attr != 'next']))
class RunnerApiTest(unittest.TestCase):
  """Tests for round-tripping a pipeline through the runner API protos."""

  def test_parent_pointer(self):
    """Parent pointers on AppliedPTransforms survive to/from-proto round-trip."""
    class MyPTransform(beam.PTransform):

      def expand(self, p):
        self.p = p
        return p | beam.Create([None])

    p = beam.Pipeline()
    p | MyPTransform()  # pylint: disable=expression-not-assigned
    p = Pipeline.from_runner_api(
        Pipeline.to_runner_api(p, use_fake_coders=True), None, None)
    self.assertIsNotNone(p.transforms_stack[0].parts[0].parent)
    self.assertEqual(p.transforms_stack[0].parts[0].parent,
                     p.transforms_stack[0])
# Run all test cases in this module when executed directly.
if __name__ == '__main__':
  unittest.main()
|
RyanSkraba/beam
|
sdks/python/apache_beam/pipeline_test.py
|
Python
|
apache-2.0
| 29,620
|
[
"VisIt"
] |
f3080d5ecb73aa2400a813e037e2152139523378e1bb3eda6c2b2efad47eb2c0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
# URL configuration: static template pages, admin, app routes, and auth,
# with media files served via `static()` (effective only when DEBUG).
urlpatterns = [
    # Template-only pages.
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),

    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, include(admin.site.urls)),

    # User management
    url(r'^users/', include("crud_fusion.users.urls", namespace="users")),
    url(r'^customers/', include("crud_fusion.customers.urls", namespace="customers")),
    url(r'^accounts/', include('allauth.urls')),

] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request),
        url(r'^403/$', default_views.permission_denied),
        url(r'^404/$', default_views.page_not_found),
        url(r'^500/$', default_views.server_error),
    ]
|
BCriswell/crud-fusion
|
config/urls.py
|
Python
|
bsd-3-clause
| 1,315
|
[
"VisIt"
] |
d21abd9c4db038d94bc8b56bc4487b4e0e5462556d9ba9b46958f67eae7f8cbb
|
#!/usr/bin/env python
'''
Optimize molecular geometry within the environment of QM/MM charges.
'''
import numpy
from pyscf import gto, scf, cc, qmmm
mol = gto.M(atom='''
C 1.1879 -0.3829 0.0000
C 0.0000 0.5526 0.0000
O -1.1867 -0.2472 0.0000
H -1.9237 0.3850 0.0000
H 2.0985 0.2306 0.0000
H 1.1184 -1.0093 0.8869
H 1.1184 -1.0093 -0.8869
H -0.0227 1.1812 0.8852
H -0.0227 1.1812 -0.8852
''',
basis='3-21g')
numpy.random.seed(1)
coords = numpy.random.random((5,3)) * 10
charges = (numpy.arange(5) + 1.) * -.001
mf = qmmm.mm_charge(scf.RHF(mol), coords, charges)
#mf.verbose=4
#mf.kernel()
mol1 = mf.Gradients().optimizer(solver='berny').kernel()
# or
#from pyscf.geomopt import berny_solver
#mol1 = berny_solver.optimize(mf)
mycc = cc.CCSD(mf)
mol1 = mycc.Gradients().optimizer().kernel()
# or
#from pyscf.geomopt import geometric_solver
#mol1 = geometric_solver.optimize(mycc)
|
gkc1000/pyscf
|
examples/geomopt/10-with_qmmm.py
|
Python
|
apache-2.0
| 970
|
[
"PySCF"
] |
2e9bce58d5a33a3e62d44c1a06e19db84dda3e3225baf07a6fa3b7d47401fc18
|
"""
==========================================
Statistical functions (:mod:`scipy.stats`)
==========================================
.. module:: scipy.stats
This module contains a large number of probability distributions as
well as a growing library of statistical functions.
Each included distribution is an instance of the class rv_continuous:
For each given name the following methods are available:
.. autosummary::
:toctree: generated/
rv_continuous
rv_continuous.rvs
rv_continuous.pdf
rv_continuous.logpdf
rv_continuous.cdf
rv_continuous.logcdf
rv_continuous.sf
rv_continuous.logsf
rv_continuous.ppf
rv_continuous.isf
rv_continuous.moment
rv_continuous.stats
rv_continuous.entropy
rv_continuous.fit
rv_continuous.expect
rv_continuous.median
rv_continuous.mean
rv_continuous.var
rv_continuous.std
rv_continuous.interval
Calling the instance as a function returns a frozen pdf whose shape,
location, and scale parameters are fixed.
Similarly, each discrete distribution is an instance of the class
rv_discrete:
.. autosummary::
:toctree: generated/
rv_discrete
rv_discrete.rvs
rv_discrete.pmf
rv_discrete.logpmf
rv_discrete.cdf
rv_discrete.logcdf
rv_discrete.sf
rv_discrete.logsf
rv_discrete.ppf
rv_discrete.isf
rv_discrete.stats
rv_discrete.moment
rv_discrete.entropy
rv_discrete.expect
rv_discrete.median
rv_discrete.mean
rv_discrete.var
rv_discrete.std
rv_discrete.interval
Continuous distributions
========================
.. autosummary::
:toctree: generated/
norm -- Normal (Gaussian)
alpha -- Alpha
anglit -- Anglit
arcsine -- Arcsine
beta -- Beta
betaprime -- Beta Prime
bradford -- Bradford
burr -- Burr
cauchy -- Cauchy
chi -- Chi
chi2 -- Chi-squared
cosine -- Cosine
dgamma -- Double Gamma
dweibull -- Double Weibull
erlang -- Erlang
expon -- Exponential
exponweib -- Exponentiated Weibull
exponpow -- Exponential Power
f                 -- F (Snedecor F)
fatiguelife       -- Fatigue Life (Birnbaum-Saunders)
fisk -- Fisk
foldcauchy -- Folded Cauchy
foldnorm -- Folded Normal
frechet_r -- Frechet Right Sided, Extreme Value Type II (Extreme LB) or weibull_min
frechet_l -- Frechet Left Sided, Weibull_max
genlogistic -- Generalized Logistic
genpareto -- Generalized Pareto
genexpon -- Generalized Exponential
genextreme -- Generalized Extreme Value
gausshyper -- Gauss Hypergeometric
gamma -- Gamma
gengamma -- Generalized gamma
genhalflogistic -- Generalized Half Logistic
gilbrat -- Gilbrat
gompertz -- Gompertz (Truncated Gumbel)
gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I
gumbel_l -- Left Sided Gumbel, etc.
halfcauchy -- Half Cauchy
halflogistic -- Half Logistic
halfnorm -- Half Normal
hypsecant -- Hyperbolic Secant
invgamma -- Inverse Gamma
invgauss -- Inverse Gaussian
invweibull -- Inverse Weibull
johnsonsb -- Johnson SB
johnsonsu -- Johnson SU
ksone -- Kolmogorov-Smirnov one-sided (no stats)
kstwobign -- Kolmogorov-Smirnov two-sided test for Large N (no stats)
laplace -- Laplace
logistic -- Logistic
loggamma -- Log-Gamma
loglaplace -- Log-Laplace (Log Double Exponential)
lognorm -- Log-Normal
lomax -- Lomax (Pareto of the second kind)
maxwell -- Maxwell
mielke -- Mielke's Beta-Kappa
nakagami -- Nakagami
ncx2 -- Non-central chi-squared
ncf -- Non-central F
nct -- Non-central Student's T
pareto -- Pareto
powerlaw -- Power-function
powerlognorm -- Power log normal
powernorm -- Power normal
rdist -- R-distribution
reciprocal -- Reciprocal
rayleigh -- Rayleigh
rice -- Rice
recipinvgauss -- Reciprocal Inverse Gaussian
semicircular -- Semicircular
t -- Student's T
triang -- Triangular
truncexpon -- Truncated Exponential
truncnorm -- Truncated Normal
tukeylambda -- Tukey-Lambda
uniform -- Uniform
vonmises -- Von-Mises (Circular)
wald -- Wald
weibull_min -- Minimum Weibull (see Frechet)
weibull_max -- Maximum Weibull (see Frechet)
wrapcauchy -- Wrapped Cauchy
Discrete distributions
======================
.. autosummary::
:toctree: generated/
binom -- Binomial
bernoulli -- Bernoulli
nbinom -- Negative Binomial
geom -- Geometric
hypergeom -- Hypergeometric
logser -- Logarithmic (Log-Series, Series)
poisson -- Poisson
planck -- Planck (Discrete Exponential)
boltzmann -- Boltzmann (Truncated Discrete Exponential)
randint -- Discrete Uniform
zipf -- Zipf
dlaplace -- Discrete Laplacian
Statistical functions
=====================
Several of these functions have a similar version in scipy.stats.mstats
which work for masked arrays.
.. autosummary::
:toctree: generated/
gmean -- Geometric mean
hmean -- Harmonic mean
mean -- Arithmetic mean
cmedian -- Computed median
median -- Median
mode -- Modal value
tmean -- Truncated arithmetic mean
tvar -- Truncated variance
tmin _
tmax _
tstd _
tsem _
moment -- Central moment
variation -- Coefficient of variation
skew -- Skewness
kurtosis -- Fisher or Pearson kurtosis
describe -- Descriptive statistics
skewtest _
kurtosistest _
normaltest _
.. autosummary::
:toctree: generated/
itemfreq _
scoreatpercentile _
percentileofscore _
histogram2 _
histogram _
cumfreq _
relfreq _
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
bayes_mvs
sem
zmap
zscore
.. autosummary::
:toctree: generated/
threshold
trimboth
trim1
.. autosummary::
:toctree: generated/
f_oneway
pearsonr
spearmanr
pointbiserialr
kendalltau
linregress
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_rel
kstest
chisquare
ks_2samp
mannwhitneyu
tiecorrect
ranksums
wilcoxon
kruskal
friedmanchisquare
.. autosummary::
:toctree: generated/
ansari
bartlett
levene
shapiro
anderson
binom_test
fligner
mood
oneway
Contingency table functions
===========================
.. autosummary::
:toctree: generated/
fisher_exact
chi2_contingency
contingency.expected_freq
contingency.margins
General linear model
====================
.. autosummary::
:toctree: generated/
glm
Plot-tests
==========
.. autosummary::
:toctree: generated/
probplot
ppcc_max
ppcc_plot
Masked statistics functions
===========================
.. toctree::
stats.mstats
Univariate and multivariate kernel density estimation (:mod:`scipy.stats.kde`)
==============================================================================
.. autosummary::
:toctree: generated/
gaussian_kde
For many more stat related functions install the software R and the
interface package rpy.
"""
# Python 2 style implicit relative imports pulling the stats submodules
# into the scipy.stats namespace.
from stats import *
from distributions import *
from rv import *
from morestats import *
from kde import gaussian_kde
import mstats
from contingency import chi2_contingency

#remove vonmises_cython from __all__, I don't know why it is included
# NOTE: on Python 2, filter() returns a list, as __all__ requires.
__all__ = filter(lambda s:not (s.startswith('_') or s.endswith('cython')),dir())

# Expose numpy's test runner as scipy.stats.test().
from numpy.testing import Tester
test = Tester().test
|
ygenc/onlineLDA
|
onlineldavb_new/build/scipy/scipy/stats/__init__.py
|
Python
|
gpl-3.0
| 8,519
|
[
"Gaussian"
] |
0ce6a304441bf0f626c7883e53947b0db1cd5762641287d49df5b99a6a27acd5
|
"""
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .utils.multiclass import check_classification_targets
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
    """Estimate covariance matrix (using optional shrinkage).

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input data.

    shrinkage : string or float, optional
        Shrinkage parameter, possible values:
          - None or 'empirical': no shrinkage (default).
          - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
          - float between 0 and 1: fixed shrinkage parameter.

    Returns
    -------
    s : array, shape (n_features, n_features)
        Estimated covariance matrix.

    Raises
    ------
    ValueError
        If `shrinkage` is an unknown string or a number outside [0, 1].
    TypeError
        If `shrinkage` is neither a string nor a number.
    """
    shrinkage = "empirical" if shrinkage is None else shrinkage
    if isinstance(shrinkage, string_types):
        if shrinkage == 'auto':
            sc = StandardScaler()  # standardize features
            X = sc.fit_transform(X)
            # Ledoit-Wolf shrinkage is computed on standardized data, then the
            # result is rescaled back to the original feature variances.
            s = ledoit_wolf(X)[0]
            s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]  # rescale
        elif shrinkage == 'empirical':
            s = empirical_covariance(X)
        else:
            raise ValueError('unknown shrinkage parameter')
    # Idiomatic tuple isinstance; accepts both ints and floats.
    elif isinstance(shrinkage, (int, float)):
        if shrinkage < 0 or shrinkage > 1:
            raise ValueError('shrinkage parameter must be between 0 and 1')
        s = shrunk_covariance(empirical_covariance(X), shrinkage)
    else:
        # Fixed misleading message: floats (not just ints/strings) are the
        # documented numeric type for this parameter.
        raise TypeError('shrinkage must be a float or a string')
    return s
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_features,)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
    """Compute class covariance matrix.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input data.

    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values.

    priors : array-like, shape (n_classes,)
        Class priors.

    shrinkage : string or float, optional
        Shrinkage parameter, possible values:
          - None: no shrinkage (default).
          - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
          - float between 0 and 1: fixed shrinkage parameter.

    Returns
    -------
    cov : array-like, shape (n_features, n_features)
        Class covariance matrix.
    """
    classes = np.unique(y)
    covs = []
    # Per-class covariance (via _cov, honoring shrinkage), then a weighted
    # average across classes using the priors as weights.
    for group in classes:
        Xg = X[y == group, :]
        covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
    return np.average(covs, axis=0, weights=priors)
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
                                 TransformerMixin):
    """Linear Discriminant Analysis
    A classifier with a linear decision boundary, generated by fitting class
    conditional densities to the data and using Bayes' rule.
    The model fits a Gaussian density to each class, assuming that all classes
    share the same covariance matrix.
    The fitted model can also be used to reduce the dimensionality of the input
    by projecting it to the most discriminative directions.
    .. versionadded:: 0.17
       *LinearDiscriminantAnalysis*.
    .. versionchanged:: 0.17
       Deprecated :class:`lda.LDA` have been moved to *LinearDiscriminantAnalysis*.
    Parameters
    ----------
    solver : string, optional
        Solver to use, possible values:
          - 'svd': Singular value decomposition (default). Does not compute the
            covariance matrix, therefore this solver is recommended for
            data with a large number of features.
          - 'lsqr': Least squares solution, can be combined with shrinkage.
          - 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
    shrinkage : string or float, optional
        Shrinkage parameter, possible values:
          - None: no shrinkage (default).
          - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
          - float between 0 and 1: fixed shrinkage parameter.
        Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
    priors : array, optional, shape (n_classes,)
        Class priors.
    n_components : int, optional
        Number of components (< n_classes - 1) for dimensionality reduction.
    store_covariance : bool, optional
        Additionally compute class covariance matrix (default False).
        .. versionadded:: 0.17
    tol : float, optional
        Threshold used for rank estimation in SVD solver.
        .. versionadded:: 0.17
    Attributes
    ----------
    coef_ : array, shape (n_features,) or (n_classes, n_features)
        Weight vector(s).
    intercept_ : array, shape (n_features,)
        Intercept term.
    covariance_ : array-like, shape (n_features, n_features)
        Covariance matrix (shared by all classes).
    explained_variance_ratio_ : array, shape (n_components,)
        Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0. Only available when eigen
        solver is used.
    means_ : array-like, shape (n_classes, n_features)
        Class means.
    priors_ : array-like, shape (n_classes,)
        Class priors (sum to 1).
    scalings_ : array-like, shape (rank, n_classes - 1)
        Scaling of the features in the space spanned by the class centroids.
    xbar_ : array-like, shape (n_features,)
        Overall mean.
    classes_ : array-like, shape (n_classes,)
        Unique class labels.
    See also
    --------
    sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
        Discriminant Analysis
    Notes
    -----
    The default solver is 'svd'. It can perform both classification and
    transform, and it does not rely on the calculation of the covariance
    matrix. This can be an advantage in situations where the number of features
    is large. However, the 'svd' solver cannot be used with shrinkage.
    The 'lsqr' solver is an efficient algorithm that only works for
    classification. It supports shrinkage.
    The 'eigen' solver is based on the optimization of the between class
    scatter to within class scatter ratio. It can be used for both
    classification and transform, and it supports shrinkage. However, the
    'eigen' solver needs to compute the covariance matrix, so it might not be
    suitable for situations with a high number of features.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = LinearDiscriminantAnalysis()
    >>> clf.fit(X, y)
    LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
                solver='svd', store_covariance=False, tol=0.0001)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    """
    def __init__(self, solver='svd', shrinkage=None, priors=None,
                 n_components=None, store_covariance=False, tol=1e-4):
        self.solver = solver
        self.shrinkage = shrinkage
        self.priors = priors
        self.n_components = n_components
        self.store_covariance = store_covariance  # used only in svd solver
        self.tol = tol  # used only in svd solver
    def _solve_lsqr(self, X, y, shrinkage):
        """Least squares solver.
        The least squares solver computes a straightforward solution of the
        optimal decision rule based directly on the discriminant functions. It
        can only be used for classification (with optional shrinkage), because
        estimation of eigenvectors is not performed. Therefore, dimensionality
        reduction with the transform is not supported.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,) or (n_samples, n_classes)
            Target values.
        shrinkage : string or float, optional
            Shrinkage parameter, possible values:
              - None: no shrinkage (default).
              - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
              - float between 0 and 1: fixed shrinkage parameter.
        Notes
        -----
        This solver is based on [1]_, section 2.6.2, pp. 39-41.
        References
        ----------
        .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
           (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
           0-471-05669-3.
        """
        self.means_ = _class_means(X, y)
        self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
        # Solve covariance_ @ coef_.T = means_.T via least squares, i.e.
        # coef_[k] = Sigma^{-1} mu_k without forming an explicit inverse.
        self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
        # intercept_[k] = -0.5 * mu_k^T Sigma^{-1} mu_k + log P(k)
        self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
                           + np.log(self.priors_))
    def _solve_eigen(self, X, y, shrinkage):
        """Eigenvalue solver.
        The eigenvalue solver computes the optimal solution of the Rayleigh
        coefficient (basically the ratio of between class scatter to within
        class scatter). This solver supports both classification and
        dimensionality reduction (with optional shrinkage).
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.
        shrinkage : string or float, optional
            Shrinkage parameter, possible values:
              - None: no shrinkage (default).
              - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
              - float between 0 and 1: fixed shrinkage constant.
        Notes
        -----
        This solver is based on [1]_, section 3.8.3, pp. 121-124.
        References
        ----------
        .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
           (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
           0-471-05669-3.
        """
        self.means_ = _class_means(X, y)
        self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
        Sw = self.covariance_  # within scatter
        St = _cov(X, shrinkage)  # total scatter
        Sb = St - Sw  # between scatter
        # Generalized eigenproblem Sb v = lambda Sw v; eigh returns the
        # eigenvalues in ascending order, hence the [::-1] reversals below.
        evals, evecs = linalg.eigh(Sb, Sw)
        self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1]
        evecs = evecs[:, np.argsort(evals)[::-1]]  # sort eigenvectors
        # evecs /= np.linalg.norm(evecs, axis=0)  # doesn't work with numpy 1.6
        evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
        self.scalings_ = evecs
        # Project the class means onto the discriminant directions and back.
        self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
        self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
                           + np.log(self.priors_))
    def _solve_svd(self, X, y):
        """SVD solver.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.
        """
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)
        self.means_ = _class_means(X, y)
        if self.store_covariance:
            self.covariance_ = _class_cov(X, y, self.priors_)
        Xc = []
        for idx, group in enumerate(self.classes_):
            Xg = X[y == group, :]
            Xc.append(Xg - self.means_[idx])
        # Overall mean, weighted by the class priors.
        self.xbar_ = np.dot(self.priors_, self.means_)
        Xc = np.concatenate(Xc, axis=0)
        # 1) within (univariate) scaling by with classes std-dev
        std = Xc.std(axis=0)
        # avoid division by zero in normalization
        std[std == 0] = 1.
        fac = 1. / (n_samples - n_classes)
        # 2) Within variance scaling
        X = np.sqrt(fac) * (Xc / std)
        # SVD of centered (within)scaled data
        U, S, V = linalg.svd(X, full_matrices=False)
        rank = np.sum(S > self.tol)
        if rank < n_features:
            warnings.warn("Variables are collinear.")
        # Scaling of within covariance is: V' 1/S
        scalings = (V[:rank] / std).T / S[:rank]
        # 3) Between variance scaling
        # Scale weighted centers
        X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
                    (self.means_ - self.xbar_).T).T, scalings)
        # Centers are living in a space with n_classes-1 dim (maximum)
        # Use SVD to find projection in the space spanned by the
        # (n_classes) centers
        _, S, V = linalg.svd(X, full_matrices=0)
        # Relative threshold: keep directions carrying non-negligible
        # between-class variance.
        rank = np.sum(S > self.tol * S[0])
        self.scalings_ = np.dot(scalings, V.T[:, :rank])
        coef = np.dot(self.means_ - self.xbar_, self.scalings_)
        self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
                           + np.log(self.priors_))
        self.coef_ = np.dot(coef, self.scalings_.T)
        self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
    def fit(self, X, y, store_covariance=None, tol=None):
        """Fit LinearDiscriminantAnalysis model according to the given
           training data and parameters.
           .. versionchanged:: 0.17
              Deprecated *store_covariance* have been moved to main constructor.
           .. versionchanged:: 0.17
              Deprecated *tol* have been moved to main constructor.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array, shape (n_samples,)
            Target values.
        """
        if store_covariance:
            warnings.warn("The parameter 'store_covariance' is deprecated as "
                          "of version 0.17 and will be removed in 0.19. The "
                          "parameter is no longer necessary because the value "
                          "is set via the estimator initialisation or "
                          "set_params method.", DeprecationWarning)
            self.store_covariance = store_covariance
        if tol:
            warnings.warn("The parameter 'tol' is deprecated as of version "
                          "0.17 and will be removed in 0.19. The parameter is "
                          "no longer necessary because the value is set via "
                          "the estimator initialisation or set_params method.",
                          DeprecationWarning)
            self.tol = tol
        X, y = check_X_y(X, y, ensure_min_samples=2, estimator=self)
        self.classes_ = unique_labels(y)
        if self.priors is None:  # estimate priors from sample
            _, y_t = np.unique(y, return_inverse=True)  # non-negative ints
            self.priors_ = bincount(y_t) / float(len(y))
        else:
            self.priors_ = np.asarray(self.priors)
        if (self.priors_ < 0).any():
            raise ValueError("priors must be non-negative")
        if self.priors_.sum() != 1:
            warnings.warn("The priors do not sum to 1. Renormalizing",
                          UserWarning)
            self.priors_ = self.priors_ / self.priors_.sum()
        # Dispatch to the requested solver; 'svd' is the only solver that
        # does not support shrinkage.
        if self.solver == 'svd':
            if self.shrinkage is not None:
                raise NotImplementedError('shrinkage not supported')
            self._solve_svd(X, y)
        elif self.solver == 'lsqr':
            self._solve_lsqr(X, y, shrinkage=self.shrinkage)
        elif self.solver == 'eigen':
            self._solve_eigen(X, y, shrinkage=self.shrinkage)
        else:
            raise ValueError("unknown solver {} (valid solvers are 'svd', "
                             "'lsqr', and 'eigen').".format(self.solver))
        if self.classes_.size == 2:  # treat binary case as a special case
            # Collapse the two-row coef_/intercept_ into the single
            # discriminant of the positive class, matching LinearClassifier
            # conventions.
            self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
            self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
                                       ndmin=1)
        return self
    def transform(self, X):
        """Project data to maximize class separation.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Transformed data.
        """
        if self.solver == 'lsqr':
            raise NotImplementedError("transform not implemented for 'lsqr' "
                                      "solver (use 'svd' or 'eigen').")
        check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
        X = check_array(X)
        if self.solver == 'svd':
            X_new = np.dot(X - self.xbar_, self.scalings_)
        elif self.solver == 'eigen':
            X_new = np.dot(X, self.scalings_)
        # Keep at most n_components columns (all columns when unset).
        n_components = X.shape[1] if self.n_components is None \
            else self.n_components
        return X_new[:, :n_components]
    def predict_proba(self, X):
        """Estimate probability.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        C : array, shape (n_samples, n_classes)
            Estimated probabilities.
        """
        prob = self.decision_function(X)
        # In-place logistic sigmoid: prob = 1 / (1 + exp(-decision)).
        prob *= -1
        np.exp(prob, prob)
        prob += 1
        np.reciprocal(prob, prob)
        if len(self.classes_) == 2:  # binary case
            return np.column_stack([1 - prob, prob])
        else:
            # OvR normalization, like LibLinear's predict_probability
            prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
            return prob
    def predict_log_proba(self, X):
        """Estimate log probability.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        C : array, shape (n_samples, n_classes)
            Estimated log probabilities.
        """
        return np.log(self.predict_proba(X))
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
    """
    Quadratic Discriminant Analysis
    A classifier with a quadratic decision boundary, generated
    by fitting class conditional densities to the data
    and using Bayes' rule.
    The model fits a Gaussian density to each class.
    .. versionadded:: 0.17
       *QuadraticDiscriminantAnalysis*
    .. versionchanged:: 0.17
       Deprecated :class:`qda.QDA` have been moved to *QuadraticDiscriminantAnalysis*.
    Parameters
    ----------
    priors : array, optional, shape = [n_classes]
        Priors on classes
    reg_param : float, optional
        Regularizes the covariance estimate as
        ``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
    Attributes
    ----------
    covariances_ : list of array-like, shape = [n_features, n_features]
        Covariance matrices of each class.
    means_ : array-like, shape = [n_classes, n_features]
        Class means.
    priors_ : array-like, shape = [n_classes]
        Class priors (sum to 1).
    rotations_ : list of arrays
        For each class k an array of shape [n_features, n_k], with
        ``n_k = min(n_features, number of elements in class k)``
        It is the rotation of the Gaussian distribution, i.e. its
        principal axis.
    scalings_ : list of arrays
        For each class k an array of shape [n_k]. It contains the scaling
        of the Gaussian distributions along its principal axes, i.e. the
        variance in the rotated coordinate system.
    store_covariances : boolean
        If True the covariance matrices are computed and stored in the
        `self.covariances_` attribute.
        .. versionadded:: 0.17
    tol : float, optional, default 1.0e-4
        Threshold used for rank estimation.
        .. versionadded:: 0.17
    Examples
    --------
    >>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = QuadraticDiscriminantAnalysis()
    >>> clf.fit(X, y)
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0,
                                  store_covariances=False, tol=0.0001)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    See also
    --------
    sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
        Discriminant Analysis
    """
    def __init__(self, priors=None, reg_param=0., store_covariances=False,
                 tol=1.0e-4):
        self.priors = np.asarray(priors) if priors is not None else None
        self.reg_param = reg_param
        self.store_covariances = store_covariances
        self.tol = tol
    def fit(self, X, y, store_covariances=None, tol=None):
        """Fit the model according to the given training data and parameters.
            .. versionchanged:: 0.17
               Deprecated *store_covariance* have been moved to main constructor.
            .. versionchanged:: 0.17
               Deprecated *tol* have been moved to main constructor.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
        y : array, shape = [n_samples]
            Target values (integers)
        """
        if store_covariances:
            warnings.warn("The parameter 'store_covariances' is deprecated as "
                          "of version 0.17 and will be removed in 0.19. The "
                          "parameter is no longer necessary because the value "
                          "is set via the estimator initialisation or "
                          "set_params method.", DeprecationWarning)
            self.store_covariances = store_covariances
        if tol:
            warnings.warn("The parameter 'tol' is deprecated as of version "
                          "0.17 and will be removed in 0.19. The parameter is "
                          "no longer necessary because the value is set via "
                          "the estimator initialisation or set_params method.",
                          DeprecationWarning)
            self.tol = tol
        X, y = check_X_y(X, y)
        check_classification_targets(y)
        self.classes_, y = np.unique(y, return_inverse=True)
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')
        if self.priors is None:
            self.priors_ = bincount(y) / float(n_samples)
        else:
            self.priors_ = self.priors
        cov = None
        if self.store_covariances:
            cov = []
        means = []
        scalings = []
        rotations = []
        # Fit one Gaussian per class via an SVD of the centered class data;
        # Vt gives the principal axes (rotation), S the per-axis spread.
        for ind in xrange(n_classes):
            Xg = X[y == ind, :]
            meang = Xg.mean(0)
            means.append(meang)
            if len(Xg) == 1:
                raise ValueError('y has only 1 sample in class %s, covariance '
                                 'is ill defined.' % str(self.classes_[ind]))
            Xgc = Xg - meang
            # Xgc = U * S * V.T
            U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
            rank = np.sum(S > self.tol)
            if rank < n_features:
                warnings.warn("Variables are collinear")
            # Per-axis variances: S^2 / (n_g - 1).
            S2 = (S ** 2) / (len(Xg) - 1)
            # Shrink towards the identity: (1 - reg_param) * S2 + reg_param.
            S2 = ((1 - self.reg_param) * S2) + self.reg_param
            if self.store_covariances:
                # cov = V * (S^2 / (n-1)) * V.T
                cov.append(np.dot(S2 * Vt.T, Vt))
            scalings.append(S2)
            rotations.append(Vt.T)
        if self.store_covariances:
            self.covariances_ = cov
        self.means_ = np.asarray(means)
        self.scalings_ = scalings
        self.rotations_ = rotations
        return self
    def _decision_function(self, X):
        # Per-class log-posterior (up to an additive constant shared by all
        # classes): -0.5 * (Mahalanobis^2 + log|Sigma_k|) + log P(k).
        check_is_fitted(self, 'classes_')
        X = check_array(X)
        norm2 = []
        for i in range(len(self.classes_)):
            R = self.rotations_[i]
            S = self.scalings_[i]
            Xm = X - self.means_[i]
            # Squared Mahalanobis distance, computed in the rotated
            # (principal-axis) coordinate system.
            X2 = np.dot(Xm, R * (S ** (-0.5)))
            norm2.append(np.sum(X2 ** 2, 1))
        norm2 = np.array(norm2).T  # shape = [len(X), n_classes]
        # u[k] = log |Sigma_k| = sum of log eigenvalues.
        u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
        return (-0.5 * (norm2 + u) + np.log(self.priors_))
    def decision_function(self, X):
        """Apply decision function to an array of samples.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples (test vectors).
        Returns
        -------
        C : array, shape = [n_samples, n_classes] or [n_samples,]
            Decision function values related to each class, per sample.
            In the two-class case, the shape is [n_samples,], giving the
            log likelihood ratio of the positive class.
        """
        dec_func = self._decision_function(X)
        # handle special case of two classes
        if len(self.classes_) == 2:
            return dec_func[:, 1] - dec_func[:, 0]
        return dec_func
    def predict(self, X):
        """Perform classification on an array of test vectors X.
        The predicted class C for each sample in X is returned.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        C : array, shape = [n_samples]
        """
        d = self._decision_function(X)
        # argmax over classes, mapped back to the original labels.
        y_pred = self.classes_.take(d.argmax(1))
        return y_pred
    def predict_proba(self, X):
        """Return posterior probabilities of classification.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.
        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior probabilities of classification per class.
        """
        values = self._decision_function(X)
        # compute the likelihood of the underlying gaussian models
        # up to a multiplicative constant.
        likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
        # compute posterior probabilities
        return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
    def predict_log_proba(self, X):
        """Return posterior probabilities of classification.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.
        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior log-probabilities of classification per class.
        """
        # XXX : can do better to avoid precision overflows
        probas_ = self.predict_proba(X)
        return np.log(probas_)
|
valexandersaulys/airbnb_kaggle_contest
|
venv/lib/python3.4/site-packages/sklearn/discriminant_analysis.py
|
Python
|
gpl-2.0
| 28,413
|
[
"Gaussian"
] |
6efeb148d467125367c022ad84ad2ccb6fe2b884a524660e22fad1fb20f5895a
|
""" Utility for prompting users
"""
from DIRAC import S_OK, S_ERROR
def promptUser( message, choices = None, default = 'n', logger = None ):
  """ Prompt the user with message; choices are 'y'/'n' by default.

      :param message: text shown to the user
      :param choices: list of accepted answers (defaults to ['y', 'n']);
                      the previous mutable default ([]) was replaced by None
                      to avoid sharing one list object across calls
      :param default: answer returned on empty input; must be one of choices
      :param logger: logger used for output (DIRAC gLogger when None)
      :return: S_OK( answer ) or S_ERROR( reason )
  """
  if logger is None:
    from DIRAC import gLogger
    logger = gLogger
  if not choices:
    choices = ['y', 'n']
  if ( choices ) and ( default ) and ( not default in choices ):
    return S_ERROR( "The default value is not a valid choice" )
  choiceString = ''
  if choices and default:
    # Bracket only the exact matching choice. The previous str.replace()
    # could corrupt other choices containing the default as a substring
    # (e.g. default 'n' inside 'no' -> '[n]o').
    choiceString = '/'.join( [ '[%s]' % c if c == default else c for c in choices ] )
  elif choices and ( not default ):
    choiceString = '/'.join( choices )
  elif ( not choices ) and ( default ):
    choiceString = '[%s]' % default
  while True:
    if choiceString:
      logger.notice( '%s %s :' % ( message, choiceString ) )
    elif default:
      logger.notice( '%s %s :' % ( message, default ) )
    else:
      logger.notice( '%s :' % message )
    response = raw_input( '' )
    if ( not response ) and ( default ):
      return S_OK( default )
    elif ( not response ) and ( not default ):
      logger.error( "Failed to determine user selection" )
      return S_ERROR( "Failed to determine user selection" )
    elif ( response ) and ( choices ) and ( not response in choices ):
      logger.notice( 'your answer is not valid' )
      continue
    else:
      return S_OK( response )
|
andresailer/DIRAC
|
Core/Utilities/PromptUser.py
|
Python
|
gpl-3.0
| 1,398
|
[
"DIRAC"
] |
06bd35052f28e7408167f36696b165ae889d49d5d9945d384779513617a12f11
|
# PyParticles : Particles simulation in python
# Copyright (C) 2012 Simone Riva
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import zlib
###########################
# Current version
v_major = 0
v_minor = 3
v_revision = 6
###########################
def py_particle_version( r='s' ):
    """Return the PyParticles version.

    With ``r == 's'`` (the default) the version is returned as the string
    "major.minor.revision"; for any other value of ``r`` the
    (major, minor, revision) tuple of ints is returned.
    """
    # Reading module-level names needs no 'global' declaration; the
    # original declarations were no-ops and have been dropped.
    version = ( v_major , v_minor , v_revision )
    if r == 's' :
        return "%d.%d.%d" % version
    return version
def test_pyopencl():
    """Return True if the optional ``pyopencl`` module can be imported.

    The original used a bare ``except:``, which would also swallow
    KeyboardInterrupt/SystemExit and hide unrelated errors raised during
    the import; only an ImportError means "not available".
    """
    try :
        import pyopencl  # noqa: F401 -- imported only to probe availability
    except ImportError :
        return False
    else :
        return True
def about():
    """Print the PyParticles about message (project info, contact, license)."""
    # The contact address is stored zlib-compressed in the source.
    contact = zlib.decompress(b'x\x9c+\xce\xcc\xcd\xcfK\xd5+*KtH\xcfM\xcc\xcc\xd1K\xce\xcf\x05\x00R\x9c\x07\xba').decode('utf-8')
    template = """
    PyParticles is a particle simulation toolbox entirely written in python.
    The main objective of PyParticles is to provide a system API simple and fast to use.
    Furthermore is to provide a basic application for the implementation of simple models.
    Visit: http://pyparticles.wordpress.com/
    Docs: http://simon-r.github.com/PyParticles/index.html
    Copyright (C) %s %s email: %s
    --------------------------------------------------------------------
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.
    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>
    """
    print( template % ( '2012' , 'Simone Riva' , contact ) )
|
simon-r/PyParticles
|
pyparticles/utils/pypart_global.py
|
Python
|
gpl-3.0
| 2,638
|
[
"VisIt"
] |
f31bf3f2fb0ba6179dcb72c1a9fcbfef390089da240cddabb41e2b78c9971347
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.