Dataset schema: text (string, 12 to 1.05M chars) | repo_name (string, 5 to 86 chars) | path (string, 4 to 191 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12 to 1.05M) | keyword (list, 1 to 23 items) | text_hash (string, 64 chars)
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Code snippets used in webdocs.
The examples here are written specifically to read well with the accompanying
web docs. Do not rewrite them until you make sure the webdocs still read well
and the rewritten code supports the concept being described. For example, there
are snippets that could be shorter but they are written like this to make a
specific point in the docs.
The code snippets are all organized as self contained functions. Parts of the
function body delimited by [START tag] and [END tag] will be included
automatically in the web docs. The naming convention for the tags is to have as
prefix the PATH_TO_HTML where they are included followed by a descriptive
string. The tags can contain only letters, digits and _.
"""
import apache_beam as beam
from apache_beam.test_pipeline import TestPipeline
from apache_beam.metrics import Metrics
# Quiet some pylint warnings that happen because of the somewhat special
# format for the code snippets.
# pylint:disable=invalid-name
# pylint:disable=expression-not-assigned
# pylint:disable=redefined-outer-name
# pylint:disable=reimported
# pylint:disable=unused-variable
# pylint:disable=wrong-import-order, wrong-import-position
class SnippetUtils(object):
from apache_beam.pipeline import PipelineVisitor
class RenameFiles(PipelineVisitor):
"""RenameFiles will rewire read/write paths for unit testing.
RenameFiles will replace the GCS files specified in the read and
write transforms to local files so the pipeline can be run as a
    unit test. This assumes that read and write transforms defined in snippets
    have already been replaced by the transforms 'DummyReadForTesting' and
    'DummyWriteForTesting' (see snippets_test.py).
    This is as close as we can get to having code snippets that are
    executed and are also ready to be presented in webdocs.
"""
def __init__(self, renames):
self.renames = renames
def visit_transform(self, transform_node):
if transform_node.full_label.find('DummyReadForTesting') >= 0:
transform_node.transform.fn.file_to_read = self.renames['read']
elif transform_node.full_label.find('DummyWriteForTesting') >= 0:
transform_node.transform.fn.file_to_write = self.renames['write']
def construct_pipeline(renames):
"""A reverse words snippet as an example for constructing a pipeline."""
import re
class ReverseWords(beam.PTransform):
"""A PTransform that reverses individual elements in a PCollection."""
def expand(self, pcoll):
return pcoll | beam.Map(lambda e: e[::-1])
def filter_words(unused_x):
"""Pass through filter to select everything."""
return True
# [START pipelines_constructing_creating]
from apache_beam.utils.pipeline_options import PipelineOptions
p = beam.Pipeline(options=PipelineOptions())
# [END pipelines_constructing_creating]
p = TestPipeline() # Use TestPipeline for testing.
# [START pipelines_constructing_reading]
lines = p | 'ReadMyFile' >> beam.io.ReadFromText('gs://some/inputData.txt')
# [END pipelines_constructing_reading]
# [START pipelines_constructing_applying]
words = lines | beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x))
reversed_words = words | ReverseWords()
# [END pipelines_constructing_applying]
# [START pipelines_constructing_writing]
filtered_words = reversed_words | 'FilterWords' >> beam.Filter(filter_words)
filtered_words | 'WriteMyFile' >> beam.io.WriteToText(
'gs://some/outputData.txt')
# [END pipelines_constructing_writing]
p.visit(SnippetUtils.RenameFiles(renames))
# [START pipelines_constructing_running]
p.run()
# [END pipelines_constructing_running]
def model_pipelines(argv):
"""A wordcount snippet as a simple pipeline example."""
# [START model_pipelines]
import re
import apache_beam as beam
from apache_beam.utils.pipeline_options import PipelineOptions
class MyOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--input',
dest='input',
default='gs://dataflow-samples/shakespeare/kinglear'
'.txt',
help='Input file to process.')
parser.add_argument('--output',
dest='output',
required=True,
help='Output file to write results to.')
pipeline_options = PipelineOptions(argv)
my_options = pipeline_options.view_as(MyOptions)
p = beam.Pipeline(options=pipeline_options)
(p
| beam.io.ReadFromText(my_options.input)
| beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x))
| beam.Map(lambda x: (x, 1))
| beam.combiners.Count.PerKey()
| beam.io.WriteToText(my_options.output))
result = p.run()
# [END model_pipelines]
result.wait_until_finish()
def model_pcollection(argv):
"""Creating a PCollection from data in local memory."""
from apache_beam.utils.pipeline_options import PipelineOptions
class MyOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--output',
dest='output',
required=True,
help='Output file to write results to.')
pipeline_options = PipelineOptions(argv)
my_options = pipeline_options.view_as(MyOptions)
# [START model_pcollection]
p = beam.Pipeline(options=pipeline_options)
(p
| beam.Create([
'To be, or not to be: that is the question: ',
'Whether \'tis nobler in the mind to suffer ',
'The slings and arrows of outrageous fortune, ',
'Or to take arms against a sea of troubles, '])
| beam.io.WriteToText(my_options.output))
result = p.run()
# [END model_pcollection]
result.wait_until_finish()
def pipeline_options_remote(argv):
"""Creating a Pipeline using a PipelineOptions object for remote execution."""
from apache_beam import Pipeline
from apache_beam.utils.pipeline_options import PipelineOptions
# [START pipeline_options_create]
options = PipelineOptions(flags=argv)
# [END pipeline_options_create]
# [START pipeline_options_define_custom]
class MyOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--input')
parser.add_argument('--output')
# [END pipeline_options_define_custom]
from apache_beam.utils.pipeline_options import GoogleCloudOptions
from apache_beam.utils.pipeline_options import StandardOptions
# [START pipeline_options_dataflow_service]
# Create and set your PipelineOptions.
options = PipelineOptions(flags=argv)
# For Cloud execution, set the Cloud Platform project, job_name,
# staging location, temp_location and specify DataflowRunner.
google_cloud_options = options.view_as(GoogleCloudOptions)
google_cloud_options.project = 'my-project-id'
google_cloud_options.job_name = 'myjob'
google_cloud_options.staging_location = 'gs://my-bucket/binaries'
google_cloud_options.temp_location = 'gs://my-bucket/temp'
options.view_as(StandardOptions).runner = 'DataflowRunner'
# Create the Pipeline with the specified options.
p = Pipeline(options=options)
# [END pipeline_options_dataflow_service]
my_options = options.view_as(MyOptions)
my_input = my_options.input
my_output = my_options.output
p = TestPipeline() # Use TestPipeline for testing.
lines = p | beam.io.ReadFromText(my_input)
lines | beam.io.WriteToText(my_output)
p.run()
def pipeline_options_local(argv):
"""Creating a Pipeline using a PipelineOptions object for local execution."""
from apache_beam import Pipeline
from apache_beam.utils.pipeline_options import PipelineOptions
options = PipelineOptions(flags=argv)
# [START pipeline_options_define_custom_with_help_and_default]
class MyOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--input',
help='Input for the pipeline',
default='gs://my-bucket/input')
parser.add_argument('--output',
help='Output for the pipeline',
default='gs://my-bucket/output')
# [END pipeline_options_define_custom_with_help_and_default]
my_options = options.view_as(MyOptions)
my_input = my_options.input
my_output = my_options.output
# [START pipeline_options_local]
# Create and set your Pipeline Options.
options = PipelineOptions()
p = Pipeline(options=options)
# [END pipeline_options_local]
p = TestPipeline() # Use TestPipeline for testing.
lines = p | beam.io.ReadFromText(my_input)
lines | beam.io.WriteToText(my_output)
p.run()
def pipeline_options_command_line(argv):
"""Creating a Pipeline by passing a list of arguments."""
# [START pipeline_options_command_line]
# Use Python argparse module to parse custom arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--input')
parser.add_argument('--output')
known_args, pipeline_args = parser.parse_known_args(argv)
# Create the Pipeline with remaining arguments.
p = beam.Pipeline(argv=pipeline_args)
lines = p | 'ReadFromText' >> beam.io.ReadFromText(known_args.input)
lines | 'WriteToText' >> beam.io.WriteToText(known_args.output)
# [END pipeline_options_command_line]
p.run().wait_until_finish()
def pipeline_logging(lines, output):
"""Logging Pipeline Messages."""
import re
import apache_beam as beam
# [START pipeline_logging]
# import Python logging module.
import logging
class ExtractWordsFn(beam.DoFn):
def process(self, element):
words = re.findall(r'[A-Za-z\']+', element)
for word in words:
yield word
if word.lower() == 'love':
# Log using the root logger at info or higher levels
logging.info('Found : %s', word.lower())
# Remaining WordCount example code ...
# [END pipeline_logging]
p = TestPipeline() # Use TestPipeline for testing.
(p
| beam.Create(lines)
| beam.ParDo(ExtractWordsFn())
| beam.io.WriteToText(output))
p.run()
def pipeline_monitoring(renames):
"""Using monitoring interface snippets."""
import re
import apache_beam as beam
from apache_beam.utils.pipeline_options import PipelineOptions
class WordCountOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--input',
help='Input for the pipeline',
default='gs://my-bucket/input')
parser.add_argument('--output',
                          help='Output for the pipeline',
default='gs://my-bucket/output')
class ExtractWordsFn(beam.DoFn):
def process(self, element):
words = re.findall(r'[A-Za-z\']+', element)
for word in words:
yield word
class FormatCountsFn(beam.DoFn):
def process(self, element):
word, count = element
yield '%s: %s' % (word, count)
# [START pipeline_monitoring_composite]
# The CountWords Composite Transform inside the WordCount pipeline.
class CountWords(beam.PTransform):
def expand(self, pcoll):
return (pcoll
# Convert lines of text into individual words.
| 'ExtractWords' >> beam.ParDo(ExtractWordsFn())
# Count the number of times each word occurs.
| beam.combiners.Count.PerElement()
# Format each word and count into a printable string.
| 'FormatCounts' >> beam.ParDo(FormatCountsFn()))
# [END pipeline_monitoring_composite]
pipeline_options = PipelineOptions()
options = pipeline_options.view_as(WordCountOptions)
p = TestPipeline() # Use TestPipeline for testing.
# [START pipeline_monitoring_execution]
(p
# Read the lines of the input text.
| 'ReadLines' >> beam.io.ReadFromText(options.input)
# Count the words.
| CountWords()
# Write the formatted word counts to output.
| 'WriteCounts' >> beam.io.WriteToText(options.output))
# [END pipeline_monitoring_execution]
p.visit(SnippetUtils.RenameFiles(renames))
p.run()
def examples_wordcount_minimal(renames):
"""MinimalWordCount example snippets."""
import re
import apache_beam as beam
from apache_beam.utils.pipeline_options import GoogleCloudOptions
from apache_beam.utils.pipeline_options import StandardOptions
from apache_beam.utils.pipeline_options import PipelineOptions
# [START examples_wordcount_minimal_options]
options = PipelineOptions()
google_cloud_options = options.view_as(GoogleCloudOptions)
google_cloud_options.project = 'my-project-id'
google_cloud_options.job_name = 'myjob'
google_cloud_options.staging_location = 'gs://your-bucket-name-here/staging'
google_cloud_options.temp_location = 'gs://your-bucket-name-here/temp'
options.view_as(StandardOptions).runner = 'DataflowRunner'
# [END examples_wordcount_minimal_options]
# Run it locally for testing.
options = PipelineOptions()
# [START examples_wordcount_minimal_create]
p = beam.Pipeline(options=options)
# [END examples_wordcount_minimal_create]
(
# [START examples_wordcount_minimal_read]
p | beam.io.ReadFromText(
'gs://dataflow-samples/shakespeare/kinglear.txt')
# [END examples_wordcount_minimal_read]
# [START examples_wordcount_minimal_pardo]
| 'ExtractWords' >> beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x))
# [END examples_wordcount_minimal_pardo]
# [START examples_wordcount_minimal_count]
| beam.combiners.Count.PerElement()
# [END examples_wordcount_minimal_count]
# [START examples_wordcount_minimal_map]
| beam.Map(lambda (word, count): '%s: %s' % (word, count))
# [END examples_wordcount_minimal_map]
# [START examples_wordcount_minimal_write]
| beam.io.WriteToText('gs://my-bucket/counts.txt')
# [END examples_wordcount_minimal_write]
)
p.visit(SnippetUtils.RenameFiles(renames))
# [START examples_wordcount_minimal_run]
result = p.run()
# [END examples_wordcount_minimal_run]
result.wait_until_finish()
def examples_wordcount_wordcount(renames):
"""WordCount example snippets."""
import re
import apache_beam as beam
from apache_beam.utils.pipeline_options import PipelineOptions
argv = []
# [START examples_wordcount_wordcount_options]
class WordCountOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--input',
help='Input for the pipeline',
default='gs://my-bucket/input')
options = PipelineOptions(argv)
p = beam.Pipeline(options=options)
# [END examples_wordcount_wordcount_options]
lines = p | beam.io.ReadFromText(
'gs://dataflow-samples/shakespeare/kinglear.txt')
# [START examples_wordcount_wordcount_composite]
class CountWords(beam.PTransform):
def expand(self, pcoll):
return (pcoll
# Convert lines of text into individual words.
| 'ExtractWords' >> beam.FlatMap(
lambda x: re.findall(r'[A-Za-z\']+', x))
# Count the number of times each word occurs.
| beam.combiners.Count.PerElement())
counts = lines | CountWords()
# [END examples_wordcount_wordcount_composite]
# [START examples_wordcount_wordcount_dofn]
class FormatAsTextFn(beam.DoFn):
def process(self, element):
word, count = element
yield '%s: %s' % (word, count)
formatted = counts | beam.ParDo(FormatAsTextFn())
# [END examples_wordcount_wordcount_dofn]
formatted | beam.io.WriteToText('gs://my-bucket/counts.txt')
p.visit(SnippetUtils.RenameFiles(renames))
p.run().wait_until_finish()
def examples_wordcount_debugging(renames):
"""DebuggingWordCount example snippets."""
import re
import apache_beam as beam
# [START example_wordcount_debugging_logging]
# [START example_wordcount_debugging_aggregators]
import logging
class FilterTextFn(beam.DoFn):
"""A DoFn that filters for a specific key based on a regular expression."""
def __init__(self, pattern):
self.pattern = pattern
      # A custom metric can track values in your pipeline as it runs. Create
      # custom metrics matched_words and unmatched_words.
      self.matched_words = Metrics.counter(self.__class__, 'matched_words')
      self.unmatched_words = Metrics.counter(self.__class__, 'unmatched_words')
def process(self, element):
word, _ = element
if re.match(self.pattern, word):
# Log at INFO level each element we match. When executing this pipeline
# using the Dataflow service, these log lines will appear in the Cloud
# Logging UI.
logging.info('Matched %s', word)
# Add 1 to the custom metric counter matched_words
self.matched_words.inc()
yield element
else:
# Log at the "DEBUG" level each element that is not matched. Different
# log levels can be used to control the verbosity of logging providing
# an effective mechanism to filter less important information. Note
# currently only "INFO" and higher level logs are emitted to the Cloud
# Logger. This log message will not be visible in the Cloud Logger.
logging.debug('Did not match %s', word)
        # Add 1 to the custom metric counter unmatched_words
        self.unmatched_words.inc()
# [END example_wordcount_debugging_logging]
# [END example_wordcount_debugging_aggregators]
p = TestPipeline() # Use TestPipeline for testing.
filtered_words = (
p
| beam.io.ReadFromText(
'gs://dataflow-samples/shakespeare/kinglear.txt')
| 'ExtractWords' >> beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x))
| beam.combiners.Count.PerElement()
| 'FilterText' >> beam.ParDo(FilterTextFn('Flourish|stomach')))
# [START example_wordcount_debugging_assert]
beam.assert_that(
filtered_words, beam.equal_to([('Flourish', 3), ('stomach', 1)]))
# [END example_wordcount_debugging_assert]
output = (filtered_words
| 'format' >> beam.Map(lambda (word, c): '%s: %s' % (word, c))
| 'Write' >> beam.io.WriteToText('gs://my-bucket/counts.txt'))
p.visit(SnippetUtils.RenameFiles(renames))
p.run()
import apache_beam as beam
from apache_beam.io import iobase
from apache_beam.io.range_trackers import OffsetRangeTracker
from apache_beam.transforms.core import PTransform
from apache_beam.utils.pipeline_options import PipelineOptions
# Defining a new source.
# [START model_custom_source_new_source]
class CountingSource(iobase.BoundedSource):
def __init__(self, count):
self.records_read = Metrics.counter(self.__class__, 'recordsRead')
self._count = count
def estimate_size(self):
return self._count
def get_range_tracker(self, start_position, stop_position):
if start_position is None:
start_position = 0
if stop_position is None:
stop_position = self._count
return OffsetRangeTracker(start_position, stop_position)
def read(self, range_tracker):
for i in range(self._count):
if not range_tracker.try_claim(i):
return
self.records_read.inc()
yield i
def split(self, desired_bundle_size, start_position=None,
stop_position=None):
if start_position is None:
start_position = 0
if stop_position is None:
stop_position = self._count
bundle_start = start_position
while bundle_start < self._count:
      bundle_stop = min(self._count, bundle_start + desired_bundle_size)
yield iobase.SourceBundle(weight=(bundle_stop - bundle_start),
source=self,
start_position=bundle_start,
stop_position=bundle_stop)
bundle_start = bundle_stop
# [END model_custom_source_new_source]
def model_custom_source(count):
"""Demonstrates creating a new custom source and using it in a pipeline.
Defines a new source ``CountingSource`` that produces integers starting from 0
up to a given size.
Uses the new source in an example pipeline.
Additionally demonstrates how a source should be implemented using a
  ``PTransform``. This is the recommended way to develop sources that are to be
  distributed to a large number of end users.
This method runs two pipelines.
  (1) A pipeline that uses ``CountingSource`` directly using the
      ``beam.io.Read`` transform.
(2) A pipeline that uses a custom ``PTransform`` that wraps
``CountingSource``.
Args:
count: the size of the counting source to be used in the pipeline
demonstrated in this method.
"""
# Using the source in an example pipeline.
# [START model_custom_source_use_new_source]
p = beam.Pipeline(options=PipelineOptions())
numbers = p | 'ProduceNumbers' >> beam.io.Read(CountingSource(count))
# [END model_custom_source_use_new_source]
lines = numbers | beam.core.Map(lambda number: 'line %d' % number)
beam.assert_that(
lines, beam.equal_to(
['line ' + str(number) for number in range(0, count)]))
p.run().wait_until_finish()
# We recommend that users start Source class names with an underscore to
# discourage using the Source class directly when a PTransform for the source
# is available. We simulate that here by simply extending the previous Source
# class.
class _CountingSource(CountingSource):
pass
# [START model_custom_source_new_ptransform]
class ReadFromCountingSource(PTransform):
def __init__(self, count, **kwargs):
super(ReadFromCountingSource, self).__init__(**kwargs)
self._count = count
def expand(self, pcoll):
    return pcoll | iobase.Read(_CountingSource(self._count))
# [END model_custom_source_new_ptransform]
# [START model_custom_source_use_ptransform]
p = beam.Pipeline(options=PipelineOptions())
numbers = p | 'ProduceNumbers' >> ReadFromCountingSource(count)
# [END model_custom_source_use_ptransform]
lines = numbers | beam.core.Map(lambda number: 'line %d' % number)
beam.assert_that(
lines, beam.equal_to(
['line ' + str(number) for number in range(0, count)]))
# Don't test runner api due to pickling errors.
p.run(test_runner_api=False).wait_until_finish()
def model_custom_sink(simplekv, KVs, final_table_name_no_ptransform,
final_table_name_with_ptransform):
"""Demonstrates creating a new custom sink and using it in a pipeline.
Defines a new sink ``SimpleKVSink`` that demonstrates writing to a simple
  key-value based storage system which has the following API.
simplekv.connect(url) -
connects to the storage system and returns an access token which can be
used to perform further operations
simplekv.open_table(access_token, table_name) -
creates a table named 'table_name'. Returns a table object.
simplekv.write_to_table(access_token, table, key, value) -
writes a key-value pair to the given table.
simplekv.rename_table(access_token, old_name, new_name) -
renames the table named 'old_name' to 'new_name'.
Uses the new sink in an example pipeline.
Additionally demonstrates how a sink should be implemented using a
``PTransform``. This is the recommended way to develop sinks that are to be
distributed to a large number of end users.
This method runs two pipelines.
  (1) A pipeline that uses ``SimpleKVSink`` directly using the
      ``beam.io.Write`` transform.
(2) A pipeline that uses a custom ``PTransform`` that wraps
``SimpleKVSink``.
Args:
simplekv: an object that mocks the key-value storage.
KVs: the set of key-value pairs to be written in the example pipeline.
    final_table_name_no_ptransform: the prefix of the final set of tables to be
created by the example pipeline that uses
``SimpleKVSink`` directly.
    final_table_name_with_ptransform: the prefix of the final set of tables to be
created by the example pipeline that uses
a ``PTransform`` that wraps
``SimpleKVSink``.
"""
import apache_beam as beam
from apache_beam.io import iobase
from apache_beam.transforms.core import PTransform
from apache_beam.utils.pipeline_options import PipelineOptions
# Defining the new sink.
# [START model_custom_sink_new_sink]
class SimpleKVSink(iobase.Sink):
def __init__(self, url, final_table_name):
self._url = url
self._final_table_name = final_table_name
def initialize_write(self):
access_token = simplekv.connect(self._url)
return access_token
def open_writer(self, access_token, uid):
table_name = 'table' + uid
return SimpleKVWriter(access_token, table_name)
def finalize_write(self, access_token, table_names):
for i, table_name in enumerate(table_names):
simplekv.rename_table(
access_token, table_name, self._final_table_name + str(i))
# [END model_custom_sink_new_sink]
# Defining a writer for the new sink.
# [START model_custom_sink_new_writer]
class SimpleKVWriter(iobase.Writer):
def __init__(self, access_token, table_name):
self._access_token = access_token
self._table_name = table_name
self._table = simplekv.open_table(access_token, table_name)
def write(self, record):
key, value = record
simplekv.write_to_table(self._access_token, self._table, key, value)
def close(self):
return self._table_name
# [END model_custom_sink_new_writer]
final_table_name = final_table_name_no_ptransform
# Using the new sink in an example pipeline.
# [START model_custom_sink_use_new_sink]
p = beam.Pipeline(options=PipelineOptions())
kvs = p | 'CreateKVs' >> beam.Create(KVs)
kvs | 'WriteToSimpleKV' >> beam.io.Write(
SimpleKVSink('http://url_to_simple_kv/', final_table_name))
# [END model_custom_sink_use_new_sink]
p.run().wait_until_finish()
  # We recommend that users start Sink class names with an underscore to
  # discourage using the Sink class directly when a PTransform for the sink is
  # available. We simulate that here by simply extending the previous Sink
  # class.
class _SimpleKVSink(SimpleKVSink):
pass
# [START model_custom_sink_new_ptransform]
class WriteToKVSink(PTransform):
def __init__(self, url, final_table_name, **kwargs):
super(WriteToKVSink, self).__init__(**kwargs)
self._url = url
self._final_table_name = final_table_name
def expand(self, pcoll):
return pcoll | iobase.Write(_SimpleKVSink(self._url,
self._final_table_name))
# [END model_custom_sink_new_ptransform]
final_table_name = final_table_name_with_ptransform
# [START model_custom_sink_use_ptransform]
p = beam.Pipeline(options=PipelineOptions())
kvs = p | 'CreateKVs' >> beam.core.Create(KVs)
kvs | 'WriteToSimpleKV' >> WriteToKVSink(
'http://url_to_simple_kv/', final_table_name)
# [END model_custom_sink_use_ptransform]
p.run().wait_until_finish()
def model_textio(renames):
"""Using a Read and Write transform to read/write text files."""
def filter_words(x):
import re
return re.findall(r'[A-Za-z\']+', x)
import apache_beam as beam
from apache_beam.utils.pipeline_options import PipelineOptions
# [START model_textio_read]
p = beam.Pipeline(options=PipelineOptions())
# [START model_pipelineio_read]
lines = p | 'ReadFromText' >> beam.io.ReadFromText('path/to/input-*.csv')
# [END model_pipelineio_read]
# [END model_textio_read]
# [START model_textio_write]
filtered_words = lines | 'FilterWords' >> beam.FlatMap(filter_words)
# [START model_pipelineio_write]
filtered_words | 'WriteToText' >> beam.io.WriteToText(
'/path/to/numbers', file_name_suffix='.csv')
# [END model_pipelineio_write]
# [END model_textio_write]
p.visit(SnippetUtils.RenameFiles(renames))
p.run().wait_until_finish()
def model_textio_compressed(renames, expected):
"""Using a Read Transform to read compressed text files."""
p = TestPipeline()
# [START model_textio_write_compressed]
lines = p | 'ReadFromText' >> beam.io.ReadFromText(
'/path/to/input-*.csv.gz',
compression_type=beam.io.filesystem.CompressionTypes.GZIP)
# [END model_textio_write_compressed]
beam.assert_that(lines, beam.equal_to(expected))
p.visit(SnippetUtils.RenameFiles(renames))
p.run().wait_until_finish()
def model_datastoreio():
"""Using a Read and Write transform to read/write to Cloud Datastore."""
import uuid
from google.cloud.proto.datastore.v1 import entity_pb2
from google.cloud.proto.datastore.v1 import query_pb2
import googledatastore
import apache_beam as beam
from apache_beam.utils.pipeline_options import PipelineOptions
from apache_beam.io.gcp.datastore.v1.datastoreio import ReadFromDatastore
from apache_beam.io.gcp.datastore.v1.datastoreio import WriteToDatastore
project = 'my_project'
kind = 'my_kind'
query = query_pb2.Query()
query.kind.add().name = kind
# [START model_datastoreio_read]
p = beam.Pipeline(options=PipelineOptions())
entities = p | 'Read From Datastore' >> ReadFromDatastore(project, query)
# [END model_datastoreio_read]
# [START model_datastoreio_write]
p = beam.Pipeline(options=PipelineOptions())
musicians = p | 'Musicians' >> beam.Create(
['Mozart', 'Chopin', 'Beethoven', 'Vivaldi'])
def to_entity(content):
entity = entity_pb2.Entity()
googledatastore.helper.add_key_path(entity.key, kind, str(uuid.uuid4()))
googledatastore.helper.add_properties(entity, {'content': unicode(content)})
return entity
entities = musicians | 'To Entity' >> beam.Map(to_entity)
entities | 'Write To Datastore' >> WriteToDatastore(project)
# [END model_datastoreio_write]
def model_bigqueryio():
"""Using a Read and Write transform to read/write to BigQuery."""
import apache_beam as beam
from apache_beam.utils.pipeline_options import PipelineOptions
# [START model_bigqueryio_read]
p = beam.Pipeline(options=PipelineOptions())
weather_data = p | 'ReadWeatherStations' >> beam.io.Read(
beam.io.BigQuerySource(
'clouddataflow-readonly:samples.weather_stations'))
# [END model_bigqueryio_read]
# [START model_bigqueryio_query]
p = beam.Pipeline(options=PipelineOptions())
weather_data = p | 'ReadYearAndTemp' >> beam.io.Read(
beam.io.BigQuerySource(
query='SELECT year, mean_temp FROM samples.weather_stations'))
# [END model_bigqueryio_query]
# [START model_bigqueryio_query_standard_sql]
p = beam.Pipeline(options=PipelineOptions())
weather_data = p | 'ReadYearAndTemp' >> beam.io.Read(
beam.io.BigQuerySource(
query='SELECT year, mean_temp FROM `samples.weather_stations`',
use_standard_sql=True))
# [END model_bigqueryio_query_standard_sql]
# [START model_bigqueryio_schema]
schema = 'source:STRING, quote:STRING'
# [END model_bigqueryio_schema]
# [START model_bigqueryio_write]
quotes = p | beam.Create(
      [{'source': 'Mahatma Gandhi', 'quote': 'My life is my message.'}])
quotes | 'Write' >> beam.io.Write(
beam.io.BigQuerySink(
'my-project:output.output_table',
schema=schema,
write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED))
# [END model_bigqueryio_write]
def model_composite_transform_example(contents, output_path):
"""Example of a composite transform.
To declare a composite transform, define a subclass of PTransform.
  To define the transform's behavior, override the ``expand`` method, which
  takes a PCollection as its only parameter and returns a PCollection.
"""
import re
import apache_beam as beam
# [START composite_transform_example]
# [START composite_ptransform_apply_method]
# [START composite_ptransform_declare]
class CountWords(beam.PTransform):
# [END composite_ptransform_declare]
def expand(self, pcoll):
return (pcoll
| beam.FlatMap(lambda x: re.findall(r'\w+', x))
| beam.combiners.Count.PerElement()
| beam.Map(lambda (word, c): '%s: %s' % (word, c)))
# [END composite_ptransform_apply_method]
# [END composite_transform_example]
p = TestPipeline() # Use TestPipeline for testing.
(p
| beam.Create(contents)
| CountWords()
| beam.io.WriteToText(output_path))
p.run()
def model_multiple_pcollections_flatten(contents, output_path):
"""Merging a PCollection with Flatten."""
some_hash_fn = lambda s: ord(s[0])
import apache_beam as beam
p = TestPipeline() # Use TestPipeline for testing.
partition_fn = lambda element, partitions: some_hash_fn(element) % partitions
  # Partition into three groups based on a hash of each element.
partitioned = p | beam.Create(contents) | beam.Partition(partition_fn, 3)
pcoll1 = partitioned[0]
pcoll2 = partitioned[1]
pcoll3 = partitioned[2]
# Flatten them back into 1
# A collection of PCollection objects can be represented simply
# as a tuple (or list) of PCollections.
# (The SDK for Python has no separate type to store multiple
# PCollection objects, whether containing the same or different
# types.)
# [START model_multiple_pcollections_flatten]
merged = (
# [START model_multiple_pcollections_tuple]
(pcoll1, pcoll2, pcoll3)
# [END model_multiple_pcollections_tuple]
# A list of tuples can be "piped" directly into a Flatten transform.
| beam.Flatten())
# [END model_multiple_pcollections_flatten]
merged | beam.io.WriteToText(output_path)
p.run()
def model_multiple_pcollections_partition(contents, output_path):
"""Splitting a PCollection with Partition."""
some_hash_fn = lambda s: ord(s[0])
def get_percentile(i):
"""Assume i in [0,100)."""
return i
import apache_beam as beam
p = TestPipeline() # Use TestPipeline for testing.
students = p | beam.Create(contents)
# [START model_multiple_pcollections_partition]
def partition_fn(student, num_partitions):
return int(get_percentile(student) * num_partitions / 100)
by_decile = students | beam.Partition(partition_fn, 10)
# [END model_multiple_pcollections_partition]
# [START model_multiple_pcollections_partition_40th]
fortieth_percentile = by_decile[4]
# [END model_multiple_pcollections_partition_40th]
([by_decile[d] for d in xrange(10) if d != 4] + [fortieth_percentile]
| beam.Flatten()
| beam.io.WriteToText(output_path))
p.run()
def model_group_by_key(contents, output_path):
"""Applying a GroupByKey Transform."""
import re
import apache_beam as beam
p = TestPipeline() # Use TestPipeline for testing.
words_and_counts = (
p
| beam.Create(contents)
| beam.FlatMap(lambda x: re.findall(r'\w+', x))
| 'one word' >> beam.Map(lambda w: (w, 1)))
# GroupByKey accepts a PCollection of (w, 1) and
# outputs a PCollection of (w, (1, 1, ...)).
# (A key/value pair is just a tuple in Python.)
# This is a somewhat forced example, since one could
# simply use beam.combiners.Count.PerElement here.
# [START model_group_by_key_transform]
grouped_words = words_and_counts | beam.GroupByKey()
# [END model_group_by_key_transform]
(grouped_words
| 'count words' >> beam.Map(lambda (word, counts): (word, len(counts)))
| beam.io.WriteToText(output_path))
p.run()
def model_co_group_by_key_tuple(email_list, phone_list, output_path):
"""Applying a CoGroupByKey Transform to a tuple."""
import apache_beam as beam
p = TestPipeline() # Use TestPipeline for testing.
# [START model_group_by_key_cogroupbykey_tuple]
# Each data set is represented by key-value pairs in separate PCollections.
# Both data sets share a common key type (in this example str).
# The email_list contains values such as: ('joe', 'joe@example.com') with
# multiple possible values for each key.
  # The phone_list contains values such as: ('mary', '111-222-3333') with
# multiple possible values for each key.
emails = p | 'email' >> beam.Create(email_list)
phones = p | 'phone' >> beam.Create(phone_list)
# The result PCollection contains one key-value element for each key in the
# input PCollections. The key of the pair will be the key from the input and
# the value will be a dictionary with two entries: 'emails' - an iterable of
# all values for the current key in the emails PCollection and 'phones': an
# iterable of all values for the current key in the phones PCollection.
# For instance, if 'emails' contained ('joe', 'joe@example.com') and
# ('joe', 'joe@gmail.com'), then 'result' will contain the element
# ('joe', {'emails': ['joe@example.com', 'joe@gmail.com'], 'phones': ...})
result = {'emails': emails, 'phones': phones} | beam.CoGroupByKey()
def join_info((name, info)):
return '; '.join(['%s' % name,
'%s' % ','.join(info['emails']),
'%s' % ','.join(info['phones'])])
contact_lines = result | beam.Map(join_info)
# [END model_group_by_key_cogroupbykey_tuple]
contact_lines | beam.io.WriteToText(output_path)
p.run()
def model_join_using_side_inputs(
name_list, email_list, phone_list, output_path):
"""Joining PCollections using side inputs."""
import apache_beam as beam
from apache_beam.pvalue import AsIter
p = TestPipeline() # Use TestPipeline for testing.
# [START model_join_using_side_inputs]
# This code performs a join by receiving the set of names as an input and
# passing PCollections that contain emails and phone numbers as side inputs
# instead of using CoGroupByKey.
names = p | 'names' >> beam.Create(name_list)
emails = p | 'email' >> beam.Create(email_list)
phones = p | 'phone' >> beam.Create(phone_list)
def join_info(name, emails, phone_numbers):
filtered_emails = []
for name_in_list, email in emails:
if name_in_list == name:
filtered_emails.append(email)
filtered_phone_numbers = []
for name_in_list, phone_number in phone_numbers:
if name_in_list == name:
filtered_phone_numbers.append(phone_number)
return '; '.join(['%s' % name,
'%s' % ','.join(filtered_emails),
'%s' % ','.join(filtered_phone_numbers)])
contact_lines = names | 'CreateContacts' >> beam.core.Map(
join_info, AsIter(emails), AsIter(phones))
# [END model_join_using_side_inputs]
contact_lines | beam.io.WriteToText(output_path)
p.run()
# [START model_library_transforms_keys]
class Keys(beam.PTransform):
def expand(self, pcoll):
return pcoll | 'Keys' >> beam.Map(lambda (k, v): k)
# [END model_library_transforms_keys]
# pylint: enable=invalid-name
# [START model_library_transforms_count]
class Count(beam.PTransform):
def expand(self, pcoll):
return (
pcoll
| 'PairWithOne' >> beam.Map(lambda v: (v, 1))
| beam.CombinePerKey(sum))
# [END model_library_transforms_count]
---
repo_name: vikkyrk/incubator-beam
path: sdks/python/apache_beam/examples/snippets/snippets.py
language: Python
license: apache-2.0
size: 40103
keyword: ["VisIt"]
text_hash: cad9f38b016ed10da6110d89690ff01066698d473d0b900db18493f3a7618337
---
#!/usr/bin/env python3
import sys

if sys.version_info < (3, 0):
    raise RuntimeError("must use python 3.0 or greater")
from elasticsearch import Elasticsearch
from pprint import pprint
import json
from collections import OrderedDict
import os
CA_CERTS_PATH='/Users/philpot/aws/credentials/certs.pem'
es = Elasticsearch(
[
'https://darpamemex:darpamemex@esc.memexproxy.com/' # dig-ht-latest/offer
# 'http://user:secret@localhost:9200/',
],
# make sure we verify SSL certificates (off by default)
verify_certs=True,
# provide a path to CA certs on disk
ca_certs=CA_CERTS_PATH
)
def makeBodyNested(fieldName="name", innerPath="itemOffered", size=10):
return {
"query": {
"match_all": {}
},
"aggs": {
"toplevelAgg": {
"nested": {
"path": innerPath
},
"aggs": {
"termAgg": {
"terms": {
"field": "{}.{}".format(innerPath, fieldName),
"size" : size
}
}
}
}
}
}
def makeBodyDirect(fieldName="name", size=10):
return {
"query": {
"match_all": {}
},
"aggs": {
"termAgg": {
"terms": {
"field": fieldName,
"size": size
}
}
}
}
def makeBody(fieldName="name", innerPath="", size=10):
if innerPath:
return makeBodyNested(fieldName=fieldName,
innerPath=innerPath,
size=size)
else:
return makeBodyDirect(fieldName=fieldName,
size=size)
"""
{'_shards': {'failed': 0, 'successful': 20, 'total': 20},
'aggregations': {'toplevelAgg': {'doc_count': 19134836,
'termAgg': {'buckets': [{'doc_count': 18104,
'key': 'jessica'},
{'doc_count': 15956,
'key': 'ashley'},
{'doc_count': 12748,
'key': 'amber'},
{'doc_count': 12037,
'key': 'tiffany'},
{'doc_count': 11808,
'key': 'bella'},
{'doc_count': 11628,
'key': 'mya'},
{'doc_count': 11514,
'key': 'candy'},
{'doc_count': 10963,
'key': 'nikki'},
{'doc_count': 10932,
'key': 'diamond'},
{'doc_count': 10808,
'key': 'lexi'}],
'doc_count_error_upper_bound': 2728,
'sum_other_doc_count': 1322532}}},
'hits': {'hits': [], 'max_score': 0.0, 'total': 19134836},
'timed_out': False,
'took': 1422}
"""
def harvest(index="dig-ht-latest", docType="webpage",fieldName="addressCountry", innerPath="", size=10):
nested = True if innerPath else False
body=makeBody(fieldName=fieldName, innerPath=innerPath, size=size)
result = es.search(index=index,
doc_type=docType,
body=body,
search_type="count")
agg = result['aggregations']['toplevelAgg']['termAgg'] if nested else result['aggregations']['termAgg']
report = {"docType": docType,
"fieldName": fieldName,
"innerPath": innerPath,
"size": size,
# use 'result' later to get hitsTotal, sum_other_doc_count if needed
"result": result,
# collections.OrderedDict is serialized to JSON in the order keys were added
# so preserves decreasing value order
"histo": OrderedDict()
}
for bucket in agg['buckets']:
report["histo"][bucket["key"]] = bucket["doc_count"]
return report
# def outputPathname(docType="webpage", innerPath="mainEntity.availableAtOrFrom.address", fieldName="addressCountry", root="/tmp", **kwargs):
# return os.path.join(root, "{}_{}_{}.json".format(docType, innerPath.replace('.', '_').replace('__','_'), fieldName))
OUTPUT_ROOT = "/Users/philpot/Documents/project/graph-keyword-search/src/dig/data/cache"
def outputPathname(docType="webpage", innerPath="", fieldName="addressCountry", root=OUTPUT_ROOT, **kwargs):
return os.path.join(root, "{}_{}_{}.json".format(docType, innerPath.replace('.', '_').replace('__','_'), fieldName))
WORKING=[ # works
{"docType": "offer", "innerPath": "itemOffered", "fieldName": "name", "size": 200},
# works
{"docType": "webpage", "innerPath": "publisher", "fieldName": "name", "size": 200},
{"docType": "offer", "innerPath": "seller", "fieldName": "name", "size": 200},
{"docType": "offer", "innerPath": "itemOffered", "fieldName": "personAge", "size": 20},
{"docType": "offer", "innerPath": "mainEntityOfPage.publisher", "fieldName": "name", "size": 20},
{"docType": "seller", "innerPath": "makesOffer.mainEntityOfPage.publisher", "fieldName": "name", "size": 20},
{"docType": "phone", "innerPath": "owner.makesOffer.mainEntityOfPage.publisher", "fieldName": "name", "size": 20},
{"docType": "offer", "innerPath": "priceSpecification", "fieldName": "billingIncrement", "size": 10},
{"docType": "offer", "innerPath": "priceSpecification", "fieldName": "price", "size": 10},
{"docType": "offer", "innerPath": "priceSpecification", "fieldName": "name", "size": 10},
{"docType": "offer", "innerPath": "priceSpecification", "fieldName": "unitCode", "size": 10},
{"docType": "offer", "innerPath": "priceSpecification", "fieldName": "billingIncrement", "size": 10},
{"docType": "offer", "innerPath": "availableAtOrFrom", "fieldName": "name", "size": 10},
{"docType": "offer", "innerPath": "availableAtOrFrom.geo", "fieldName": "lat", "size": 10},
{"docType": "offer", "innerPath": "itemOffered", "fieldName": "hairColor", "size": 20},
{"docType": "offer", "innerPath": "itemOffered", "fieldName": "eyeColor", "size": 20},
{"docType": "offer", "innerPath": "itemOffered", "fieldName": "name", "size": 20},
{"docType": "offer", "innerPath": "availableAtOrFrom.geo", "fieldName": "lat", "size": 10},
{"docType": "seller", "innerPath": "telephone", "fieldName": "name", "size": 10},
{"docType": "seller", "innerPath": "telephone", "fieldName": "a", "size": 10}
]
SPECS=[ # {"docType": "webpage", "innerPath": "mainEntity.availableAtOrFrom.address", "fieldName": "addressCountry", "size": 200},
# {"docType": "webpage", "innerPath": "mainEntity.availableAtOrFrom.address", "fieldName": "addressRegion", "size": 200},
# {"docType": "webpage", "innerPath": "mainEntity.availableAtOrFrom.address", "fieldName": "addressLocality", "size": 200},
###{"docType": "seller", "innerPath": "email", "fieldName": "name", "size": 10},
###{"docType": "seller", "innerPath": "email", "fieldName": "a", "size": 10},
###{"docType": "offer", "innerPath": "seller.telephone", "fieldName": "a", "size": 10},
# WORKS
###{"docType": "seller", "innerPath": "telephone", "fieldName": "name", "size": 10},
# DOES NOT WORK in pyelasticsearch, only in sense/curl
{"docType": "offer", "innerPath": "seller.telephone", "fieldName": "name", "size": 10},
# WORKS
{"docType": "webpage", "innerPath": "mainEntity.seller.telephone", "fieldName": "name", "size": 10}
# Doesn't work
# {"docType": "offer", "innerPath": "seller.telephone", "fieldName": "name", "size": 200},
# ???
# {"docType": "offer", "innerPath": "seller", "fieldName": "a", "size": 200},
# {"docType": "offer", "innerPath": "itemOffered", "fieldName": "a", "size": 200},
# {"docType": "seller", "innerPath": "telephone", "fieldName": "name", "size": 200}
# bad syntax
# {"docType": "address", "innerPath": "", "fieldName": "addressCountry", "size": 200}
# doesn't work
# probably the issue w.r.t. nested Pedro suggested
# but sibling fields do work
]
# SPECS=[ {"docType": "offer", "innerPath": "itemOffered", "fieldName": "name", "size": 10} ]
SPECS=[ {"docType": "adultservice", "fieldName": "eyeColor", "size": 10} ]
SPECS=[ {"docType": "adultservice", "fieldName": "eyeColor", "size": 10},
{"docType": "adultservice", "fieldName": "hairColor", "size": 10},
{"docType": "adultservice", "fieldName": "name", "size": 200},
{"docType": "adultservice", "fieldName": "personAge", "size": 20},
        # These are valid, but have flat distributions, so not useful for suggestion
# {"docType": "phone", "fieldName": "name", "size": 200},
# {"docType": "email", "fieldName": "name", "size": 200},
        # Instead, use the seller-centric distribution
{"docType": "seller", "innerPath": "telephone", "fieldName": "name", "size": 200},
{"docType": "seller", "innerPath": "email", "fieldName": "name", "size": 200},
{"docType": "webpage", "innerPath": "publisher", "fieldName": "name", "size": 200},
# Ignore webpage.description, webpage.dateCreated
# Ignore offer.identifier
{"docType": "offer", "innerPath": "priceSpecification", "fieldName": "billingIncrement", "size": 10},
{"docType": "offer", "innerPath": "priceSpecification", "fieldName": "price", "size": 200},
{"docType": "offer", "innerPath": "priceSpecification", "fieldName": "name", "size": 200},
{"docType": "offer", "innerPath": "priceSpecification", "fieldName": "unitCode", "size": 10},
{"docType": "offer", "innerPath": "availableAtOrFrom.address", "fieldName": "addressLocality", "size": 200},
{"docType": "offer", "innerPath": "availableAtOrFrom.address", "fieldName": "addressRegion", "size": 200},
{"docType": "offer", "innerPath": "availableAtOrFrom.address", "fieldName": "addressCountry", "size": 200},
# Ignore offer.availableAtOrFrom.name
# Ignore offer.availableAtOrFrom.geo.lat, offer.availableAtOrFrom.geo.lon
]
def harvestToFile(spec):
    outPath = None
    try:
        outPath = outputPathname(**spec)
    except Exception:
        # Leave outPath as None if the spec lacks the fields needed for a name.
        pass
try:
h = harvest(**spec)
print("Harvest to {}".format(outPath), file=sys.stderr)
with open(outPath, 'w') as f:
# Don't use sort_keys here
# We are counting on the behavior where collections.OrderedDict is
# serialized in the order keys were added. If we add things in
# order of decreasing counts, the order will stick, unless we use sort_keys.
json.dump(h, f, indent=4)
except Exception as e:
print("Error [{}] during processing of {}".format(e, outPath))
def generateAll():
for spec in SPECS:
print()
print(spec)
# harvestToFile(spec)
try:
h = harvest(**spec)
# pprint(h)
l = -1
try:
try:
# nested
b = h["result"]["aggregations"]["toplevelAgg"]["termAgg"]["buckets"]
except:
# direct
b = h["result"]["aggregations"]["termAgg"]["buckets"]
l = len(b)
if l>0:
print("Success %d for %s" % (l, spec), file=sys.stderr)
q = 5
for i,v in zip(range(q+1), b[0:q]):
print("value %d is %s" % (i, v))
elif l==0:
print("No data for %s" % (spec), file=sys.stderr)
else:
pass
except Exception as e:
print("Nothing happened for %s" % (spec), file=sys.stderr)
print(e, file=sys.stderr)
except Exception as e:
print("Failed during %s" % (spec), file=sys.stderr)
print(e, file=sys.stderr)
"""
POST https://darpamemex:darpamemex@esc.memexproxy.com/dig-ht-latest/offer/_search?search_type=count
{
"query": {
"filtered": {
"query": {
"match_all": {}
},
"filter": {
"nested": {
"path": "itemOffered",
"filter": {
"exists": {
"field": "eyeColor"
}
}
}
}
}
},
"aggs": {
"toplevelAgg": {
"nested": {
"path": "itemOffered"
},
"aggs": {
"termAgg": {
"terms": {
"field": "itemOffered.eyeColor",
"size" : 100
}
}
}
}
}
}
"""
---
repo_name: usc-isi-i2/graph-keyword-search
path: src/dig/harvest.py
language: Python
license: apache-2.0
size: 13865
keyword: ["Amber"]
text_hash: 9d1ac6bebc0facfa91b5b43d9005a0404f4d585bb224abf1862167e76483a0cc
---
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.random_forest import H2ORandomForestEstimator
import random
def random_attack():
def attack(train, valid, x, y):
kwargs = {}
# randomly select parameters and their corresponding values
if random.randint(0,1): kwargs['mtries'] = random.randint(1,len(x))
if random.randint(0,1): kwargs['sample_rate'] = random.random()
if random.randint(0,1): kwargs['build_tree_one_node'] = True
if random.randint(0,1): kwargs['ntrees'] = random.randint(1,10)
if random.randint(0,1): kwargs['max_depth'] = random.randint(1,5)
if random.randint(0,1): kwargs['min_rows'] = random.randint(1,10)
if random.randint(0,1): kwargs['nbins'] = random.randint(2,20)
if random.randint(0,1):
kwargs['balance_classes'] = True
if random.randint(0,1): kwargs['max_after_balance_size'] = random.uniform(0,10)
if random.randint(0,1): kwargs['seed'] = random.randint(1,10000)
do_validation = [True, False][random.randint(0,1)]
# display the parameters and their corresponding values
print "-----------------------"
print "x: {0}".format(x)
print "y: {0}".format(y)
print "validation: {0}".format(do_validation)
        for k, v in kwargs.items(): print k + ": {0}".format(v)
if do_validation:
H2ORandomForestEstimator(**kwargs).train(x=x,y=y,training_frame=train,validation_frame=valid)
# h2o.random_forest(x=train[x], y=train[y], validation_x=valid[x], validation_y=valid[y], **kwargs)
else:
H2ORandomForestEstimator(**kwargs).train(x=x,y=y,training_frame=train)
# h2o.random_forest(x=train[x], y=train[y], **kwargs)
print "-----------------------"
print "Import and data munging..."
pros = h2o.upload_file(pyunit_utils.locate("smalldata/prostate/prostate.csv.zip"))
pros[1] = pros[1].asfactor()
pros[4] = pros[4].asfactor()
pros[5] = pros[5].asfactor()
pros[8] = pros[8].asfactor()
r = pros[0].runif() # a column of length pros.nrow with values between 0 and 1
# ~80/20 train/validation split
pros_train = pros[r > .2]
pros_valid = pros[r <= .2]
cars = h2o.upload_file(pyunit_utils.locate("smalldata/junit/cars.csv"))
r = cars[0].runif()
cars_train = cars[r > .2]
cars_valid = cars[r <= .2]
print
print "======================================================================"
print "============================== Binomial =============================="
print "======================================================================"
for i in range(10):
attack(pros_train, pros_valid, random.sample([2,3,4,5,6,7,8],random.randint(1,7)), 1)
print
print "======================================================================"
print "============================== Gaussian =============================="
print "======================================================================"
for i in range(10):
attack(cars_train, cars_valid, random.sample([2,3,4,5,6,7],random.randint(1,6)), 1)
print
print "======================================================================"
print "============================= Multinomial ============================"
print "======================================================================"
cars_train[2] = cars_train[2].asfactor()
cars_valid[2] = cars_valid[2].asfactor()
for i in range(10):
attack(cars_train, cars_valid, random.sample([1,3,4,5,6,7],random.randint(1,6)), 2)
if __name__ == "__main__":
pyunit_utils.standalone_test(random_attack)
else:
random_attack()
---
repo_name: madmax983/h2o-3
path: h2o-py/tests/testdir_algos/rf/pyunit_random_attack_medium.py
language: Python
license: apache-2.0
size: 3804
keyword: ["Gaussian"]
text_hash: aaf4c5add0b48a6ef3730a7df7b346030794f1f944677ee3edb49fe8ff804ea2
---
|
#
# Copyright (C) 2006-2007 Cooper Street Innovations Inc.
# Charles Eidsness <charles@cooper-street.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
"""
This module provides all of the basic eispice waveforms for use with
specific devices, e.g. V, I, VI.
Basic Waveform Classes:
-- These classes are wrappers around waveforms defined in the
simulator library (written in C)
PWL -- Piece-Wise Linear
PWC -- Piece-Wise Cubic-Spline
SFFM -- Single Frequency FM
Exp -- Exponential Rise and/or Fall
Pulse -- Pulse Train
Gauss -- Pulse Train with Gaussian Edges
Sin -- Sine Wave
"""
from numpy import array, double
import units
import simulator_
class PWL(simulator_.PWL_):
"""
Piece-Wise Linear Waveform
-- A 2D curve, points between defined data points are calculated
via linear interpolation.
Example:
>>> import eispice
>>> wave = eispice.PWL([['2n', 4],['12n', 3],['50n', 20],['75n', -20], \
['95n', -22]])
>>> cct = eispice.Circuit('PWL Test')
>>> cct.Rv = eispice.R(1, eispice.GND, 1)
>>> cct.Vx = eispice.V(1, 0, eispice.GND, wave)
>>> cct.tran('0.5n','100n')
>>> cct.check_v(1, 8.815789474e+00, '25n')
True
>>> cct.check_v(1, -2.000000000e+01, '75n')
True
"""
def __init__(self, data):
"""
Arguments:
data -- 2D Array Representing the PWL Curve
"""
data = units.floatList2D(data)
data = data[data[:,0].argsort(),] # sort by first column for simulator
simulator_.PWL_.__init__(self, data)
class PWC(simulator_.PWC_):
"""
Piece-Wise Cubic-Spline Waveform
-- A 2D curve, points between defined data points are calculated
as cubic splines.
Example:
>>> import eispice
>>> wave = eispice.PWC([['2n', 4],['12n', 3],['50n', 20],['75n', -20], \
['95n', -22]])
>>> cct = eispice.Circuit('PWC Test')
>>> cct.Rv = eispice.R(1, eispice.GND, 1)
>>> cct.Vx = eispice.V(1, 0, eispice.GND, wave)
>>> cct.tran('0.5n','100n')
>>> cct.check_v(1, 1.148836888e+01, '25n')
True
>>> cct.check_v(1, -2.000000000e+01, '75n')
True
"""
def __init__(self, data):
"""
Arguments:
data -- 2D Array Representing the PWC Curve
"""
data = units.floatList2D(data)
data = data[data[:,0].argsort(),] # sort by first column for simulator
simulator_.PWC_.__init__(self, data)
class SFFM(simulator_.SFFM_):
"""Single Frequency FM Waveform
Example:
>>> import eispice
>>> wave = eispice.SFFM(1, 4, '100M', 2, '10M')
>>> cct = eispice.Circuit('SFFM Test')
>>> cct.Rv = eispice.R(1, eispice.GND, 1)
>>> cct.Vx = eispice.V(1, 0, eispice.GND, wave)
>>> cct.tran('0.5n','100n')
>>> cct.check_v(1, -2.630296891e+00, '25n')
True
>>> cct.check_v(1, 4.631842886e+00, '75n')
True
"""
def __init__(self, *args):
"""
Arguments:
Vo -- Offset
Va -- Amplitude
Fc -- (optional) Carrier Frequency, default = 1/tstop
MDI -- (optional) Modulation Index, default = 0.0
Fs -- (optional) Signal Frequency, default = 1/tstop
"""
simulator_.SFFM_.__init__(self,*units.floatList1D(args))
class Exp(simulator_.Exp_):
"""Exponential Rise and/or Fall Waveform
Example:
>>> import eispice
>>> wave = eispice.Exp(0, 4, '5n', '2n', '25n', '5n')
>>> cct = eispice.Circuit('Exp Test')
>>> cct.Rv = eispice.R(1, eispice.GND, 1)
>>> cct.Vx = eispice.V(1, 0, eispice.GND, wave)
>>> cct.tran('0.5n','100n')
>>> cct.check_v(1, 3.999818400e+00, '25n')
True
>>> cct.check_v(1, 1.818267660e-04, '75n')
True
"""
def __init__(self, *args):
"""
Arguments:
V1 -- Initial Value
V2 -- Pulsed Value
Td1 -- (optional) Rise Delay Time, default = 0.0
Tau1 -- (optional) Rise Time Constant, default = tstep
Td2 -- (optional) Fall Delay Time, default = td1 + tstep
Tau2 -- (optional) Fall Time Constant, default = tstep
"""
simulator_.Exp_.__init__(self,*units.floatList1D(args))
class Pulse(simulator_.Pulse_):
"""Pulse Train Waveform
Example:
>>> import eispice
>>> wave = eispice.Pulse(4, 8, '10n', '2n', '3n', '5n', '20n')
>>> cct = eispice.Circuit('Pulse Test')
>>> cct.Rv = eispice.R(1, eispice.GND, 1)
>>> cct.Vx = eispice.V(1, 0, eispice.GND, wave)
>>> cct.tran('0.5n','100n')
>>> cct.check_v(1, 4, '25n')
True
>>> cct.check_v(1, 8, '75n')
True
"""
def __init__(self, *args):
"""
Arguments:
V1 -- Initial Value
V2 -- Pulsed Value
Td -- (optional) Delay Time, default = 0.0
Tr -- (optional) Rise Time, default = tstep
Tf -- (optional) Fall Time, default = tstep
PW -- (optional) Pulse Width, default = tstop
Per -- (optional) Period, default = tstop
"""
simulator_.Pulse_.__init__(self,*units.floatList1D(args))
class Gauss(simulator_.Gauss_):
"""Pulse Train Waveform with Gaussian Edges
Example:
>>> import eispice
>>> wave = eispice.Gauss(0, 3.3, '0n', '2n', '5n', '10n', '50n')
>>> cct = eispice.Circuit('Gauss Test')
>>> cct.Rv = eispice.R(1, eispice.GND, 1)
>>> cct.Vx = eispice.V(1, 0, eispice.GND, wave)
>>> cct.tran('0.5n','100n')
>>> cct.check_v(1, 1.517639357e-01, '25n')
True
>>> cct.check_v(1, 3.148220583e+00, '65n')
True
"""
def __init__(self, *args):
"""
Arguments:
V1 -- Initial Value
V2 -- Pulsed Value
Td -- (optional) Delay Time, default = 0.0
Tr -- (optional) Rise Time (20% to 80%), default = tstep
Tf -- (optional) Fall Time (20% to 80%), default = tstep
PW -- (optional) Pulse Width, default = tstop
Per -- (optional) Period, default = tstop
"""
simulator_.Gauss_.__init__(self,*units.floatList1D(args))
class Sin(simulator_.Sin_):
"""Sine Wave Waveform
Example:
>>> import eispice
    >>> wave = eispice.Sin(0, 4, '50M', '5n', '10M')
>>> cct = eispice.Circuit('Sin Test')
>>> cct.Rv = eispice.R(1, eispice.GND, 1)
>>> cct.Vx = eispice.V(1, 0, eispice.GND, wave)
>>> cct.tran('0.5n','100n')
>>> cct.check_v(1, -6.561833244e-04, '25n')
True
>>> cct.check_v(1, 1.910792387e-04, '75n')
True
"""
def __init__(self, *args):
"""
Arguments:
        Vo -- Offset
        Va -- Amplitude
        Fc -- (optional) Frequency, default = 1/tstop
        Td -- (optional) Delay, default = 0.0
        DF -- (optional) Damping Factor, default = 0.0
"""
simulator_.Sin_.__init__(self,*units.floatList1D(args))
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=False)
print('Testing Complete')
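# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example combining the Pulse waveform with the Circuit/R/V API
# already exercised by the doctests above; the component values and time
# steps here are arbitrary assumptions, not values from the eispice docs.
def _example_pulse_circuit():
    import eispice
    wave = eispice.Pulse(0, 3.3, '1n', '1n', '1n', '8n', '20n')
    cct = eispice.Circuit('Pulse Sketch')
    cct.Rv = eispice.R(1, eispice.GND, 1)
    cct.Vx = eispice.V(1, 0, eispice.GND, wave)
    cct.tran('0.5n', '40n')
    return cct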
|
Narrat/python3-eispice
|
module/waveform.py
|
Python
|
gpl-2.0
| 7,564
|
[
"Gaussian"
] |
d7f1214ceed704b20e4558c3d9de5c21a6bd6e92de1a56807bf2f5f2339ad85f
|
"""
<Filename>
geoip_module.py
<Purpose>
This is the command dictionary for location-related services for seash.
For more information on the command dictionary, see the documentation for
seash_importer.py.
It implements the following commands:
show location
show coordinates
"""
import seash_global_variables
import seash_exceptions
import repyhelper
repyhelper.translate_and_import("geoip_client.repy")
# show location -- Display location information about the nodes
def show_location(input_dict, environment_dict):
if not environment_dict['currenttarget']:
raise seash_exceptions.UserError("Error, command requires a target")
geoip_init_client()
# we should only visit a node once...
printedIPlist = []
for longname in seash_global_variables.targets[environment_dict['currenttarget']]:
thisnodeIP = seash_global_variables.vesselinfo[longname]['IP']
# if we haven't visited this node
if thisnodeIP not in printedIPlist:
printedIPlist.append(thisnodeIP)
try:
location_dict = geoip_record_by_addr(thisnodeIP)
except Exception:
location_dict = None
if location_dict:
print str(seash_global_variables.vesselinfo[longname]['ID'])+'('+str(thisnodeIP)+'): '+geoip_location_str(location_dict)
else:
print str(seash_global_variables.vesselinfo[longname]['ID'])+'('+str(thisnodeIP)+'): Location unknown'
#show coordinates -- Display the latitude & longitude of the nodes
def show_coordinates(input_dict, environment_dict):
if not environment_dict['currenttarget']:
raise seash_exceptions.UserError("Error, command requires a target")
geoip_init_client()
# we should only visit a node once...
printedIPlist = []
for longname in seash_global_variables.targets[environment_dict['currenttarget']]:
thisnodeIP = seash_global_variables.vesselinfo[longname]['IP']
# if we haven't visited this node
if thisnodeIP not in printedIPlist:
printedIPlist.append(thisnodeIP)
location_dict = geoip_record_by_addr(thisnodeIP)
if location_dict:
print str(seash_global_variables.vesselinfo[longname]['ID'])+'('+str(thisnodeIP)+'): ' + str(location_dict['latitude']) + ", " + str(location_dict['longitude'])
else:
print str(seash_global_variables.vesselinfo[longname]['ID'])+'('+str(thisnodeIP)+'): Location unknown'
SHOW_LOCATION_HELPTEXT = """
show location
Uses a geo-IP location service to return information about the position of the
nodes in the current group.
Example:
exampleuser@browsegood !> show ip
192.x.x.2
193.x.x.42
219.x.x.62
exampleuser@browsegood !> show location
%1(192.x.x.2): Location unknown
%3(193.x.x.42): Cesson-Sévigné, France
%4(219.x.x.62): Beijing, China
"""
SHOW_COORDINATES_HELPTEXT = """
show coordinates
Uses a geo-IP location service to get approximate latitude and longitude
information about nodes in the current group.
Example:
exampleuser@browsegood !> show location
%1(192.x.x.2): Location unknown
%3(193.x.x.42): Cesson-Sévigné, France
%4(219.x.x.62): Beijing, China
exampleuser@browsegood !> show coordinates
%1(192.x.x.2): Location unknown
%3(193.x.x.42): 48.1167, 1.6167
%4(219.x.x.62): 39.9289, 116.3883
"""
command_dict = {
'show':{
'children': {
'location':{
'name':'location',
'callback': show_location,
'summary': "Display location information (countries) for the nodes",
'help_text':SHOW_LOCATION_HELPTEXT,
'children': {}},
'coordinates':{
'name':'coordinates',
'callback': show_coordinates,
'summary':'Display the latitude & longitude of the nodes',
'help_text':SHOW_COORDINATES_HELPTEXT,
'children': {}}
}}
}
help_text = """
GeoIP Module
This module includes commands that provide information regarding vessels'
geographical locations. To get started using this module, acquire several
vessels through the Seattle Clearinghouse, use the 'browse' command, and then
in any group, run either 'show location' or 'show coordinates'.
"""
# This is where the module importer loads the module from
moduledata = {
'command_dict': command_dict,
'help_text': help_text,
'url': None,
}
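# --- Illustrative dispatch sketch (not part of the original module) ---
# A minimal example of how a caller might reach the callbacks through
# command_dict. The shape of environment_dict below is an assumption based
# only on the 'currenttarget' key used above; seash itself constructs the
# real input and environment dictionaries.
def _example_dispatch():
    environment_dict = {'currenttarget': 'browsegood'}   # assumed shape
    callback = command_dict['show']['children']['location']['callback']
    callback({}, environment_dict)   # equivalent to typing: show location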
|
drewwestrick/Repy-Web-Server
|
seattle_repy/modules/geoip/__init__.py
|
Python
|
gpl-2.0
| 4,190
|
[
"VisIt"
] |
78439d2e4706adac3da0d8dc50106d5bf8fb20101fd0674112579e90cd2e0cea
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import os
import shutil
import unittest2 as unittest
import numpy as np
from pymatgen.io.vasp.sets import *
from pymatgen.io.vasp.inputs import Poscar, Incar, Kpoints
from pymatgen import Specie, Lattice, Structure
from pymatgen.util.testing import PymatgenTest
from monty.json import MontyDecoder
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
dec = MontyDecoder()
class MITMPVaspInputSetTest(unittest.TestCase):
def setUp(self):
if "VASP_PSP_DIR" not in os.environ:
os.environ["VASP_PSP_DIR"] = test_dir
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath)
self.struct = poscar.structure
self.mitparamset = MITVaspInputSet()
self.mitparamset_unsorted = MITVaspInputSet(sort_structure=False)
self.mithseparamset = MITHSEVaspInputSet()
self.paramset = MPVaspInputSet()
self.userparamset = MPVaspInputSet(
user_incar_settings={'MAGMOM': {"Fe": 10, "S": -5, "Mn3+": 100}}
)
self.mitggaparam = MITGGAVaspInputSet()
self.mpstaticparamset = MPStaticVaspInputSet()
self.mpnscfparamsetu = MPNonSCFVaspInputSet(
{"NBANDS": 50}, mode="Uniform")
self.mpnscfparamsetl = MPNonSCFVaspInputSet(
{"NBANDS": 60}, mode="Line")
self.mphseparamset = MPHSEVaspInputSet()
self.mpdielparamset = MPStaticDielectricDFPTVaspInputSet()
def test_get_poscar(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["Fe", "Mn"], coords)
s_unsorted = self.mitparamset_unsorted.get_poscar(struct).structure
s_sorted = self.mitparamset.get_poscar(struct).structure
self.assertEqual(s_unsorted[0].specie.symbol, 'Fe')
self.assertEqual(s_sorted[0].specie.symbol, 'Mn')
def test_get_potcar_symbols(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
coords.append([0.75, 0.25, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["P", "Fe", "O"], coords)
syms = self.paramset.get_potcar_symbols(struct)
self.assertEqual(syms, ['Fe_pv', 'P', 'O'])
syms = MPVaspInputSet(sort_structure=False).get_potcar_symbols(struct)
self.assertEqual(syms, ['P', 'Fe_pv', 'O'])
def test_false_potcar_hash(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
coords.append([0.75, 0.25, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["P", "Fe", "O"], coords)
self.mitparamset.potcar_settings['Fe']['symbol'] = 'Fe_pv'
self.assertRaises(ValueError, self.mitparamset.get_potcar, struct, check_hash=True)
self.mitparamset.potcar_settings['Fe']['symbol'] = 'Fe'
def test_lda_potcar(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["P", "Fe"], coords)
p = MITVaspInputSet(potcar_functional="LDA").get_potcar(struct)
self.assertEqual(p.functional, 'LDA')
def test_get_nelect(self):
coords = [[0]*3, [0.5]*3, [0.75]*3]
lattice = Lattice.cubic(4)
s = Structure(lattice, ['Si', 'Si', 'Fe'], coords)
self.assertAlmostEqual(MITVaspInputSet().get_nelect(s), 16)
def test_get_incar(self):
incar = self.paramset.get_incar(self.struct)
self.assertEqual(incar['LDAUU'], [5.3, 0, 0])
self.assertAlmostEqual(incar['EDIFF'], 0.0012)
incar = self.mitparamset.get_incar(self.struct)
self.assertEqual(incar['LDAUU'], [4.0, 0, 0])
self.assertAlmostEqual(incar['EDIFF'], 0.0012)
incar_gga = self.mitggaparam.get_incar(self.struct)
self.assertNotIn("LDAU", incar_gga)
incar_static = self.mpstaticparamset.get_incar(self.struct)
self.assertEqual(incar_static["NSW"], 0)
incar_nscfl = self.mpnscfparamsetl.get_incar(self.struct)
self.assertEqual(incar_nscfl["NBANDS"], 60)
incar_nscfu = self.mpnscfparamsetu.get_incar(self.struct)
self.assertEqual(incar_nscfu["ISYM"], 0)
incar_hse = self.mphseparamset.get_incar(self.struct)
self.assertEqual(incar_hse['LHFCALC'], True)
self.assertEqual(incar_hse['HFSCREEN'], 0.2)
incar_diel = self.mpdielparamset.get_incar(self.struct)
self.assertEqual(incar_diel['IBRION'], 8)
self.assertEqual(incar_diel['LEPSILON'], True)
si = 14
coords = list()
coords.append(np.array([0, 0, 0]))
coords.append(np.array([0.75, 0.5, 0.75]))
#Silicon structure for testing.
latt = Lattice(np.array([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]]))
struct = Structure(latt, [si, si], coords)
incar = self.paramset.get_incar(struct)
self.assertNotIn("LDAU", incar)
incar = self.mithseparamset.get_incar(self.struct)
self.assertTrue(incar['LHFCALC'])
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
struct = Structure(lattice, ["Fe", "Mn"], coords)
incar = self.paramset.get_incar(struct)
self.assertNotIn('LDAU', incar)
#check fluorides
struct = Structure(lattice, ["Fe", "F"], coords)
incar = self.paramset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [5.3, 0])
self.assertEqual(incar['MAGMOM'], [5, 0.6])
struct = Structure(lattice, ["Fe", "F"], coords)
incar = self.mitparamset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [4.0, 0])
#Make sure this works with species.
struct = Structure(lattice, ["Fe2+", "O2-"], coords)
incar = self.paramset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [5.3, 0])
struct = Structure(lattice, ["Fe", "Mn"], coords,
site_properties={'magmom': (5.2, -4.5)})
incar = self.paramset.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [-4.5, 5.2])
incar = self.mpstaticparamset.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [-4.5, 5.2])
incar = self.mitparamset_unsorted.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [5.2, -4.5])
struct = Structure(lattice, [Specie("Fe", 2, {'spin': 4.1}), "Mn"],
coords)
incar = self.paramset.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [5, 4.1])
incar = self.mpnscfparamsetl.get_incar(struct)
self.assertEqual(incar.get('MAGMOM', None), None)
struct = Structure(lattice, ["Mn3+", "Mn4+"], coords)
incar = self.mitparamset.get_incar(struct)
self.assertEqual(incar['MAGMOM'], [4, 3])
incar = self.mpnscfparamsetu.get_incar(struct)
self.assertEqual(incar.get('MAGMOM', None), None)
self.assertEqual(self.userparamset.get_incar(struct)['MAGMOM'],
[100, 0.6])
#sulfide vs sulfate test
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
coords.append([0.25, 0.5, 0])
struct = Structure(lattice, ["Fe", "Fe", "S"], coords)
incar = self.mitparamset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [1.9, 0])
#Make sure Materials Project sulfides are ok.
self.assertNotIn('LDAUU', self.paramset.get_incar(struct))
self.assertNotIn('LDAUU', self.mpstaticparamset.get_incar(struct))
struct = Structure(lattice, ["Fe", "S", "O"], coords)
incar = self.mitparamset.get_incar(struct)
self.assertEqual(incar['LDAUU'], [4.0, 0, 0])
#Make sure Materials Project sulfates are ok.
self.assertEqual(self.paramset.get_incar(struct)['LDAUU'], [5.3, 0, 0])
self.assertEqual(self.mpnscfparamsetl.get_incar(struct)['LDAUU'],
[5.3, 0, 0])
self.assertEqual(self.userparamset.get_incar(struct)['MAGMOM'],
[10, -5, 0.6])
def test_optics(self):
self.mpopticsparamset = MPOpticsNonSCFVaspInputSet.from_previous_vasp_run(
'{}/static_silicon'.format(test_dir), output_dir='optics_test_dir',
nedos=1145)
self.assertTrue(os.path.exists('optics_test_dir/CHGCAR'))
incar = Incar.from_file('optics_test_dir/INCAR')
self.assertTrue(incar['LOPTICS'])
self.assertEqual(incar['NEDOS'], 1145)
#Remove the directory in which the inputs have been created
shutil.rmtree('optics_test_dir')
def test_get_kpoints(self):
kpoints = self.paramset.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [[2, 4, 6]])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Monkhorst)
kpoints = self.mitparamset.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [[2, 4, 6]])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Monkhorst)
kpoints = self.mpstaticparamset.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [[6, 6, 4]])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Monkhorst)
kpoints = self.mpnscfparamsetl.get_kpoints(self.struct)
self.assertEqual(kpoints.num_kpts, 140)
self.assertEqual(kpoints.style, Kpoints.supported_modes.Reciprocal)
kpoints = self.mpnscfparamsetu.get_kpoints(self.struct)
self.assertEqual(kpoints.num_kpts, 168)
recip_paramset = MPVaspInputSet(force_gamma=True)
recip_paramset.kpoints_settings = {"reciprocal_density": 40}
kpoints = recip_paramset.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [[2, 4, 6]])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
def test_get_all_vasp_input(self):
d = self.mitparamset.get_all_vasp_input(self.struct)
self.assertEqual(d["INCAR"]["ISMEAR"], -5)
self.struct.make_supercell(4)
d = self.mitparamset.get_all_vasp_input(self.struct)
self.assertEqual(d["INCAR"]["ISMEAR"], 0)
def test_to_from_dict(self):
self.mitparamset = MITVaspInputSet()
self.mithseparamset = MITHSEVaspInputSet()
self.paramset = MPVaspInputSet()
self.userparamset = MPVaspInputSet(
user_incar_settings={'MAGMOM': {"Fe": 10, "S": -5, "Mn3+": 100}}
)
d = self.mitparamset.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["LDAUU"]["O"]["Fe"], 4)
d = self.mitggaparam.as_dict()
v = dec.process_decoded(d)
self.assertNotIn("LDAUU", v.incar_settings)
d = self.mithseparamset.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["LHFCALC"], True)
d = self.mphseparamset.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["LHFCALC"], True)
d = self.paramset.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["LDAUU"]["O"]["Fe"], 5.3)
d = self.userparamset.as_dict()
v = dec.process_decoded(d)
#self.assertEqual(type(v), MPVaspInputSet)
self.assertEqual(v.incar_settings["MAGMOM"],
{"Fe": 10, "S": -5, "Mn3+": 100})
class MITMDVaspInputSetTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath)
self.struct = poscar.structure
self.mitmdparam = MITMDVaspInputSet(300, 1200, 10000)
def test_get_potcar_symbols(self):
syms = self.mitmdparam.get_potcar_symbols(self.struct)
self.assertEqual(syms, ['Fe', 'P', 'O'])
def test_get_incar(self):
incar = self.mitmdparam.get_incar(self.struct)
self.assertNotIn("LDAUU", incar)
self.assertAlmostEqual(incar['EDIFF'], 2.4e-5)
def test_get_kpoints(self):
kpoints = self.mitmdparam.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [(1, 1, 1)])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
def test_to_from_dict(self):
d = self.mitmdparam.as_dict()
v = dec.process_decoded(d)
self.assertEqual(type(v), MITMDVaspInputSet)
self.assertEqual(v.incar_settings["TEBEG"], 300)
class MITNEBVaspInputSetTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
poscar = Poscar.from_file(filepath)
self.struct = poscar.structure
self.vis = MITNEBVaspInputSet(nimages=10, hubbard_off=True)
def test_get_potcar_symbols(self):
syms = self.vis.get_potcar_symbols(self.struct)
self.assertEqual(syms, ['Fe', 'P', 'O'])
def test_get_incar(self):
incar = self.vis.get_incar(self.struct)
self.assertNotIn("LDAUU", incar)
self.assertAlmostEqual(incar['EDIFF'], 0.00005)
def test_get_kpoints(self):
kpoints = self.vis.get_kpoints(self.struct)
self.assertEqual(kpoints.kpts, [[2, 4, 6]])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Monkhorst)
def test_to_from_dict(self):
d = self.vis.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v.incar_settings["IMAGES"], 10)
def test_write_inputs(self):
c1 = [[0.5] * 3, [0.9] * 3]
c2 = [[0.5] * 3, [0.9, 0.1, 0.1]]
s1 = Structure(Lattice.cubic(5), ['Si', 'Si'], c1)
s2 = Structure(Lattice.cubic(5), ['Si', 'Si'], c2)
structs = []
for s in s1.interpolate(s2, 3, pbc=True):
structs.append(Structure.from_sites(s.sites, to_unit_cell=True))
fc = self.vis._process_structures(structs)[2].frac_coords
self.assertTrue(np.allclose(fc, [[0.5]*3,[0.9, 1.033333, 1.0333333]]))
if __name__ == '__main__':
unittest.main()
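# --- Illustrative usage sketch (not part of the original tests) ---
# A minimal example, assuming only the deprecated MPVaspInputSet methods
# exercised above (get_incar/get_kpoints/get_poscar); the structure is an
# arbitrary placeholder.
def _example_input_set_usage():
    lattice = Lattice.cubic(4)
    struct = Structure(lattice, ['Si', 'Si'], [[0, 0, 0], [0.5, 0.5, 0.5]])
    vis = MPVaspInputSet()
    incar = vis.get_incar(struct)
    kpoints = vis.get_kpoints(struct)
    poscar = vis.get_poscar(struct)
    return incar, kpoints, poscar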
|
Bismarrck/pymatgen
|
pymatgen/io/vasp/tests/test_sets_deprecated.py
|
Python
|
mit
| 15,130
|
[
"VASP",
"pymatgen"
] |
a239b74d57ed2c176d61fc7e8ecc439496e701d18970e82ea53057f930e0b08d
|
#
# QAPI visitor generator
#
# Copyright IBM, Corp. 2011
#
# Authors:
# Anthony Liguori <aliguori@us.ibm.com>
# Michael Roth <mdroth@linux.vnet.ibm.com>
#
# This work is licensed under the terms of the GNU GPLv2.
# See the COPYING.LIB file in the top-level directory.
from ordereddict import OrderedDict
from qapi import *
import sys
import os
import getopt
import errno
def generate_visit_struct_body(field_prefix, name, members):
ret = mcgen('''
if (!error_is_set(errp)) {
''')
push_indent()
if len(field_prefix):
field_prefix = field_prefix + "."
ret += mcgen('''
Error **errp = &err; /* from outer scope */
Error *err = NULL;
visit_start_struct(m, NULL, "", "%(name)s", 0, &err);
''',
name=name)
else:
ret += mcgen('''
Error *err = NULL;
visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), &err);
''',
name=name)
ret += mcgen('''
if (!err) {
if (!obj || *obj) {
''')
push_indent()
push_indent()
for argname, argentry, optional, structured in parse_args(members):
if optional:
ret += mcgen('''
visit_start_optional(m, obj ? &(*obj)->%(c_prefix)shas_%(c_name)s : NULL, "%(name)s", &err);
if (obj && (*obj)->%(prefix)shas_%(c_name)s) {
''',
c_prefix=c_var(field_prefix), prefix=field_prefix,
c_name=c_var(argname), name=argname)
push_indent()
if structured:
ret += generate_visit_struct_body(field_prefix + argname, argname, argentry)
else:
ret += mcgen('''
visit_type_%(type)s(m, obj ? &(*obj)->%(c_prefix)s%(c_name)s : NULL, "%(name)s", &err);
''',
c_prefix=c_var(field_prefix), prefix=field_prefix,
type=type_name(argentry), c_name=c_var(argname),
name=argname)
if optional:
pop_indent()
ret += mcgen('''
}
visit_end_optional(m, &err);
''')
pop_indent()
ret += mcgen('''
error_propagate(errp, err);
err = NULL;
}
''')
pop_indent()
pop_indent()
ret += mcgen('''
/* Always call end_struct if start_struct succeeded. */
visit_end_struct(m, &err);
}
error_propagate(errp, err);
}
''')
return ret
def generate_visit_struct(name, members):
ret = mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp)
{
''',
name=name)
push_indent()
ret += generate_visit_struct_body("", name, members)
pop_indent()
ret += mcgen('''
}
''')
return ret
def generate_visit_list(name, members):
return mcgen('''
void visit_type_%(name)sList(Visitor *m, %(name)sList ** obj, const char *name, Error **errp)
{
GenericList *i, **prev = (GenericList **)obj;
Error *err = NULL;
if (!error_is_set(errp)) {
visit_start_list(m, name, &err);
if (!err) {
for (; (i = visit_next_list(m, prev, &err)) != NULL; prev = &i) {
%(name)sList *native_i = (%(name)sList *)i;
visit_type_%(name)s(m, &native_i->value, NULL, &err);
}
error_propagate(errp, err);
err = NULL;
/* Always call end_list if start_list succeeded. */
visit_end_list(m, &err);
}
error_propagate(errp, err);
}
}
''',
name=name)
def generate_visit_enum(name, members):
return mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s * obj, const char *name, Error **errp)
{
visit_type_enum(m, (int *)obj, %(name)s_lookup, "%(name)s", name, errp);
}
''',
name=name)
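# Example (added for illustration): for an enum named 'Color', the template
# above expands to the following C function:
#
#   void visit_type_Color(Visitor *m, Color * obj, const char *name, Error **errp)
#   {
#       visit_type_enum(m, (int *)obj, Color_lookup, "Color", name, errp);
#   }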
def generate_visit_union(name, members):
ret = generate_visit_enum('%sKind' % name, members.keys())
ret += mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp)
{
Error *err = NULL;
if (!error_is_set(errp)) {
visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), &err);
if (!err) {
if (obj && *obj) {
visit_type_%(name)sKind(m, &(*obj)->kind, "type", &err);
if (!err) {
switch ((*obj)->kind) {
''',
name=name)
push_indent()
push_indent()
for key in members:
ret += mcgen('''
case %(abbrev)s_KIND_%(enum)s:
visit_type_%(c_type)s(m, &(*obj)->%(c_name)s, "data", &err);
break;
''',
abbrev = de_camel_case(name).upper(),
enum = c_fun(de_camel_case(key)).upper(),
c_type=members[key],
c_name=c_fun(key))
ret += mcgen('''
default:
abort();
}
}
error_propagate(errp, err);
err = NULL;
}
''')
pop_indent()
ret += mcgen('''
/* Always call end_struct if start_struct succeeded. */
visit_end_struct(m, &err);
}
error_propagate(errp, err);
}
''')
pop_indent()
ret += mcgen('''
}
''')
return ret
def generate_declaration(name, members, genlist=True):
ret = mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp);
''',
name=name)
if genlist:
ret += mcgen('''
void visit_type_%(name)sList(Visitor *m, %(name)sList ** obj, const char *name, Error **errp);
''',
name=name)
return ret
def generate_enum_declaration(name, members, genlist=True):
ret = ""
if genlist:
ret += mcgen('''
void visit_type_%(name)sList(Visitor *m, %(name)sList ** obj, const char *name, Error **errp);
''',
name=name)
return ret
def generate_decl_enum(name, members, genlist=True):
return mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s * obj, const char *name, Error **errp);
''',
name=name)
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "chp:o:",
["source", "header", "prefix=", "output-dir="])
except getopt.GetoptError, err:
print str(err)
sys.exit(1)
output_dir = ""
prefix = ""
c_file = 'qapi-visit.c'
h_file = 'qapi-visit.h'
do_c = False
do_h = False
for o, a in opts:
if o in ("-p", "--prefix"):
prefix = a
elif o in ("-o", "--output-dir"):
output_dir = a + "/"
elif o in ("-c", "--source"):
do_c = True
elif o in ("-h", "--header"):
do_h = True
if not do_c and not do_h:
do_c = True
do_h = True
c_file = output_dir + prefix + c_file
h_file = output_dir + prefix + h_file
try:
os.makedirs(output_dir)
except os.error, e:
if e.errno != errno.EEXIST:
raise
def maybe_open(really, name, opt):
if really:
return open(name, opt)
else:
import StringIO
return StringIO.StringIO()
fdef = maybe_open(do_c, c_file, 'w')
fdecl = maybe_open(do_h, h_file, 'w')
fdef.write(mcgen('''
/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* schema-defined QAPI visitor functions
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#include "%(header)s"
''',
header=basename(h_file)))
fdecl.write(mcgen('''
/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* schema-defined QAPI visitor function
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#ifndef %(guard)s
#define %(guard)s
#include "qapi/qapi-visit-core.h"
#include "%(prefix)sqapi-types.h"
''',
prefix=prefix, guard=guardname(h_file)))
exprs = parse_schema(sys.stdin)
for expr in exprs:
if expr.has_key('type'):
ret = generate_visit_struct(expr['type'], expr['data'])
ret += generate_visit_list(expr['type'], expr['data'])
fdef.write(ret)
ret = generate_declaration(expr['type'], expr['data'])
fdecl.write(ret)
elif expr.has_key('union'):
ret = generate_visit_union(expr['union'], expr['data'])
ret += generate_visit_list(expr['union'], expr['data'])
fdef.write(ret)
ret = generate_decl_enum('%sKind' % expr['union'], expr['data'].keys())
ret += generate_declaration(expr['union'], expr['data'])
fdecl.write(ret)
elif expr.has_key('enum'):
ret = generate_visit_list(expr['enum'], expr['data'])
ret += generate_visit_enum(expr['enum'], expr['data'])
fdef.write(ret)
ret = generate_decl_enum(expr['enum'], expr['data'])
ret += generate_enum_declaration(expr['enum'], expr['data'])
fdecl.write(ret)
fdecl.write('''
#endif
''')
fdecl.flush()
fdecl.close()
fdef.flush()
fdef.close()
|
asias/qemu
|
scripts/qapi-visit.py
|
Python
|
gpl-2.0
| 9,031
|
[
"VisIt"
] |
c246e613759ea58fde40a111e57216743d481b40edd139c12ea1c9a13f0addfb
|
"""Analysis of text input into executable blocks.
The main class in this module, :class:`InputSplitter`, is designed to break
input from either interactive, line-by-line environments or block-based ones,
into standalone blocks that can be executed by Python as 'single' statements
(thus triggering sys.displayhook).
A companion, :class:`IPythonInputSplitter`, provides the same functionality but
with full support for the extended IPython syntax (magics, system calls, etc).
For more details, see the class docstring below.
Syntax Transformations
----------------------
One of the main jobs of the code in this file is to apply all syntax
transformations that make up 'the IPython language', i.e. magics, shell
escapes, etc. All transformations should be implemented as *fully stateless*
entities, that simply take one line as their input and return a line.
Internally for implementation purposes they may be a normal function or a
callable object, but the only input they receive will be a single line and they
should only return a line, without holding any data-dependent state between
calls.
As an example, the EscapedTransformer is a class so we can more clearly group
together the functionality of dispatching to individual functions based on the
starting escape character, but the only method for public use is its call
method.
ToDo
----
- Should we make push() actually raise an exception once push_accepts_more()
returns False?
- Naming cleanups. The tr_* names aren't the most elegant, though now they are
at least just attributes of a class so not really very exposed.
- Think about the best way to support dynamic things: automagic, autocall,
macros, etc.
- Think of a better heuristic for the application of the transforms in
IPythonInputSplitter.push() than looking at the buffer ending in ':'. Idea:
track indentation change events (indent, dedent, nothing) and apply them only
if the indentation went up, but not otherwise.
- Think of the cleanest way for supporting user-specified transformations (the
user prefilters we had before).
Authors
-------
* Fernando Perez
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib
import ast
import codeop
import re
import sys
import tokenize
from StringIO import StringIO
# IPython modules
from IPython.core.splitinput import split_user_input, LineInfo
from IPython.utils.py3compat import cast_unicode
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# The escape sequences that define the syntax transformations IPython will
# apply to user input. These can NOT be just changed here: many regular
# expressions and other parts of the code may use their hardcoded values, and
# for all intents and purposes they constitute the 'IPython syntax', so they
# should be considered fixed.
ESC_SHELL = '!' # Send line to underlying system shell
ESC_SH_CAP = '!!' # Send line to system shell and capture output
ESC_HELP = '?' # Find information about object
ESC_HELP2 = '??' # Find extra-detailed information about object
ESC_MAGIC = '%' # Call magic function
ESC_MAGIC2 = '%%' # Call cell-magic function
ESC_QUOTE = ',' # Split args on whitespace, quote each as string and call
ESC_QUOTE2 = ';' # Quote all args as a single string, call
ESC_PAREN = '/' # Call first argument with rest of line as arguments
ESC_SEQUENCES = [ESC_SHELL, ESC_SH_CAP, ESC_HELP ,\
ESC_HELP2, ESC_MAGIC, ESC_MAGIC2,\
ESC_QUOTE, ESC_QUOTE2, ESC_PAREN ]
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
# FIXME: These are general-purpose utilities that later can be moved to the
# general ward. Kept here for now because we're being very strict about test
# coverage with this code, and this lets us ensure that we keep 100% coverage
# while developing.
# compiled regexps for autoindent management
dedent_re = re.compile('|'.join([
r'^\s+raise(\s.*)?$', # raise statement (+ space + other stuff, maybe)
r'^\s+raise\([^\)]*\).*$', # wacky raise with immediate open paren
r'^\s+return(\s.*)?$', # normal return (+ space + other stuff, maybe)
r'^\s+return\([^\)]*\).*$', # wacky return with immediate open paren
r'^\s+pass\s*$' # pass (optionally followed by trailing spaces)
]))
ini_spaces_re = re.compile(r'^([ \t\r\f\v]+)')
# regexp to match pure comment lines so we don't accidentally insert 'if 1:'
# before pure comments
comment_line_re = re.compile('^\s*\#')
def num_ini_spaces(s):
"""Return the number of initial spaces in a string.
Note that tabs are counted as a single space. For now, we do *not* support
mixing of tabs and spaces in the user's input.
Parameters
----------
s : string
Returns
-------
n : int
"""
ini_spaces = ini_spaces_re.match(s)
if ini_spaces:
return ini_spaces.end()
else:
return 0
def last_blank(src):
"""Determine if the input source ends in a blank.
A blank is either a newline or a line consisting of whitespace.
Parameters
----------
src : string
A single or multiline string.
"""
if not src: return False
ll = src.splitlines()[-1]
return (ll == '') or ll.isspace()
last_two_blanks_re = re.compile(r'\n\s*\n\s*$', re.MULTILINE)
last_two_blanks_re2 = re.compile(r'.+\n\s*\n\s+$', re.MULTILINE)
def last_two_blanks(src):
"""Determine if the input source ends in two blanks.
A blank is either a newline or a line consisting of whitespace.
Parameters
----------
src : string
A single or multiline string.
"""
if not src: return False
# The logic here is tricky: I couldn't get a regexp to work and pass all
# the tests, so I took a different approach: split the source by lines,
# grab the last two and prepend '###\n' as a stand-in for whatever was in
# the body before the last two lines. Then, with that structure, it's
# possible to analyze with two regexps. Not the most elegant solution, but
# it works. If anyone tries to change this logic, make sure to validate
# the whole test suite first!
new_src = '\n'.join(['###\n'] + src.splitlines()[-2:])
return (bool(last_two_blanks_re.match(new_src)) or
bool(last_two_blanks_re2.match(new_src)) )
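# Worked examples (added for illustration; traced against the logic above):
#   last_two_blanks("x\n")      -> False  (only one trailing newline)
#   last_two_blanks("x\n\n\n")  -> True   (two blank lines end the input)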
def remove_comments(src):
"""Remove all comments from input source.
Note: comments are NOT recognized inside of strings!
Parameters
----------
src : string
A single or multiline input string.
Returns
-------
String with all Python comments removed.
"""
return re.sub('#.*', '', src)
def has_comment(src):
"""Indicate whether an input line has (i.e. ends in, or is) a comment.
This uses tokenize, so it can distinguish comments from # inside strings.
Parameters
----------
src : string
A single line input string.
Returns
-------
Boolean: True if source has a comment.
"""
readline = StringIO(src).readline
toktypes = set()
try:
for t in tokenize.generate_tokens(readline):
toktypes.add(t[0])
except tokenize.TokenError:
pass
return(tokenize.COMMENT in toktypes)
def get_input_encoding():
"""Return the default standard input encoding.
If sys.stdin has no encoding, 'ascii' is returned."""
# There are strange environments for which sys.stdin.encoding is None. We
# ensure that a valid encoding is returned.
encoding = getattr(sys.stdin, 'encoding', None)
if encoding is None:
encoding = 'ascii'
return encoding
#-----------------------------------------------------------------------------
# Classes and functions for normal Python syntax handling
#-----------------------------------------------------------------------------
class InputSplitter(object):
"""An object that can accumulate lines of Python source before execution.
This object is designed to be fed python source line-by-line, using
:meth:`push`. It will return on each push whether the currently pushed
code could be executed already. In addition, it provides a method called
:meth:`push_accepts_more` that can be used to query whether more input
can be pushed into a single interactive block.
This is a simple example of how an interactive terminal-based client can use
this tool::
isp = InputSplitter()
while isp.push_accepts_more():
indent = ' '*isp.indent_spaces
prompt = '>>> ' + indent
line = indent + raw_input(prompt)
isp.push(line)
print 'Input source was:\n', isp.source_reset(),
"""
# Number of spaces of indentation computed from input that has been pushed
# so far. This is the attribute callers should query to get the current
# indentation level, in order to provide auto-indent facilities.
indent_spaces = 0
# String, indicating the default input encoding. It is computed by default
# at initialization time via get_input_encoding(), but it can be reset by a
# client with specific knowledge of the encoding.
encoding = ''
# String where the current full source input is stored, properly encoded.
# Reading this attribute is the normal way of querying the currently pushed
# source code, that has been properly encoded.
source = ''
# Code object corresponding to the current source. It is automatically
# synced to the source, so it can be queried at any time to obtain the code
# object; it will be None if the source doesn't compile to valid Python.
code = None
# Input mode
input_mode = 'line'
# Private attributes
# List with lines of input accumulated so far
_buffer = None
# Command compiler
_compile = None
# Mark when input has changed indentation all the way back to flush-left
_full_dedent = False
# Boolean indicating whether the current block is complete
_is_complete = None
def __init__(self, input_mode=None):
"""Create a new InputSplitter instance.
Parameters
----------
input_mode : str
One of ['line', 'cell']; default is 'line'.
The input_mode parameter controls how new inputs are used when fed via
the :meth:`push` method:
- 'line': meant for line-oriented clients, inputs are appended one at a
time to the internal buffer and the whole buffer is compiled.
- 'cell': meant for clients that can edit multi-line 'cells' of text at
a time. A cell can contain one or more blocks that can be compiled in
'single' mode by Python. In this mode, each new input
completely replaces all prior inputs. Cell mode is thus equivalent
to prepending a full reset() to every push() call.
"""
self._buffer = []
self._compile = codeop.CommandCompiler()
self.encoding = get_input_encoding()
self.input_mode = InputSplitter.input_mode if input_mode is None \
else input_mode
def reset(self):
"""Reset the input buffer and associated state."""
self.indent_spaces = 0
self._buffer[:] = []
self.source = ''
self.code = None
self._is_complete = False
self._full_dedent = False
def source_reset(self):
"""Return the input source and perform a full reset.
"""
out = self.source
self.reset()
return out
def push(self, lines):
"""Push one or more lines of input.
This stores the given lines and returns a status code indicating
whether the code forms a complete Python block or not.
Any exceptions generated in compilation are swallowed, but if an
exception was produced, the method returns True.
Parameters
----------
lines : string
One or more lines of Python input.
Returns
-------
is_complete : boolean
True if the current input source (the result of the current input
plus prior inputs) forms a complete Python execution block. Note that
this value is also stored as a private attribute (``_is_complete``), so it
can be queried at any time.
"""
if self.input_mode == 'cell':
self.reset()
self._store(lines)
source = self.source
# Before calling _compile(), reset the code object to None so that if an
# exception is raised in compilation, we don't mislead by having
# inconsistent code/source attributes.
self.code, self._is_complete = None, None
# Honor termination lines properly
if source.rstrip().endswith('\\'):
return False
self._update_indent(lines)
try:
self.code = self._compile(source, symbol="exec")
# Invalid syntax can produce any of a number of different errors from
# inside the compiler, so we have to catch them all. Syntax errors
# immediately produce a 'ready' block, so the invalid Python can be
# sent to the kernel for evaluation with possible ipython
# special-syntax conversion.
except (SyntaxError, OverflowError, ValueError, TypeError,
MemoryError):
self._is_complete = True
else:
# Compilation didn't produce any exceptions (though it may not have
# given a complete code object)
self._is_complete = self.code is not None
return self._is_complete
def push_accepts_more(self):
"""Return whether a block of interactive input can accept more input.
This method is meant to be used by line-oriented frontends, who need to
guess whether a block is complete or not based solely on prior and
current input lines. The InputSplitter considers it has a complete
interactive block and will not accept more input only when either a
SyntaxError is raised, or *all* of the following are true:
1. The input compiles to a complete statement.
2. The indentation level is flush-left (because if we are indented,
like inside a function definition or for loop, we need to keep
reading new input).
3. There is one extra line consisting only of whitespace.
Because of condition #3, this method should be used only by
*line-oriented* frontends, since it means that intermediate blank lines
are not allowed in function definitions (or any other indented block).
If the current input produces a syntax error, this method immediately
returns False but does *not* raise the syntax error exception, as
typically clients will want to send invalid syntax to an execution
backend which might convert the invalid syntax into valid Python via
one of the dynamic IPython mechanisms.
"""
# With incomplete input, unconditionally accept more
if not self._is_complete:
return True
# If we already have complete input and we're flush left, the answer
# depends. In line mode, if there hasn't been any indentation,
# that's it. If we've come back from some indentation, we need
# the blank final line to finish.
# In cell mode, we need to check how many blocks the input so far
# compiles into, because if there's already more than one full
# independent block of input, then the client has entered full
# 'cell' mode and is feeding lines that are each complete. In this
# case we should then keep accepting. The Qt terminal-like console
# does precisely this, to provide the convenience of terminal-like
# input of single expressions, but allowing the user (with a
# separate keystroke) to switch to 'cell' mode and type multiple
# expressions in one shot.
if self.indent_spaces==0:
if self.input_mode=='line':
if not self._full_dedent:
return False
else:
try:
code_ast = ast.parse(u''.join(self._buffer))
except Exception:
return False
else:
if len(code_ast.body) == 1:
return False
# When input is complete, then termination is marked by an extra blank
# line at the end.
last_line = self.source.splitlines()[-1]
return bool(last_line and not last_line.isspace())
#------------------------------------------------------------------------
# Private interface
#------------------------------------------------------------------------
def _find_indent(self, line):
"""Compute the new indentation level for a single line.
Parameters
----------
line : str
A single new line of non-whitespace, non-comment Python input.
Returns
-------
indent_spaces : int
New value for the indent level (it may be equal to self.indent_spaces
if indentation doesn't change).
full_dedent : boolean
Whether the new line causes a full flush-left dedent.
"""
indent_spaces = self.indent_spaces
full_dedent = self._full_dedent
inisp = num_ini_spaces(line)
if inisp < indent_spaces:
indent_spaces = inisp
if indent_spaces <= 0:
#print 'Full dedent in text',self.source # dbg
full_dedent = True
if line.rstrip()[-1] == ':':
indent_spaces += 4
elif dedent_re.match(line):
indent_spaces -= 4
if indent_spaces <= 0:
full_dedent = True
# Safety
if indent_spaces < 0:
indent_spaces = 0
#print 'safety' # dbg
return indent_spaces, full_dedent
def _update_indent(self, lines):
for line in remove_comments(lines).splitlines():
if line and not line.isspace():
self.indent_spaces, self._full_dedent = self._find_indent(line)
def _store(self, lines, buffer=None, store='source'):
"""Store one or more lines of input.
If input lines are not newline-terminated, a newline is automatically
appended."""
if buffer is None:
buffer = self._buffer
if lines.endswith('\n'):
buffer.append(lines)
else:
buffer.append(lines+'\n')
setattr(self, store, self._set_source(buffer))
def _set_source(self, buffer):
return u''.join(buffer)
#-----------------------------------------------------------------------------
# Functions and classes for IPython-specific syntactic support
#-----------------------------------------------------------------------------
# The escaped translators ALL receive a line where their own escape has been
# stripped. Only '?' is valid at the end of the line, all others can only be
# placed at the start.
# Transformations of the special syntaxes that don't rely on an explicit escape
# character but instead on patterns on the input line
# The core transformations are implemented as standalone functions that can be
# tested and validated in isolation. Each of these uses a regexp, we
# pre-compile these and keep them close to each function definition for clarity
_assign_system_re = re.compile(r'(?P<lhs>(\s*)([\w\.]+)((\s*,\s*[\w\.]+)*))'
r'\s*=\s*!\s*(?P<cmd>.*)')
def transform_assign_system(line):
"""Handle the `files = !ls` syntax."""
m = _assign_system_re.match(line)
if m is not None:
cmd = m.group('cmd')
lhs = m.group('lhs')
new_line = '%s = get_ipython().getoutput(%r)' % (lhs, cmd)
return new_line
return line
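# Example (added for illustration, using a plain str input):
#   transform_assign_system("files = !ls")
# returns
#   "files = get_ipython().getoutput('ls')"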
_assign_magic_re = re.compile(r'(?P<lhs>(\s*)([\w\.]+)((\s*,\s*[\w\.]+)*))'
r'\s*=\s*%\s*(?P<cmd>.*)')
def transform_assign_magic(line):
"""Handle the `a = %who` syntax."""
m = _assign_magic_re.match(line)
if m is not None:
cmd = m.group('cmd')
lhs = m.group('lhs')
new_line = '%s = get_ipython().magic(%r)' % (lhs, cmd)
return new_line
return line
_classic_prompt_re = re.compile(r'^([ \t]*>>> |^[ \t]*\.\.\. )')
def transform_classic_prompt(line):
"""Handle inputs that start with '>>> ' syntax."""
if not line or line.isspace():
return line
m = _classic_prompt_re.match(line)
if m:
return line[len(m.group(0)):]
else:
return line
_ipy_prompt_re = re.compile(r'^([ \t]*In \[\d+\]: |^[ \t]*\ \ \ \.\.\.+: )')
def transform_ipy_prompt(line):
"""Handle inputs that start classic IPython prompt syntax."""
if not line or line.isspace():
return line
#print 'LINE: %r' % line # dbg
m = _ipy_prompt_re.match(line)
if m:
#print 'MATCH! %r -> %r' % (line, line[len(m.group(0)):]) # dbg
return line[len(m.group(0)):]
else:
return line
def _make_help_call(target, esc, lspace, next_input=None):
"""Prepares a pinfo(2)/psearch call from a target name and the escape
(i.e. ? or ??)"""
method = 'pinfo2' if esc == '??' \
else 'psearch' if '*' in target \
else 'pinfo'
arg = " ".join([method, target])
if next_input is None:
return '%sget_ipython().magic(%r)' % (lspace, arg)
else:
return '%sget_ipython().set_next_input(%r);get_ipython().magic(%r)' % \
(lspace, next_input, arg)
_initial_space_re = re.compile(r'\s*')
_help_end_re = re.compile(r"""(%{0,2}
[a-zA-Z_*][\w*]* # Variable name
(\.[a-zA-Z_*][\w*]*)* # .etc.etc
)
(\?\??)$ # ? or ??""",
re.VERBOSE)
def transform_help_end(line):
"""Translate lines with ?/?? at the end"""
m = _help_end_re.search(line)
if m is None or has_comment(line):
return line
target = m.group(1)
esc = m.group(3)
lspace = _initial_space_re.match(line).group(0)
# If we're mid-command, put it back on the next prompt for the user.
next_input = line.rstrip('?') if line.strip() != m.group(0) else None
return _make_help_call(target, esc, lspace, next_input)
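# Examples (added for illustration, using plain str inputs):
#   transform_help_end('math.cos?')  -> "get_ipython().magic('pinfo math.cos')"
#   transform_help_end('np.*load*?') -> "get_ipython().magic('psearch np.*load*')"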
class EscapedTransformer(object):
"""Class to transform lines that are explicitly escaped out."""
def __init__(self):
tr = { ESC_SHELL : self._tr_system,
ESC_SH_CAP : self._tr_system2,
ESC_HELP : self._tr_help,
ESC_HELP2 : self._tr_help,
ESC_MAGIC : self._tr_magic,
ESC_QUOTE : self._tr_quote,
ESC_QUOTE2 : self._tr_quote2,
ESC_PAREN : self._tr_paren }
self.tr = tr
# Support for syntax transformations that use explicit escapes typed by the
# user at the beginning of a line
@staticmethod
def _tr_system(line_info):
"Translate lines escaped with: !"
cmd = line_info.line.lstrip().lstrip(ESC_SHELL)
return '%sget_ipython().system(%r)' % (line_info.pre, cmd)
@staticmethod
def _tr_system2(line_info):
"Translate lines escaped with: !!"
cmd = line_info.line.lstrip()[2:]
return '%sget_ipython().getoutput(%r)' % (line_info.pre, cmd)
@staticmethod
def _tr_help(line_info):
"Translate lines escaped with: ?/??"
# A naked help line should just fire the intro help screen
if not line_info.line[1:]:
return 'get_ipython().show_usage()'
return _make_help_call(line_info.ifun, line_info.esc, line_info.pre)
@staticmethod
def _tr_magic(line_info):
"Translate lines escaped with: %"
tpl = '%sget_ipython().magic(%r)'
cmd = ' '.join([line_info.ifun, line_info.the_rest]).strip()
return tpl % (line_info.pre, cmd)
@staticmethod
def _tr_quote(line_info):
"Translate lines escaped with: ,"
return '%s%s("%s")' % (line_info.pre, line_info.ifun,
'", "'.join(line_info.the_rest.split()) )
@staticmethod
def _tr_quote2(line_info):
"Translate lines escaped with: ;"
return '%s%s("%s")' % (line_info.pre, line_info.ifun,
line_info.the_rest)
@staticmethod
def _tr_paren(line_info):
"Translate lines escaped with: /"
return '%s%s(%s)' % (line_info.pre, line_info.ifun,
", ".join(line_info.the_rest.split()))
def __call__(self, line):
"""Class to transform lines that are explicitly escaped out.
This calls the above _tr_* static methods for the actual line
translations."""
# Empty lines just get returned unmodified
if not line or line.isspace():
return line
# Get line endpoints, where the escapes can be
line_info = LineInfo(line)
if not line_info.esc in self.tr:
# If we don't recognize the escape, don't modify the line
return line
return self.tr[line_info.esc](line_info)
# A function-looking object to be used by the rest of the code. The purpose of
# the class in this case is to organize related functionality, more than to
# manage state.
transform_escaped = EscapedTransformer()
class IPythonInputSplitter(InputSplitter):
"""An input splitter that recognizes all of IPython's special syntax."""
# String with raw, untransformed input.
source_raw = ''
# Flag to track when we're in the middle of processing a cell magic, since
# the logic has to change. In that case, we apply no transformations at
# all.
processing_cell_magic = False
# Storage for all blocks of input that make up a cell magic
cell_magic_parts = []
# Private attributes
# List with lines of raw input accumulated so far.
_buffer_raw = None
def __init__(self, input_mode=None):
super(IPythonInputSplitter, self).__init__(input_mode)
self._buffer_raw = []
self._validate = True
def reset(self):
"""Reset the input buffer and associated state."""
super(IPythonInputSplitter, self).reset()
self._buffer_raw[:] = []
self.source_raw = ''
self.cell_magic_parts = []
self.processing_cell_magic = False
def source_raw_reset(self):
"""Return input and raw source and perform a full reset.
"""
out = self.source
out_r = self.source_raw
self.reset()
return out, out_r
def push_accepts_more(self):
if self.processing_cell_magic:
return not self._is_complete
else:
return super(IPythonInputSplitter, self).push_accepts_more()
def _handle_cell_magic(self, lines):
"""Process lines when they start with %%, which marks cell magics.
"""
self.processing_cell_magic = True
first, _, body = lines.partition('\n')
magic_name, _, line = first.partition(' ')
magic_name = magic_name.lstrip(ESC_MAGIC)
# We store the body of the cell and create a call to a method that
# will use this stored value. This is ugly, but it's a first cut to
# get it all working, as right now changing the return API of our
# methods would require major refactoring.
self.cell_magic_parts = [body]
tpl = 'get_ipython()._run_cached_cell_magic(%r, %r)'
tlines = tpl % (magic_name, line)
self._store(tlines)
self._store(lines, self._buffer_raw, 'source_raw')
# We can actually choose whether to allow for single blank lines here
# during input for clients that use cell mode to decide when to stop
# pushing input (currently only the Qt console).
# My first implementation did that, and then I realized it wasn't
# consistent with the terminal behavior, so I've reverted it to one
# line. But I'm leaving it here so we can easily test both behaviors,
# I kind of liked having full blank lines allowed in the cell magics...
#self._is_complete = last_two_blanks(lines)
self._is_complete = last_blank(lines)
return self._is_complete
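    # Example (added for illustration, plain str input): pushing the cell
    #   %%capture out
    #   print 1
    # stores the body "print 1\n" in cell_magic_parts and compiles the call
    #   get_ipython()._run_cached_cell_magic('capture', 'out')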
def _line_mode_cell_append(self, lines):
"""Append new content for a cell magic in line mode.
"""
# Only store the raw input. Lines beyond the first one are only
# stored for history purposes; for execution the caller will grab the
# magic pieces from cell_magic_parts and will assemble the cell body
self._store(lines, self._buffer_raw, 'source_raw')
self.cell_magic_parts.append(lines)
# Find out if the last stored block has a whitespace line as its
# last line and also this line is whitespace, case in which we're
# done (two contiguous blank lines signal termination). Note that
# the storage logic *enforces* that every stored block is
# newline-terminated, so we grab everything but the last character
# so we can have the body of the block alone.
last_block = self.cell_magic_parts[-1]
self._is_complete = last_blank(last_block) and lines.isspace()
return self._is_complete
def transform_cell(self, cell):
"""Process and translate a cell of input.
"""
self.reset()
self.push(cell)
return self.source_reset()
def push(self, lines):
"""Push one or more lines of IPython input.
This stores the given lines and returns a status code indicating
whether the code forms a complete Python block or not, after processing
all input lines for special IPython syntax.
Any exceptions generated in compilation are swallowed, but if an
exception was produced, the method returns True.
Parameters
----------
lines : string
One or more lines of Python input.
Returns
-------
is_complete : boolean
True if the current input source (the result of the current input
plus prior inputs) forms a complete Python execution block. Note that
this value is also stored as a private attribute (_is_complete), so it
can be queried at any time.
"""
if not lines:
return super(IPythonInputSplitter, self).push(lines)
# We must ensure all input is pure unicode
lines = cast_unicode(lines, self.encoding)
# If the entire input block is a cell magic, return after handling it
# as the rest of the transformation logic should be skipped.
if lines.startswith('%%') and not \
(len(lines.splitlines()) == 1 and lines.strip().endswith('?')):
return self._handle_cell_magic(lines)
# In line mode, a cell magic can arrive in separate pieces
if self.input_mode == 'line' and self.processing_cell_magic:
return self._line_mode_cell_append(lines)
# The rest of the processing is for 'normal' content, i.e. IPython
# source that we process through our transformations pipeline.
lines_list = lines.splitlines()
transforms = [transform_ipy_prompt, transform_classic_prompt,
transform_help_end, transform_escaped,
transform_assign_system, transform_assign_magic]
# Transform logic
#
# We only apply the line transformers to the input if we have either no
# input yet, or complete input, or if the last line of the buffer ends
# with ':' (opening an indented block). This prevents the accidental
# transformation of escapes inside multiline expressions like
# triple-quoted strings or parenthesized expressions.
#
# The last heuristic, while ugly, ensures that the first line of an
# indented block is correctly transformed.
#
# FIXME: try to find a cleaner approach for this last bit.
# If we were in 'block' mode, since we're going to pump the parent
# class by hand line by line, we need to temporarily switch out to
# 'line' mode, do a single manual reset and then feed the lines one
# by one. Note that this only matters if the input has more than one
# line.
changed_input_mode = False
if self.input_mode == 'cell':
self.reset()
changed_input_mode = True
saved_input_mode = 'cell'
self.input_mode = 'line'
# Store raw source before applying any transformations to it. Note
# that this must be done *after* the reset() call that would otherwise
# flush the buffer.
self._store(lines, self._buffer_raw, 'source_raw')
try:
push = super(IPythonInputSplitter, self).push
buf = self._buffer
for line in lines_list:
if self._is_complete or not buf or \
(buf and buf[-1].rstrip().endswith((':', ','))):
for f in transforms:
line = f(line)
out = push(line)
finally:
if changed_input_mode:
self.input_mode = saved_input_mode
return out
|
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/IPython/core/inputsplitter.py
|
Python
|
lgpl-3.0
| 34,393
|
[
"Brian"
] |
88fcc9def25cb5f15ff127f5291fca075543b86041312f0082aa5569c476d7d2
|
"""
Collection of functions for inspecting assembly results
"""
import os
import sys
import re
import logging
import pandas
import numpy
from Bio import SeqIO, SeqUtils
try:
from edl.util import ascii_histogram, InputFile
from edl.blastm8 import FilterParams, filterM8Stream
except ImportError:
# This is a little hack to make this module runnable as a script
sys.path[0] += "/.."
from edl.util import ascii_histogram, InputFile
from edl.blastm8 import FilterParams, filterM8Stream
logger = logging.getLogger(__name__)
def main():
"""
Simple hook for running some of the functions below as a script.
Only works with positional arguments that are strings.
Examples:
python assembly.py contig_read_counts file_1
will run contig_read_counts("file_1")
"""
log_level = logging.WARN
while sys.argv[1] == '-v':
sys.argv.pop(1)
log_level -= 10
logger.setLevel(log_level)
logger.debug("Log level: %s", log_level)
function = eval(sys.argv[1])
args = []
kwargs = {}
for arg in sys.argv[2:]:
try:
param, value = arg.split("=", 1)
try:
value = eval(value)
except NameError:
pass
kwargs[param] = value
except ValueError:
args.append(arg)
logger.debug("Function: {}\nArguments: {}\nKWArgs: {}".format(function,
args,
kwargs))
function(*args, **kwargs)
def get_contig_stats(contigs_fasta,
contig_depth_file=None,
contig_read_counts_file=None,
contig_stats_file=None,
contig_histogram_file=None,
**kwargs):
"""
Extracts GC and length from contigs fasta
Can optionally merge with read counts and mapped coverage if
samtools output files given.
provide a contig_stats_file location to write data to disk instead
of just returning a pandas DataFrame.
provide contig_histogram_file to produce a file with summary stats
and histograms for each metric. See contig_length_stats() and numpy.
histogram() for additional kwargs that can be passed when using this
option.
"""
# parse contigs fasta
logger.info("Parsing contig fasta file: {}".format(contigs_fasta))
contig_stats = get_stats_from_contigs(contigs_fasta)
# add other files if requested
if contig_read_counts_file is not None:
# read counts
logger.info("Parsing read count file: {}"
.format(contig_read_counts_file))
read_count_table = pandas.read_table(contig_read_counts_file,
delim_whitespace=True,
names=['read count',
'contig'])\
.set_index('contig')
contig_stats = contig_stats.join(read_count_table, how='left')
if contig_depth_file is not None:
# convert base by base depth data into coverage
logger.info("Parsing read depth file: {}"
.format(contig_depth_file))
mapping_depth_table = get_samtool_depth_table(contig_depth_file)
contig_stats = contig_stats.join(mapping_depth_table, how='left')
# sort and get cumulative length
contig_stats.fillna(0, inplace=True)
contig_stats.sort_values(by='length', ascending=False, inplace=True)
contig_stats['cumul length'] = contig_stats.length.cumsum()
for col in ['length', 'read count', 'mx cov', 'mn cov', 'cumul length']:
if col in contig_stats.columns:
contig_stats[col] = contig_stats[col].astype(int)
if contig_stats_file is not None:
logger.info("Writing stats table to: {}".format(contig_stats_file))
contig_stats.to_csv(contig_stats_file, sep='\t', float_format="%0.2f")
if contig_histogram_file is not None:
with open(contig_histogram_file, 'w') as OUTF:
if 'min_lengths' in kwargs:
min_lengths = kwargs.pop('min_lengths')
elif 'min_length' in kwargs:
min_lengths = [kwargs.pop('min_length'), ]
else:
min_lengths = [0, 500, 2000]
for i, min_length in enumerate(min_lengths):
logger.info("Making report for contigs >= {}"
.format(min_length))
if i > 0:
OUTF.write("\n===============================\
================\n\n")
OUTF.write("CONTIGS longer or equal to {}bp:\n\n"
.format(min_length))
OUTF.write(contig_length_stats(contig_stats,
return_type='report',
min_length=min_length,
**kwargs
))
return contig_stats
def get_stats_from_contigs(contigs_fasta):
"""
Use BioPython parser and GC calculator to get contig lengths and
GCs from contigs fasta
"""
# initialize lists
contigs = []
lengths = []
gcs = []
# loop over fasta records (this is 2-3 times faster than SeqIO.parse)
# (and only marginally slower than my custom built parser.)
with open(contigs_fasta, 'r') as CF:
for title, sequence in SeqIO.FastaIO.SimpleFastaParser(CF):
            # take the first whitespace-delimited token of the title as the id
contig = title.split(None, 1)[0]
length = len(sequence)
contigs.append(contig)
lengths.append(length)
gcs.append(SeqUtils.GC(sequence))
# convert to DataFrame and return
return pandas.DataFrame({'contig': contigs,
'length': lengths,
'GC': gcs}).set_index('contig')
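# A minimal sketch (assumed data) of the parsing step used above: the
# SimpleFastaParser yields (title, sequence) pairs, and only the first
# whitespace-delimited token of the title is kept as the contig id.
def _demo_simple_fasta_parser():
    import io
    fasta = io.StringIO(">c1 some description\nACGT\n>c2\nGGCC\n")
    for title, sequence in SeqIO.FastaIO.SimpleFastaParser(fasta):
        print(title.split(None, 1)[0], len(sequence), SeqUtils.GC(sequence))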
def get_samtool_depth_table(depth_file):
"""
Calculate coverage stats for each contig in an assembly
Params:
depth_file: output file from the command:
`samtools depth reads.v.contigs.bam`
this is a 3 column file with one line per base.
columns are:
'contig_id base_index base_depth'
Returns:
pandas.DataFrame with one row per contig and the three following columns:
contig av cov mx cov
"""
with open(depth_file, 'r') as DEPTHS:
return get_samtool_depth_table_from_handle(DEPTHS)
def get_samtool_depth_table_from_handle(depth_stream):
"""
Calculate coverage stats for each contig in an assembly
Params:
depth_stream: output file from the command:
`samtools depth reads.v.contigs.bam`
passed as an open file-like object (aka a file handle)
this is a 3 column file with one line per base.
columns are:
'contig_id base_index base_depth'
Returns:
pandas.DataFrame with one row per contig and the three following columns:
contig av cov mx cov
"""
# reading into lists is a fast way to build a big DataFrame
contigs, av_covs, mn_covs, mx_covs, md_covs = [], [], [], [], []
# loop over contig bases
current_contig = None
depths = []
for line in depth_stream:
contig, base, depth = line.split()
depth = int(depth)
if contig != current_contig:
if current_contig is not None:
# end of contig, save numbers
contigs.append(current_contig)
depth_array = numpy.array(depths)
av_covs.append(depth_array.mean())
mn_covs.append(depth_array.min())
mx_covs.append(depth_array.max())
md_covs.append(numpy.median(depth_array))
depths = []
current_contig = contig
# update contig numbers with current base
depths.append(depth)
# end of final contig, save numbers
contigs.append(current_contig)
depth_array = numpy.array(depths)
av_covs.append(depth_array.mean())
mn_covs.append(depth_array.min())
mx_covs.append(depth_array.max())
md_covs.append(numpy.median(depth_array))
return pandas.DataFrame({'contig': contigs,
'av cov': av_covs,
'mx cov': mx_covs,
'mn cov': mn_covs,
'md cov': md_covs},
columns=['contig',
'av cov',
'mn cov',
'mx cov',
'md cov'],
).set_index('contig')
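# Hedged example (made-up depths) of the three-column `samtools depth`
# format consumed above; each contig's per-base depths are reduced to
# mean/min/max/median coverage columns.
def _demo_depth_table():
    import io
    depths = io.StringIO("c1 1 4\nc1 2 6\nc2 1 2\n")
    # c1 -> av 5.0, mn 4, mx 6, md 5.0; c2 -> 2 across the board
    print(get_samtool_depth_table_from_handle(depths))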
##
# this evolved from (but now bears little resemblance to) the
# assembly_quality_stats.py script by:
# Author: Travis Poulsen
# Date: 09 Feb. 2013
# http://travispoulsen.com/blog/2013/07/basic-assembly-statistics/
# https://gist.github.com/tpoulsen/422b1a19cbd8c0f514fe/raw/assembly_quality_stats.py
def contig_length_stats(contig_stats, return_type=None,
txt_width=0,
log=False,
min_length=0,
**kwargs):
"""
Given contig stats table
* calculate length stats (including N50)
    * optionally include a text histogram (set txt_width greater than 0;
      at least 40 columns makes for a readable plot)
* return_types:
None: just print text to STDOUT
'report': return text
'data': return dictionary of data
"""
report_data = {"min_length": min_length}
contig_stats = contig_stats.loc[contig_stats.length >= min_length]
if contig_stats.shape[0] == 0:
report_data['Assembly'] = {'count': 0}
else:
report_data['Assembly'] = get_N_stats(contig_stats)
for column, label in {'length': 'Contig Lengths',
'read count': 'Reads per Contig',
'av cov': 'Mean Mapped Depth',
'mx cov': 'Maximum Mapped Depth',
'mn cov': 'Minimum Mapped Depth',
'GC': 'GC Content'}.items():
if column not in contig_stats.columns:
continue
report_data[label] = get_column_stats(contig_stats[column])
if txt_width > 0:
report_data[label]['log'] = "(log)" if log else ""
report_data[label]['histogram'] = \
ascii_histogram(numpy.histogram(contig_stats[column],
**kwargs),
log=log,
width=txt_width)
if return_type == 'data':
return report_data
report = get_contig_stats_report(report_data)
if return_type is None:
print(report)
else:
return report
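# Hedged usage sketch (file name assumed): build the per-contig table
# first, then summarize only contigs of at least 1 kb as a text report.
#   stats = get_contig_stats("contigs.fasta")
#   print(contig_length_stats(stats, return_type='report', min_length=1000))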
def get_contig_stats_report(report_data):
"""
return a formatted string summarizing contig length data
"""
N_stats = report_data.pop("Assembly")
if N_stats['count'] == 0:
return """\
Assembly Summary Stats:
Contigs: 0
"""
report = """\
Assembly Summary Stats:
Contigs: {count}
N50: {N50}
N75: {N75}
N90: {N90}
""".format(**N_stats)
for column in ['Contig Lengths',
'Reads per Contig',
'GC Content',
'Mean Mapped Depth',
'Maximum Mapped Depth',
'Minimum Mapped Depth',
]:
if column not in report_data:
continue
report += """
Summary of {column}:
Min: {min}
Max: {max}
Mean: {mean}
Median: {median}
StdDev: {std}
""".format(column=column, **report_data[column])
if 'histogram' in report_data[column]:
report += """
Histogram of {column} {log}:
{histogram}
""".format(column=column, **report_data[column])
return report
def get_N_stats(contig_stats, N_levels=[50, 75, 90]):
"""
uses "length" and "cumulength" columns in contig_stats table
to quickly get N50 and others for given N_levels (def: 50,75,90)
"""
N_stats = {'count': contig_stats.shape[0]}
    # total length is the last cumulative length value
total_length = contig_stats['cumul length'].iloc[-1]
# set up iterator over just these columns
cumulen_iter = iter(contig_stats[['length', 'cumul length']].iterrows())
# Loop over N's. Since they are sorted, we don't need to restart
# the length/cumul_length iterator
cumulength = 0
for N in sorted(N_levels):
# looking for N% of the total length
target = total_length * N / 100
# stop when we get there
while cumulength < target:
contig, (length, cumulength) = next(cumulen_iter)
        # Save the contig length that got us here
N_key = "N{0:02d}".format(N)
N_stats[N_key] = length
return N_stats
def get_column_stats(data):
"""
return a dict of useful stats
"""
return {'min': data.min(),
'max': data.max(),
'mean': data.mean(),
'median': data.median(),
'std': data.std()
}
##
# the calc_stat, plot_assembly, and getN50 methods originally come from the
# assembly_quality_stats.py script by:
# Author: Travis Poulsen
# Date: 09 Feb. 2013
# http://travispoulsen.com/blog/2013/07/basic-assembly-statistics/
# https://gist.github.com/tpoulsen/422b1a19cbd8c0f514fe/raw/assembly_quality_stats.py
def calc_stats(file_in,
return_type=None,
txt_width=0,
log=False,
backend=None,
format='fasta',
minLength=0,
**kwargs):
"""
    Given contigs in fasta format:
* calculate length stats (including N50)
* plot histogram (use txt_width and backend to select format)
* return_types:
None: just print text to STDOUT
'report': return text
'data': return dictionary of data
"""
with open(file_in, 'r') as seq:
sizes = [len(record) for record in SeqIO.parse(
seq, format) if len(record) >= minLength]
sizes = numpy.array(sizes)
data = get_contig_length_stats(sizes)
if return_type != 'data':
report = get_contig_length_report(data)
if backend is not None:
h = plot_assembly(sizes, file_in, data, log=log,
backend=backend, **kwargs)
if txt_width > 0:
if backend is None:
h = numpy.histogram(sizes, **kwargs)
histogramText = ascii_histogram(h, log=log, width=txt_width)
if return_type != 'data':
if log:
report += "\n\nContig length histogram (log):\n"
else:
report += "\n\nContig length histogram:\n"
report += histogramText
else:
data['histogram'] = histogramText
if return_type == 'data':
return data
elif return_type is None:
print(report)
else:
return report
def get_contig_length_stats(sizes):
"""
return a dict of useful contig length stats
"""
return {'min': numpy.min(sizes),
'max': numpy.max(sizes),
'mean': numpy.mean(sizes),
'median': numpy.median(sizes),
'N50': int(getN50(sizes)),
'N75': int(getN50(sizes, N=75)),
'N90': int(getN50(sizes, N=90)),
'count': len(sizes),
}
def get_contig_length_report(data):
"""
return a formatted string summarizing contig length data
"""
report = 'Number of contigs:\t%i' % data['count']
report += '\nN50:\t%i' % data['N50']
report += '\nN75:\t%i' % data['N75']
report += '\nN90:\t%i' % data['N90']
report += '\nMean contig length:\t%.2f' % data['mean']
report += '\nMedian contig length:\t%.2f' % data['median']
report += '\nMinimum contig length:\t%i' % data['min']
report += '\nMaximum contig length:\t%i' % data['max']
return report
def mira_stats(contigStatsFile, minLength=0, bins=20, **kwargs):
"""
Get length, coverage, and GC stats from mira info file
Returns text with N50 and histograms
"""
contigStats = pandas.read_csv(contigStatsFile, index_col=0, sep='\t')
if minLength > 0:
contigStats = contigStats[contigStats.length >= minLength]
sizes = contigStats['length']
data = get_contig_length_stats(sizes)
report = get_contig_length_report(data)
# add histograms to report
report += '\nHistograms:\n'
for key in ['length', 'GC%', 'av.cov', 'mx.cov.', 'av.qual']:
report += '\n'
report += ascii_histogram(
numpy.histogram(contigStats[key], bins=bins), label=key, **kwargs)
return report
def plot_assembly(sizes, file_in, length_data, backend=None, **kwargs):
min_contig = length_data['min']
max_contig = length_data['max']
avg_contig = length_data['mean']
num_contig = length_data['count']
if backend:
import matplotlib
matplotlib.use(backend)
from matplotlib import pyplot as plt
h = plt.hist(sizes, **kwargs)
    plt.title('%i %s sequences\n'
              'Lengths %i to %i, '
              'Average contig length: %.2f' % (num_contig, file_in,
                                               min_contig, max_contig,
                                               avg_contig))
plt.xlabel('Sequence length (bp)')
plt.ylabel('Count')
return h
def getN50(sizes, N=50):
"""
Get the N50 of the contigs. This is the sequence length at which point
half of the bases in the entire assembly are contained in contigs of a
smaller size.
"""
totalLength = sum(sizes)
targetLength = float(totalLength) * N / 100.
totalLength = 0
for size in sorted(sizes, reverse=True):
totalLength += size
if totalLength >= targetLength:
return size
else:
raise Exception("Target length never reached!\
\nN=%d, target=%d, total=%d" % (N,
targetLength,
totalLength))
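# Worked example of the N50 definition above (toy sizes): for lengths
# [10, 8, 6, 4, 2] the total is 30, so the N50 target is 15; the sorted
# cumulative sums 10, 18 first reach 15 at the second contig, so N50 = 8.
def _demo_getN50():
    assert getN50([10, 8, 6, 4, 2]) == 8
    # N90 target is 27, first reached at 10 + 8 + 6 + 4, so N90 = 4
    assert getN50([10, 8, 6, 4, 2], N=90) == 4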
######
# A set of methods for plotting the quality of reseq hits to a set of contigs
def plotHitStats(axes, sequenceFile, hitsFile,
referenceLengths=None,
sequenceFormat='fasta',
bins=20, hlog=False, lengthRange=None,
barcolor='b', baredgecolor='k', hcolor='r', params=None,
**kwargs):
"""
    Given two or three matplotlib.axes.AxesSubplot objects, create a plot
    in each, binned by sequence length:
* overlay a histogram of sequence lengths on the fraction of sequences
in each bin that have a hit
* same bins as above, but use total sequence bases on top of fraction
of bases covered by hits
* if fasta or lengths of reference hits given, plot (using same bins)
fraction of reference bases used in hits
Positional Arguments:
    * axes: length 2 (or 3) list or tuple of ax objects
* sequenceFile: fasta or similar file of sequence data
* hitsFile: text hit table
Parameters:
* hit parsing
        * params=None edl.blastm8.FilterParams object to filter hits
        * **kwargs used to create a FilterParams object if params not given
* sequence parsing
* sequenceFormat='fasta'. Can be anything supported by BioPython
        * referenceLengths=None: if given, create a 3rd plot using the
          given dictionary of reference lengths. It can also be the fasta
          of the reference sequences and the code will look up the lengths.
* plotting:
* bins=20 Number of length bins to divide sequence data into
* barcolor='b' Color of data bars
* baredgecolor='k' Color of data bar edges
* hcolor='r' Color of histogram line and axis labels
* lengthRange=None Can be used to force the x axis to span a
specific range
* hlog=False If set to True, histogram data plotted in log scale
"""
# get sequence lengths
lengths = getSequenceLengths(sequenceFile, format=sequenceFormat)
# parse hit file
if params is None:
params = FilterParams(**kwargs)
hits = getSequenceHits(hitsFile, params)
# plot data
plotTranscriptHitRateByLengthBins(axes[0], lengths, hits,
bins=bins, lengthRange=lengthRange,
barcolor=barcolor,
baredgecolor=baredgecolor,
hcolor=hcolor, hlog=hlog)
plotTranscriptCoverageByLengthBins(axes[1], lengths, hits,
bins=bins, lengthRange=lengthRange,
barcolor=barcolor,
baredgecolor=baredgecolor,
hcolor=hcolor, hlog=hlog)
if referenceLengths is not None:
plotHitCoverageByLengthBins(axes[2], lengths, hits, referenceLengths,
bins=bins, lengthRange=lengthRange,
barcolor=barcolor,
baredgecolor=baredgecolor,
hcolor=hcolor, hlog=hlog)
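# Hedged usage sketch (file names assumed): two stacked axes, or three
# when reference lengths are supplied for the third panel.
#   from matplotlib import pyplot
#   fig, axes = pyplot.subplots(3, 1)
#   plotHitStats(axes, "transcripts.fasta", "hits.m8",
#                referenceLengths="reference.fasta")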
def getSequenceHits(hitsFile, params):
"""
build a map from sequences to their hits
"""
sequenceHits = {}
hitCount = 0
with InputFile(hitsFile) as m8stream:
for seqid, hits in filterM8Stream(m8stream,
params,
return_lines=False):
if len(hits) == 0:
continue
hitCount += len(hits)
sequenceHits[seqid] = hits
logging.debug("Parsed %d hits for %d sequences fromm %d lines" %
(hitCount, len(sequenceHits), m8stream.lines))
return sequenceHits
def getSequenceLengths(sequenceFile, format='fasta'):
"""
Get the sequence sizes from a fasta or other file
"""
if format == 'CAF':
return getContigLengthsFromCAF(sequenceFile)
sequenceLengths = {}
for record in SeqIO.parse(sequenceFile, format=format):
sequenceLengths[record.id] = len(record)
logging.debug("Parsed lengths for %d sequences" % (len(sequenceLengths)))
return sequenceLengths
def plotTranscriptHitRateByLengthBins(ax, lengths, hits,
bins=20,
lengthRange=None,
barcolor='b',
baredgecolor='k',
hlog=False,
hcolor='r'):
"""
Given a dictionary of transcript lengths and a dictionary of hits,
Produce a plot of hit rate by length bin.
"""
# Don't try to plot empty data
if len(lengths) == 0:
raise Exception("Lengths cannot be empty!")
    # Draw counts as a stepped histogram
ax2 = ax.twinx()
    transcriptCounts, boundaries = ax2.hist(list(lengths.values()),
                                            bins=bins,
                                            range=lengthRange,
                                            histtype='step',
                                            log=hlog,
                                            color=hcolor)[:2]
ax2.set_ylabel('counts', color=hcolor)
for tl in ax2.get_yticklabels():
tl.set_color(hcolor)
# count hits by bin
hitCounts = numpy.zeros(transcriptCounts.shape)
for transcript in hits:
try:
index = getBin(lengths[transcript], boundaries)
except ValueError:
# length was outside range
continue
hitCounts[index] += 1
# normalize hit counts by transcript counts
hitRate = hitCounts / transcriptCounts
# remove infinities
hitRate[transcriptCounts == 0] = 0
# Draw histogram bars
lefts = boundaries[:-1]
widths = [boundaries[i + 1] - boundaries[i]
for i in range(len(boundaries) - 1)]
ax.bar(lefts, hitRate, width=widths,
color=barcolor, edgecolor=baredgecolor)
ax.set_ylim([0, 1])
ax.set_ylabel('hit rate')
ax.set_xlabel('transcript length')
def plotTranscriptCoverageByLengthBins(ax, lengths, hits,
bins=20,
lengthRange=None,
barcolor='b',
baredgecolor='k',
hlog=False,
hcolor='r',
includeMissed=True):
"""
Given a dictionary of transcript lengths and a dictionary of hits,
    Produce a plot of coverage rate by length bin, i.e. what fraction of
    total transcript bases were matched.
"""
# Don't try to plot empty data
if len(lengths) == 0:
raise Exception("Lengths cannot be empty!")
    transcriptCounts, boundaries = numpy.histogram(
        list(lengths.values()), bins=bins, range=lengthRange)
# count bases by bin
hitBaseCounts = numpy.zeros(transcriptCounts.shape)
totalBaseCounts = numpy.zeros(transcriptCounts.shape)
    for transcript, hitList in hits.items():
try:
index = getBin(lengths[transcript], boundaries)
except ValueError:
# length was outside range
continue
totalBaseCounts[index] += lengths[transcript]
hitBaseCounts[index] += longestHit(hitList)
    if includeMissed:
        for transcript, length in lengths.items():
            if transcript not in hits:
                try:
                    # bin by this transcript's own length (the old code
                    # reused the index left over from the previous loop)
                    index = getBin(length, boundaries)
                except ValueError:
                    # length was outside range
                    continue
                totalBaseCounts[index] += length
# Simulate stepped histogram of total bases
ax2 = ax.twinx()
x, y = getSteppedBars(totalBaseCounts, boundaries)
if hlog:
ax2.set_yscale("log", nonposy='clip')
ax2.plot(x, y, color=hcolor)
ax2.set_ylabel('total bases', color=hcolor)
for tl in ax2.get_yticklabels():
tl.set_color(hcolor)
# normalize hit counts by transcript counts
hitRate = hitBaseCounts / totalBaseCounts
# remove infinities
hitRate[totalBaseCounts == 0] = 0
# Draw histogram bars
lefts = boundaries[:-1]
widths = [boundaries[i + 1] - boundaries[i]
for i in range(len(boundaries) - 1)]
ax.bar(lefts, hitRate, width=widths,
color=barcolor, edgecolor=baredgecolor)
ax.set_ylim([0, 1])
ax.set_ylabel('bases matched')
ax.set_xlabel('transcript length')
def plotHitCoverageByLengthBins(ax, lengths, hits, referenceLengths,
bins=20,
lengthRange=None,
barcolor='b',
baredgecolor='k',
hlog=False,
hcolor='r',
includeMissed=False):
"""
Given a dictionary of transcript lengths, a dictionary
of hits, and a dict of reference sequence lengths...
    Produce a plot of reference coverage rate by length bin, i.e. what
    fraction of total residues in the reference sequences were matched.
The param referenceLengths can be a dictionary from hit names to
lengths or a fasta file of sequences. The names in both should
match the hit names in the "hits" dictionary.
"""
# Don't try to plot empty data
if len(lengths) == 0:
raise Exception("Lengths cannot be empty!")
transcriptCounts, boundaries = numpy.histogram(
lengths.values(), bins=bins, range=lengthRange)
get_hit_length = build_get_hit_length_function(referenceLengths)
# count bases by bin
hitBaseCounts = numpy.zeros(transcriptCounts.shape)
referenceBaseCounts = numpy.zeros(transcriptCounts.shape)
totalBaseCounts = numpy.zeros(transcriptCounts.shape)
    for transcript, hitList in hits.items():
try:
index = getBin(lengths[transcript], boundaries)
except ValueError:
# length was outside range
continue
totalBaseCounts[index] += lengths[transcript]
        firstHit = hitList[0]
        hitLength = get_hit_length(firstHit.hit)
logger.debug("Hit of length %d goes from %d to %d" % (hitLength,
firstHit.hstart,
firstHit.hend))
referenceBaseCounts[index] += hitLength
hitBaseCounts[index] += numpy.abs(firstHit.hend - firstHit.hstart) + 1
    if includeMissed:
        for transcript, length in lengths.items():
            if transcript not in hits:
                try:
                    # bin by this transcript's own length (the old code
                    # reused the index left over from the previous loop)
                    index = getBin(length, boundaries)
                except ValueError:
                    # length was outside range
                    continue
                totalBaseCounts[index] += length
# Simulate stepped histogram of total bases
ax2 = ax.twinx()
x, y = getSteppedBars(totalBaseCounts, boundaries)
if hlog:
ax2.set_yscale("log", nonposy='clip')
ax2.plot(x, y, color=hcolor)
ax2.set_ylabel('total bases', color=hcolor)
for tl in ax2.get_yticklabels():
tl.set_color(hcolor)
# normalize hit counts by transcript counts
hitRate = hitBaseCounts / referenceBaseCounts
    # zero out bins with no reference bases (avoids NaN/inf from /0)
    hitRate[referenceBaseCounts == 0] = 0
# Draw histogram bars
lefts = boundaries[:-1]
widths = [boundaries[i + 1] - boundaries[i]
for i in range(len(boundaries) - 1)]
ax.bar(lefts, hitRate, width=widths,
color=barcolor, edgecolor=baredgecolor)
ax.set_ylim([0, 1])
    ax.set_ylabel('fraction of reference matched')
ax.set_xlabel('transcript length')
def build_get_hit_length_function(referenceLengths):
"""
Given the referenceLengths parameter return a lambda function that will
map a reference sequence id to its sequence length
    The referenceLengths parameter may be either a python dict or the str
    name of a fasta file. In the latter case, the file is parsed to get lengths.
"""
if isinstance(referenceLengths, str):
import screed
# assume we have the path to a fasta file
# has it been parsed by screed?
if not os.path.exists("%s_screed" % (referenceLengths)):
# TODO: just use Bio.SeqIO to get lengths if
# screed module or screed index is missing.
# screed is overkill here.
screed.read_fasta_sequences(referenceLengths)
refScreed = screed.ScreedDB(referenceLengths)
return lambda h: len(refScreed[h]['sequence'])
else:
return lambda h: referenceLengths[h]
def plotSortedContigLengths(ax, lengths,
linecolor='b',
log=False,
minContigLength=500):
"""
Given a list of contig lengths, create a stepped plot of ordered lengths.
"""
# Don't try to plot empty data
if len(lengths) == 0:
raise Exception("Lengths cannot be empty!")
x, y = getSteppedBars(sorted([length for length in lengths
if length > minContigLength],
reverse=True))
if log:
ax.set_yscale("log", nonposy='clip')
objects = ax.plot(x, y, color=linecolor)
ax.set_ylabel('contig length')
ax.set_xlabel('contig number')
return objects
def longestHit(hits):
    """Return the longest query-side hit span (in bases) among the hits"""
    return max(numpy.abs(hit.qend - hit.qstart) + 1 for hit in hits)
def getSteppedBars(values, boundaries=None):
"""
return lists of x and y coordinates for a stepped line/bar plot
given N values (bar heights) and N+1 boundaries (bar edges)
"""
x = []
y = []
def translate(b):
return b if numpy.isfinite(b) else 0
if boundaries is None:
boundaries = range(len(values) + 1)
x.append(boundaries[0])
y.append(0)
for i in range(len(values)):
y.append(translate(values[i]))
x.append(boundaries[i])
y.append(translate(values[i]))
x.append(boundaries[i + 1])
x.append(boundaries[-1])
y.append(0)
return x, y
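# Hedged example of the stepped-bar coordinates produced above: values
# [2, 3] with boundaries [0, 1, 2] give x = [0, 0, 1, 1, 2, 2] and
# y = [0, 2, 2, 3, 3, 0], i.e. the outline drops to zero at both ends.
def _demo_stepped_bars():
    assert getSteppedBars([2, 3], [0, 1, 2]) == ([0, 0, 1, 1, 2, 2],
                                                 [0, 2, 2, 3, 3, 0])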
def getBin(value, binboundaries):
"""
Given a value and a set of N+1 ordered values defining N segments or bins,
return the index of the bin containing value.
    Note: this currently uses brute force and could be sped up with a
    simple binary search
"""
if binboundaries[0] > value:
raise ValueError("Value too low")
for i in range(len(binboundaries) - 1):
if binboundaries[i] <= value and value <= binboundaries[i + 1]:
return i
raise ValueError("Value too high")
sequenceRE = re.compile(r'^Sequence\s+:\s+(\S+)')
dnaRE = re.compile(r'^DNA\s+:\s+(\S+)')
qualityRE = re.compile(r'^BaseQuality\s+:\s+(\S+)')
def getContigLengthsFromCAF(cafFile):
"""
Return a dictionary of contig names and lengths from a CAF file
"""
    if isinstance(cafFile, str):
cafHandle = open(cafFile)
else:
cafHandle = cafFile
lengths = {}
try:
while True:
            line = next(cafHandle)
m = sequenceRE.match(line)
if m is None:
continue
# found a sequence header
            # group(1) is the sequence name captured after "Sequence :"
            seqName = m.group(1)
            if next(cafHandle).strip() != 'Is_contig':
continue
# it is a contig
contigName = seqName
# jump to sequence
while True:
                line = next(cafHandle)
if dnaRE.match(line):
break
            # parse out sequence (stop before counting the BaseQuality line)
            sequenceLength = 0
            while True:
                line = next(cafHandle)
                if qualityRE.match(line):
                    break
                sequenceLength += len(line.strip())
lengths[seqName] = sequenceLength
except StopIteration:
pass
    if isinstance(cafFile, str):
cafHandle.close()
return lengths
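# Rough shape of the CAF records the parser above expects (our gloss of
# the format, field values assumed):
#   Sequence : contig00001
#   Is_contig
#   ...
#   DNA : contig00001
#   ACGTACGT...
#   BaseQuality : contig00001
# The bases between the DNA and BaseQuality headers are counted to get
# the contig length.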
if __name__ == '__main__':
main()
|
jmeppley/py-metagenomics
|
edl/assembly.py
|
Python
|
mit
| 34,517
|
[
"Biopython"
] |
58467aff5cd9bb596290c54c90e499045bae0bad0f837daf732378b021a33bd5
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CustomValue'
db.create_table(u'profiles_customvalue', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('indicator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.Indicator'])),
('value_operator', self.gf('django.db.models.fields.CharField')(max_length='255', null=True, blank=True)),
('display_value', self.gf('django.db.models.fields.CharField')(max_length='255')),
))
db.send_create_signal(u'profiles', ['CustomValue'])
def backwards(self, orm):
# Deleting model 'CustomValue'
db.delete_table(u'profiles_customvalue')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 11, 12, 26, 28, 407755)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 11, 12, 26, 28, 407331)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'maps.shapefile': {
'Meta': {'object_name': 'ShapeFile'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'geo_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geo_meta_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'geom_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'shape_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'zoom_threshold': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
u'profiles.customvalue': {
'Meta': {'object_name': 'CustomValue'},
'display_value': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'value_operator': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'})
},
u'profiles.datadomain': {
'Meta': {'ordering': "['weight']", 'object_name': 'DataDomain'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Indicator']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'subdomain_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subdomains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'symmetrical': 'False', 'blank': 'True'}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.datapoint': {
'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'DataPoint'},
'change_from_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_from'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
'change_to_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_to'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']", 'null': 'True'})
},
u'profiles.datasource': {
'Meta': {'object_name': 'DataSource'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'profiles.denominator': {
'Meta': {'object_name': 'Denominator'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'multiplier': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sort': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.denominatorpart': {
'Meta': {'object_name': 'DenominatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.IndicatorPart']"})
},
u'profiles.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataSource']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
},
u'profiles.georecord': {
'Meta': {'unique_together': "(('slug', 'level'), ('level', 'geo_id', 'custom_name', 'owner'))", 'object_name': 'GeoRecord'},
'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geo_searchable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']"}),
'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
'shapefile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maps.ShapeFile']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'})
},
u'profiles.indicator': {
'Meta': {'object_name': 'Indicator'},
'data_domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_distribution': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display_percent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_generated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
'limitations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'long_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'purpose': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'routine_use': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'default': "'U.S. Census Bureau'", 'max_length': '300', 'blank': 'True'}),
'universe': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
},
u'profiles.indicatordomain': {
'Meta': {'object_name': 'IndicatorDomain'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.indicatorpart': {
'Meta': {'object_name': 'IndicatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']"})
},
u'profiles.precalculatedvalue': {
'Meta': {'object_name': 'PrecalculatedValue'},
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'geo_record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'table': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'profiles.taskstatus': {
'Meta': {'object_name': 'TaskStatus'},
'error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
't_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'task': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'profiles.time': {
'Meta': {'object_name': 'Time'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
},
u'profiles.value': {
'Meta': {'object_name': 'Value'},
'datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataPoint']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
}
}
complete_apps = ['profiles']
|
216software/Profiles
|
communityprofiles/profiles/oldmigrations/0051_auto__add_customvalue.py
|
Python
|
mit
| 18,452
|
[
"MOE"
] |
12154e2c6340585d4695bebd23728f5791685d6f1d1edafcab759f1f67dc2636
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
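# For example (values assumed): partition(lambda x: x % 2, [1, 2, 3, 4])
# returns ([1, 3], [2, 4]) -- the "true" list first, original order kept.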
class accessapprovalCallTransformer(cst.CSTTransformer):
    CTRL_PARAMS: Tuple[str, ...] = ('retry', 'timeout', 'metadata')
    METHOD_TO_PARAMS: Dict[str, Tuple[str, ...]] = {
'approve_approval_request': ('name', 'expire_time', ),
'delete_access_approval_settings': ('name', ),
'dismiss_approval_request': ('name', ),
'get_access_approval_settings': ('name', ),
'get_approval_request': ('name', ),
'list_approval_requests': ('parent', 'filter', 'page_size', 'page_token', ),
'update_access_approval_settings': ('settings', 'update_mask', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: a.keyword.value not in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
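# Sketch of the rewrite leave_Call performs (method and values assumed):
#   client.get_approval_request('projects/p/approvalRequests/r', retry=r)
# becomes
#   client.get_approval_request(
#       request={'name': 'projects/p/approvalRequests/r'}, retry=r)
# i.e. positional/keyword API params are folded into a single `request`
# dict while control params (retry, timeout, metadata) stay as-is.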
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=accessapprovalCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the accessapproval client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
|
googleapis/python-access-approval
|
scripts/fixup_accessapproval_v1_keywords.py
|
Python
|
apache-2.0
| 6,343
|
[
"VisIt"
] |
a5dd21c3d62a1c71f4149967c71be1c8fa7078179555fffca756b68543be3c4d
|
import os, re, subprocess, unittest
# enable test mode
os.putenv('LAMMPS_SHELL_TESTING','1')
shell_prompt_re = r"([^>]*LAMMPS Shell> ([a-z0-9_]+) *([a-z0-9_\.]+)?.*\n)+"
cmd_group_re = r"([^>]*LAMMPS Shell> ([a-z0-9_]+) +([a-z0-9]+) +([a-z0-9]+)? *([a-z/0-9]+)?.*\n)+"
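# Rough reading of the patterns above (our gloss, not from upstream): each
# match covers one echoed prompt line; shell_prompt_re captures the
# completed command word and an optional argument, while cmd_group_re also
# captures the ID and group/style words used by the compute and fix tests.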
#
class LammpsShell(unittest.TestCase):
def setUp(self):
self.proc = subprocess.Popen('./lammps-shell',
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def tearDown(self):
self.proc.kill()
def InputRunner(self,text):
"""Test tab expansions"""
try:
[outs,errs] = self.proc.communicate(input=text, timeout=10)
self.timeout = 0
except subprocess.TimeoutExpired:
self.proc.kill()
[outs,errs] = self.proc.communicate()
self.timeout = 1
return outs.decode('UTF-8')
def testExpandClearHistory(self):
"""Test expansion of a shell specific command"""
matches = re.findall(shell_prompt_re, self.InputRunner(b'clear_his\t\n'), re.MULTILINE)
if self.timeout:
self.fail("Timeout")
else:
self.assertEqual(matches[0][1],"clear_history")
def testExpandDimension(self):
"""Test expansion of a LAMMPS command"""
matches = re.findall(shell_prompt_re, self.InputRunner(b'dimens\t\n'), re.MULTILINE)
if self.timeout:
self.fail("Timeout")
else:
self.assertEqual(matches[0][1],"dimension")
def testExpandPairStyle(self):
"""Test expansion of a pair style"""
matches = re.findall(shell_prompt_re, self.InputRunner(b'pair_st\t zer\t\n'), re.MULTILINE)
if self.timeout:
self.fail("Timeout")
else:
self.assertEqual(matches[0][1],"pair_style")
self.assertEqual(matches[0][2],"zero")
def testExpandBondStyle(self):
"""Test expansion of a bond style"""
matches = re.findall(shell_prompt_re, self.InputRunner(b'bond_st\t zer\t\n'), re.MULTILINE)
if self.timeout:
self.fail("Timeout")
else:
self.assertEqual(matches[0][1],"bond_style")
self.assertEqual(matches[0][2],"zero")
def testExpandAngleStyle(self):
"""Test expansion of a angle style"""
matches = re.findall(shell_prompt_re, self.InputRunner(b'angle_st\t zer\t\n'), re.MULTILINE)
if self.timeout:
self.fail("Timeout")
else:
self.assertEqual(matches[0][1],"angle_style")
self.assertEqual(matches[0][2],"zero")
def testExpandDihedralStyle(self):
"""Test expansion of a dihedral style"""
matches = re.findall(shell_prompt_re, self.InputRunner(b'dihedral_st\t zer\t\n'), re.MULTILINE)
if self.timeout:
self.fail("Timeout")
else:
self.assertEqual(matches[0][1],"dihedral_style")
self.assertEqual(matches[0][2],"zero")
def testExpandImproperStyle(self):
"""Test expansion of a improper style"""
matches = re.findall(shell_prompt_re, self.InputRunner(b'improper_st\t zer\t\n'), re.MULTILINE)
if self.timeout:
self.fail("Timeout")
else:
self.assertEqual(matches[0][1],"improper_style")
self.assertEqual(matches[0][2],"zero")
def testExpandComputeGroup(self):
"""Test expansion of a group-ID and a compute command"""
matches = re.findall(cmd_group_re, self.InputRunner(b'compute test al\tcentro/at\t\n'), re.MULTILINE)
if self.timeout:
self.fail("Timeout")
else:
self.assertEqual(matches[0][1],"compute")
self.assertEqual(matches[0][2],"test")
self.assertEqual(matches[0][3],"all")
self.assertEqual(matches[0][4],"centro/atom")
def testExpandFixGroup(self):
"""Test expansion of a group-ID and a fix command"""
matches = re.findall(cmd_group_re, self.InputRunner(b'fix test al\tcontroll\t\n'), re.MULTILINE)
if self.timeout:
self.fail("Timeout")
else:
self.assertEqual(matches[0][1],"fix")
self.assertEqual(matches[0][2],"test")
self.assertEqual(matches[0][3],"all")
self.assertEqual(matches[0][4],"controller")
def testExpandSource(self):
"""Test expansion of a shell command and a file name"""
        with open('.tmp.in.source', 'w') as out:
            print('units real', file=out)
matches = re.findall(shell_prompt_re, self.InputRunner(b'sour\t.tmp.in.sou\t\n'), re.MULTILINE)
os.remove('.tmp.in.source')
if self.timeout:
self.fail("Timeout")
else:
self.assertEqual(matches[0][1],"source")
self.assertEqual(matches[0][2],".tmp.in.source")
def testHistory(self):
"""Test history expansion"""
out = self.InputRunner(b'clear_history\nunits real\ndimension 2\n!!:p\n!-3:p\n!dim:p\n!uni:p\nprint !!:$\nprint !dim:1\n')
idx = 0
if self.timeout:
self.fail("Timeout")
else:
lines = out.splitlines()
for line in lines:
if line.startswith('LAMMPS Shell>'): break
idx += 1
self.assertEqual(lines[idx+4],"dimension 2")
self.assertEqual(lines[idx+6],"units real")
self.assertEqual(lines[idx+8],"dimension 2")
self.assertEqual(lines[idx+10],"units real")
self.assertEqual(lines[idx+12],"real")
self.assertEqual(lines[idx+14],"2")
###########################
if __name__ == "__main__":
unittest.main()
|
rbberger/lammps
|
unittest/tools/test_lammps_shell.py
|
Python
|
gpl-2.0
| 5,733
|
[
"LAMMPS"
] |
6c72cce45dd7ac0dc98e1cb05e723ab97f4263455bee2c130bbc34c02785f3c1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from morphforge.stdimports import *
from morphforgecontrib.stdimports import *
def main():
""" Change parameters from 3 and show a single spiking and mutltiple spiking neuron in one plot"""
"""In this example, we build a single section neuron, with passive channels,
and stimulate it with a current clamp"""
env = NEURONEnvironment()
morphDict1 = {'root': {'length': 20, 'diam': 20, 'id': 'soma'}}
m1 = MorphologyTree.fromDictionary(morphDict1)
def getKSInfTau(env):
i = InfTauInterpolation(
V = [-100, -80, -40, 0, 40],
inf = [0.0, 0.0, 0.2, 0.5, 1.0],
tau = [0.0, 50, 12, 15, 10]
)
ks_vars = {'ks': i}
ks_chl = env.Channel(
MM_InfTauInterpolatedChannel,
name='InfTau1',
equation='ks*ks*ks*ks',
conductance='2.:pS/um2',
reversalpotential='-80:mV',
statevars_new=ks_vars,
)
return ks_chl
tr0 = get_voltageclamp_soma_current_trace(env=env, V='-50:mV',
channel_functor=getKSInfTau, morphology=m1)
tr1 = get_voltageclamp_soma_current_trace(env=env, V='-20:mV',
channel_functor=getKSInfTau, morphology=m1)
tr2 = get_voltageclamp_soma_current_trace(env=env, V='20:mV',
channel_functor=getKSInfTau, morphology=m1)
TagViewer([tr0, tr1, tr2])
#def build_simulation(gbar_multiplier):
# # Create the morphology for the cell:
# morphDict1 = {'root': {'length': 20, 'diam': 20, 'id':'soma'} }
# m1 = MorphologyTree.fromDictionary(morphDict1, name="SimpleMorphology1")
#
#
# # Create the environment:
# env = NEURONEnvironment()
#
# # Create the simulation:
# sim = env.Simulation(name="TestSim1")
# cell = sim.create_cell(name="Cell1", morphology=m1)
#
#
# parameters = {
# 'e_rev': qty('50:mV'),
# 'gbar': qty('120:pS/um2'),
#
# 'm_alpha_a': qty('13.01e3:s-1'),
# 'm_alpha_b': qty('0e0:V-1 s'),
# 'm_alpha_c': qty('4.0:'),
# 'm_alpha_d': qty('6.01e-3:V'),
# 'm_alpha_e': qty('-12.56e-3:V'),
#
# 'm_beta_a': qty('5.73e3:s-1'),
# 'm_beta_b': qty('0e3:V-1 s'),
# 'm_beta_c': qty('1.0:'),
# 'm_beta_d': qty('16.01e-3:V'),
# 'm_beta_e': qty('9.69e-3:V'),
#
# 'h_alpha_a': qty('0.04e3:s-1'),
# 'h_alpha_b': qty('0.0e3:V-1 s'),
# 'h_alpha_c': qty('1.0:'),
# 'h_alpha_d': qty('29.88e-3:V'),
# 'h_alpha_e': qty('26e-3:V'),
#
# 'h_beta_a': qty('2.04e3:s-1'),
# 'h_beta_b': qty('0.0e3:V-1 s'),
# 'h_beta_c': qty('1:'),
# 'h_beta_d': qty('-8.09e-3:V'),
# 'h_beta_e': qty('-10.21e-3:V'),
# }
#
#
# ks = getKSInfTau(env)
#
#
#
#
# eqnset = EquationSetLoader.load('std_leak_chl.txt',
# dir=LocMgr.getTestEqnSetsPath())
# lk_chl = env.Channel(EqnSetChl, eqnset=eqnset,
# chlname='LeakChls',
# parameters={'gl': qty('5:pS/um2'), 'e_rev': qty('-70:mV')}
# )
#
#
#
#
#
# # Apply the mechanisms to the cells
# cell.apply_channel( lk_chl)
# cell.apply_channel( ks)
#
# cell.set_passive( PassiveProperty.SpecificCapacitance, qty('1.0:uF/cm2'))
#
#
# sim.record(cell, what=StandardTags.Voltage, name="SomaVoltage", cell_location = cell.soma, description='Membrane Voltage (gbar_multiplier = %2.2f)'%gbar_multiplier)
#
#
# sim.create_currentclamp(name='Stim1', amp=qty('200:pA'),
# dur=qty('100:ms'), delay=qty('100:ms'),
# cell_location=cell.soma)
#
#
# result = sim.run()
# return result
#
#
#results = [
# build_simulation(gbar_multiplier = 1.0),
#
# ]
#
#TagViewer(results, timerange=(95, 200)*units.ms)
#pylab.show()
if __name__=='__main__':
main()
|
mikehulluk/morphforge
|
src/morphforgecontrib/indev/testInfTau.py
|
Python
|
bsd-2-clause
| 5,846
|
[
"NEURON"
] |
a71431a136ed42224babcf2117ee54b76c0a077c78b80f4f97d2d1200c743b24
|
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
@deprecated("l1_cross_distances is deprecated and will be removed in 0.20.")
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
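# Worked example (toy data): for X = [[0], [1], [3]] the index pairs are
# (0, 1), (0, 2) and (1, 2), giving D = [[1.], [3.], [2.]] and
# ij = [[0, 1], [0, 2], [1, 2]].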
@deprecated("GaussianProcess is deprecated and will be removed in 0.20. "
"Use the GaussianProcessRegressor instead.")
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The legacy Gaussian Process model class.
Note that this class is deprecated and will be removed in 0.20.
Use the GaussianProcessRegressor instead.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
        This might be a useful parameter when one is not interested in the
        MSE and only plans to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
        Default is None, so that maximum likelihood estimation is skipped
        and theta0 is used.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
        Default is None, so that maximum likelihood estimation is skipped
        and theta0 is used.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
        It consists of iterating over several one-dimensional optimizations
        instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
    The present implementation is based on a translation of the DACE
    Matlab toolbox, see reference [NLNS2002]_.
References
----------
    .. [NLNS2002] `S.N. Lophaven, H.B. Nielsen and J.
        Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
            Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
                for k in range(int(np.ceil(float(n_eval) / batch_size))):
                    batch_from = k * batch_size
                    batch_to = min((k + 1) * batch_size, n_eval)
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
                for k in range(int(np.ceil(float(n_eval) / batch_size))):
                    batch_from = k * batch_size
                    batch_to = min((k + 1) * batch_size, n_eval)
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
        except Exception:
            # scipy removed the 'econ' argument after 0.7; newer versions
            # expose the economy transform via mode='economic'.
            Q, G = linalg.qr(Ft, mode='economic')
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
            # Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
            # Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
        if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
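# Illustrative end-to-end sketch (mirrors the class docstring example; the
# query points below are made up for illustration):
#
#   >>> import numpy as np
#   >>> X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
#   >>> y = (X * np.sin(X)).ravel()
#   >>> gp = GaussianProcess(theta0=1e-1, thetaL=1e-3, thetaU=1.)
#   >>> gp.fit(X, y)                                # doctest: +ELLIPSIS
#   GaussianProcess(beta0=None...
#   >>> y_pred, mse = gp.predict(np.atleast_2d([2., 4.]).T, eval_MSE=True)
#   >>> y_pred.shape, mse.shape                     # y was 1-D, so both ravel
#   ((2,), (2,))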
|
arjoly/scikit-learn
|
sklearn/gaussian_process/gaussian_process.py
|
Python
|
bsd-3-clause
| 34,913
|
[
"Gaussian"
] |
329db10dd7d97ba59d61db87da4581da668db94632dd96206391458aeb38cecd
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package Setup script for ML Metadata."""
import os
import platform
import shutil
import subprocess
import setuptools
from setuptools import find_packages
from setuptools import setup
from setuptools.command.install import install
from setuptools.dist import Distribution
# pylint: disable=g-bad-import-order
# It is recommended to import setuptools prior to importing distutils to avoid
# using legacy behavior from distutils.
# https://setuptools.readthedocs.io/en/latest/history.html#v48-0-0
from distutils.command import build
# pylint: enable=g-bad-import-order
class _BuildCommand(build.build):
"""Build everything that is needed to install.
  This overrides the original distutils "build" command to run the
  bazel_build command before any sub_commands.
  The build command is also invoked from the bdist_wheel and install
  commands; therefore this implementation covers the following commands:
- pip install . (which invokes bdist_wheel)
- python setup.py install (which invokes install command)
- python setup.py bdist_wheel (which invokes bdist_wheel command)
"""
def _build_cc_extensions(self):
return True
# Add "bazel_build" command as the first sub_command of "build". Each
# sub_command of "build" (e.g. "build_py", "build_ext", etc.) is executed
# sequentially when running a "build" command, if the second item in the tuple
# (predicate method) is evaluated to true.
sub_commands = [
('bazel_build', _build_cc_extensions),
] + build.build.sub_commands
# MLMD is not a purelib. However, because the extension module is not built
# by setuptools, it will be incorrectly treated as a purelib. The following
# works around that bug.
class _InstallPlatlibCommand(install):
def finalize_options(self):
install.finalize_options(self)
self.install_lib = self.install_platlib
class _BinaryDistribution(Distribution):
"""This class is needed in order to create OS specific wheels."""
def is_pure(self):
return False
def has_ext_modules(self):
return True
class _BazelBuildCommand(setuptools.Command):
"""Build Bazel artifacts and move generated files."""
def initialize_options(self):
pass
def finalize_options(self):
self._bazel_cmd = shutil.which('bazel')
if not self._bazel_cmd:
      raise RuntimeError(
          'Could not find "bazel" binary. Please visit '
          'https://docs.bazel.build/versions/master/install.html for '
          'installation instructions.')
self._additional_build_options = []
if platform.system() == 'Darwin':
# This flag determines the platform qualifier of the macos wheel.
if platform.machine() == 'arm64':
self._additional_build_options = ['--macos_minimum_os=11.0',
'--config=macos_arm64']
else:
self._additional_build_options = ['--macos_minimum_os=10.14']
def run(self):
subprocess.check_call(
[self._bazel_cmd, 'run',
'--compilation_mode', 'opt',
'--define', 'grpc_no_ares=true',
'--verbose_failures',
*self._additional_build_options,
'//ml_metadata:move_generated_files'],
# Bazel should be invoked in a directory containing bazel WORKSPACE
# file, which is the root directory.
cwd=os.path.dirname(os.path.realpath(__file__)),)
# Get version from version module.
with open('ml_metadata/version.py') as fp:
globals_dict = {}
exec(fp.read(), globals_dict) # pylint: disable=exec-used
__version__ = globals_dict['__version__']
# Get the long description from the README file.
with open('README.md') as fp:
_LONG_DESCRIPTION = fp.read()
setup(
name='ml-metadata',
version=__version__,
author='Google LLC',
author_email='tensorflow-extended-dev@googlegroups.com',
license='Apache 2.0',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
namespace_packages=[],
# Make sure to sync the versions of common dependencies (absl-py, numpy,
# six, and protobuf) with TF.
install_requires=[
'absl-py>=0.9,<2.0.0',
'attrs>=20.3,<21',
'grpcio>=1.8.6,<2',
'protobuf>=3.13,<4',
'six>=1.10,<2',
],
python_requires='>=3.7,<4',
packages=find_packages(),
include_package_data=True,
package_data={'': ['*.so', '*.pyd']},
zip_safe=False,
distclass=_BinaryDistribution,
description='A library for maintaining metadata for artifacts.',
long_description=_LONG_DESCRIPTION,
long_description_content_type='text/markdown',
keywords='machine learning metadata tfx',
url='https://github.com/google/ml-metadata',
download_url='https://github.com/google/ml-metadata/tags',
requires=[],
cmdclass={
'install': _InstallPlatlibCommand,
'build': _BuildCommand,
'bazel_build': _BazelBuildCommand,
})
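# Typical invocations covered by the command classes above (a sketch; all
# three routes run "build", which triggers "bazel_build" first):
#
#   pip install .                  # invokes bdist_wheel
#   python setup.py install        # invokes the install command
#   python setup.py bdist_wheel    # invokes the bdist_wheel command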
|
google/ml-metadata
|
setup.py
|
Python
|
apache-2.0
| 6,377
|
[
"VisIt"
] |
6e3d8d9fccb603aec54ccc313f558078a9ab9ec80b8f7cca90a529d6107ecaef
|
# Copyright (c) 2012 Purdue University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Malek Musleh
### The following file was referenced from the following site:
### http://www.m5sim.org/SPEC_CPU2006_benchmarks
###
### and subsequent changes were made
import os
import optparse
import sys
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal
addToPath('../common')
addToPath('../ruby')
addToPath('../topologies')
import Options
import Ruby
import Simulation
import CacheConfig
from Caches import *
from cpu2000 import *
import spec2k6
from Ruby import *
# Get paths we might need. It's expected this file is in m5/configs/example.
config_path = os.path.dirname(os.path.abspath(__file__))
print config_path
config_root = os.path.dirname(config_path)
print config_root
m5_root = os.path.dirname(config_root)
print m5_root
execfile(os.path.join(config_root, "ruby", "Ruby.py"))
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addSEOptions(parser)
Ruby.define_options(parser)
# Benchmark options
parser.add_option("-b", "--benchmark", default="",
help="The benchmark to be loaded.")
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
if options.benchmark == 'perlbench':
process = spec2k6.perlbench
elif options.benchmark == 'bzip2':
process = spec2k6.bzip2
elif options.benchmark == 'gcc':
process = spec2k6.gcc
elif options.benchmark == 'bwaves':
process = spec2k6.bwaves
elif options.benchmark == 'gamess':
process = spec2k6.gamess
elif options.benchmark == 'mcf':
process = spec2k6.mcf
elif options.benchmark == 'milc':
process = spec2k6.milc
elif options.benchmark == 'zeusmp':
process = spec2k6.zeusmp
elif options.benchmark == 'gromacs':
process = spec2k6.gromacs
elif options.benchmark == 'cactusADM':
process = spec2k6.cactusADM
elif options.benchmark == 'leslie3d':
process = spec2k6.leslie3d
elif options.benchmark == 'namd':
process = spec2k6.namd
elif options.benchmark == 'gobmk':
    process = spec2k6.gobmk
elif options.benchmark == 'dealII':
process = spec2k6.dealII
elif options.benchmark == 'soplex':
process = spec2k6.soplex
elif options.benchmark == 'povray':
process = spec2k6.povray
elif options.benchmark == 'calculix':
process = spec2k6.calculix
elif options.benchmark == 'hmmer':
process = spec2k6.hmmer
elif options.benchmark == 'sjeng':
process = spec2k6.sjeng
elif options.benchmark == 'GemsFDTD':
process = spec2k6.GemsFDTD
elif options.benchmark == 'libquantum':
process = spec2k6.libquantum
elif options.benchmark == 'h264ref':
process = spec2k6.h264ref
elif options.benchmark == 'tonto':
process = spec2k6.tonto
elif options.benchmark == 'lbm':
process = spec2k6.lbm
elif options.benchmark == 'omnetpp':
process = spec2k6.omnetpp
elif options.benchmark == 'astar':
process = spec2k6.astar
elif options.benchmark == 'wrf':
process = spec2k6.wrf
elif options.benchmark == 'sphinx3':
process = spec2k6.sphinx3
elif options.benchmark == 'xalancbmk':
process = spec2k6.xalancbmk
elif options.benchmark == 'specrand_i':
process = spec2k6.specrand_i
elif options.benchmark == 'specrand_f':
    process = spec2k6.specrand_f
else:
    print "Error: unknown benchmark '%s'" % options.benchmark
    sys.exit(1)
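# Equivalent attribute lookup (a sketch, not used by the original script):
# each branch above selects the spec2k6 attribute named after the benchmark,
# so the whole chain collapses to:
#
#   process = getattr(spec2k6, options.benchmark, None)  # None if unknown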
multiprocesses = []
numThreads = 1
(CPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
CPUClass.clock = '1.0GHz'
CPUClass.numThreads = numThreads
multiprocesses.append(process)
np = options.num_cpus
system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)],
physmem = SimpleMemory(range=AddrRange("512MB")),
membus = CoherentBus(), mem_mode = test_mem_mode)
for i in xrange(np):
if options.smt:
system.cpu[i].workload = multiprocesses
elif len(multiprocesses) == 1:
system.cpu[i].workload = multiprocesses[0]
else:
system.cpu[i].workload = multiprocesses[i]
options.use_map = True
Ruby.create_system(options, system)
assert(options.num_cpus == len(system.ruby._cpu_ruby_ports))
for i in xrange(np):
ruby_port = system.ruby._cpu_ruby_ports[i]
# Create the interrupt controller and connect its ports to Ruby
system.cpu[i].createInterruptController()
# Connect the cpu's cache ports to Ruby
system.cpu[i].icache_port = ruby_port.slave
system.cpu[i].dcache_port = ruby_port.slave
if buildEnv['TARGET_ISA'] == 'x86':
system.cpu[i].interrupts.pio = ruby_port.master
system.cpu[i].interrupts.int_master = ruby_port.slave
system.cpu[i].interrupts.int_slave = ruby_port.master
system.cpu[i].itb.walker.port = ruby_port.slave
system.cpu[i].dtb.walker.port = ruby_port.slave
root = Root(full_system = False, system = system)
Simulation.run(options, root, system, FutureClass)
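# Example invocation (hypothetical paths and flags; the gem5 binary name and
# the available options depend on the local build and target ISA):
#
#   ./build/X86/gem5.opt configs/spec2k6/run.py -b mcf --num-cpus=2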
|
lastweek/gem5
|
configs/spec2k6/run.py
|
Python
|
bsd-3-clause
| 7,132
|
[
"GAMESS",
"Gromacs",
"NAMD"
] |
2da2a1fc82d8fdb9ef12e723d8de148248841f218a7be87104265e2c3645e461
|
# ******************************************************************************
# pysimm.system module
# ******************************************************************************
#
# ******************************************************************************
# License
# ******************************************************************************
# The MIT License (MIT)
#
# Copyright (c) 2016 Michael E. Fortunato
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function
import os
import re
import sys
import json
from xml.etree import ElementTree as Et
from random import random
from io import StringIO
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
from itertools import permutations
from math import sin, cos, sqrt, pi, acos, floor, ceil
try:
from subprocess import call
except ImportError:
call = None
try:
import numpy as np
except ImportError:
np = None
try:
import pandas as pd
except ImportError:
pd = None
from pysimm import calc
from pysimm import error_print
from pysimm import warning_print
from pysimm import verbose_print
from pysimm import debug_print
from pysimm import PysimmError
from pysimm.calc import rotate_vector
from pysimm.utils import PysimmError, Item, ItemContainer
class Particle(Item):
"""pysimm.system.Particle
Objects inheriting from :class:`~pysimm.utils.Item` can contain arbitrary data.
Keyword arguments are assigned as attributes.
Attributes usually used are given below.
Attributes:
x: x coordinate
y: y coordinate
z: z coordinate
charge: partial charge
type: :class:`~ParticleType` object reference
"""
def __init__(self, **kwargs):
Item.__init__(self, **kwargs)
def coords(self):
return (self.x, self.y, self.z)
def check(self, style='full'):
if style == 'full':
            if self.x is None:
                error_print('particle %s has no x coordinate' % self.tag)
                return False
            if self.y is None:
                error_print('particle %s has no y coordinate' % self.tag)
                return False
            if self.z is None:
                error_print('particle %s has no z coordinate' % self.tag)
                return False
            if self.charge is None:
                error_print('particle %s has no charge' % self.tag)
                return False
            if self.type is None or not self.type.check():
                error_print('particle %s has no valid type' % self.tag)
                return False
            return True
else:
error_print('style %s not supported yet' % style)
return False
def delete_bonding(self, s):
"""pysimm.system.Particle.delete_bonding
Iterates through s.bonds, s.angles, s.dihedrals, and s.impropers and removes
those which contain this :class:`~pysimm.system.Particle`.
Args:
s: :class:`~pysimm.system.System` object from which bonding objects will be removed
Returns:
None
"""
if self.bonds:
for b in self.bonds:
if b in s.bonds:
s.bonds.remove(b.tag)
else:
for b in s.bonds:
if self is b.a or self is b.b:
s.bonds.remove(b.tag)
if self.angles:
for a in self.angles:
if a in s.angles:
s.angles.remove(a.tag)
else:
for a in s.angles:
if self is a.a or self is a.b or self is a.c:
s.angles.remove(a.tag)
if self.dihedrals:
for d in self.dihedrals:
if d in s.dihedrals:
s.dihedrals.remove(d.tag)
else:
for d in s.dihedrals:
if self is d.a or self is d.b or self is d.c or self is d.d:
s.dihedrals.remove(d.tag)
if self.impropers:
for i in self.impropers:
if i in s.impropers:
s.impropers.remove(i.tag)
else:
for i in s.impropers:
if self is i.a or self is i.b or self is i.c or self is i.d:
s.impropers.remove(i.tag)
def translate(self, dx, dy, dz):
"""pysimm.system.Particle.translate
Shifts Particle position by dx, dy, dz.
Args:
dx: distance to shift in x direction
dy: distance to shift in y direction
dz: distance to shift in z direction
Returns:
None
"""
self.x += dx
self.y += dy
self.z += dz
def __sub__(self, other):
"""pysimm.system.Particle.__sub__
Implements subtraction between :class:`~pysimm.system.Particle` objects to calculate distance.
Args:
other: :class:`~pysimm.system.Particle` object
Returns:
distance calculated by :func:`~pysimm.calc.distance`. This does not consider pbc
"""
if isinstance(other, Particle):
return calc.distance(self, other)
else:
return None
    def __rsub__(self, other):
        return self.__sub__(other)
class ParticleType(Item):
"""pysimm.system.ParticleType
Objects inheriting from :class:`~pysimm.utils.Item` can contain arbitrary data.
Keyword arguments are assigned as attributes.
Attributes usually used are given below.
Attributes:
sigma: LJ sigma value (Angstrom)
epsilon: LJ epsilon value (kcal/mol)
elem: element abbreviation, i.e. 'H' for Hydrogen, 'Cl' for Chlorine
name: force field particle type name
"""
def __init__(self, **kwargs):
Item.__init__(self, **kwargs)
def form(self, style='lj_12-6', d_range=None):
"""pysimm.system.ParticleType.form
Returns data to plot functional form for the potential energy with
the given style.
Args:
style: string for pair style of ParticleType (lj_12-6, lj_9-6, buck)
Returns:
x, y for plotting functional form (energy vs distance)
"""
if not d_range:
d_range = np.linspace(0.1, 8, 79)
if style == 'lj_12-6':
e = np.array([calc.LJ_12_6(self, d) for d in d_range])
return d_range, e
elif style == 'lj_9-6':
e = np.array([calc.LJ_9_6(self, d) for d in d_range])
return d_range, e
elif style.startswith('buck'):
e = np.array([calc.buckingham(self, d) for d in d_range])
return d_range, e
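    # Usage sketch (illustration only; assumes calc.LJ_12_6 reads this
    # type's epsilon and sigma, and that numpy is available):
    #
    #   pt = ParticleType(epsilon=0.1, sigma=3.5)
    #   d, e = pt.form(style='lj_12-6')
    #   # plotting d vs. e shows the 12-6 energy/distance curve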
@classmethod
def guess_style(cls, nparam):
if nparam == 2:
return 'lj'
elif nparam == 3:
return 'buck'
elif nparam == 4:
return 'charmm'
else:
raise PysimmError('Cannot guess pair style')
@classmethod
def parse_lammps(cls, line, style):
tmp = line.split('#')
data = tmp.pop(0).strip().split()
        name = ','.join(re.split(r',|\s+', tmp[0].strip())) if tmp else None
if style == 'mass':
if len(data) != 2:
raise PysimmError('LAMMPS data improperly formatted for mass info')
return cls(tag=int(data[0]), name=name, mass=float(data[1]))
elif style.startswith('lj') or style.startswith('class2'):
if len(data) != 3:
raise PysimmError('LAMMPS data improperly formatted for LJ style')
return cls(
tag=int(data[0]), name=name,
epsilon=float(data[1]), sigma=float(data[2])
)
elif style.startswith('charmm'):
if len(data) == 3:
return cls(
tag=int(data[0]), name=name,
epsilon=float(data[1]), sigma=float(data[2]),
epsilon_14=float(data[1]), sigma_14=float(data[2])
)
elif len(data) == 5:
return cls(
tag=int(data[0]), name=name,
epsilon=float(data[1]), sigma=float(data[2]),
epsilon_14=float(data[3]), sigma_14=float(data[4])
)
else:
raise PysimmError('LAMMPS data improperly formatted for charmm style')
elif style.startswith('buck'):
if len(data) != 4:
raise PysimmError('LAMMPS data improperly formatted for buckingham style')
return cls(
tag=int(data[0]), name=name,
a=float(data[1]), rho=float(data[2]), c=float(data[3])
)
else:
raise PysimmError('LAMMPS pair style {} not supported yet'.format(style))
def write_lammps(self, style='lj'):
"""pysimm.system.ParticleType.write_lammps
Formats a string to define particle type coefficients for a LAMMPS
data file given the provided style.
Args:
style: string for pair style of ParticleType (lj, class2, mass, buck)
Returns:
LAMMPS formatted string with pair coefficients
"""
if style.startswith('lj/charmm'):
eps14 = self.epsilon_14 if 'epsilon_14' in self.__dict__.keys() else ''
sgm14 = self.sigma_14 if 'sigma_14' in self.__dict__.keys() else ''
return '{:4}\t{}\t{}\t{}\t{}\t# {}\n'.format(
self.tag, self.epsilon, self.sigma, eps14, sgm14, self.name
)
elif style.startswith('lj'):
return '{:4}\t{}\t{}\t# {}\n'.format(
self.tag, self.epsilon, self.sigma, self.name
)
elif style.startswith('class2'):
return '{:4}\t{}\t{}\t# {}\n'.format(
self.tag, self.epsilon, self.sigma, self.name
)
elif style.startswith('mass'):
return '{:4}\t{}\t# {}\n'.format(
self.tag, self.mass, self.name
)
elif style.startswith('buck'):
return '{:4}\t{}\t{}\t{}\t# {}\n'.format(
self.tag, self.a, self.rho, self.c, self.name
)
else:
raise PysimmError('cannot understand pair style {}'.format(style))
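    # Example of the formatted output (hypothetical values):
    #
    #   ParticleType(tag=1, epsilon=0.1, sigma=3.5, name='c3').write_lammps('lj')
    #   returns '   1\t0.1\t3.5\t# c3\n'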
class Bond(Item):
"""pysimm.system.Bond
Bond between particle a and b
a--b
Objects inheriting from :class:`~pysimm.utils.Item` can contain arbitrary data.
Keyword arguments are assigned as attributes.
Attributes usually used are given below.
Attributes:
a: :class:`~pysimm.system.Particle` object involved in bond
b: :class:`~pysimm.system.Particle` object involved in bond
type: BondType object reference
"""
def __init__(self, **kwargs):
Item.__init__(self, **kwargs)
def get_other_particle(self, p):
if p is not self.a and p is not self.b:
return None
else:
return self.a if p is self.b else self.b
def distance(self):
"""pysimm.system.Bond.distance
Calculates distance between :class:`~pysimm.system.Particle` a and :class:`~pysimm.system.Particle` b in this Bond object.
Sets distance to dist attribute of self. Does not consider pbc.
Args:
None
Returns:
Distance between Particle a and Particle b (not considering pbc)
"""
if isinstance(self.a, Particle) and isinstance(self.b, Particle):
self.dist = calc.distance(self.a, self.b)
return self.dist
else:
return None
class BondType(Item):
"""pysimm.system.BondType
Objects inheriting from :class:`~pysimm.utils.Item` can contain arbitrary data.
Keyword arguments are assigned as attributes.
Attributes usually used are given below.
Attributes:
k: harmonic bond force constant (kcal/mol/A^2)
r0: bond equilibrium distance (Angstrom)
name: force field bond type name
"""
def __init__(self, **kwargs):
Item.__init__(self, **kwargs)
if self.name:
self.rname = ','.join(reversed(self.name.split(',')))
@classmethod
def guess_style(cls, nparam):
if nparam == 2:
return 'harmonic'
elif nparam == 4:
return 'class2'
else:
raise PysimmError('Cannot guess bond style')
@classmethod
def parse_lammps(cls, line, style):
tmp = line.split('#')
data = tmp.pop(0).strip().split()
        name = ','.join(re.split(r',|\s+', tmp[0].strip())) if tmp else None
if style.startswith('harm'):
if len(data) != 3:
raise PysimmError('LAMMPS data improperly formatted for harmonic bond')
return cls(
tag=int(data[0]), name=name,
k=float(data[1]), r0=float(data[2])
)
elif style.startswith('class2'):
if len(data) != 5:
raise PysimmError('LAMMPS data improperly formatted for class2 bond')
return cls(
tag=int(data[0]), name=name,
r0=float(data[1]), k2=float(data[2]),
k3=float(data[3]), k4=float(data[4])
)
else:
raise PysimmError('LAMMPS bond style {} not supported yet'.format(style))
def write_lammps(self, style='harmonic'):
"""pysimm.system.BondType.write_lammps
Formats a string to define bond type coefficients for a LAMMPS
data file given the provided style.
Args:
style: string for pair style of BondType (harmonic, class2)
Returns:
LAMMPS formatted string with bond coefficients
"""
if style.startswith('harm'):
return '{:4}\t{}\t{}\t# {}\n'.format(
self.tag, self.k, self.r0, self.name
)
elif style.startswith('class2'):
return '{:4}\t{}\t{}\t{}\t{}\t# {}\n'.format(
self.tag, self.r0, self.k2, self.k3, self.k4, self.name
)
else:
raise PysimmError('cannot understand pair style {}'.format(style))
def form(self, style='harmonic', d_range=None):
"""pysimm.system.BondType.form
Returns data to plot functional form for the potential energy with
the given style.
Args:
style: string for pair style of BondType (harmonic, class2)
Returns:
x, y for plotting functional form (energy vs distance)
"""
if not d_range:
d_range = np.linspace(self.r0-0.5, self.r0+0.5, 100)
if style == 'harmonic':
e = np.array([calc.harmonic_bond(self, d) for d in d_range])
return d_range, e
elif style == 'class2':
e = np.array([calc.class2_bond(self, d) for d in d_range])
return d_range, e
class Angle(Item):
"""pysimm.system.Angle
Angle between particles a, b, and c
a--b--c
Objects inheriting from :class:`~pysimm.utils.Item` can contain arbitrary data.
Keyword arguments are assigned as attributes.
Attributes usually used are given below.
Attributes:
a: :class:`~pysimm.system.Particle` object involved in angle
b: :class:`~pysimm.system.Particle` object involved in angle (middle particle)
c: :class:`~pysimm.system.Particle` object involved in angle
type: AngleType object reference
"""
def __init__(self, **kwargs):
Item.__init__(self, **kwargs)
def angle(self, radians=False):
"""pysimm.system.Angle.angle
Calculate angle.
Args:
radians: True to return value in radians (default: False)
Returns:
Angle between Particle a, b, and c
"""
self.theta = calc.angle(self.a, self.b, self.c, radians)
return self.theta
class AngleType(Item):
"""pysimm.system.AngleType
Objects inheriting from :class:`~pysimm.utils.Item` can contain arbitrary data.
Keyword arguments are assigned as attributes.
Attributes usually used are given below.
Attributes:
k: harmonic angle bend force constant (kcal/mol/radian^2)
theta0: angle equilibrium value (degrees)
name: force field angle type name
"""
def __init__(self, **kwargs):
Item.__init__(self, **kwargs)
if self.name:
self.rname = ','.join(reversed(self.name.split(',')))
@classmethod
def guess_style(cls, nparam):
if nparam == 2:
return 'harmonic'
elif nparam == 4:
return 'class2'
else:
raise PysimmError('Cannot guess angle style')
@classmethod
def parse_lammps(cls, line, style):
tmp = line.split('#')
data = tmp.pop(0).strip().split()
        name = ','.join(re.split(r',|\s+', tmp[0].strip())) if tmp else None
if style.startswith('harm'):
if len(data) != 3:
raise PysimmError('LAMMPS data improperly formatted for harmonic angle')
return cls(
tag=int(data[0]), name=name,
k=float(data[1]), theta0=float(data[2])
)
elif style.startswith('class2'):
if len(data) != 5:
raise PysimmError('LAMMPS data improperly formatted for class2 angle')
return cls(
tag=int(data[0]), name=name,
theta0=float(data[1]), k2=float(data[2]),
k3=float(data[3]), k4=float(data[4])
)
elif style.startswith('charmm'):
if len(data) != 5:
                raise PysimmError('LAMMPS data improperly formatted for charmm angle')
return cls(
tag=int(data[0]), name=name,
k=float(data[1]), theta0=float(data[2]),
k_ub=float(data[3]), r_ub=float(data[4])
)
else:
raise PysimmError('LAMMPS angle style {} not supported yet'.format(style))
def write_lammps(self, style='harmonic', cross_term=None):
"""pysimm.system.AngleType.write_lammps
Formats a string to define angle type coefficients for a LAMMPS
data file given the provided style.
Args:
style: string for pair style of AngleType (harmonic, class2, charmm)
cross_term: type of class2 cross term to write (default=None)
- BondBond
- BondAngle
Returns:
LAMMPS formatted string with angle coefficients
"""
if style.startswith('harm'):
return '{:4}\t{}\t{}\t# {}\n'.format(
self.tag, self.k, self.theta0, self.name
)
elif style.startswith('class2'):
if not cross_term:
return '{:4}\t{}\t{}\t{}\t{}\t# {}\n'.format(
self.tag, self.theta0, self.k2, self.k3, self.k4, self.name
)
elif cross_term == 'BondBond':
return '{:4}\t{}\t{}\t{}\t# {}\n'.format(
self.tag, self.m, self.r1, self.r2, self.name
)
elif cross_term == 'BondAngle':
return '{:4}\t{}\t{}\t{}\t{}\t# {}\n'.format(
self.tag, self.n1, self.n2, self.r1, self.r2, self.name
)
elif style.startswith('charmm'):
return '{:4}\t{}\t{}\t{}\t{}\t# {}\n'.format(
self.tag, self.k, self.theta0, self.k_ub, self.r_ub, self.name
)
else:
raise PysimmError('cannot understand pair style {}'.format(style))
def form(self, style='harmonic', d_range=None):
"""pysimm.system.AngleType.form
Returns data to plot functional form for the potential energy with
the given style.
Args:
style: string for pair style of AngleType (harmonic, class2, charmm)
Returns:
x, y for plotting functional form (energy vs angle)
"""
if not d_range:
d_range = np.linspace(self.theta0-1, self.theta0+1, 100)
if style == 'harmonic':
e = np.array([calc.harmonic_angle(self, d) for d in d_range])
return d_range, e
elif style == 'charmm':
e = np.array([calc.harmonic_angle(self, d) for d in d_range])
return d_range, e
elif style == 'class2':
e = np.array([calc.class2_angle(self, d) for d in d_range])
return d_range, e
class Dihedral(Item):
"""pysimm.system.Dihedral
Dihedral between particles a, b, c, and d
a--b--c--d
Objects inheriting from :class:`~pysimm.utils.Item` can contain arbitrary data.
Keyword arguments are assigned as attributes.
Attributes usually used are given below.
Attributes:
a: :class:`~pysimm.system.Particle` object involved in dihedral
b: :class:`~pysimm.system.Particle` object involved in dihedral (middle particle)
c: :class:`~pysimm.system.Particle` object involved in dihedral (middle particle)
d: :class:`~pysimm.system.Particle` object involved in dihedral
type: :class:`~pysimm.system.DihedralType` object reference
"""
def __init__(self, **kwargs):
Item.__init__(self, **kwargs)
class DihedralType(Item):
"""pysimm.system.DihedralType
Objects inheriting from :class:`~pysimm.utils.Item` can contain arbitrary data.
Keyword arguments are assigned as attributes.
Attributes usually used are given below.
Attributes:
k: dihedral energy barrier (kcal/mol)
d: minimum (+1 or -1)
n: multiplicity (integer >= 0)
name: force field dihedral type name
"""
def __init__(self, **kwargs):
Item.__init__(self, **kwargs)
if self.name:
self.rname = ','.join(reversed(self.name.split(',')))
@classmethod
def guess_style(cls, nparam):
if nparam == 3:
return 'harmonic'
elif nparam % 3 == 1:
return 'fourier'
elif nparam == 6:
return 'class2'
else:
raise PysimmError('Cannot guess dihedral style')
@classmethod
def parse_lammps(cls, line, style):
tmp = line.split('#')
data = tmp.pop(0).strip().split()
        name = ','.join(re.split(r',|\s+', tmp[0].strip())) if tmp else None
if style.startswith('harm'):
if len(data) != 4:
raise PysimmError('LAMMPS data improperly formatted for harmonic dihedral')
return cls(
tag=int(data[0]), name=name,
k=float(data[1]), d=int(data[2]), n=int(data[3])
)
elif style.startswith('fourier'):
if len(data) % 3 != 2:
raise PysimmError('LAMMPS data improperly formatted for fourier dihedral')
tag = int(data.pop(0))
m = int(data.pop(0))
k = []
n = []
d = []
for i in range(m):
k.append(data.pop(0))
n.append(data.pop(0))
d.append(data.pop(0))
return cls(
tag=tag, name=name,
m=m, k=list(map(float, k)), n=list(map(int, n)), d=list(map(float, d))
)
elif style.startswith('class2'):
if len(data) != 7:
raise PysimmError('LAMMPS data improperly formatted for class2 dihedral')
return cls(
tag=int(data[0]), name=name,
k1=float(data[1]), phi1=float(data[2]),
k2=float(data[3]), phi2=float(data[4]),
k3=float(data[5]), phi3=float(data[6]),
)
elif style.startswith('charmm'):
if len(data) != 5:
raise PysimmError('LAMMPS data improperly formatted for charmm dihedral')
return cls(
tag=int(data[0]), name=name,
k=float(data[1]), n=float(data[2]),
d=float(data[3]), w=float(data[4])
)
elif style.startswith('opls'):
if len(data) != 5:
raise PysimmError('LAMMPS data improperly formatted for opls dihedral')
return cls(
tag=int(data[0]), name=name,
k1=float(data[1]), k2=float(data[2]),
k3=float(data[3]), k4=float(data[4])
)
else:
raise PysimmError('LAMMPS dihedral style {} not supported yet'.format(style))
def write_lammps(self, style='harmonic', cross_term=None):
"""pysimm.system.DihedralType.write_lammps
Formats a string to define dihedral type coefficients for a LAMMPS
data file given the provided style.
Args:
style: string for pair style of DihedralType (harmonic, class2, fourier)
cross_term: type of class2 cross term to write (default=None)
- MiddleBond
- EndBond
- Angle
- AngleAngle
- BondBond13
Returns:
LAMMPS formatted string with dihedral coefficients
"""
if style.startswith('harm'):
return '{:4}\t{:f}\t{:d}\t{:d}\t# {}\n'.format(
self.tag, self.k, int(self.d), int(self.n), self.name
)
elif style.startswith('charmm'):
return '{:4}\t{:f}\t{:d}\t{:d}\t{:f}\t# {}\n'.format(
self.tag, self.k, int(self.n), int(self.d), self.w, self.name
)
elif style.startswith('opls'):
return '{:4}\t{:f}\t{:f}\t{:f}\t{:f}\t# {}\n'.format(
self.tag, self.k1, self.k2, self.k3, self.k4, self.name
)
elif style.startswith('fourier'):
st = '{:4}\t{:d}'.format(self.tag, self.m)
for k, n, d in zip(self.k, self.n, self.d):
st += '\t{}\t{:d}\t{}'.format(k, int(n), d)
st += '\t# {}\n'.format(self.name)
return st
elif style.startswith('class2'):
if not cross_term:
return '{:4}\t{}\t{}\t{}\t{}\t{}\t{}\t# {}\n'.format(
self.tag,
self.k1, self.phi1,
self.k2, self.phi2,
self.k3, self.phi3,
self.name
)
elif cross_term == 'MiddleBond':
return '{:4}\t{}\t{}\t{}\t{}\t# {}\n'.format(
self.tag,
self.a1, self.a2, self.a3, self.r2,
self.name
)
elif cross_term == 'EndBond':
return '{:4}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t# {}\n'.format(
self.tag,
self.b1, self.b2, self.b3,
self.c1, self.c2, self.c3,
self.r1, self.r3,
self.name
)
elif cross_term == 'Angle':
return '{:4}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t# {}\n'.format(
self.tag,
self.d1, self.d2, self.d3,
self.e1, self.e2, self.e3,
self.theta1, self.theta2,
self.name
)
elif cross_term == 'AngleAngle':
return '{:4}\t{}\t{}\t{}\t# {}\n'.format(
self.tag,
self.m,
self.theta1, self.theta2,
self.name
)
elif cross_term == 'BondBond13':
if self.n is None:
self.n = 0.0
return '{:4}\t{}\t{}\t{}\t# {}\n'.format(
self.tag,
self.n,
self.r1, self.r3,
self.name
)
else:
            raise PysimmError('cannot understand dihedral style {}'.format(style))
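    # Illustrative sketch: writing the hypothetical type above back out as a
    # line for the "Dihedral Coeffs" section of a LAMMPS data file.
    #
    #   dt.write_lammps('harmonic')
    #   # -> '   1\t0.156000\t1\t3\t# c,c,c,c\n'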
def form(self, style='harmonic', d_range=None):
"""pysimm.system.DihedralType.form
Returns data to plot functional form for the potential energy with
the given style.
Args:
            style: string for dihedral style of DihedralType (harmonic, fourier, class2, opls)
Returns:
x, y for plotting functional form (energy vs angle)
"""
if not d_range:
d_range = np.linspace(-180, 180, 100)
if style == 'harmonic':
e = np.array([calc.harmonic_dihedral(self, d) for d in d_range])
return d_range, e
elif style == 'fourier':
e = np.array([calc.fourier_dihedral(self, d) for d in d_range])
return d_range, e
elif style == 'class2':
e = np.array([calc.class2_dihedral(self, d) for d in d_range])
return d_range, e
elif style == 'opls':
e = np.array([calc.opls_dihedral(self, d) for d in d_range])
return d_range, e
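    # Illustrative sketch: plotting the functional form. form() itself needs
    # only numpy and pysimm.calc; matplotlib here is an extra assumption.
    #
    #   import matplotlib.pyplot as plt
    #   angle, energy = dt.form('harmonic')
    #   plt.plot(angle, energy)
    #   plt.xlabel('dihedral angle (degrees)')
    #   plt.ylabel('energy (kcal/mol)')
    #   plt.show()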
class Improper(Item):
"""pysimm.system.Improper
Improper dihedral around particle a, bonded to b, c, and d
| b
| |
| a--d
| |
| c
Objects inheriting from :class:`~pysimm.utils.Item` can contain arbitrary data.
Keyword arguments are assigned as attributes.
Attributes usually used are given below.
Attributes:
a: :class:`~pysimm.system.Particle` object involved in improper (middle particle)
b: :class:`~pysimm.system.Particle` object involved in improper
c: :class:`~pysimm.system.Particle` object involved in improper
d: :class:`~pysimm.system.Particle` object involved in improper
type: :class:`~pysimm.system.ImproperType` object reference
"""
def __init__(self, **kwargs):
Item.__init__(self, **kwargs)
class ImproperType(Item):
"""pysimm.system.ImproperType
Objects inheriting from :class:`~pysimm.utils.Item` can contain arbitrary data.
Keyword arguments are assigned as attributes.
Attributes usually used are given below.
Attributes:
k: improper energy barrier (kcal/mol)
x0: equilibrium value (degrees)
name: force field improper type name
"""
def __init__(self, **kwargs):
Item.__init__(self, **kwargs)
if self.name:
self.rname = ','.join(reversed(self.name.split(',')))
@classmethod
def guess_style(cls, nparam):
if nparam == 2:
return 'harmonic'
if nparam == 3:
return 'cvff'
else:
raise PysimmError('Cannot guess improper style')
@classmethod
def parse_lammps(cls, line, style):
tmp = line.split('#')
data = tmp.pop(0).strip().split()
        name = ','.join(re.split(r',|\s+', tmp[0].strip())) if tmp else None
if style.startswith('harm') or style.startswith('class2') or style.startswith('umbrella'):
if len(data) != 3:
                raise PysimmError('LAMMPS data improperly formatted for {} improper'.format(style))
return cls(
tag=int(data[0]), name=name,
k=float(data[1]), x0=float(data[2])
)
elif style.startswith('cvff'):
if len(data) != 4:
                raise PysimmError('LAMMPS data improperly formatted for cvff improper')
return cls(
tag=int(data[0]), name=name,
k=float(data[1]), d=int(data[2]), n=int(data[3])
)
else:
raise PysimmError('LAMMPS improper style {} not supported yet'.format(style))
def write_lammps(self, style='harmonic', cross_term=None):
"""pysimm.system.ImproperType.write_lammps
Formats a string to define improper type coefficients for a LAMMPS
data file given the provided style.
Args:
            style: string for improper style of ImproperType (harmonic, umbrella, cvff, class2)
cross_term: type of class2 cross term to write (default=None)
- AngleAngle
Returns:
            LAMMPS formatted string with improper coefficients
"""
if style.startswith('harmonic'):
return '{:4}\t{}\t{}\t# {}\n'.format(
self.tag, self.k, self.x0, self.name
)
elif style.startswith('umbrella'):
return '{:4}\t{}\t{}\t# {}\n'.format(
self.tag, self.k, self.x0, self.name
)
elif style.startswith('cvff'):
return '{:4}\t{}\t{}\t{}\t# {}\n'.format(
self.tag, self.k, self.d, self.n, self.name
)
elif style.startswith('class2'):
if self.k is None:
self.k = 0.0
if self.x0 is None:
self.x0 = 0.0
if not cross_term:
return '{:4}\t{}\t{}\t# {}\n'.format(
self.tag, self.k, self.x0, self.name
)
elif cross_term == 'AngleAngle':
return '{:4}\t{}\t{}\t{}\t{}\t{}\t{}\t# {}\n'.format(
self.tag,
self.m1, self.m2, self.m3,
self.theta1, self.theta2, self.theta3,
self.name
)
else:
            raise PysimmError('cannot understand improper style {}'.format(style))
def form(self, style='harmonic', d_range=None):
"""pysimm.system.ImproperType.form
Returns data to plot functional form for the potential energy with
the given style.
Args:
            style: string for improper style of ImproperType (harmonic, cvff, umbrella)
Returns:
x, y for plotting functional form (energy vs angle)
"""
if not d_range:
d_range = np.linspace(-2, 2, 100)
if style == 'harmonic':
e = np.array([calc.harmonic_improper(self, d) for d in d_range])
return d_range, e
elif style == 'cvff':
e = np.array([calc.cvff_improper(self, d) for d in d_range])
return d_range, e
elif style == 'umbrella':
e = np.array([calc.umbrella_improper(self, d) for d in d_range])
return d_range, e
class Dimension(Item):
"""pysimm.system.Dimension
Objects inheriting from :class:`~pysimm.utils.Item` can contain arbitrary data.
Keyword arguments are assigned as attributes.
Attributes usually used are given below.
Attributes:
xlo: minimum value in x dimension
xhi: maximum value in x dimension
ylo: minimum value in y dimension
yhi: maximum value in y dimension
zlo: minimum value in z dimension
zhi: maximum value in z dimension
dx: distance in x dimension
dy: distance in y dimension
dz: distance in z dimension
"""
def __init__(self, **kwargs):
center = kwargs.get('center')
Item.__init__(self, **kwargs)
if center:
self.translate(*center)
del self.center
def check(self):
if self.dx is not None and self.dy is not None and self.dz is not None:
return True
else:
return False
def size(self):
return (self.dx, self.dy, self.dz)
def translate(self, x, y, z):
"""pysimm.system.Dimension.translate
Shifts box bounds by x, y, z.
Args:
x: distance to shift box bounds in x direction
y: distance to shift box bounds in y direction
z: distance to shift box bounds in z direction
Returns:
None
"""
self.xlo += x
self.xhi += x
self.ylo += y
self.yhi += y
self.zlo += z
self.zhi += z
@property
def dx(self):
if self.xhi is None or self.xlo is None:
return None
else:
return self.xhi-self.xlo
@dx.setter
def dx(self, dx):
if dx is None:
return
center = 0
if self.xlo is not None and self.xhi is not None:
center = float(self.xhi + self.xlo)/2
self.xlo = center - float(dx)/2
self.xhi = center + float(dx)/2
@property
def dy(self):
if self.yhi is None or self.ylo is None:
return None
else:
return self.yhi-self.ylo
@dy.setter
def dy(self, dy):
if dy is None:
return
center = 0
if self.ylo is not None and self.yhi is not None:
center = float(self.yhi + self.ylo)/2
self.ylo = center - float(dy)/2
self.yhi = center + float(dy)/2
@property
def dz(self):
if self.zhi is None or self.zlo is None:
return None
else:
return self.zhi-self.zlo
@dz.setter
def dz(self, dz):
if dz is None:
return
center = 0
if self.zlo is not None and self.zhi is not None:
center = float(self.zhi + self.zlo)/2
self.zlo = center - float(dz)/2
self.zhi = center + float(dz)/2
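    # Illustrative sketch: the dx/dy/dz setters resize the box symmetrically
    # about the midpoint of the current bounds (or about 0 if no bounds yet).
    #
    #   box = Dimension(xlo=0, xhi=10, ylo=0, yhi=10, zlo=0, zhi=10)
    #   box.dx = 20             # xlo/xhi become -5.0/15.0
    #   box.translate(5, 0, 0)  # xlo/xhi become 0.0/20.0
    #   box.size()              # -> (20.0, 10.0, 10.0)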
class System(object):
"""pysimm.system.System
Object representation of molecular system.
Contains information required for molecular simulation.
Attributes:
dim: Dimension object reference
particles: :class:`~pysimm.utils.ItemContainer` for Particle organization
particle_types: :class:`~pysimm.utils.ItemContainer` for ParticleType organization
bonds: :class:`~pysimm.utils.ItemContainer` for Bond organization
bond_types: :class:`~pysimm.utils.ItemContainer` for BondType organization
angles: :class:`~pysimm.utils.ItemContainer` for Angle organization
angle_types: :class:`~pysimm.utils.ItemContainer` for AngleType organization
dihedrals: :class:`~pysimm.utils.ItemContainer` for Dihedral organization
dihedral_types: :class:`~pysimm.utils.ItemContainer` for DihedralType organization
impropers: :class:`~pysimm.utils.ItemContainer` for Improper organization
improper_types: :class:`~pysimm.utils.ItemContainer` for ImproperType organization
molecules: :class:`~pysimm.utils.ItemContainer` for Molecule organization
"""
def __init__(self, **kwargs):
self.objectified = False
self.name = kwargs.get('name', 'pySIMM System Object')
self.ff_class = kwargs.get('ff_class')
self.forcefield = kwargs.get('forcefield')
self.dim = Dimension(xlo=kwargs.get('xlo'), xhi=kwargs.get('xhi'),
ylo=kwargs.get('ylo'), yhi=kwargs.get('yhi'),
zlo=kwargs.get('zlo'), zhi=kwargs.get('zhi'),
dx=kwargs.get('dx'), dy=kwargs.get('dy'),
dz=kwargs.get('dz'), center=kwargs.get('center'))
self.dim_check = self.dim.check()
self.mass = kwargs.get('mass', 0.0)
self.particle_types = kwargs.get('particle_types', ItemContainer())
self.bond_types = kwargs.get('bond_types', ItemContainer())
self.angle_types = kwargs.get('angle_types', ItemContainer())
self.dihedral_types = kwargs.get('dihedral_types', ItemContainer())
self.improper_types = kwargs.get('improper_types', ItemContainer())
self.molecule_types = kwargs.get('molecule_types', ItemContainer())
self.particles = kwargs.get('particles', ItemContainer())
self.bonds = kwargs.get('bonds', ItemContainer())
self.angles = kwargs.get('angles', ItemContainer())
self.dihedrals = kwargs.get('dihedrals', ItemContainer())
self.impropers = kwargs.get('impropers', ItemContainer())
self.molecules = kwargs.get('molecules', ItemContainer())
self.write_coeffs = kwargs.get('write_coeffs', True)
self.set_mass()
self.set_volume()
self.set_density()
self.set_cog()
def __getattr__(self, name):
return None
def copy(self, rotate_x=None, rotate_y=None, rotate_z=None,
dx=0, dy=0, dz=0):
"""pysimm.system.System.copy
Create duplicate :class:`~pysimm.system.System` object. Default behavior does not modify particle positions.
Args:
rotate_x: rotate duplicate system around x axis by this value (radians)
rotate_y: rotate duplicate system around y axis by this value (radians)
rotate_z: rotate duplicate system around z axis by this value (radians)
dx: translate duplicate system in x dimension by this value (Angstrom)
dy: translate duplicate system in y dimension by this value (Angstrom)
dz: translate duplicate system in z dimension by this value (Angstrom)
"""
new = System()
new.ff_class = self.ff_class
new.forcefield = self.forcefield
new.pair_style = self.pair_style
new.bond_style = self.bond_style
new.angle_style = self.angle_style
new.dihedral_style = self.dihedral_style
new.improper_style = self.improper_style
new.dim = self.dim.copy()
for _ in self.molecules:
new.molecules.add(Molecule(tag=_.tag))
for pt in self.particle_types:
new.particle_types.add(pt.copy())
for bt in self.bond_types:
new.bond_types.add(bt.copy())
for at in self.angle_types:
new.angle_types.add(at.copy())
for dt in self.dihedral_types:
new.dihedral_types.add(dt.copy())
for it in self.improper_types:
new.improper_types.add(it.copy())
for p in self.particles:
new_p = p.copy()
if p.type:
new_p.type = new.particle_types[p.type.tag]
new_p.molecule = new.molecules[p.molecule.tag]
if rotate_x or rotate_y or rotate_z:
new_p.x, new_p.y, new_p.z = rotate_vector(new_p.x, new_p.y, new_p.z,
rotate_x, rotate_y, rotate_z)
new_p.x += dx
new_p.y += dy
new_p.z += dz
new.particles.add(new_p)
new_p.molecule.particles.add(new_p)
new_p.bonds = ItemContainer()
new_p.angles = ItemContainer()
new_p.dihedrals = ItemContainer()
new_p.impropers = ItemContainer()
for b in self.bonds:
new_b = b.copy()
new_b.a = new.particles[b.a.tag]
new_b.b = new.particles[b.b.tag]
if b.type:
new_b.type = new.bond_types[b.type.tag]
new.bonds.add(new_b)
new_b.a.molecule.bonds.add(new_b)
new_b.a.bonds.add(new_b)
new_b.b.bonds.add(new_b)
for a in self.angles:
new_a = Angle(a=new.particles[a.a.tag],
b=new.particles[a.b.tag],
c=new.particles[a.c.tag])
if a.type:
                new_a.type = new.angle_types[a.type.tag]
new.angles.add(new_a)
new_a.a.molecule.angles.add(new_a)
for d in self.dihedrals:
new_d = Dihedral(a=new.particles[d.a.tag],
b=new.particles[d.b.tag],
c=new.particles[d.c.tag],
d=new.particles[d.d.tag])
if d.type:
                new_d.type = new.dihedral_types[d.type.tag]
new.dihedrals.add(new_d)
new_d.a.molecule.dihedrals.add(new_d)
for i in self.impropers:
new_i = Improper(a=new.particles[i.a.tag],
b=new.particles[i.b.tag],
c=new.particles[i.c.tag],
d=new.particles[i.d.tag])
if i.type:
new_i.type = new.improper_types[i.type.tag]
new.impropers.add(new_i)
new_i.a.molecule.impropers.add(new_i)
for k, v in vars(self).items():
if not isinstance(v, ItemContainer) and not isinstance(v, Item):
setattr(new, k, v)
return new
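    # Illustrative sketch: duplicating a system and translating the duplicate,
    # e.g. to tile copies of a molecule. `s` stands in for an existing System.
    #
    #   s2 = s.copy(dx=20.0)  # identical copy, shifted 20 Angstroms in x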
def add(self, other, **kwargs):
"""pysimm.system.System.add
Add other :class:`~pysimm.system.System` to this. Optionally remove duplicate types (default behavior).
Args:
other: :class:`~pysimm.system.System` object to add
unique_types (optional): Remove duplicate types and reassign references to existing types (True)
change_dim (optional): Update :class:`~pysimm.system.Dimension` object so that :class:`~pysimm.system.Particle` objects do not exist
outside of :class:`~pysimm.system.Dimension` extremes (True)
update_properties (optional): Update system-wide mass, volume, density, center of gravity, and velocity
properties (True)
"""
unique_types = kwargs.get('unique_types', True)
change_dim = kwargs.get('change_dim', True)
update_properties = kwargs.get('update_properties', True)
for pt in other.particle_types:
if unique_types:
if pt.name not in [x.name for x in self.particle_types]:
del pt.tag
self.particle_types.add(pt)
else:
del pt.tag
self.particle_types.add(pt)
for bt in other.bond_types:
if unique_types:
if bt.name not in [x.name for x in self.bond_types]:
del bt.tag
self.bond_types.add(bt)
else:
del bt.tag
self.bond_types.add(bt)
for at in other.angle_types:
if unique_types:
if at.name not in [x.name for x in self.angle_types]:
del at.tag
self.angle_types.add(at)
else:
del at.tag
self.angle_types.add(at)
for dt in other.dihedral_types:
if unique_types:
if dt.name not in [x.name for x in self.dihedral_types]:
del dt.tag
self.dihedral_types.add(dt)
else:
del dt.tag
self.dihedral_types.add(dt)
for it in other.improper_types:
if unique_types:
if it.name not in [x.name for x in self.improper_types]:
del it.tag
self.improper_types.add(it)
else:
del it.tag
self.improper_types.add(it)
for p in other.particles:
del p.tag
if change_dim:
self.dim.xhi = max(p.x, self.dim.xhi)
self.dim.xlo = min(p.x, self.dim.xlo)
self.dim.yhi = max(p.y, self.dim.yhi)
self.dim.ylo = min(p.y, self.dim.ylo)
self.dim.zhi = max(p.z, self.dim.zhi)
self.dim.zlo = min(p.z, self.dim.zlo)
if unique_types and p.type not in self.particle_types:
pt = self.particle_types.get(p.type.name)
if not pt or len(pt) > 1:
                    error_print('could not uniquely match ParticleType {}'.format(p.type.name))
else:
p.type = pt[0]
self.particles.add(p)
for b in other.bonds:
del b.tag
if unique_types and b.type not in self.bond_types:
bt = self.bond_types.get(b.type.name)
if not bt or len(bt) > 1:
                    error_print('could not uniquely match BondType {}'.format(b.type.name))
else:
b.type = bt[0]
self.bonds.add(b)
for a in other.angles:
del a.tag
if unique_types and a.type not in self.angle_types:
at = self.angle_types.get(a.type.name)
if not at or len(at) > 1:
                    error_print('could not uniquely match AngleType {}'.format(a.type.name))
else:
a.type = at[0]
self.angles.add(a)
for d in other.dihedrals:
del d.tag
if unique_types and d.type not in self.dihedral_types:
dt = self.dihedral_types.get(d.type.name)
if not dt:
                    error_print('could not find DihedralType {}'.format(d.type.name))
elif len(dt) > 1:
index = 0
x = 5
for i in range(len(dt)):
if dt[i].name.count('X') < x:
index = i
x = dt[i].name.count('X')
d.type = dt[index]
else:
d.type = dt[0]
self.dihedrals.add(d)
for i in other.impropers:
del i.tag
if unique_types and i.type not in self.improper_types:
it = self.improper_types.get(i.type.name)
if not it:
                    error_print('could not find ImproperType {}'.format(i.type.name))
else:
i.type = it[0]
self.impropers.add(i)
for m in other.molecules:
del m.tag
self.molecules.add(m)
p_list = m.particles.get('all')
m.particles.remove('all')
for p in p_list:
m.particles.add(p)
if update_properties:
self.set_mass()
self.set_volume()
self.set_density()
self.set_cog()
self.set_velocity()
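    # Illustrative sketch: merging two systems. With unique_types=True (the
    # default) types in the incoming system that share a name with existing
    # types are dropped and references are rewired to the local type objects.
    # `polymer` and `water_box` stand in for existing System objects.
    #
    #   polymer.add(water_box.copy())  # copy() keeps water_box itself intact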
def distance(self, p1, p2):
"""pysimm.system.System.distance
Calculate distance between two particles considering pbc.
Args:
p1: :class:`~pysimm.system.Particle` object
p2: :class:`~pysimm.system.Particle` object
Returns:
distance between particles considering pbc
"""
return calc.pbc_distance(self, p1, p2)
def wrap(self):
"""pysimm.system.System.wrap
Wrap :class:`~pysimm.system.Particle` images into box defined by :class:`~pysimm.system.Dimension` object.
Ensure particles are contained within simulation box.
Args:
None
Returns:
None
"""
self.dim.check()
for p in self.particles:
while p.x > self.dim.xhi:
p.x -= self.dim.dx
while p.x < self.dim.xlo:
p.x += self.dim.dx
while p.y > self.dim.yhi:
p.y -= self.dim.dy
while p.y < self.dim.ylo:
p.y += self.dim.dy
while p.z > self.dim.zhi:
p.z -= self.dim.dz
while p.z < self.dim.zlo:
p.z += self.dim.dz
def unwrap(self):
"""pysimm.system.System.unwrap()
Unwraps :class:`~pysimm.system.Particle` images such that no bonds cross box edges.
Args:
None
Returns:
None
"""
self.dim.check()
self.add_particle_bonding()
next_to_unwrap = []
for p in self.particles:
p.unwrapped = False
for m in self.molecules:
for p0 in m.particles:
p0.unwrapped = True
next_to_unwrap.append(p0)
for p in next_to_unwrap:
for pb in p.bonded_to:
if pb.unwrapped:
continue
next_to_unwrap.append(pb)
pb.unwrapped = True
dx = p.x - pb.x
while abs(dx) > self.dim.dx / 2:
if dx > 0:
pb.x += self.dim.dx
else:
pb.x -= self.dim.dx
dx = p.x - pb.x
dy = p.y - pb.y
while abs(dy) > self.dim.dy / 2:
if dy > 0:
pb.y += self.dim.dy
else:
pb.y -= self.dim.dy
dy = p.y - pb.y
dz = p.z - pb.z
while abs(dz) > self.dim.dz / 2:
if dz > 0:
pb.z += self.dim.dz
else:
pb.z -= self.dim.dz
dz = p.z - pb.z
for b in self.bonds:
if b.distance() > 5:
print('unwrap probably failed')
return False
return True
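    # Illustrative sketch: a typical wrap/unwrap round trip. unwrap() is
    # called before measuring intramolecular geometry so that bonds do not
    # span periodic images; wrap() restores in-box coordinates afterwards.
    #
    #   s.unwrap()
    #   # ... measure bond lengths, radius of gyration, etc. ...
    #   s.wrap()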
def particles_df(self, columns=['tag', 'x', 'y', 'z', 'q'], index='tag', extras=[]):
if pd is None:
raise PysimmError('pysimm.system.System.particles_df function requires pandas')
data = [{c: getattr(p, c) for c in columns} for p in self.particles]
if extras:
for d in data:
if 'type.name' in extras:
d['type.name'] = self.particles[d['tag']].type.name
if 'type.tag' in extras:
d['type.tag'] = self.particles[d['tag']].type.tag
df = pd.DataFrame(data=data)
if index in columns:
df = df.set_index(index)
return df
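    # Illustrative sketch (requires pandas): tabulating per-particle data and
    # pulling in type names through the `extras` hook.
    #
    #   df = s.particles_df(extras=['type.name'])
    #   print(df[['x', 'y', 'z']].describe())  # quick coordinate summary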
def unite_atoms(self):
for p in self.particles:
p.implicit_h = 0
if p.elem != 'C':
continue
for b in p.bonds:
pb = b.a if b.b is p else b.b
                if pb.elem == 'H':
p.implicit_h += 1
p.charge += pb.charge
self.particles.remove(pb.tag, update=False)
self.remove_spare_bonding()
def quality(self, tolerance=0.1):
"""pysimm.system.System.quality
        Attempts to assess the quality of the :class:`~pysimm.system.System` based on bond lengths in the unwrapped system.
Args:
tolerance: fractional value of equilibrium bond length that is acceptable
Returns:
number of bonds in system outside tolerance
"""
self.unwrap()
bad_bonds = 0
for b in self.bonds:
if b.distance() > b.type.r0*(1+tolerance) or b.distance() < b.type.r0*(1-tolerance):
bad_bonds += 1
verbose_print('%s of %s bonds found to be outside of tolerance' % (bad_bonds, self.bonds.count))
self.wrap()
return bad_bonds
def shift_to_origin(self):
"""pysimm.system.System.shift_to_origin
        Shifts simulation box and particles so that the box begins at the origin, i.e. xlo=ylo=zlo=0
Args:
None
Returns:
None
"""
for p in self.particles:
p.x -= self.dim.xlo
p.y -= self.dim.ylo
p.z -= self.dim.zlo
self.dim.xhi -= self.dim.xlo
self.dim.yhi -= self.dim.ylo
self.dim.zhi -= self.dim.zlo
self.dim.xlo -= self.dim.xlo
self.dim.ylo -= self.dim.ylo
self.dim.zlo -= self.dim.zlo
def set_charge(self):
"""pysimm.system.System.set_charge
Sets total charge of all :class:`~pysimm.system.Particle` objects in System.particles
Args:
None
Returns:
None
"""
self.charge = 0
for p in self.particles:
self.charge += p.charge
def zero_charge(self):
"""pysimm.system.System.zero_charge
Enforces total :class:`~pysimm.system.System` charge to be 0.0 by subtracting excess charge from last particle
Args:
None:
Returns:
None
"""
charge = 0.
for p in self.particles:
charge += p.charge
if charge != 0:
p.charge -= charge
self.set_charge()
def check_items(self):
"""pysimm.system.System.check_items
Checks particles, bonds, angles, dihedrals, impropers, and molecules containers and raises exception if the length of items in the container does not equal the count property
Args:
None:
Returns:
None
"""
if len(self.particles) != self.particles.count:
raise PysimmError('particles missing')
if len(self.bonds) != self.bonds.count:
raise PysimmError('bonds missing')
if len(self.angles) != self.angles.count:
raise PysimmError('angles missing')
if len(self.dihedrals) != self.dihedrals.count:
raise PysimmError('dihedrals missing')
if len(self.impropers) != self.impropers.count:
raise PysimmError('impropers missing')
if len(self.molecules) != self.molecules.count:
raise PysimmError('molecules missing')
def update_ff_types_from_ac(self, ff, acname):
"""pysimm.system.System.update_ff_types_from_ac
Updates :class:`~pysimm.system.ParticleType` objects in system using type names given in antechamber (ac) file. Retrieves type from System if possible, then searches force field provided by ff.
Args:
ff: forcefield to search for Type objects
acname: ac filename containing type names
Returns:
None
"""
self.particle_types.remove('all')
with open(acname) as f:
for line in f:
if line.startswith('ATOM'):
tag = int(line.split()[1])
tname = line.split()[-1]
s_pt = self.particle_types.get(tname)
if not s_pt:
s_pt = ff.particle_types.get(tname)
if not s_pt:
                            error_print('cannot find type with name {}'.format(tname))
                            continue
self.particle_types.add(s_pt[0].copy())
self.particles[tag].type = self.particle_types.get(tname)[0]
def update_particle_types_from_forcefield(self, f):
        pysimm.system.System.update_particle_types_from_forcefield
Updates :class:`~pysimm.system.ParticleType` data from :class:`~pysimm.forcefield.Forcefield` object f based on :class:`~pysimm.system.ParticleType`.name
Args:
f: :class:`~pysimm.forcefield.Forcefield` object reference
Returns:
None
"""
for pt in self.particle_types:
name_ = pt.name.split('@')[-1]
linker = False
if pt.name.find('@') >= 0:
linker = pt.name.split('@')[0]
pt_ = f.particle_types.get(name_)
if pt_:
new = pt_[0].copy()
new.tag = pt.tag
if linker:
new.name = '%s@%s' % (linker, new.name)
self.particle_types.remove(pt.tag)
self.particle_types.add(new)
def make_linker_types(self):
"""pysimm.system.System.make_linker_types
        Identifies linker particles and creates duplicate :class:`~pysimm.system.ParticleType` objects with new names.
Identification is performed by :class:`~pysimm.system.Particle`.linker attribute.
New :class:`~pysimm.system.ParticleType` name is prepended with [H or T]L@ to designate head or tail linker
Args:
None
Returns:
None
"""
for p in self.particles:
if p.linker == 'head':
head_linker = self.particle_types.get('HL@%s' % p.type.name)
if head_linker:
p.type = head_linker[0]
else:
p.type = p.type.copy()
p.type.name = 'HL@%s' % p.type.name
self.particle_types.add(p.type)
elif p.linker == 'tail':
tail_linker = self.particle_types.get('TL@%s' % p.type.name)
if tail_linker:
p.type = tail_linker[0]
else:
p.type = p.type.copy()
p.type.name = 'TL@%s' % p.type.name
self.particle_types.add(p.type)
elif p.linker:
linker = self.particle_types.get('L@%s' % p.type.name)
if linker:
p.type = linker[0]
else:
p.type = p.type.copy()
p.type.name = 'L@%s' % p.type.name
self.particle_types.add(p.type)
def remove_linker_types(self):
"""pysimm.system.System.remove_linker_types
        Reassigns :class:`~pysimm.system.Particle`.type references to original :class:`~pysimm.system.ParticleType` objects without the linker prefix
Args:
None
Returns:
None
"""
for p in self.particles:
if p.type.name.find('@') >= 0:
pt = self.particle_types.get(p.type.name.split('@')[-1])
if pt:
p.type = pt[0]
else:
print('cannot find regular type for linker %s'
% p.type.name)
def read_lammps_dump(self, fname):
"""pysimm.system.System.read_lammps_dump
Updates particle positions and box size from LAMMPS dump file.
        Assumes the following format for each atom line:
tag charge xcoord ycoord zcoord xvelocity yvelocity zvelocity
Args:
fname: LAMMPS dump file
Returns:
None
"""
nparticles = 0
with open(fname) as f:
line = f.readline()
while line:
if len(line.split()) > 1 and line.split()[1] == 'NUMBER':
nparticles = int(f.readline())
elif len(line.split()) > 1 and line.split()[1] == 'BOX':
self.dim.xlo, self.dim.xhi = map(float, f.readline().split())
self.dim.ylo, self.dim.yhi = map(float, f.readline().split())
self.dim.zlo, self.dim.zhi = map(float, f.readline().split())
self.set_volume()
self.set_density()
elif len(line.split()) > 1 and line.split()[1] == 'ATOMS':
for i in range(nparticles):
tag, q, x, y, z, vx, vy, vz = map(float, f.readline().split())
tag = int(tag)
if self.particles[tag]:
p = self.particles[tag]
p.charge = q
p.x = x
p.vx = vx
p.y = y
p.vy = vy
p.z = z
p.vz = vz
line = f.readline()
def read_lammpstrj(self, trj, frame=1):
"""pysimm.system.System.read_lammpstrj
Updates particle positions and box size from LAMMPS trajectory file at given frame.
        Assumes one of the following formats for each atom line:
tag xcoord ycoord zcoord
OR
tag type_id xcoord ycoord zcoord
OR
tag type_id xcoord ycoord zcoord ximage yimage zimage
Args:
trj: LAMMPS trajectory file
frame: sequential frame number (not LAMMPS timestep) default=1
Returns:
None
"""
t_frame = 0
nparticles = 0
updated = 0
with open(trj) as f:
line = f.readline()
while line:
if len(line.split()) > 1 and line.split()[1] == 'TIMESTEP':
t_frame += 1
elif len(line.split()) > 1 and line.split()[1] == 'NUMBER':
nparticles = int(f.readline())
elif (len(line.split()) > 1 and line.split()[1] == 'BOX' and
t_frame == frame):
self.dim.xlo, self.dim.xhi = map(float,
f.readline().split())
self.dim.ylo, self.dim.yhi = map(float,
f.readline().split())
self.dim.zlo, self.dim.zhi = map(float,
f.readline().split())
elif (len(line.split()) > 1 and line.split()[1] == 'ATOMS' and
t_frame == frame):
for i in range(nparticles):
line = f.readline().split()
if len(line) == 4:
id_, x, y, z = map(float, line)
elif len(line) == 5:
id_, type_, x, y, z = map(float, line)
elif len(line) == 8:
id_, type_, x, y, z, ix, iy, iz = map(float, line)
else:
error_print('cannot understand lammpstrj formatting; exiting')
return
id_ = int(id_)
if self.particles[id_]:
updated += 1
self.particles[id_].x = x
self.particles[id_].y = y
self.particles[id_].z = z
line = f.readline()
verbose_print('updated particle positions for %s of %s particles from trajectory' % (updated, nparticles))
def read_xyz(self, xyz, frame=1, quiet=False):
"""pysimm.system.System.read_xyz
Updates particle positions and box size from xyz file at given frame
Args:
xyz: xyz trajectory file
frame: sequential frame number default=1
            quiet: True to suppress status output default=False
Returns:
None
"""
if not quiet:
verbose_print('reading particle positions from %s' % xyz)
warning_print('particles are assumed to be in order in xyz file')
t_frame = 0
with open(xyz) as f:
line = f.readline()
while line:
t_frame += 1
assert int(line.split()[0]) == self.particles.count
line = f.readline()
for n in range(1, self.particles.count + 1):
p = self.particles[n]
if t_frame == 1:
if not p.type.elem and p.type.name:
if p.type.name[0].lower() != 'l':
p.type.elem = p.type.name[0].upper()
else:
p.type.elem = p.type.name[1].upper()
line = f.readline()
if t_frame == frame:
x, y, z = map(float, line.split()[1:])
p.x = x
p.y = y
p.z = z
if t_frame == frame:
print('read %s particle positions from %s'
% (self.particles.count, xyz))
line = f.readline()
def update_types(self, ptypes, btypes, atypes, dtypes, itypes):
"""pysimm.system.System.update_types
Updates type objects from a given list of types.
Args:
ptypes: list of :class:`~pysimm.system.ParticleType` objects from which to update
btypes: list of :class:`~pysimm.system.BondType` objects from which to update
atypes: list of :class:`~pysimm.system.AngleType` objects from which to update
dtypes: list of :class:`~pysimm.system.DihedralType` objects from which to update
itypes: list of :class:`~pysimm.system.ImproperType` objects from which to update
"""
if ptypes is not None:
for p in self.particles:
pt = self.particle_types.get(p.type.name, first=True)
if pt:
p.type = pt[0]
self.particle_types.remove('all')
for pt in ptypes:
self.particle_types.add(pt)
if btypes is not None:
for b in self.bonds:
bt = self.bond_types.get(b.type.name, first=True)
if bt:
b.type = bt[0]
self.bond_types.remove('all')
for bt in btypes:
self.bond_types.add(bt)
if atypes is not None:
for a in self.angles:
at = self.angle_types.get(a.type.name, first=True)
if at:
a.type = at[0]
self.angle_types.remove('all')
for at in atypes:
self.angle_types.add(at)
if dtypes is not None:
for d in self.dihedrals:
dt = self.dihedral_types.get(d.type.name, first=True)
if dt:
d.type = dt[0]
self.dihedral_types.remove('all')
for dt in dtypes:
self.dihedral_types.add(dt)
if itypes is not None:
for i in self.impropers:
it = self.improper_types.get(i.type.name, first=True)
if it:
i.type = it[0]
self.improper_types.remove('all')
for it in itypes:
self.improper_types.add(it)
def read_type_names(self, types_file):
"""pysimm.system.System.read_type_names
Update :class:`~pysimm.system.ParticleType` names from file.
Args:
types_file: type dictionary file name
Returns:
None
"""
ptypes = dict()
btypes = dict()
atypes = dict()
dtypes = dict()
itypes = dict()
if os.path.isfile(types_file):
f = open(types_file)
elif isinstance(types_file, str):
f = StringIO(types_file)
for line in f:
line = line.split()
if line and line[0].lower() == 'atom':
for i in range(self.particle_types.count):
line = next(f).split()
ptypes[int(line[0])] = line[1]
elif line and line[0].lower() == 'bond':
for i in range(self.bond_types.count):
line = next(f).split()
btypes[int(line[0])] = line[1]
elif line and line[0].lower() == 'angle':
for i in range(self.angle_types.count):
line = next(f).split()
atypes[int(line[0])] = line[1]
elif line and line[0].lower() == 'dihedral':
for i in range(self.dihedral_types.count):
line = next(f).split()
dtypes[int(line[0])] = line[1]
elif line and line[0].lower() == 'improper':
for i in range(self.improper_types.count):
line = next(f).split()
itypes[int(line[0])] = line[1]
for t in self.particle_types:
t.name = ptypes[t.tag]
if t.name[0] == 'L':
if t.name[1].upper() in ['H', 'C', 'N', 'O']:
t.elem = t.name[1].upper()
else:
if t.name[0].upper() in ['H', 'C', 'N', 'O']:
t.elem = t.name[0].upper()
for t in self.bond_types:
t.name = btypes[t.tag]
t.rname = ','.join(reversed(t.name.split(',')))
for t in self.angle_types:
t.name = atypes[t.tag]
t.rname = ','.join(reversed(t.name.split(',')))
for t in self.dihedral_types:
t.name = dtypes[t.tag]
t.rname = ','.join(reversed(t.name.split(',')))
for t in self.improper_types:
t.name = itypes[t.tag]
t.rname = ','.join(reversed(t.name.split(',')))
def remove_spare_bonding(self, update_tags=True):
"""pysimm.system.System.remove_spare_bonding
Removes bonds, angles, dihedrals and impropers that reference particles not in :class:`~pysimm.system.System`.particles
Args:
update_tags: True to update all tags after removal of bonding items default=True
"""
for b in self.bonds:
if b.a not in self.particles or b.b not in self.particles:
self.bonds.remove(b.tag, update=False)
for a in self.angles:
if (a.a not in self.particles or a.b not in self.particles or
a.c not in self.particles):
self.angles.remove(a.tag, update=False)
for d in self.dihedrals:
if (d.a not in self.particles or d.b not in self.particles or
d.c not in self.particles or d.d not in self.particles):
self.dihedrals.remove(d.tag, update=False)
for i in self.impropers:
if (i.a not in self.particles or i.b not in self.particles or
i.c not in self.particles or i.d not in self.particles):
self.impropers.remove(i.tag, update=False)
if update_tags:
self.update_tags()
def update_tags(self):
"""pysimm.system.System.update_tags
Update Item tags in :class:`~pysimm.utils.ItemContainer` objects to preserve continuous tags. Removes all objects and then reinserts them.
Args:
None
Returns:
None
"""
particles = self.particles.get('all')
self.particles.remove('all')
for p in particles:
del p.tag
self.particles.add(p)
ptypes = self.particle_types.get('all')
self.particle_types.remove('all')
for pt in ptypes:
del pt.tag
self.particle_types.add(pt)
bonds = self.bonds.get('all')
self.bonds.remove('all')
for b in bonds:
del b.tag
self.bonds.add(b)
btypes = self.bond_types.get('all')
self.bond_types.remove('all')
for bt in btypes:
del bt.tag
self.bond_types.add(bt)
angles = self.angles.get('all')
self.angles.remove('all')
for a in angles:
del a.tag
self.angles.add(a)
atypes = self.angle_types.get('all')
self.angle_types.remove('all')
for at in atypes:
del at.tag
self.angle_types.add(at)
dihedrals = self.dihedrals.get('all')
self.dihedrals.remove('all')
for d in dihedrals:
del d.tag
self.dihedrals.add(d)
dtypes = self.dihedral_types.get('all')
self.dihedral_types.remove('all')
for dt in dtypes:
del dt.tag
self.dihedral_types.add(dt)
impropers = self.impropers.get('all')
self.impropers.remove('all')
for i in impropers:
del i.tag
self.impropers.add(i)
itypes = self.improper_types.get('all')
self.improper_types.remove('all')
for it in itypes:
del it.tag
self.improper_types.add(it)
def set_references(self):
"""pysimm.system.System.set_references
Set object references when :class:`~pysimm.system.System` information read from text file.
For example, if bond type value 2 is read from file, set :class:`~pysimm.system.Bond`.type to bond_types[2]
Args:
None
Returns:
None
"""
for p in self.particles:
if isinstance(p.type, int) and self.particle_types[p.type]:
p.type = self.particle_types[p.type]
elif isinstance(p.type, int) and not self.particle_types[p.type]:
error_print('error: Cannot find type with tag %s in system '
'particles types' % p.type)
for b in self.bonds:
if isinstance(b.type, int) and self.bond_types[b.type]:
b.type = self.bond_types[b.type]
elif isinstance(b.type, int) and not self.bond_types[b.type]:
error_print('error: Cannot find type with tag %s in system '
'bond types' % b.type)
for a in self.angles:
if isinstance(a.type, int) and self.angle_types[a.type]:
a.type = self.angle_types[a.type]
            elif isinstance(a.type, int) and not self.angle_types[a.type]:
error_print('error: Cannot find type with tag %s in system '
'angle types' % a.type)
for d in self.dihedrals:
if isinstance(d.type, int) and self.dihedral_types[d.type]:
d.type = self.dihedral_types[d.type]
elif isinstance(d.type, int) and not self.dihedral_types[d.type]:
error_print('error: Cannot find type with tag %s in system '
'dihedral types' % d.type)
for i in self.impropers:
if isinstance(i.type, int) and self.improper_types[i.type]:
i.type = self.improper_types[i.type]
elif isinstance(i.type, int) and not self.improper_types[i.type]:
error_print('error: Cannot find type with tag %s in system '
'improper types' % i.type)
def objectify(self):
"""pysimm.system.System.objectify
Set references for :class:`~pysimm.system.Bond`, :class:`~pysimm.system.Angle`, :class:`~pysimm.system.Dihedral`, :class:`~pysimm.system.Improper` objects.
For example, if read from file that bond #1 is between particle 1 and 2 set :class:`~pysimm.system.Bond`.a to particles[1], etc.
Args:
None
Returns:
None
"""
if self.objectified:
return 'already objectified'
self.set_references()
for p in self.particles:
if not isinstance(p.molecule, Molecule):
if not self.molecules[p.molecule]:
m = Molecule()
m.tag = p.molecule
self.molecules.add(m)
p.molecule = self.molecules[p.molecule]
if not self.molecules[p.molecule.tag].particles[p.tag]:
self.molecules[p.molecule.tag].particles.add(p)
if p != self.molecules[p.molecule.tag].particles[p.tag]:
self.molecules[p.molecule.tag].particles.remove(p.tag, update=False)
self.molecules[p.molecule.tag].particles.add(p)
            for prop in ['bonds', 'angles', 'dihedrals', 'impropers']:
if not getattr(p, prop):
setattr(p, prop, ItemContainer())
for b in self.bonds:
if type(b.a) == int:
b.a = self.particles[b.a]
b.b = self.particles[b.b]
b.a.bonds.add(b)
b.b.bonds.add(b)
if b.a.molecule:
b.a.molecule.bonds.add(b)
for a in self.angles:
if type(a.a) == int:
a.a = self.particles[a.a]
a.b = self.particles[a.b]
a.c = self.particles[a.c]
if a.a.molecule:
a.a.molecule.angles.add(a)
for d in self.dihedrals:
if type(d.a) == int:
d.a = self.particles[d.a]
d.b = self.particles[d.b]
d.c = self.particles[d.c]
d.d = self.particles[d.d]
if d.a.molecule:
d.a.molecule.dihedrals.add(d)
for i in self.impropers:
if type(i.a) == int:
i.a = self.particles[i.a]
i.b = self.particles[i.b]
i.c = self.particles[i.c]
i.d = self.particles[i.d]
if i.a.molecule:
i.a.molecule.impropers.add(i)
self.objectified = True
def add_particle_bonding(self):
"""pysimm.system.System.add_particle_bonding
        Update :class:`~pysimm.system.Particle` objects such that :class:`~pysimm.system.Particle`.bonded_to contains other :class:`~pysimm.system.Particle` objects involved in bonding
Args:
None
Returns:
None
"""
for p in self.particles:
p.bonded_to = ItemContainer()
p.bonds = ItemContainer()
for b in self.bonds:
b.a.bonded_to.add(b.b)
b.a.bonds.add(b)
b.b.bonded_to.add(b.a)
b.b.bonds.add(b)
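    # Illustrative sketch: once add_particle_bonding() has run, the first
    # neighbors of any particle can be traversed through its bonded_to
    # container.
    #
    #   s.add_particle_bonding()
    #   for neighbor in s.particles[1].bonded_to:
    #       print(neighbor.tag, neighbor.type.name)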
def set_excluded_particles(self, bonds=True, angles=True, dihedrals=True):
"""pysimm.system.System.set_excluded_particles
        Updates :class:`~pysimm.system.Particle` objects such that :class:`~pysimm.system.Particle`.excluded_particles contains other :class:`~pysimm.system.Particle` objects involved in
1-2, 1-3, and/or 1-4 interactions
Args:
bonds: exclude particles involved in 1-2 interactions
angles: exclude particles involved in 1-3 interactions
dihedrals: exclude particles involved in 1-4 interactions
"""
for p in self.particles:
p.excluded_particles = ItemContainer()
if bonds:
for b in self.bonds:
if b.a.tag < b.b.tag:
b.a.excluded_particles.add(b.b)
else:
b.b.excluded_particles.add(b.a)
if angles:
for a in self.angles:
if a.a.tag < a.b.tag:
a.a.excluded_particles.add(a.b)
if a.a.tag < a.c.tag:
a.a.excluded_particles.add(a.c)
if a.b.tag < a.a.tag:
a.b.excluded_particles.add(a.a)
if a.b.tag < a.c.tag:
a.b.excluded_particles.add(a.c)
if a.c.tag < a.a.tag:
a.c.excluded_particles.add(a.a)
if a.c.tag < a.b.tag:
a.c.excluded_particles.add(a.b)
if dihedrals:
for d in self.dihedrals:
if d.a.tag < d.b.tag:
d.a.excluded_particles.add(d.b)
if d.a.tag < d.c.tag:
d.a.excluded_particles.add(d.c)
if d.a.tag < d.d.tag:
d.a.excluded_particles.add(d.d)
if d.b.tag < d.a.tag:
d.b.excluded_particles.add(d.a)
if d.b.tag < d.c.tag:
d.b.excluded_particles.add(d.c)
if d.b.tag < d.d.tag:
d.b.excluded_particles.add(d.d)
if d.c.tag < d.a.tag:
d.c.excluded_particles.add(d.a)
if d.c.tag < d.b.tag:
d.c.excluded_particles.add(d.b)
if d.c.tag < d.d.tag:
d.c.excluded_particles.add(d.d)
if d.d.tag < d.a.tag:
d.d.excluded_particles.add(d.a)
if d.d.tag < d.b.tag:
d.d.excluded_particles.add(d.b)
if d.d.tag < d.c.tag:
d.d.excluded_particles.add(d.c)
def set_atomic_numbers(self):
"""pysimm.system.System.set_atomic_numbers
Updates :class:`~pysimm.system.ParticleType` objects with atomic number based on :class:`~pysimm.system.ParticleType`.elem
Args:
None
Returns:
None
"""
        atomic_numbers = {
            'H': 1, 'He': 2, 'Li': 3, 'Be': 4, 'B': 5, 'C': 6, 'N': 7,
            'O': 8, 'F': 9, 'Ne': 10, 'Na': 11, 'Mg': 12, 'Al': 13,
            'Si': 14, 'P': 15, 'S': 16, 'Cl': 17, 'Ar': 18, 'K': 19,
            'Ca': 20, 'Br': 35, 'I': 53,
        }
        for pt in self.particle_types:
            if pt.elem in atomic_numbers:
                pt.atomic_number = atomic_numbers[pt.elem]
def add_particle_bonded_to(self, p, p0, f=None, sep=1.5):
"""pysimm.system.System.add_particle_bonded_to
Add new :class:`~pysimm.system.Particle` to :class:`~pysimm.system.System` bonded to p0 and automatically update new forcefield types
Args:
p: new :class:`~pysimm.system.Particle` object to be added
p0: original :class:`~pysimm.system.Particle` object in :class:`~pysimm.system.System` to which p will be bonded
            f: :class:`~pysimm.forcefield.Forcefield` object from which new force field types will be retrieved
            sep: distance from p0 at which p is placed if p has no coordinates default=1.5 (Angstrom)
Returns:
new Particle being added to system for convenient reference
"""
if p.x is None or p.y is None or p.z is None:
phi = random() * 2 * pi
theta = acos(random() * 2 - 1)
p.x = p0.x + sep * cos(theta) * sin(phi)
p.y = p0.y + sep * sin(theta) * sin(phi)
p.z = p0.z + sep * cos(phi)
if p.charge is None:
p.charge = 0
if p.molecule is None:
p.molecule = p0.molecule
self.add_particle(p)
self.add_bond(p0, p, f)
if not p0.bonded_to:
self.add_particle_bonding()
for p_ in p0.bonded_to:
if p_ is not p:
self.add_angle(p_, p0, p, f)
for p_b in p_.bonded_to:
if p_b is not p0:
self.add_dihedral(p_b, p_, p0, p, f)
return p
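    # Illustrative sketch: growing a new hydrogen onto particle 1. If the new
    # particle has no coordinates, a position is chosen at `sep` Angstroms
    # from p0. `h_type` (a ParticleType) and `ff` (a Forcefield) are
    # hypothetical stand-ins here.
    #
    #   new_h = s.add_particle_bonded_to(Particle(type=h_type), s.particles[1], f=ff)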
def add_particle(self, p):
"""pysimm.system.System.add_particle
Add new :class:`~pysimm.system.Particle` to :class:`~pysimm.system.System`.
Args:
p: new :class:`~pysimm.system.Particle` object to be added
Returns:
None
"""
self.particles.add(p)
def rotate(self, around=None, theta_x=0, theta_y=0, theta_z=0, rot_matrix=None):
"""pysimm.system.System.rotate
*** REQUIRES NUMPY ***
Rotates entire system around given :class:`~pysimm.system.Particle` by user defined angles
Args:
around: :class:`~pysimm.system.Particle` around which :class:`~pysimm.system.System` will be rotated default=None
            theta_x: angle by which the system will be rotated about the x axis
            theta_y: angle by which the system will be rotated about the y axis
            theta_z: angle by which the system will be rotated about the z axis
rot_matrix: rotation matrix to use for rotation
Returns:
None
"""
if not np:
raise PysimmError('pysimm.system.System.rotate function requires numpy')
theta_x = random() * 2 * pi if theta_x == 'random' else theta_x
theta_y = random() * 2 * pi if theta_y == 'random' else theta_y
theta_z = random() * 2 * pi if theta_z == 'random' else theta_z
if around is None:
around = []
self.set_cog()
around.append(self.cog[0])
around.append(self.cog[1])
around.append(self.cog[2])
elif isinstance(around, Particle):
around = [around.x, around.y, around.z]
if (isinstance(around, list) and len(around) == 3 and
len(set([isinstance(x, float) for x in around])) == 1 and isinstance(around[0], float)):
for p in self.particles:
p.x -= around[0]
p.y -= around[1]
p.z -= around[2]
if rot_matrix is not None:
p.x, p.y, p.z = [x[0] for x in (rot_matrix*np.matrix([[p.x], [p.y], [p.z]])).tolist()]
else:
p.x, p.y, p.z = rotate_vector(p.x, p.y, p.z, theta_x, theta_y, theta_z)
p.x += around[0]
p.y += around[1]
p.z += around[2]
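    # Illustrative sketch (requires numpy): rotating the whole system by
    # random angles about its center of gravity (around=None uses set_cog()).
    #
    #   s.rotate(theta_x='random', theta_y='random', theta_z='random')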
def make_new_bonds(self, p1=None, p2=None, f=None, angles=True, dihedrals=True, impropers=True):
"""pysimm.system.System.make_new_bonds
Makes new bond between two particles and updates new force field types
Args:
p1: :class:`~pysimm.system.Particle` object involved in new bond
p2: :class:`~pysimm.system.Particle` object involved in new bond
f: :class:`~pysimm.forcefield.Forcefield` object from which new force field types will be retrieved
angles: True to update new angles default=True
dihedrals: True to update new dihedrals default=True
impropers: True to update new impropers default=True
Returns:
None
"""
self.add_particle_bonding()
if p1.molecule is not p2.molecule:
if p1.molecule.particles.count < p2.molecule.particles.count:
old_molecule_tag = p1.molecule.tag
for p_ in p1.molecule.particles:
p_.molecule = p2.molecule
else:
old_molecule_tag = p2.molecule.tag
for p_ in p2.molecule.particles:
p_.molecule = p1.molecule
self.molecules.remove(old_molecule_tag)
self.add_bond(p1, p2, f)
if angles or dihedrals or impropers:
for p in p1.bonded_to:
if angles:
if p is not p2:
self.add_angle(p, p1, p2, f)
if dihedrals:
for pb in p.bonded_to:
if pb is not p1 and p is not p2:
self.add_dihedral(pb, p, p1, p2, f)
for p in p2.bonded_to:
if angles:
if p is not p1:
self.add_angle(p1, p2, p, f)
if dihedrals:
for pb in p.bonded_to:
if pb is not p2 and p is not p1:
self.add_dihedral(p1, p2, p, pb, f)
if dihedrals:
for pb1 in p1.bonded_to:
for pb2 in p2.bonded_to:
if pb1 is not p2 and pb2 is not p1:
self.add_dihedral(pb1, p1, p2, pb2, f)
if impropers:
if self.ff_class == '2':
for perm in permutations(p1.bonded_to, 3):
unique = True
for i in self.impropers:
if i.a is not p1:
continue
if set([i.b, i.c, i.d]) == set([perm[0], perm[1],
perm[2]]):
unique = False
break
if unique:
self.add_improper(p1, perm[0], perm[1], perm[2], f)
for perm in permutations(p2.bonded_to, 3):
unique = True
for i in self.impropers:
if i.a is not p2:
continue
if set([i.b, i.c, i.d]) == set([perm[0], perm[1],
perm[2]]):
unique = False
break
if unique:
self.add_improper(p2, perm[0], perm[1], perm[2], f)
def add_bond(self, a=None, b=None, f=None):
"""pysimm.system.System.add_bond
Add :class:`~pysimm.system.Bond` to system between two particles
Args:
a: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Bond`
b: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Bond`
f: :class:`~pysimm.forcefield.Forcefield` object from which new force field type will be retrieved
Returns:
None
"""
if a is b:
return
a_name = a.type.eq_bond or a.type.name
b_name = b.type.eq_bond or b.type.name
btype = self.bond_types.get('%s,%s' % (a_name, b_name))
if not btype and f:
btype = f.bond_types.get('%s,%s' % (a_name, b_name))
if btype:
bt = btype[0].copy()
self.bond_types.add(bt)
btype = self.bond_types.get('%s,%s' % (a_name, b_name))
if btype:
new_b = Bond(type=btype[0], a=a, b=b)
self.bonds.add(new_b)
if a.bonded_to is None or b.bonded_to is None:
self.add_particle_bonding()
if a.bonded_to and b not in a.bonded_to:
a.bonded_to.add(b)
if b.bonded_to and a not in b.bonded_to:
b.bonded_to.add(a)
else:
error_print('error: system does not contain bond type named %s,%s '
'or could not find type in forcefield supplied'
% (a_name, b_name))
return
def add_angle(self, a=None, b=None, c=None, f=None):
"""pysimm.system.System.add_angle
Add :class:`~pysimm.system.Angle` to system between three particles
Args:
a: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Angle`
b: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Angle` (middle particle)
c: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Angle`
f: :class:`~pysimm.forcefield.Forcefield` object from which new force field type will be retrieved
Returns:
None
"""
if a is c:
return
a_name = a.type.eq_angle or a.type.name
b_name = b.type.eq_angle or b.type.name
c_name = c.type.eq_angle or c.type.name
atype = self.angle_types.get(
'%s,%s,%s' % (a_name, b_name, c_name),
item_wildcard=None
)
if not atype and f:
atype = self.angle_types.get(
'%s,%s,%s' % (a_name, b_name, c_name)
)
atype.extend(
f.angle_types.get(
'%s,%s,%s' % (a_name, b_name, c_name)
)
)
atype = sorted(atype, key=lambda x: x.name.count('X'))
if atype:
if not self.angle_types.get(atype[0].name, item_wildcard=None):
atype = self.angle_types.add(atype[0].copy())
else:
atype = self.angle_types.get(atype[0].name, item_wildcard=None)[0]
elif atype:
atype = atype[0]
if atype:
self.angles.add(Angle(type=atype, a=a, b=b, c=c))
else:
error_print('error: system does not contain angle type named '
'%s,%s,%s or could not find type in forcefield supplied'
% (a_name, b_name, c_name))
return
def add_dihedral(self, a=None, b=None, c=None, d=None, f=None):
"""pysimm.system.System.add_dihedral
Add :class:`~pysimm.system.Dihedral` to system between four particles
Args:
a: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Dihedral`
b: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Dihedral` (middle particle)
c: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Dihedral` (middle particle)
d: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Dihedral`
f: :class:`~pysimm.forcefield.Forcefield` object from which new force field type will be retrieved
Returns:
None
"""
if a is c or b is d:
return
a_name = a.type.eq_dihedral or a.type.name
b_name = b.type.eq_dihedral or b.type.name
c_name = c.type.eq_dihedral or c.type.name
d_name = d.type.eq_dihedral or d.type.name
dtype = self.dihedral_types.get(
'%s,%s,%s,%s' % (a_name, b_name, c_name, d_name),
item_wildcard=None
)
if not dtype and f:
dtype = self.dihedral_types.get(
'%s,%s,%s,%s' % (a_name, b_name, c_name, d_name)
)
dtype.extend(
f.dihedral_types.get(
'%s,%s,%s,%s' % (a_name, b_name, c_name, d_name)
)
)
dtype = sorted(dtype, key=lambda x: x.name.count('X'))
if dtype:
if not self.dihedral_types.get(dtype[0].name, item_wildcard=None):
dtype = self.dihedral_types.add(dtype[0].copy())
else:
dtype = self.dihedral_types.get(dtype[0].name, item_wildcard=None)[0]
elif dtype:
dtype = dtype[0]
if dtype:
self.dihedrals.add(Dihedral(type=dtype, a=a, b=b, c=c, d=d))
else:
error_print('error: system does not contain dihedral type named '
'%s,%s,%s,%s or could not find type in forcefield '
'supplied' % (a_name, b_name,
c_name, d_name))
error_print('tags: %s %s %s %s' % (a.tag, b.tag, c.tag, d.tag))
return
def add_improper(self, a=None, b=None, c=None, d=None, f=None):
"""pysimm.system.System.add_improper
Add :class:`~pysimm.system.Improper` to system between four particles
Args:
            a: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Improper` (middle particle)
            b: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Improper`
            c: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Improper`
            d: :class:`~pysimm.system.Particle` involved in new :class:`~pysimm.system.Improper`
            f: :class:`~pysimm.forcefield.Forcefield` object from which new force field type will be retrieved
Returns:
None
"""
if a is b or a is c or a is d:
return
a_name = a.type.eq_improper or a.type.name
b_name = b.type.eq_improper or b.type.name
c_name = c.type.eq_improper or c.type.name
d_name = d.type.eq_improper or d.type.name
itype = self.improper_types.get('%s,%s,%s,%s'
% (a_name, b_name,
c_name, d_name),
improper_type=True,
item_wildcard=None)
if not itype and f:
itype = self.improper_types.get(
'%s,%s,%s,%s' % (a_name, b_name, c_name, d_name),
improper_type=True
)
itype.extend(
f.improper_types.get(
'%s,%s,%s,%s' % (a_name, b_name, c_name, d_name),
improper_type=True
)
)
itype = sorted(itype, key=lambda x: x.name.count('X'))
if itype:
if not self.improper_types.get(itype[0].name, item_wildcard=None, improper_type=True):
itype = self.improper_types.add(itype[0].copy())
else:
itype = self.improper_types.get(itype[0].name, item_wildcard=None, improper_type=True)[0]
elif itype:
itype = itype[0]
if itype:
self.impropers.add(Improper(type=itype, a=a, b=b, c=c, d=d))
else:
return
def check_forcefield(self):
"""pysimm.system.System.check_forcefield
Iterates through particles and prints the following:
tag
type name
type element
type description
bonded elements
Args:
None
Returns:
None
"""
if not self.objectified:
self.objectify()
for p in self.particles:
p.bond_elements = [x.a.type.elem if p is x.b else
x.b.type.elem for x in p.bonds]
p.nbonds = len(p.bond_elements)
print(p.tag, p.type.name, p.type.elem, p.type.desc, p.bond_elements)
def apply_forcefield(self, f, charges='default', set_box=True, box_padding=10,
update_ptypes=False, skip_ptypes=False):
"""pysimm.system.System.apply_forcefield
Applies force field data to :class:`~pysimm.system.System` based on typing rules defined in :class:`~pysimm.forcefield.Forcefield` object f
Args:
f: :class:`~pysimm.forcefield.Forcefield` object from which new force field type will be retrieved
charges: type of charges to be applied default='default'
set_box: Update simulation box information based on particle positions default=True
box_padding: Add padding to simulation box if updating dimensions default=10 (Angstroms)
update_ptypes: If True, update particle types based on current :class:`~pysimm.system.ParticleType` names default=False
skip_ptypes: if True, do not change particle types
Returns:
None
"""
self.ff_class = f.ff_class
self.forcefield = f.name
if update_ptypes:
self.update_particle_types_from_forcefield(f)
skip_ptypes = True
if not skip_ptypes:
f.assign_ptypes(self)
if self.bonds.count > 0:
f.assign_btypes(self)
f.assign_atypes(self)
f.assign_dtypes(self)
f.assign_itypes(self)
if charges:
f.assign_charges(self, charges=charges)
if set_box:
self.set_box(box_padding, center=False)
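    # Illustrative sketch of a typical typing workflow. Assumes the GAFF2
    # implementation shipped with pysimm.forcefield and that it accepts
    # charges='gasteiger'; any Forcefield subclass would work the same way.
    #
    #   from pysimm import forcefield
    #   s.apply_forcefield(forcefield.Gaff2(), charges='gasteiger')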
def apply_charges(self, f, charges='default'):
"""pysimm.system.System.apply_charges
Applies charges derived using method provided by user. Defaults to 'default'.
Calls :func:`~pysimm.forcefield.Forcefield.assign_charges` method of forcefield object provided.
Args:
f: :class:`~pysimm.forcefield.Forcefield` object
charges: type of charges to be applied default='default'
Returns:
None
"""
f.assign_charges(self, charges=charges)
def write_lammps_mol(self, out_data):
"""pysimm.system.System.write_lammps_mol
Write :class:`~pysimm.system.System` data formatted as LAMMPS molecule template
Args:
out_data: where to write data, file name or 'string'
Returns:
            None, or the molecule template as a string if out_data='string'
"""
if out_data == 'string':
out_file = StringIO()
else:
out_file = open(out_data, 'w+')
self.set_mass()
self.set_cog()
out_file.write('%s\n\n' % self.name)
out_file.write('%s atoms\n' % self.particles.count)
out_file.write('%s bonds\n' % self.bonds.count)
out_file.write('%s angles\n' % self.angles.count)
out_file.write('%s dihedrals\n' % self.dihedrals.count)
out_file.write('%s impropers\n' % self.impropers.count)
if self.particles.count > 0:
out_file.write('Coords\n\n')
for p in self.particles:
out_file.write('{} {} {} {}\n'.format(p.tag, p.x, p.y, p.z))
out_file.write('\n')
if self.particles.count > 0:
out_file.write('Types\n\n')
for p in self.particles:
out_file.write('{} {}\n'.format(p.tag, p.type.tag))
out_file.write('\n')
if self.particles.count > 0:
out_file.write('Charges\n\n')
for p in self.particles:
out_file.write('{} {}\n'.format(p.tag, p.charge))
out_file.write('\n')
if self.bonds.count > 0:
out_file.write('Bonds\n\n')
for b in self.bonds:
out_file.write('{} {} {} {}\n'.format(b.tag, b.type.tag, b.a.tag, b.b.tag))
out_file.write('\n')
if self.angles.count > 0:
out_file.write('Angles\n\n')
for a in self.angles:
out_file.write('{} {} {} {} {}\n'.format(a.tag, a.type.tag, a.a.tag, a.b.tag, a.c.tag))
out_file.write('\n')
if self.dihedrals.count > 0:
out_file.write('Dihedrals\n\n')
for d in self.dihedrals:
out_file.write('{} {} {} {} {} {}\n'.format(d.tag, d.type.tag, d.a.tag, d.b.tag, d.c.tag, d.d.tag))
out_file.write('\n')
if self.impropers.count > 0:
out_file.write('Impropers\n\n')
for i in self.impropers:
out_file.write('{} {} {} {} {} {}\n'.format(i.tag, i.type.tag, i.a.tag, i.b.tag, i.c.tag, i.d.tag))
if out_data == 'string':
s = out_file.getvalue()
out_file.close()
return s
else:
out_file.close()
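    # Illustrative sketch: passing 'string' returns the molecule template as
    # text instead of writing a file, which is convenient when assembling
    # LAMMPS input decks programmatically.
    #
    #   s.write_lammps_mol('water.mol')          # write to a file
    #   mol_text = s.write_lammps_mol('string')  # or capture as a string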
def write_lammps(self, out_data, **kwargs):
"""pysimm.system.System.write_lammps
Write :class:`~pysimm.system.System` data formatted for LAMMPS
Args:
out_data: where to write data, file name or 'string'
Returns:
            None, or the data file contents as a string if out_data='string'
"""
empty = kwargs.get('empty')
pair_style = kwargs.get('pair_style', self.pair_style)
bond_style = kwargs.get('bond_style', self.bond_style)
angle_style = kwargs.get('angle_style', self.angle_style)
dihedral_style = kwargs.get('dihedral_style', self.dihedral_style)
improper_style = kwargs.get('improper_style', self.improper_style)
if out_data == 'string':
out_file = StringIO()
else:
out_file = open(out_data, 'w+')
if empty:
out_file.write('%s\n\n' % self.name)
out_file.write('%s atoms\n' % 0)
out_file.write('%s bonds\n' % 0)
out_file.write('%s angles\n' % 0)
out_file.write('%s dihedrals\n' % 0)
out_file.write('%s impropers\n' % 0)
else:
out_file.write('%s\n\n' % self.name)
out_file.write('%s atoms\n' % self.particles.count)
out_file.write('%s bonds\n' % self.bonds.count)
out_file.write('%s angles\n' % self.angles.count)
out_file.write('%s dihedrals\n' % self.dihedrals.count)
out_file.write('%s impropers\n' % self.impropers.count)
out_file.write('\n')
out_file.write('%s atom types\n' % self.particle_types.count)
if self.bond_types.count > 0:
out_file.write('%s bond types\n' % self.bond_types.count)
if self.angle_types.count > 0:
out_file.write('%s angle types\n' % self.angle_types.count)
if self.dihedral_types.count > 0:
out_file.write('%s dihedral types\n' % self.dihedral_types.count)
if self.improper_types.count > 0:
out_file.write('%s improper types\n' % self.improper_types.count)
out_file.write('\n')
out_file.write('%f %f xlo xhi\n' % (self.dim.xlo, self.dim.xhi))
out_file.write('%f %f ylo yhi\n' % (self.dim.ylo, self.dim.yhi))
out_file.write('%f %f zlo zhi\n' % (self.dim.zlo, self.dim.zhi))
out_file.write('\n')
if self.particle_types.count > 0:
out_file.write('Masses\n\n')
for pt in self.particle_types:
out_file.write(pt.write_lammps('mass'))
out_file.write('\n')
if self.write_coeffs and self.particle_types.count > 0:
out_file.write('Pair Coeffs\n\n')
for pt in self.particle_types:
out_file.write(pt.write_lammps(pair_style))
out_file.write('\n')
if self.write_coeffs and self.bond_types.count > 0:
out_file.write('Bond Coeffs\n\n')
for b in self.bond_types:
out_file.write(b.write_lammps(bond_style))
out_file.write('\n')
if self.write_coeffs and self.angle_types.count > 0:
out_file.write('Angle Coeffs\n\n')
for a in self.angle_types:
out_file.write(a.write_lammps(angle_style))
out_file.write('\n')
if self.write_coeffs and (self.angle_types.count > 0 and (self.ff_class == '2' or
angle_style == 'class2')):
out_file.write('BondBond Coeffs\n\n')
for a in self.angle_types:
out_file.write(a.write_lammps(angle_style, cross_term='BondBond'))
out_file.write('\n')
out_file.write('BondAngle Coeffs\n\n')
for a in self.angle_types:
out_file.write(a.write_lammps(angle_style, cross_term='BondAngle'))
out_file.write('\n')
if self.write_coeffs and self.dihedral_types.count > 0:
out_file.write('Dihedral Coeffs\n\n')
for dt in self.dihedral_types:
out_file.write(dt.write_lammps(dihedral_style))
out_file.write('\n')
if self.write_coeffs and self.dihedral_types.count > 0 and (self.ff_class == '2' or
dihedral_style == 'class2'):
out_file.write('MiddleBondTorsion Coeffs\n\n')
for d in self.dihedral_types:
out_file.write(d.write_lammps(dihedral_style, cross_term='MiddleBond'))
out_file.write('\n')
out_file.write('EndBondTorsion Coeffs\n\n')
for d in self.dihedral_types:
out_file.write(d.write_lammps(dihedral_style, cross_term='EndBond'))
out_file.write('\n')
out_file.write('AngleTorsion Coeffs\n\n')
for d in self.dihedral_types:
out_file.write(d.write_lammps(dihedral_style, cross_term='Angle'))
out_file.write('\n')
out_file.write('AngleAngleTorsion Coeffs\n\n')
for d in self.dihedral_types:
out_file.write(d.write_lammps(dihedral_style, cross_term='AngleAngle'))
out_file.write('\n')
out_file.write('BondBond13 Coeffs\n\n')
for d in self.dihedral_types:
out_file.write(d.write_lammps(dihedral_style, cross_term='BondBond13'))
out_file.write('\n')
if self.write_coeffs and self.improper_types.count > 0:
out_file.write('Improper Coeffs\n\n')
for i in self.improper_types:
out_file.write(i.write_lammps(improper_style))
out_file.write('\n')
if self.write_coeffs and self.improper_types.count > 0 and (self.ff_class == '2' or
improper_style == 'class2'):
out_file.write('AngleAngle Coeffs\n\n')
for i in self.improper_types:
out_file.write(i.write_lammps(improper_style, cross_term='AngleAngle'))
out_file.write('\n')
if self.particles.count > 0 and not empty:
out_file.write('Atoms\n\n')
for p in self.particles:
if not p.molecule:
p.molecule = Item()
p.molecule.tag = 1
if not p.charge:
p.charge = 0
if isinstance(p.molecule, int):
out_file.write('%4d\t%d\t%d\t%s\t%s\t%s\t%s\n'
% (p.tag, p.molecule, p.type.tag, p.charge,
p.x, p.y, p.z))
else:
out_file.write('%4d\t%d\t%d\t%s\t%s\t%s\t%s\n'
% (p.tag, p.molecule.tag, p.type.tag, p.charge,
p.x, p.y, p.z))
out_file.write('\n')
out_file.write('Velocities\n\n')
for p in self.particles:
if not p.vx:
p.vx = 0.
if not p.vy:
p.vy = 0.
if not p.vz:
p.vz = 0.
out_file.write('%4d\t%s\t%s\t%s\n' % (p.tag, p.vx, p.vy, p.vz))
out_file.write('\n')
if self.bonds.count > 0 and not empty:
out_file.write('Bonds\n\n')
for b in self.bonds:
out_file.write('%4d\t%d\t%d\t%d\n'
% (b.tag, b.type.tag, b.a.tag, b.b.tag))
out_file.write('\n')
if self.angles.count > 0 and not empty:
out_file.write('Angles\n\n')
for a in self.angles:
out_file.write('%4d\t%d\t%d\t%d\t%d\n'
% (a.tag, a.type.tag, a.a.tag, a.b.tag, a.c.tag))
out_file.write('\n')
if self.dihedrals.count > 0 and not empty:
out_file.write('Dihedrals\n\n')
for d in self.dihedrals:
out_file.write('%4d\t%d\t%d\t%d\t%d\t%d\n'
% (d.tag, d.type.tag,
d.a.tag, d.b.tag, d.c.tag, d.d.tag))
out_file.write('\n')
if self.impropers.count > 0 and not empty:
out_file.write('Impropers\n\n')
for i in self.impropers:
if self.ff_class == '2' or improper_style == 'class2':
out_file.write('%4d\t%d\t%d\t%d\t%d\t%d\n'
% (i.tag, i.type.tag,
i.b.tag, i.a.tag, i.c.tag, i.d.tag))
else:
out_file.write('%4d\t%d\t%d\t%d\t%d\t%d\n'
% (i.tag, i.type.tag,
i.a.tag, i.b.tag, i.c.tag, i.d.tag))
out_file.write('\n')
if out_data == 'string':
s = out_file.getvalue()
out_file.close()
return s
else:
out_file.close()
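# Illustrative usage (not executed): style overrides are plain keyword arguments,
# so a class2 system could be written as, e.g.:
#   s.write_lammps('system.lmps', dihedral_style='class2')
#   data_str = s.write_lammps('string')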
def write_xyz(self, outfile='data.xyz', **kwargs):
"""pysimm.system.System.write_xyz
Write :class:`~pysimm.system.System` data in xyz format
Args:
outfile: where to write data, file name or 'string'
Returns:
None, or string of data file if outfile='string'
"""
elem = kwargs.get('elem', True)
append = kwargs.get('append')
if outfile == 'string':
out = StringIO()
else:
if append:
out = open(outfile, 'a')
else:
out = open(outfile, 'w')
out.write('%s\n' % self.particles.count)
out.write('xyz file written from pySIMM system module\n')
for p in self.particles:
if elem and p.type and p.type.elem is not None:
out.write('%s %s %s %s\n' % (p.type.elem, p.x, p.y, p.z))
elif elem and p.elem is not None:
out.write('%s %s %s %s\n' % (p.elem, p.x, p.y, p.z))
else:
out.write('%s %s %s %s\n' % (p.type.tag, p.x, p.y, p.z))
if outfile == 'string':
s = out.getvalue()
out.close()
return s
else:
out.close()
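# Illustrative usage (not executed): append successive snapshots to one xyz file,
# assuming `s` is a populated System:
#   s.write_xyz('traj.xyz', append=True)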
def write_chemdoodle_json(self, outfile, **kwargs):
"""pysimm.system.System.write_chemdoodle_json
Write :class:`~pysimm.system.System` data in chemdoodle json format
Args:
outfile: where to write data, file name or 'string'
Returns:
None, or string of data file if outfile='string'
"""
atoms = []
bonds = []
for p in self.particles:
if p.type and p.type.elem:
atoms.append({"x": p.x, "y": p.y, "z": p.z, "l": p.type.elem, "i": p.type.name, "c": p.charge})
elif p.elem and p.type:
atoms.append({"x": p.x, "y": p.y, "z": p.z, "l": p.elem, "i": p.type.name, "c": p.charge})
elif p.elem:
atoms.append({"x": p.x, "y": p.y, "z": p.z, "l": p.elem})
else:
atoms.append({"x": p.x, "y": p.y, "z": p.z, "i": p.type.name, "c": p.charge})
for b in self.bonds:
if b.order:
bonds.append({"b": b.a.tag-1, "e": b.b.tag-1, "o": b.order})
else:
bonds.append({"b": b.a.tag-1, "e": b.b.tag-1})
j = {"a": atoms, "b": bonds}
if outfile == 'string':
out = StringIO()
else:
out = open(outfile, 'w+')
out.write(json.dumps(j))
if outfile == 'string':
s = out.getvalue()
out.close()
return s
else:
out.close()
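# Note for readers: bond records above use zero-based atom indices ("b" and "e"),
# matching the ChemDoodle JSON convention, while pysimm tags are one-based.
# Illustrative usage (not executed):
#   json_str = s.write_chemdoodle_json('string')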
def write_mol(self, outfile='data.mol'):
"""pysimm.system.System.write_mol
Write :class:`~pysimm.system.System` data in mol format
Args:
outfile: where to write data, file name or 'string'
Returns:
None, or string of data file if outfile='string'
"""
if outfile == 'string':
out = StringIO()
else:
out = open(outfile, 'w+')
out.write('system\n')
out.write('written using pySIMM system module\n\n')
out.write('%s\t%s\n' % (self.particles.count, self.bonds.count))
for p in self.particles:
if not p.charge:
p.charge = 0.0
if p.type and p.type.elem:
out.write('%10.4f%10.4f%10.4f %s 0 %10.4f\n'
% (p.x, p.y, p.z, '{0: >3}'.format(p.type.elem),
p.charge))
elif p.elem:
out.write('%10.4f%10.4f%10.4f %s 0 %10.4f\n'
% (p.x, p.y, p.z, '{0: >3}'.format(p.elem),
p.charge))
elif p.type:
out.write('%10.4f%10.4f%10.4f %s 0 %10.4f\n'
% (p.x, p.y, p.z, '{0: >3}'.format(p.type.tag),
p.charge))
for b in self.bonds:
if b.order:
out.write('%s\t%s\t%s\t%s\t%s\t%s\n'
% (b.a.tag, b.b.tag, b.order, 0, 0, 0))
else:
out.write('%s\t%s\t%s\t%s\t%s\t%s\n'
% (b.a.tag, b.b.tag, 1, 0, 0, 0))
out.write('M END')
if outfile == 'string':
s = out.getvalue()
out.close()
return s
else:
out.close()
def write_pdb(self, outfile='data.pdb', type_names=True):
"""pysimm.system.System.write_pdb
Write :class:`~pysimm.system.System` data in pdb format
Args:
outfile: where to write data, file name or 'string'
Returns:
None, or string of data file if outfile='string'
"""
if outfile == 'string':
out = StringIO()
else:
out = open(outfile, 'w+')
out.write('{:<10}pdb written using pySIMM system module\n'
.format('HEADER'))
for m in self.molecules:
for p in sorted(m.particles, key=lambda x: x.tag):
if p.type:
out.write(
'{:<6}{:>5} {:>4} RES {:4} {: 8.3f}{: 8.3f}{: 8.3f}{:>22}{:>2}\n'.format(
'ATOM', p.tag, p.type.name[0:4] if type_names else p.type.elem,
p.molecule.tag, p.x, p.y, p.z, '', p.type.elem
)
)
elif p.elem:
out.write(
'{:<6}{:>5} {:>4} RES {:4} {: 8.3f}{: 8.3f}{: 8.3f}{:>22}{:>2}\n'.format(
'ATOM', p.tag, p.elem, p.molecule.tag,
p.x, p.y, p.z, '', p.elem
)
)
out.write('TER\n')
for p in self.particles:
if p.bonds:
out.write('{:<6}{:>5}'
.format('CONECT', p.tag))
for t in sorted([x.a.tag if p is x.b else x.b.tag for x in
p.bonds]):
out.write('{:>5}'.format(t))
out.write('\n')
if outfile == 'string':
s = out.getvalue()
out.close()
return s
else:
out.close()
def write_yaml(self, file_):
"""pysimm.system.System.write_yaml
Write :class:`~pysimm.system.System` data in yaml format
Args:
file_: file name to write data, or 'string' to return the serialized text
Returns:
None, or string if file_='string'
"""
n = self.copy()
s = vars(n)
for k, v in s.items():
if isinstance(v, ItemContainer):
s[k] = vars(v)
for k_, v_ in s[k].items():
if k_ == '_dict':
for t, i in v_.items():
s[k][k_][t] = vars(i)
for key, value in s[k][k_][t].items():
if isinstance(value, ItemContainer) or (isinstance(value, list) and
value and isinstance(value[0], Item)):
s[k][k_][t][key] = [x.tag for x in value]
elif isinstance(value, Item) or (isinstance(value, System) and value.tag):
s[k][k_][t][key] = value.tag
elif isinstance(v, Item):
s[k] = vars(v)
else:
s[k] = v
if file_ == 'string':
f = StringIO()
f.write(json.dumps(s, indent=4, separators=(',', ': ')))
yaml_ = f.getvalue()
f.close()
return yaml_
with open(file_, 'w') as f:
f.write(json.dumps(s, indent=4, separators=(',', ': ')))
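# Note for readers: despite the name, write_yaml serializes with json.dumps; the
# output is still valid YAML because JSON is a subset of YAML. Illustrative round
# trip (not executed):
#   s.write_yaml('system.yaml')
#   s2 = read_yaml('system.yaml')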
def write_cssr(self, outfile='data.cssr', **kwargs):
"""pysimm.system.System.write_cssr
Write :class:`~pysimm.system.System` data in cssr format
file format: line, format, contents
1: 38X, 3F8.3 : - length of the three cell parameters (a, b, and c) in angstroms.
2: 21X, 3F8.3, 4X, 'SPGR =', I3, 1X, A11 : - a, b, g in degrees, space group number, space group name.
3: 2I4, 1X, A60 : - Number of atoms stored, coordinate system flag (0=fractional, 1=orthogonal coordinates in Angstrom), first title.
4: A53 : - A line of text that can be used to describe the file.
5-: I4, 1X, A4, 2X, 3(F9.5,1X), 8I4, 1X, F7.3 : - Atom serial number, atom name, x, y, z coordinates, bonding connectivities (max 8), charge.
Note: The atom name is a concatenation of the element symbol and the atom serial number.
Args:
outfile: where to write data, file name or 'string'
frac: 0 for using fractional coordinates
aname: 0 for using element as atom name; else using atom type name
Returns:
None, or string of data file if outfile='string'
"""
if outfile == 'string':
out = StringIO()
else:
out = open(outfile, 'w+')
frac = kwargs.get('frac', 1)
aname = kwargs.get('aname', 0)
out.write('%s%8.3f%8.3f%8.3f\n' % (38*' ', self.dim.dx, self.dim.dy, self.dim.dz))
out.write('%s%8.3f%8.3f%8.3f SPGR= %3d %s\n' % (21*' ', 90.0, 90.0, 90.0, 1, 'P 1'))
out.write('%4d%4d %s\n' % (self.particles.count, frac, 'CSSR written using pySIMM system module'))
out.write('%s\n' % self.name)
for p in self.particles:
if not p.charge:
p.charge = 0.0
if p.type:
if aname == 0:
if p.type.elem:
name = p.type.elem
elif p.elem:
name = p.elem
else:
name = p.type.tag
else:
if p.type.name:
name = p.type.name
else:
name = p.type.tag
else:
name = p.tag
if frac == 0:
x = p.x/self.dim.dx
y = p.y/self.dim.dy
z = p.z/self.dim.dz
else:
x = p.x
y = p.y
z = p.z
bonds = ''
if p.bonds:
n_bonds = 0
for b in p.bonds:
if p is b.a:
bonds += ' {:4d}'.format(b.b.tag)
else:
bonds += ' {:4d}'.format(b.a.tag)
n_bonds += 1
for i in range(n_bonds+1, 9):
bonds = bonds + ' {:4d}'.format(0)
out.write('%4d %4s %9.5f %9.5f %9.5f %s %7.3f\n'
% (p.tag, name, x, y, z, bonds, p.charge))
out.write('\n')
if outfile == 'string':
s = out.getvalue()
out.close()
return s
else:
out.close()
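# Illustrative usage (not executed): set the coordinate-system flag to 0 to write
# fractional coordinates instead of Cartesian ones:
#   s.write_cssr('data.cssr', frac=0)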
def consolidate_types(self):
"""pysimm.system.System.consolidate_types
Removes duplicate types and reassigns references
Args:
None
Returns:
None
"""
for pt in self.particle_types:
for dup in self.particle_types:
if pt is not dup and pt.name == dup.name:
for p in self.particles:
if p.type == dup:
p.type = pt
self.particle_types.remove(dup.tag)
for bt in self.bond_types:
for dup in self.bond_types:
if bt is not dup and bt.name == dup.name:
for b in self.bonds:
if b.type == dup:
b.type = bt
self.bond_types.remove(dup.tag)
for at in self.angle_types:
for dup in self.angle_types:
if at is not dup and at.name == dup.name:
for a in self.angles:
if a.type == dup:
a.type = at
self.angle_types.remove(dup.tag)
for dt in self.dihedral_types:
for dup in self.dihedral_types:
if dt is not dup and dt.name == dup.name:
for d in self.dihedrals:
if d.type == dup:
d.type = dt
self.dihedral_types.remove(dup.tag)
for it in self.improper_types:
for dup in self.improper_types:
if it is not dup and it.name == dup.name:
for i in self.impropers:
if i.type == dup:
i.type = it
self.improper_types.remove(dup.tag)
def set_cog(self):
"""pysimm.system.System.set_cog
Calculate center of gravity of :class:`~pysimm.system.System` and assign to :class:`~pysimm.system.System`.cog
Args:
None
Returns:
None
"""
self.cog = [0, 0, 0]
for p in self.particles:
self.cog[0] += p.x
self.cog[1] += p.y
self.cog[2] += p.z
if self.particles.count:
self.cog = [c / self.particles.count for c in self.cog]
def shift_particles(self, shiftx, shifty, shiftz):
"""pysimm.system.System.shift_particles
Shifts all particles by shiftx, shifty, shiftz. Recalculates cog.
Args:
shiftx: distance to shift particles in x direction
shifty: distance to shift particles in y direction
shiftz: distance to shift particles in z direction
Returns:
None
"""
for p in self.particles:
p.translate(shiftx, shifty, shiftz)
self.set_cog()
def center(self, what='particles', at=[0, 0, 0], move_both=True):
"""pysimm.system.System.center
Centers particles center of geometry or simulation box at given coordinate. A vector is defined based on the current coordinate for the center of either the particles or the simulation box and the "at" parameter. This shift vector is applied to the entity defined by the "what" parameter. Optionally, both the particles and the box can be shifted by the same vector.
Args:
what: what is being centered: "particles" or "box"
at: new coordinate for center of particles or box
move_both: if True, determines the shift vector from the "what" and "at" parameters and applies it to both the particles and the box. If False, shifts only the entity selected by "what".
Returns:
None
"""
if what == 'particles':
self.set_cog()
move_vec = [at[n] - self.cog[n] for n in range(3)]
self.shift_particles(*move_vec)
if move_both:
self.dim.translate(*move_vec)
elif what == 'box':
self.dim.size()
box_center = [self.dim.xlo+self.dim.dx/2, self.dim.ylo+self.dim.dy/2, self.dim.zlo+self.dim.dz/2]
move_vec = [at[n] - box_center[n] for n in range(3)]
self.dim.translate(*move_vec)
if move_both:
self.shift_particles(*move_vec)
else:
error_print('can only choose to center "particles" or "box"')
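# Illustrative usage (not executed): put the particles' center of geometry at the
# origin and drag the simulation box along with them:
#   s.center('particles', at=[0, 0, 0], move_both=True)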
def center_system(self):
"""pysimm.system.System.center_system
DEPRECATED: Use :class:`~pysimm.system.System`.center('box', [0, 0, 0], True) instead
Args:
None
Returns:
None
"""
warning_print("DEPRECATED: Use System.center('box', [0, 0, 0], True) instead of System.center_system())")
self.center('box', [0, 0, 0], True)
def center_at_origin(self):
"""pysimm.system.System.center_at_origin
DEPRECATED: Use :class:`~pysimm.system.System`.center('particles', [0, 0, 0], True) instead
Args:
None
Returns:
None
"""
warning_print("DEPRECATED: Use System.center('particles', [0, 0, 0], True) instead of System.center_at_origin())")
self.center('particles', [0, 0, 0], True)
def set_mass(self):
"""pysimm.system.System.set_mass
Set total mass of particles in :class:`~pysimm.system.System`
Args:
None
Returns:
None
"""
self.mass = 0
for p in self.particles:
if p.type.mass is None:
self.mass = 0
warning_print('Some particles do not have a mass')
break
self.mass += p.type.mass
def set_volume(self):
"""pysimm.system.System.set_volume
Set volume of :class:`~pysimm.system.System` based on Dimension
Args:
None
Returns:
None
"""
if self.dim.check():
self.volume = ((self.dim.xhi - self.dim.xlo) *
(self.dim.yhi - self.dim.ylo) *
(self.dim.zhi - self.dim.zlo))
def set_density(self):
"""pysimm.system.System.set_density
Calculate density of :class:`~pysimm.system.System` from mass and volume
Args:
None
Returns:
None
"""
self.set_mass()
self.set_volume()
if self.mass and self.volume:
self.density = self.mass / 6.02e23 / self.volume * 1e24
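# Unit check for the expression above: particle masses accumulate in g/mol and the
# box volume is in cubic angstroms, so dividing by Avogadro's number (6.02e23)
# converts to grams and the factor 1e24 converts A^3 to cm^3, giving g/cm^3.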
def set_velocity(self):
"""pysimm.system.System.set_velocity
Calculate total velocity of particles in :class:`~pysimm.system.System`
Args:
None
Returns:
None
"""
self.vx = 0.0
self.vy = 0.0
self.vz = 0.0
for p in self.particles:
if p.vx is None:
p.vx = 0
self.vx += p.vx
if p.vy is None:
p.vy = 0
self.vy += p.vy
if p.vz is None:
p.vz = 0
self.vz += p.vz
def zero_velocity(self):
"""pysimm.system.System.zero_velocity
Enforce zero shift velocity in :class:`~pysimm.system.System`
Args:
None
Returns:
None
"""
self.set_velocity()
shift_x = shift_y = shift_z = 0.0
if self.vx != 0:
shift_x = self.vx / self.particles.count
if self.vy != 0:
shift_y = self.vy / self.particles.count
if self.vz != 0:
shift_z = self.vz / self.particles.count
if shift_x != 0 or shift_y != 0 or shift_z != 0:
for p in self.particles:
p.vx -= shift_x
p.vy -= shift_y
p.vz -= shift_z
self.set_velocity()
def set_box(self, padding=0., center=True):
"""pysimm.system.System.set_box
Update :class:`~pysimm.system.System`.dim with user defined padding. Used to construct a simulation box if it doesn't exist, or adjust the size of the simulation box following system modifications.
Args:
padding: add padding to all sides of box (Angstrom)
center: if True, place center of box at origin default=True
Returns:
None
"""
xmin = ymin = zmin = sys.float_info.max
xmax = ymax = zmax = -sys.float_info.max  # float_info.min is the smallest positive float, not the most negative
for p in self.particles:
if p.x < xmin:
xmin = p.x
if p.x > xmax:
xmax = p.x
if p.y < ymin:
ymin = p.y
if p.y > ymax:
ymax = p.y
if p.z < zmin:
zmin = p.z
if p.z > zmax:
zmax = p.z
self.dim.xlo = xmin - padding
self.dim.xhi = xmax + padding
self.dim.ylo = ymin - padding
self.dim.yhi = ymax + padding
self.dim.zlo = zmin - padding
self.dim.zhi = zmax + padding
if center:
self.center('particles', [0, 0, 0], True)
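# Illustrative usage (not executed): rebuild the box with 2 A of padding on every
# side without recentering the particles:
#   s.set_box(padding=2.0, center=False)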
def set_mm_dist(self, molecules=None):
"""pysimm.system.System.set_mm_dist
Calculate molecular mass distribution (mainly for polymer systems).
Sets :class:`~pysimm.system.System`.mw, :class:`~pysimm.system.System`.mn, and :class:`~pysimm.system.System`.dispersity
Args:
molecules: :class:`~pysimm.utils.ItemContainer` of molecules to calculate distributions default='all'
Returns:
None
"""
if molecules is None or molecules == 'all':
molecules = self.molecules
for m in molecules:
m.set_mass()
self.mn = 0
self.mw = 0
for m in molecules:
self.mn += m.mass
self.mw += pow(m.mass, 2)
self.mw /= self.mn
self.mn /= molecules.count
self.dispersity = self.mw / self.mn
self.pdi = self.mw / self.mn
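# Worked example of the accumulators above: Mn = sum(m_i)/N and
# Mw = sum(m_i**2)/sum(m_i). For two chains of mass 100 and 300,
# Mn = 400/2 = 200, Mw = (100**2 + 300**2)/400 = 250, so dispersity = 1.25.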
def set_frac_free_volume(self, v_void=None):
"""pysimm.system.System.set_frac_free_volume
Calculates fractional free volume from void volume and bulk density
Args:
v_void: void volume if not defined in :class:`~pysimm.system.System`.void_volume default=None
Returns:
None
"""
if not v_void and not self.void_volume:
error_print('Void volume not provided, cannot calculate fractional free volume')
return
else:
self.set_density()
# prefer the explicit v_void argument over the stored attribute when both are given
self.frac_free_volume = calc.frac_free_volume(1/self.density, v_void or self.void_volume)
if not self.frac_free_volume or self.frac_free_volume < 0:
self.frac_free_volume = 0.0
def visualize(self, vis_exec='vmd', **kwargs):
"""pysimm.system.System.visualize
Visualize system in third party software with given executable. Software must accept pdb or xyz as first
command line argument.
Args:
vis_exec: executable to launch visualization software default='vmd'
unwrap (optional): if True, unwrap :class:`~pysimm.system.System` first default=None
format (optional): set format default='xyz'
Returns:
None
"""
if not call:
raise PysimmError('pysimm.system.System.visualize function requires subprocess.call')
unwrap = kwargs.get('unwrap')
format = kwargs.get('format', 'xyz')
verbose_print(self.dim.dx, self.dim.xlo, self.dim.xhi)
verbose_print(self.dim.dy, self.dim.ylo, self.dim.yhi)
verbose_print(self.dim.dz, self.dim.zlo, self.dim.zhi)
if unwrap:
self.unwrap()
if format == 'xyz':
name_ = 'pysimm_temp.xyz'
self.write_xyz(name_)
elif format == 'pdb':
name_ = 'pysimm_temp.pdb'
self.write_pdb(name_)
call('%s %s' % (vis_exec, name_), shell=True)
os.remove(name_)
def viz(self, **kwargs):
self.visualize(vis_exec='vmd', unwrap=False, format='xyz', **kwargs)
class Molecule(System):
"""pysimm.system.Molecule
Very similar to :class:`~pysimm.system.System`, but requires less information
"""
def __init__(self, **kwargs):
System.__init__(self, **kwargs)
mt = kwargs.get('tag')
if mt and isinstance(mt, int):
self.tag = mt
def read_yaml(file_, **kwargs):
"""pysimm.system.read_yaml
Interprets yaml file and creates :class:`~pysimm.system.System` object
Args:
file_: yaml file name
Returns:
:class:`~pysimm.system.System` object
"""
if os.path.isfile(file_):
dict_ = json.loads(open(file_).read())
else:
dict_ = json.loads(file_)
s = System()
for k, v in dict_.items():
if not isinstance(v, dict):
setattr(s, k, v)
if isinstance(dict_.get('dim'), dict):
s.dim = Dimension(**dict_.get('dim'))
if isinstance(dict_.get('particle_types'), dict):
s.particle_types = ItemContainer()
for pt in dict_.get('particle_types').get('_dict').values():
s.particle_types.add(ParticleType(**pt))
if isinstance(dict_.get('bond_types'), dict):
s.bond_types = ItemContainer()
for bt in dict_.get('bond_types').get('_dict').values():
s.bond_types.add(BondType(**bt))
if isinstance(dict_.get('angle_types'), dict):
s.angle_types = ItemContainer()
for at in dict_.get('angle_types').get('_dict').values():
s.angle_types.add(AngleType(**at))
if isinstance(dict_.get('dihedral_types'), dict):
s.dihedral_types = ItemContainer()
for dt in dict_.get('dihedral_types').get('_dict').values():
s.dihedral_types.add(DihedralType(**dt))
if isinstance(dict_.get('improper_types'), dict):
s.improper_types = ItemContainer()
for it in dict_.get('improper_types').get('_dict').values():
s.improper_types.add(ImproperType(**it))
if isinstance(dict_.get('particles'), dict):
s.particles = ItemContainer()
for p in dict_.get('particles').get('_dict').values():
s.particles.add(Particle(**p))
if isinstance(dict_.get('bonds'), dict):
s.bonds = ItemContainer()
for b in dict_.get('bonds').get('_dict').values():
s.bonds.add(Bond(**b))
if isinstance(dict_.get('angles'), dict):
s.angles = ItemContainer()
for a in dict_.get('angles').get('_dict').values():
s.angles.add(Angle(**a))
if isinstance(dict_.get('dihedrals'), dict):
s.dihedrals = ItemContainer()
for d in dict_.get('dihedrals').get('_dict').values():
s.dihedrals.add(Dihedral(**d))
if isinstance(dict_.get('impropers'), dict):
s.impropers = ItemContainer()
for i in dict_.get('impropers').get('_dict').values():
s.impropers.add(Improper(**i))
if isinstance(dict_.get('molecules'), dict):
s.molecules = ItemContainer()
for m in dict_.get('molecules').get('_dict').values():
mol = Molecule()
for k, v in m.items():
if isinstance(v, list) and not v:
setattr(mol, k, ItemContainer())
else:
setattr(mol, k, v)
particles = [x for x in mol.particles]
mol.particles = ItemContainer()
for n in particles:
mol.particles.add(s.particles[n])
bonds = [x for x in mol.bonds]
mol.bonds = ItemContainer()
for n in bonds:
mol.bonds.add(s.bonds[n])
angles = [x for x in mol.angles]
mol.angles = ItemContainer()
for n in angles:
mol.angles.add(s.angles[n])
dihedrals = [x for x in mol.dihedrals]
mol.dihedrals = ItemContainer()
for n in dihedrals:
mol.dihedrals.add(s.dihedrals[n])
impropers = [x for x in mol.impropers]
mol.impropers = ItemContainer()
for n in impropers:
mol.impropers.add(s.impropers[n])
s.molecules.add(mol)
for p in s.particles:
if s.particle_types[p.type]:
p.type = s.particle_types[p.type]
if s.molecules[p.molecule]:
p.molecule = s.molecules[p.molecule]
bonds = [x for x in p.bonds]
p.bonds = ItemContainer()
for n in bonds:
p.bonds.add(s.bonds[n])
angles = [x for x in p.angles]
p.angles = ItemContainer()
for n in angles:
p.angles.add(s.angles[n])
dihedrals = [x for x in p.dihedrals]
p.dihedrals = ItemContainer()
for n in dihedrals:
p.dihedrals.add(s.dihedrals[n])
impropers = [x for x in p.impropers]
p.impropers = ItemContainer()
for n in impropers:
p.impropers.add(s.impropers[n])
for b in s.bonds:
if s.bond_types[b.type]:
b.type = s.bond_types[b.type]
b.a = s.particles[b.a]
b.b = s.particles[b.b]
for a in s.angles:
if s.angle_types[a.type]:
a.type = s.angle_types[a.type]
a.a = s.particles[a.a]
a.b = s.particles[a.b]
a.c = s.particles[a.c]
for d in s.dihedrals:
if s.dihedral_types[d.type]:
d.type = s.dihedral_types[d.type]
d.a = s.particles[d.a]
d.b = s.particles[d.b]
d.c = s.particles[d.c]
d.d = s.particles[d.d]
for i in s.impropers:
if s.improper_types[i.type]:
i.type = s.improper_types[i.type]
i.a = s.particles[i.a]
i.b = s.particles[i.b]
i.c = s.particles[i.c]
i.d = s.particles[i.d]
return s
def read_xyz(file_, **kwargs):
"""pysimm.system.read_xyz
Interprets xyz file and creates :class:`~pysimm.system.System` object
Args:
file_: xyz file name
quiet(optional): if False, print status
Returns:
:class:`~pysimm.system.System` object
"""
quiet = kwargs.get('quiet')
if os.path.isfile(file_):
debug_print('reading file')
f = open(file_)
elif isinstance(file_, str):
debug_print('reading string')
f = StringIO(file_)
s = System()
nparticles = int(next(f).strip())
name = next(f).strip()
s.name = name
for _ in range(nparticles):
elem, x, y, z = next(f).split()
x = float(x)
y = float(y)
z = float(z)
s.particles.add(Particle(elem=elem, x=x, y=y, z=z))
f.close()
for p in s.particles:
pt = s.particle_types.get(p.elem)
if pt:
p.type = pt[0]
else:
pt = ParticleType(elem=p.elem, name=p.elem)
p.type = pt
s.particle_types.add(pt)
if not quiet:
verbose_print('read %s particles' % s.particles.count)
s.set_box(padding=0.5)
return s
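# Illustrative usage (not executed): read coordinates into a new System; element
# symbols become minimal ParticleTypes and a padded box is constructed:
#   s = read_xyz('molecule.xyz')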
def read_chemdoodle_json(file_, **kwargs):
"""pysimm.system.read_chemdoodle_json
Interprets ChemDoodle JSON (Java Script Object Notation) file and creates :class:`~pysimm.system.System` object
Args:
file_: json file name
quiet(optional): if False, print status
Returns:
:class:`~pysimm.system.System` object
"""
quiet = kwargs.get('quiet')
if os.path.isfile(file_):
debug_print('reading file')
f = open(file_)
elif isinstance(file_, str):
debug_print('reading string')
f = StringIO(file_)
s = System()
data = json.loads(f.read())
for a in data.get('a'):
s.particles.add(Particle(
x=a.get('x'),
y=a.get('y'),
z=a.get('z'),
charge=a.get('c'),
elem=a.get('l'),
type_name=a.get('i')
))
for b in data.get('b'):
s.bonds.add(Bond(
a=s.particles[b.get('b')+1],
b=s.particles[b.get('e')+1],
order=b.get('o')
))
return s
def read_lammps(data_file, **kwargs):
"""pysimm.system.read_lammps
Interprets LAMMPS data file and creates :class:`~pysimm.system.System` object
Args:
data_file: LAMMPS data file name
quiet(optional): if False, print status
atom_style (optional): option to let user override (understands charge, molecular, full)
pair_style (optional): option to let user override
bond_style (optional): option to let user override
angle_style (optional): option to let user override
dihedral_style (optional): option to let user override
improper_style (optional): option to let user override
set_types (optional): if True, objectify default=True
name (optional): provide name for system
Returns:
:class:`~pysimm.system.System` object
"""
atom_style = kwargs.get('atom_style')
pair_style = kwargs.get('pair_style')
bond_style = kwargs.get('bond_style')
angle_style = kwargs.get('angle_style')
dihedral_style = kwargs.get('dihedral_style')
improper_style = kwargs.get('improper_style')
set_types = kwargs.get('set_types', True)
name = kwargs.get('name')
quiet = kwargs.get('quiet')
if os.path.isfile(data_file):
if not quiet:
verbose_print('reading lammps data file "%s"' % data_file)
f = open(data_file)
elif isinstance(data_file, str):
if not quiet:
verbose_print('reading lammps data file from string')
f = StringIO(data_file)
else:
raise PysimmError('pysimm.system.read_lammps requires either '
'file or string as first argument')
if name:
if not quiet:
verbose_print('creating pysimm.system.System object with name %s'
% name)
s = System(name=name)
else:
s = System(name=next(f).strip())
nparticles = nparticle_types = nbonds = nbond_types = 0
nangles = nangle_types = ndihedrals = ndihedral_types = 0
nimpropers = nimproper_types = 0
for line in f:
line = line.split()
if len(line) > 1 and line[1] == 'atoms':
nparticles = int(line[0])
elif len(line) > 1 and line[1] == 'atom':
nparticle_types = int(line[0])
elif len(line) > 1 and line[1] == 'bonds':
nbonds = int(line[0])
elif len(line) > 1 and line[1] == 'bond':
nbond_types = int(line[0])
elif len(line) > 1 and line[1] == 'angles':
nangles = int(line[0])
elif len(line) > 1 and line[1] == 'angle':
nangle_types = int(line[0])
elif len(line) > 1 and line[1] == 'dihedrals':
ndihedrals = int(line[0])
elif len(line) > 1 and line[1] == 'dihedral':
ndihedral_types = int(line[0])
elif len(line) > 1 and line[1] == 'impropers':
nimpropers = int(line[0])
elif len(line) > 1 and line[1] == 'improper':
nimproper_types = int(line[0])
elif len(line) > 3 and line[2] == 'xlo':
s.dim.xlo = float(line[0])
s.dim.xhi = float(line[1])
elif len(line) > 3 and line[2] == 'ylo':
s.dim.ylo = float(line[0])
s.dim.yhi = float(line[1])
elif len(line) > 3 and line[2] == 'zlo':
s.dim.zlo = float(line[0])
s.dim.zhi = float(line[1])
elif len(line) > 0 and line[0] == 'Masses':
next(f)
for i in range(nparticle_types):
pt = ParticleType.parse_lammps(next(f), 'mass')
if s.particle_types[pt.tag]:
s.particle_types[pt.tag].mass = pt.mass
else:
s.particle_types.add(pt)
if not quiet:
verbose_print('read masses for %s ParticleTypes'
% s.particle_types.count)
elif len(line) > 0 and line[0] == 'Pair':
if '#' in line and not pair_style:
line = ' '.join(line).split('#')
pair_style = line[1].strip()
next(f)
for i in range(nparticle_types):
line = next(f)
if not pair_style:
warning_print('unknown pair style - inferring from number of parameters (2=lj 3=buck 4=charmm)')
pair_style = ParticleType.guess_style(
len(line.split('#')[0].split()[1:])
)
if pair_style:
pt = ParticleType.parse_lammps(line, pair_style)
if s.particle_types[pt.tag]:
s.particle_types[pt.tag].set(**vars(pt))
else:
s.particle_types.add(pt)
verbose_print('read "%s" nonbonded parameters '
'for %s ParticleTypes'
% (pair_style, s.particle_types.count))
elif len(line) > 0 and line[0] == 'Bond':
next(f)
for i in range(nbond_types):
line = next(f)
if not bond_style:
warning_print('unknown bond_style - inferring from number of parameters (2=harmonic 4=class2)')
bond_style = BondType.guess_style(
len(line.split('#')[0].split()[1:])
)
if bond_style:
s.bond_types.add(BondType.parse_lammps(line, bond_style))
verbose_print('read "%s" bond parameters '
'for %s BondTypes'
% (bond_style, s.bond_types.count))
elif len(line) > 0 and line[0] == 'Angle':
next(f)
for i in range(nangle_types):
line = next(f)
if not angle_style:
warning_print('unknown angle_style - inferring from number of parameters (2=harmonic)')
angle_style = AngleType.guess_style(
len(line.split('#')[0].split()[1:])
)
if angle_style:
s.angle_types.add(AngleType.parse_lammps(line, angle_style))
verbose_print('read "%s" angle parameters '
'for %s AngleTypes'
% (angle_style, s.angle_types.count))
elif len(line) > 0 and line[0] == 'BondBond':
next(f)
for i in range(nangle_types):
line = next(f).strip().split()
tag = int(line[0])
s.angle_types[tag].m = float(line[1])
s.angle_types[tag].r1 = float(line[2])
s.angle_types[tag].r2 = float(line[3])
verbose_print('read "%s" angle (bond-bond) '
'parameters for %s AngleTypes'
% (angle_style, s.angle_types.count))
elif len(line) > 0 and line[0] == 'BondAngle':
next(f)
for i in range(nangle_types):
line = next(f).strip().split()
tag = int(line[0])
s.angle_types[tag].n1 = float(line[1])
s.angle_types[tag].n2 = float(line[2])
s.angle_types[tag].r1 = float(line[3])
s.angle_types[tag].r2 = float(line[4])
if angle_style:
verbose_print('read "%s" angle (bond-angle) '
'parameters for %s AngleTypes'
% (angle_style, s.angle_types.count))
elif len(line) > 0 and line[0] == 'Dihedral':
next(f)
for i in range(ndihedral_types):
line = next(f)
if not dihedral_style:
warning_print('unknown dihedral_style - inferring from number of parameters (3=harmonic 6=class2 [7, 10]=fourier)')
dihedral_style = DihedralType.guess_style(
len(line.split('#')[0].split()[1:])
)
if dihedral_style:
dt = DihedralType.parse_lammps(line, dihedral_style)
s.dihedral_types.add(dt)
verbose_print('read "%s" dihedral parameters '
'for %s DihedralTypes'
% (dihedral_style, s.dihedral_types.count))
elif len(line) > 0 and line[0] == 'MiddleBondTorsion':
next(f)
for i in range(ndihedral_types):
line = next(f).strip().split()
tag = int(line[0])
s.dihedral_types[tag].a1 = float(line[1])
s.dihedral_types[tag].a2 = float(line[2])
s.dihedral_types[tag].a3 = float(line[3])
s.dihedral_types[tag].r2 = float(line[4])
if dihedral_style:
verbose_print('read "%s" dihedral '
'(middle-bond-torsion) parameters for '
'%s DihedralTypes'
% (dihedral_style, ndihedral_types))
elif len(line) > 0 and line[0] == 'EndBondTorsion':
next(f)
for i in range(ndihedral_types):
line = next(f).strip().split()
tag = int(line[0])
s.dihedral_types[tag].b1 = float(line[1])
s.dihedral_types[tag].b2 = float(line[2])
s.dihedral_types[tag].b3 = float(line[3])
s.dihedral_types[tag].c1 = float(line[4])
s.dihedral_types[tag].c2 = float(line[5])
s.dihedral_types[tag].c3 = float(line[6])
s.dihedral_types[tag].r1 = float(line[7])
s.dihedral_types[tag].r3 = float(line[8])
if dihedral_style:
verbose_print('read "%s" dihedral '
'(end-bond-torsion) parameters for '
'%s DihedralTypes'
% (dihedral_style, ndihedral_types))
elif len(line) > 0 and line[0] == 'AngleTorsion':
next(f)
for i in range(ndihedral_types):
line = next(f).strip().split()
tag = int(line[0])
s.dihedral_types[tag].d1 = float(line[1])
s.dihedral_types[tag].d2 = float(line[2])
s.dihedral_types[tag].d3 = float(line[3])
s.dihedral_types[tag].e1 = float(line[4])
s.dihedral_types[tag].e2 = float(line[5])
s.dihedral_types[tag].e3 = float(line[6])
s.dihedral_types[tag].theta1 = float(line[7])
s.dihedral_types[tag].theta2 = float(line[8])
if dihedral_style:
verbose_print('read "%s" dihedral '
'(angle-torsion) parameters for '
'%s DihedralTypes'
% (dihedral_style, ndihedral_types))
elif len(line) > 0 and line[0] == 'AngleAngleTorsion':
next(f)
for i in range(ndihedral_types):
line = next(f).strip().split()
tag = int(line[0])
s.dihedral_types[tag].m = float(line[1])
s.dihedral_types[tag].theta1 = float(line[2])
s.dihedral_types[tag].theta2 = float(line[3])
if dihedral_style:
verbose_print('read "%s" dihedral '
'(angle-angle-torsion) parameters for '
'%s DihedralTypes'
% (dihedral_style, ndihedral_types))
elif len(line) > 0 and line[0] == 'BondBond13':
next(f)
for i in range(ndihedral_types):
line = next(f).strip().split()
tag = int(line[0])
s.dihedral_types[tag].n = float(line[1])
s.dihedral_types[tag].r1 = float(line[2])
s.dihedral_types[tag].r3 = float(line[3])
if dihedral_style:
verbose_print('read "%s" dihedral '
'(bond-bond-1-3) parameters for '
'%s DihedralTypes'
% (dihedral_style, ndihedral_types))
elif len(line) > 0 and line[0] == 'Improper':
next(f)
for i in range(nimproper_types):
line = next(f)
if not improper_style:
warning_print('unknown improper_style - inferring from number of parameters (3=cvff)')
improper_style = ImproperType.guess_style(
len(line.split('#')[0].split()[1:])
)
if improper_style and improper_style.startswith('harmonic') and 'class2' in [bond_style, angle_style, dihedral_style]:
improper_style = 'class2'
if improper_style:
s.improper_types.add(ImproperType.parse_lammps(line, improper_style))
verbose_print('read "%s" improper parameters '
'for %s ImproperTypes'
% (improper_style, s.improper_types.count))
elif len(line) > 0 and line[0] == 'AngleAngle':
improper_style = 'class2'
next(f)
for i in range(nimproper_types):
line = next(f).strip().split()
tag = int(line[0])
s.improper_types[tag].m1 = float(line[1])
s.improper_types[tag].m2 = float(line[2])
s.improper_types[tag].m3 = float(line[3])
s.improper_types[tag].theta1 = float(line[4])
s.improper_types[tag].theta2 = float(line[5])
s.improper_types[tag].theta3 = float(line[6])
if improper_style:
verbose_print('read "%s" improper '
'(angle-angle) parameters for '
'%s ImproperTypes'
% (improper_style, nimproper_types))
elif len(line) > 0 and line[0] == 'Atoms':
next(f)
for i in range(nparticles):
line = next(f).strip().split()
tag = int(line[0])
if not atom_style:
if len(line) == 7:
atom_style = 'full'
elif len(line) == 6:
try:
int(line[2])
atom_style = 'molecular'
except ValueError:
atom_style = 'charge'
else:
warning_print('cannot determine atom_style; assuming atom_style "full"')
atom_style = 'full'
if atom_style == 'full':
d_ = {'tag': tag, 'molecule': int(line[1]), 'type': int(line[2]),
'charge': float(line[3]), 'x': float(line[4]),
'y': float(line[5]), 'z': float(line[6])}
elif atom_style == 'charge':
d_ = {'tag': tag, 'molecule': 0, 'type': int(line[1]),
'charge': float(line[2]), 'x': float(line[3]),
'y': float(line[4]), 'z': float(line[5])}
elif atom_style == 'molecular':
d_ = {'tag': tag, 'molecule': int(line[1]), 'type': int(line[2]),
'charge': 0., 'x': float(line[3]), 'y': float(line[4]), 'z': float(line[5])}
if s.particles[tag]:
p = s.particles[tag]
p.set(**d_)
else:
p = Particle(vx=0., vy=0., vz=0., **d_)
s.particles.add(p)
p.frac_x = p.x / s.dim.dx
p.frac_y = p.y / s.dim.dy
p.frac_z = p.z / s.dim.dz
if not quiet:
verbose_print('read %s particles' % nparticles)
elif len(line) > 0 and line[0] == 'Velocities':
next(f)
for i in range(nparticles):
line = next(f).strip().split()
tag = int(line[0])
if s.particles[tag]:
p = s.particles[tag]
d_ = {'vx': float(line[1]), 'vy': float(line[2]),
'vz': float(line[3])}
p.set(**d_)
else:
p = Particle(tag=tag, vx=float(line[1]), vy=float(line[2]),
vz=float(line[3]))
s.particles.add(p)
if not quiet:
verbose_print('read velocities for %s particles' % nparticles)
elif len(line) > 0 and line[0] == 'Bonds':
next(f)
for i in range(nbonds):
line = next(f).strip().split()
tag = int(line[0])
b = Bond(tag=tag, type=int(line[1]),
a=int(line[2]), b=int(line[3]))
s.bonds.add(b)
if not quiet:
verbose_print('read %s bonds' % nbonds)
elif len(line) > 0 and line[0] == 'Angles':
next(f)
for i in range(nangles):
line = next(f).strip().split()
tag = int(line[0])
a = Angle(tag=tag, type=int(line[1]),
a=int(line[2]), b=int(line[3]), c=int(line[4]))
s.angles.add(a)
if not quiet:
verbose_print('read %s angles' % nangles)
elif len(line) > 0 and line[0] == 'Dihedrals':
next(f)
for i in range(ndihedrals):
line = next(f).strip().split()
tag = int(line[0])
d = Dihedral(tag=tag, type=int(line[1]),
a=int(line[2]), b=int(line[3]),
c=int(line[4]), d=int(line[5]))
s.dihedrals.add(d)
if not quiet:
verbose_print('read %s dihedrals' % ndihedrals)
elif len(line) > 0 and line[0] == 'Impropers':
next(f)
for i in range(nimpropers):
line = next(f).strip().split()
tag = int(line[0])
if (s.ff_class == '2' or improper_style == 'class2' or (s.improper_types[1] and s.improper_types[1].m1
is not None)):
s.impropers.add(Improper(tag=tag, type=int(line[1]),
a=int(line[3]), b=int(line[2]),
c=int(line[4]), d=int(line[5])))
else:
s.impropers.add(Improper(tag=tag, type=int(line[1]),
a=int(line[2]), b=int(line[3]),
c=int(line[4]), d=int(line[5])))
if not quiet:
verbose_print('read %s impropers' % nimpropers)
f.close()
s.atom_style = atom_style
s.pair_style = pair_style
s.bond_style = bond_style
s.angle_style = angle_style
s.dihedral_style = dihedral_style
if improper_style:
s.improper_style = improper_style
elif not improper_style and s.impropers.count > 0:
if not quiet:
verbose_print('improper style not set explicitly '
'but impropers exist in system, guessing style '
'based on other forcefield styles...')
if ((s.bond_style and s.bond_style.startswith('harm')) or
(s.angle_style and s.angle_style.startswith('harm')) or
(s.dihedral_style and s.dihedral_style.startswith('harm'))):
improper_style = 'harmonic'
s.improper_style = 'harmonic'
elif ((s.bond_style and s.bond_style.startswith('class2')) or
(s.angle_style and s.angle_style.startswith('class2')) or
(s.dihedral_style and s.dihedral_style.startswith('class2'))):
improper_style = 'class2'
s.improper_style = 'class2'
if s.improper_style:
if not quiet:
verbose_print('setting improper style to "%s", '
'if this is incorrect try explicitly setting '
'improper_style as argument in '
'pysimm.system.read_lammps' % improper_style)
else:
if not quiet:
error_print('still cannot determine improper style...')
if pair_style and pair_style.startswith('lj'):
if ((s.bond_style and s.bond_style.startswith('class2')) or
(s.angle_style and s.angle_style.startswith('class2')) or
(s.dihedral_style and s.dihedral_style.startswith('class2'))):
s.pair_style = 'class2'
styles = [s.pair_style, s.bond_style, s.angle_style, s.dihedral_style,
s.improper_style]
if 'class2' in styles:
s.ff_class = '2'
else:
s.ff_class = '1'
if 'harmonic' in styles and 'class2' in styles:
if not quiet:
warning_print('it appears there is a mixture of class1 and class2 '
'forcefield styles in your system...this is usually '
'unadvised')
if set_types:
s.objectify()
for pt in s.particle_types:
if pt.name and pt.name.find('@') >= 0:
if pt.name.split('@')[-1][0].upper() in ['H', 'C', 'N', 'O', 'F', 'S']:
pt.elem = pt.name.split('@')[-1][0].upper()
if pt.name and pt.name[0] == 'L' and pt.name[1] != 'i':
pt.elem = pt.name[1].upper()
elif pt.name:
pt.elem = pt.name[0:2]
if pt.name[1:3] == 'Na':
pt.elem = 'Na'
if pt.name[0].upper() in ['H', 'C', 'N', 'O', 'F', 'S']:
pt.elem = pt.name[0].upper()
for p in s.particles:
if isinstance(p.type, ParticleType) and p.type.name and p.type.name.find('@') >= 0:
if p.type.name[0].upper() == 'H':
p.linker = 'head'
elif p.type.name[0].upper() == 'T':
p.linker = 'tail'
elif p.type.name[0].upper() == 'L':
p.linker = True
if s.objectified:
s.set_cog()
s.set_mass()
s.set_volume()
s.set_density()
s.set_velocity()
return s
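# Illustrative usage (not executed): styles are normally inferred from header
# comments or parameter counts, but can be forced explicitly, e.g.:
#   s = read_lammps('system.lmps', atom_style='full', pair_style='lj')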
def read_pubchem_smiles(smiles, quiet=False, type_with=None):
"""pysimm.system.read_pubchem_smiles
Interface with pubchem restful API to create molecular system from SMILES format
Args:
smiles: smiles formatted string of molecule
quiet: if True, suppress status printing default=False
type_with: :class:`~pysimm.forcefield.Forcefield` object to type with default=None
Returns:
:class:`~pysimm.system.System` object
"""
req = ('https://pubchem.ncbi.nlm.nih.gov/'
'rest/pug/compound/smiles/%s/SDF/?record_type=3d' % smiles)
if not quiet:
print('making request to pubchem RESTful API:')
print(req)
try:
resp = urlopen(req)
return read_mol(resp.read().decode('utf-8'), type_with=type_with)
except (HTTPError, URLError):
print('Could not retrieve pubchem entry for smiles %s' % smiles)
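# Illustrative usage (not executed; requires network access to the PubChem API):
#   ethanol = read_pubchem_smiles('CCO')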
def read_pubchem_cid(cid, type_with=None):
"""pysimm.system.read_pubchem_smiles
Interface with pubchem restful API to create molecular system from SMILES format
Args:
smiles: smiles formatted string of molecule
type_with: :class:`~pysimm.forcefield.Forcefield` object to type with default=None
Returns:
:class:`~pysimm.system.System` object
"""
req = ('https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/cid/{}/SDF/?record_type=3d'.format(cid))
print('making request to pubchem RESTful API:')
print(req)
try:
resp = urlopen(req)
return read_mol(resp.read().decode('utf-8'), type_with=type_with)
except (HTTPError, URLError):
print('Could not retrieve pubchem entry for cid %s' % cid)
def read_cml(cml_file, **kwargs):
"""pysimm.system.read_cml
Interprets cml file and creates :class:`~pysimm.system.System` object
Args:
cml_file: cml file name
linkers (optional): if True, use spinMultiplicity to determine linker default=None
Returns:
:class:`~pysimm.system.System` object
"""
linkers = kwargs.get('linkers')
if os.path.isfile(cml_file):
debug_print('reading file')
iter_parse = Et.iterparse(cml_file)
elif isinstance(cml_file, str):
debug_print('reading string')
iter_parse = Et.iterparse(StringIO(cml_file))
else:
raise PysimmError('pysimm.system.read_cml requires a file as argument')
for _, el in iter_parse:
if '}' in el.tag:
el.tag = el.tag.split('}', 1)[1]
root = iter_parse.root
s = System(name='read using pysimm.system.read_cml')
particles = root.find('atomArray')
bonds = root.find('bondArray')
for p_ in particles:
tag = int(p_.attrib['id'].replace('a', '').replace(',', ''))
elem = p_.attrib['elementType']
x = float(p_.attrib['x3'])
y = float(p_.attrib['y3'])
z = float(p_.attrib['z3'])
if linkers:
linker = True if p_.attrib.get('spinMultiplicity') else None
else:
linker = None
p = Particle(tag=tag, elem=elem, x=x, y=y, z=z, charge=0, molecule=1, linker=linker)
s.particles.add(p)
for b_ in bonds:
a, b = b_.attrib['atomRefs2'].split()
a = int(a.replace('a', '').replace(',', ''))
b = int(b.replace('a', '').replace(',', ''))
order = b_.attrib['order']
if order == 'A':
order = 4
else:
order = int(order)
b = Bond(a=a, b=b, order=order)
s.bonds.add(b)
s.objectify()
return s
def read_mol(mol_file, type_with=None, version='V2000'):
"""pysimm.system.read_mol
Interprets mol file and creates :class:`~pysimm.system.System` object
Args:
mol_file: mol file name
type_with (optional): :class:`~pysimm.forcefield.Forcefield` object to type the system with
version: version of mol file to expect default='V2000'
Returns:
:class:`~pysimm.system.System` object
"""
if os.path.isfile(mol_file):
debug_print('reading file')
f = open(mol_file)
elif isinstance(mol_file, str):
debug_print('reading string')
f = StringIO(mol_file)
else:
raise PysimmError('pysimm.system.read_mol requires either '
'file or string as argument')
s = System(name='read using pysimm.system.read_mol')
for n in range(3):
next(f)
line = next(f)
nparticles = int(line.split()[0])
nbonds = int(line.split()[1])
if len(line.split()) >= 3:
version = line.split()[-1]
if version == 'V2000':
for n in range(nparticles):
line = next(f)
x, y, z, elem, mass_diff, charge = line.split()[:6]  # V2000 atom block; the mass-difference column is unused here
p = Particle(x=float(x), y=float(y), z=float(z), molecule=1,
elem=elem, charge=float(charge))
s.particles.add(p)
if p.elem[0] == 'L':
p.linker = True
p.elem = p.elem[1:]
elif p.charge == 5:
p.linker = True
p.charge = 0
for n in range(nbonds):
line = next(f)
a, b, order = map(int, line.split()[:3])
new_bond = s.bonds.add(Bond(a=a, b=b, order=order))
elif version == 'V3000':
next(f)
line = next(f)
nparticles = int(line.split()[3])
nbonds = int(line.split()[4])
next(f)
for n in range(nparticles):
line = next(f)
id_, elem, x, y, z, charge = line.split()[2:8]
p = Particle(x=float(x), y=float(y), z=float(z), molecule=1,
elem=elem, charge=float(charge))
s.particles.add(p)
next(f)
next(f)
for n in range(nbonds):
line = next(f)
id_, order, a, b = map(int, line.split()[2:6])
s.bonds.add(Bond(a=a, b=b, order=order))
s.objectify()
if type_with:
try:
s.apply_forcefield(type_with)
except Exception:
print('forcefield typing with forcefield %s unsuccessful'
% type_with.name)
return s
def read_mol2(mol2_file, type_with=None):
"""pysimm.system.read_mol2
Interprets .mol2 file and creates :class:`~pysimm.system.System` object
Args:
mol2_file: a full name (including path) of a Tripos Mol2 text file
type_with (optional): :class:`~pysimm.forcefield.Forcefield` object to use for an attempt to assign
forcefield parameters to the system
Returns:
:class:`~pysimm.system.System` object
"""
if os.path.isfile(mol2_file):
debug_print('reading file')
f = open(mol2_file)
else:
raise PysimmError('pysimm.system.read_mol2 requires a path to .mol2 file')
s = System(name='read using pysimm.system.read_mol2')
ref_tag = '@<TRIPOS>'
stream = f.read()
tags = list(map(lambda x: x.lower(), re.findall('(?<=' + ref_tag + ').*', stream)))
data = re.split(ref_tag, stream)
# reading molecule related info
segm = data[tags.index('molecule') + 1]
lines = segm.split('\n')
tmp = lines[2].split()
nparticles = int(tmp[0])
if len(tmp) > 1:
nbonds = int(tmp[1])
# reading atom related info
segm = data[tags.index('atom') + 1]
lines = segm.split('\n')
for l in lines:
tmp = l.split()
if len(tmp) > 8:
s.particles.add(Particle(tag=int(tmp[0]), elem=tmp[1][0], charge=float(tmp[8]), molecule=1,
x=float(tmp[2]), y=float(tmp[3]), z=float(tmp[4])))
segm = data[tags.index('bond') + 1]
lines = segm.split('\n')
for l in lines:
tmp = l.split()
if len(tmp) > 3:
val = re.findall('[123]', tmp[3])
if len(val) > 0:
ordnung = int(val[0])
elif tmp[3].lower() in ['am', 'du']:
ordnung = 1
elif tmp[3].lower() == 'ar':
ordnung = 'A'
else:
ordnung = None
s.bonds.add(Bond(tag=int(tmp[0]), a=int(tmp[1]), b=int(tmp[2]), order=ordnung))
s.objectify()
if type_with:
try:
s.apply_forcefield(type_with)
except Exception:
print('forcefield typing with forcefield {} unsuccessful'.format(type_with.name))
return s
def read_prepc(prec_file):
"""pysimm.system.read_prepc
Interprets prepc file and creates :class:`~pysimm.system.System` object
Args:
prec_file: prepc file name
Returns:
:class:`~pysimm.system.System` object
"""
if os.path.isfile(prec_file):
debug_print('reading file')
f = open(prec_file)
elif isinstance(prec_file, str):
debug_print('reading string')
f = StringIO(prec_file)
else:
raise PysimmError('pysimm.system.read_prepc requires either '
'file or string as argument')
s = System(name='read using pysimm.system.read_prepc')
for line in f:
for _ in range(10):
line = next(f)
while line.split():
tag = int(line.split()[0])
name = line.split()[1]
type_name = line.split()[2]
x = float(line.split()[4])
y = float(line.split()[5])
z = float(line.split()[6])
charge = float(line.split()[7])
elem = type_name[0]
p = Particle(tag=tag, name=name, type_name=type_name, x=x, y=y, z=z, elem=elem, charge=charge)
if not s.particles[tag]:
s.particles.add(p)
line = next(f)
break
f.close()
return s
def read_ac(ac_file):
"""pysimm.system.read_ac
Interprets ac file and creates :class:`~pysimm.system.System` object
Args:
ac_file: ac file name
Returns:
:class:`~pysimm.system.System` object
"""
if os.path.isfile(ac_file):
debug_print('reading file')
f = open(ac_file)
elif isinstance(ac_file, str):
debug_print('reading string')
f = StringIO(ac_file)
else:
raise PysimmError('pysimm.system.read_ac requires either '
'file or string as argument')
s = System(name='read using pysimm.system.read_ac')
for line in f:
if line.startswith('ATOM'):
tag = int(line.split()[1])
name = line.split()[2]
resname = line.split()[3]
resid = line.split()[4]
x = float(line.split()[5])
y = float(line.split()[6])
z = float(line.split()[7])
charge = float(line.split()[8])
type_name = line.split()[9]
elem = type_name[0]
p = Particle(tag=tag, name=name, type_name=type_name, resname=resname, resid=resid, x=x, y=y, z=z, elem=elem, charge=charge)
if not s.particles[tag]:
s.particles.add(p)
if line.startswith('BOND'):
tag = int(line.split()[1])
pa = s.particles[int(line.split()[2])]
pb = s.particles[int(line.split()[3])]
b = Bond(tag=tag, a=pa, b=pb)
if not s.bonds[tag]:
s.bonds.add(b)
f.close()
return s
def read_pdb(pdb_file, str_file=None, **kwargs):
"""pysimm.system.read_pdb
Interprets pdb file and creates :class:`~pysimm.system.System` object
Args:
pdb_file: pdb file name
Keyword Args:
str_file: (str) optional CHARMM topology (stream) file which can be used as source of charges and description
of bonded topology
use_ptypes: (bool) flag to either use the forcefield atom type names from the .str file or not
Returns:
:class:`~pysimm.system.System` object
"""
if os.path.isfile(pdb_file):
debug_print('reading file')
f = open(pdb_file)
elif isinstance(pdb_file, str):
debug_print('reading string')
f = StringIO(pdb_file)
else:
raise PysimmError('pysimm.system.read_pdb requires either '
'file or string as argument')
s = System(name='read using pysimm.system.read_pdb')
read_types = kwargs.get('use_ptypes', False)
for line in f:
if line.startswith('ATOM') or line.startswith('HETATM'):
tag = int(line[6:11].strip())
name = line[12:16].strip()
resname = line[17:20].strip()
chainid = line[21]
resid = line[22:26].strip()
x = float(line[30:38].strip())
y = float(line[38:46].strip())
z = float(line[46:54].strip())
elem = line[76:78].strip()
if len(elem) < 1:
if len(name) > 0:
elem = re.split(r'\d+?', name)[0]
p = Particle(tag=tag, name=name, resname=resname, chainid=chainid,
resid=resid, x=x, y=y, z=z, elem=elem, molecule=1)
if not s.particles[tag]:
s.particles.add(p)
f.seek(0)
bnd_id = 1
duplets = set()
for line in f:
if line.startswith('CONECT'):
curr_tag = int(line[6:11].strip())
other_tags = list(map(int, line[11:].split()))
for ot in other_tags:
rec = tuple(sorted([curr_tag, ot]))
if rec not in duplets:
s.bonds.add(Bond(tag=bnd_id, a=curr_tag, b=ot))
bnd_id += 1
duplets.add(rec)
f.close()
if str_file:
if os.path.isfile(str_file):
debug_print('read_pdb: reading file \'{}\''.format(str_file))
f = open(str_file)
stream = f.read()
for p in s.particles:
partcl_line = re.findall('(?<=ATOM {}).*'.format(p.name), stream)
if len(partcl_line) > 0:
tmp = partcl_line[0].split()
p.charge = float(tmp[1])
if read_types:
p.type_name = tmp[0]
pt_names = {p.name: p.tag for p in s.particles}
bond_records = re.findall('(?<=BOND ).*', stream)
if bnd_id == 1:
for bnd_id, bndr in enumerate(bond_records, start=1):
tmp = bndr.split()
s.bonds.add(Bond(tag=bnd_id, a=pt_names[tmp[0]], b=pt_names[tmp[1]]))
f.close()
else:
debug_print('read_pdb: got parameters file argument, but file does not exist')
s.objectify()
s.set_box(padding=0.5)
return s
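# Illustrative usage (not executed): supplement a PDB with charges and type names
# from a CHARMM stream file; the file names are placeholders:
#   s = read_pdb('mol.pdb', str_file='mol.str', use_ptypes=True)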
def compare(s1, s2):
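"""pysimm.system.compare
Print attribute dictionaries for particle, bond, angle, dihedral, and improper
types whose names match between two :class:`~pysimm.system.System` objects.
Args:
s1: first :class:`~pysimm.system.System`
s2: second :class:`~pysimm.system.System`
Returns:
None
"""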
print('Particle Types:\n')
for pt in s1.particle_types:
s2_pt = s2.particle_types.get(pt.name)
if s2_pt and len(s2_pt) == 1:
s2_pt = s2_pt[0]
print('%s\n%s\n' % (vars(pt), vars(s2_pt)))
print('\n\nBond Types:\n')
for bt in s1.bond_types:
s2_bt = s2.bond_types.get(bt.name)
if s2_bt and len(s2_bt) == 1:
s2_bt = s2_bt[0]
print('%s\n%s\n' % (vars(bt), vars(s2_bt)))
print('\n\nAngle Types:\n')
for at in s1.angle_types:
s2_at = s2.angle_types.get(at.name)
if s2_at and len(s2_at) == 1:
s2_at = s2_at[0]
print('%s\n%s\n' % (vars(at), vars(s2_at)))
print('\n\nDihedral Types:\n')
for dt in s1.dihedral_types:
s2_dt = s2.dihedral_types.get(dt.name)
if s2_dt and len(s2_dt) == 1:
s2_dt = s2_dt[0]
print('%s\n%s\n' % (vars(dt), vars(s2_dt)))
print('\n\nImproper Types:\n')
for it in s1.improper_types:
s2_it = s2.improper_types.get(it.name)
if s2_it and len(s2_it) == 1:
s2_it = s2_it[0]
print('%s\n%s\n' % (vars(it), vars(s2_it)))
def get_types(*arg, **kwargs):
"""pysimm.system.get_types
Get unique type names from list of systems
Args:
*arg: :class:`~pysimm.system.System` objects from which to collect types
write (optional): if True, write the collected type names to types.txt
Returns:
(ptypes, btypes, atypes, dtypes, itypes)
*** for use with update_types ***
"""
write = kwargs.get('write')
ptypes = ItemContainer()
btypes = ItemContainer()
atypes = ItemContainer()
dtypes = ItemContainer()
itypes = ItemContainer()
for s in arg:
for t in s.particle_types:
if t.name and t.name not in [x.name for x in ptypes]:
ptypes.add(t.copy())
for t in s.bond_types:
if t.name and t.name not in [x.name for x in btypes]:
btypes.add(t.copy())
for t in s.angle_types:
if t.name and t.name not in [x.name for x in atypes]:
atypes.add(t.copy())
for t in s.dihedral_types:
if t.name and t.name not in [x.name for x in dtypes]:
dtypes.add(t.copy())
for t in s.improper_types:
if t.name and t.name not in [x.name for x in itypes]:
itypes.add(t.copy())
if write:
t_file = open('types.txt', 'w+')
if ptypes.count > 0:
t_file.write('atom types\n')
for t in ptypes:
t_file.write('%s %s\n' % (t.tag, t.name))
if btypes.count > 0:
t_file.write('\nbond types\n')
for t in btypes:
t_file.write('%s %s\n' % (t.tag, t.name))
if atypes.count > 0:
t_file.write('\nangle types\n')
for t in atypes:
t_file.write('%s %s\n' % (t.tag, t.name))
if dtypes.count > 0:
t_file.write('\ndihedral types\n')
for t in dtypes:
t_file.write('%s %s\n' % (t.tag, t.name))
if itypes.count > 0:
t_file.write('\nimproper types\n')
for t in itypes:
t_file.write('%s %s\n' % (t.tag, t.name))
t_file.close()
return ptypes, btypes, atypes, dtypes, itypes
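# Usage sketch (hypothetical systems s1 and s2 carrying named force field
# types): collect the unique types across both systems and dump them to
# 'types.txt' for later use with update_types.
#
#     ptypes, btypes, atypes, dtypes, itypes = get_types(s1, s2, write=True)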
def distance_to_origin(p):
"""pysimm.system.distance_to_origin
Calculates distance of particle to origin.
Args:
p: Particle object with x, y, and z attributes
Returns:
Distance of particle to origin
"""
return sqrt(pow(p.x, 2) + pow(p.y, 2) + pow(p.z, 2))
def replicate(ref, nrep, s_=None, density=0.3, rand=True, print_insertions=True):
"""pysimm.system.replicate
    Replicates list of :class:`~pysimm.system.System` objects into a new (or existing) :class:`~pysimm.system.System`.
    Insertion positions and orientations can be randomized.
Args:
ref: reference :class:`~pysimm.system.System`(s) (this can be a list)
nrep: number of insertions to perform (can be list but must match length of ref)
s_: :class:`~pysimm.system.System` into which insertions will be performed default=None
density: density of new :class:`~pysimm.system.System` default=0.3 (set to None to not change box)
rand: if True, random insertion is performed
print_insertions: if True, update screen with number of insertions
"""
if not isinstance(ref, list):
ref = [ref]
if not isinstance(nrep, list):
nrep = [nrep]
assert len(ref) == len(nrep)
if s_ is None:
s_ = System()
s_.ff_class = ref[0].ff_class
s_.forcefield = ref[0].forcefield
s_.pair_style = ref[0].pair_style
s_.bond_style = ref[0].bond_style
s_.angle_style = ref[0].angle_style
s_.dihedral_style = ref[0].dihedral_style
s_.improper_style = ref[0].improper_style
for r in ref:
r.set_mass()
r.center('particles', [0, 0, 0], True)
r.r = 0
for p in r.particles:
r.r = max(r.r, distance_to_origin(p))
s_.molecule_types.add(r)
mass = 0
for i, r in enumerate(ref):
mass += r.mass * nrep[i]
mass /= 6.02e23
if density:
volume = float(mass) / density
boxl = pow(volume, 1 / 3.) * 1e8
s_.dim.xlo = -1. * boxl / 2.
s_.dim.xhi = boxl / 2.
s_.dim.ylo = -1. * boxl / 2.
s_.dim.yhi = boxl / 2.
s_.dim.zlo = -1. * boxl / 2.
s_.dim.zhi = boxl / 2.
num = 0
for j, r in enumerate(ref):
for n in range(nrep[j]):
if rand:
rotate_x = random() * 2 * pi
rotate_y = random() * 2 * pi
rotate_z = random() * 2 * pi
dx = s_.dim.xhi - s_.dim.xlo
dx = (-dx / 2. + r.r) + random() * (dx - 2 * r.r)
dy = s_.dim.yhi - s_.dim.ylo
dy = (-dy / 2. + r.r) + random() * (dy - 2 * r.r)
dz = s_.dim.zhi - s_.dim.zlo
dz = (-dz / 2. + r.r) + random() * (dz - 2 * r.r)
r_ = r.copy(rotate_x=rotate_x, rotate_y=rotate_y,
rotate_z=rotate_z, dx=dx, dy=dy, dz=dz)
else:
r_ = r.copy()
s_.add(r_, change_dim=False, update_properties=False)
num += 1
if print_insertions:
verbose_print('Molecule %s inserted' % num)
s_.set_density()
s_.set_cog()
s_.set_velocity()
return s_
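# Usage sketch (hypothetical monomer system `mono` built or read elsewhere):
# insert 10 randomly rotated and translated copies into a fresh box at the
# default density of 0.3 g/cm^3; writing the result assumes the System class
# provides write_lammps.
#
#     packed = replicate(mono, 10)
#     packed.write_lammps('packed.lmps')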
|
polysimtools/pysimm
|
pysimm/system.py
|
Python
|
mit
| 190,918
|
[
"CHARMM",
"LAMMPS",
"VMD"
] |
839611d0cf6b4b86b048d4fa56b751c515480aa2f518cb745c7d2c5f1d53a302
|
"""
Galaxy root package.
"""
|
jmchilton/galaxy-central
|
galaxy/__init__.py
|
Python
|
mit
| 28
|
[
"Galaxy"
] |
6fa36f7ebf34ce886c90e3793f47deeba2d7e4543f5c0a66e782bae39286435d
|
#! /usr/bin/env python
import warnings
import numpy as np
try:
from tvtk.api import tvtk
except ImportError:
warnings.warn("vtk is not installed")
class VtkGridMixIn(object):
pass
else:
class VtkGridMixIn(object):
_EDGE_COUNT_TO_TYPE = {
1: tvtk.Vertex().cell_type,
2: tvtk.Line().cell_type,
3: tvtk.Triangle().cell_type,
4: tvtk.Quad().cell_type,
}
def to_vtk(self):
points = self.vtk_points()
cell_types = self.vtk_cell_types()
cell_array = self.vtk_cell_array()
offsets = self.vtk_offsets()
vtk_grid = tvtk.UnstructuredGrid(points=points)
vtk_grid.set_cells(cell_types, offsets, cell_array)
return vtk_grid
def vtk_points(self):
pad = np.zeros((3 - self._coords.shape[0], self._coords.shape[1]))
return np.vstack([self._coords, pad]).T
def vtk_cell_array(self):
cell_array = tvtk.CellArray()
cell_array.set_cells(self.get_cell_count(), self.vtk_connectivity())
return cell_array
def vtk_cell_types(self):
cell_types = np.empty(self.get_cell_count(), dtype=int)
for (id_, n_nodes) in enumerate(self.nodes_per_cell()):
try:
cell_types[id_] = self._EDGE_COUNT_TO_TYPE[n_nodes]
except KeyError:
cell_types[id_] = tvtk.Polygon().cell_type
return cell_types
def vtk_connectivity(self):
cells = np.empty(self.get_vertex_count() + self.get_cell_count(), dtype=int)
cell_nodes = self.get_connectivity()
offset = 0
for n_nodes in self.nodes_per_cell():
cells[offset] = n_nodes
offset += n_nodes + 1
offset = 1
for cell in self.vtk_offsets():
n_nodes = cells[offset - 1]
cells[offset : offset + n_nodes] = cell_nodes[cell : cell + n_nodes]
offset += n_nodes + 1
return cells
def vtk_offsets(self):
offsets = np.empty(self.get_cell_count(), dtype=int)
(offsets[0], offsets[1:]) = (0, self._offset[:-1])
return offsets
def vtk_write(self, file_name):
writer = tvtk.XMLUnstructuredGridWriter()
writer.set_input(self.to_vtk())
writer.file_name = file_name
writer.write()
|
csdms/coupling
|
deprecated/grids/vtk_mixin.py
|
Python
|
mit
| 2,523
|
[
"VTK"
] |
57f54f3a6d6d33ff8eed5caf2c04788cd979024f91b7baae2687d66d27a0550d
|
########################################################################
# $Id$
########################################################################
"""
Matcher class. It matches Agent Site capabilities to job requirements.
It also provides an XMLRPC interface to the Matcher
"""
__RCSID__ = "$Id$"
import time
from types import StringType, DictType, StringTypes
import threading
from DIRAC.ConfigurationSystem.Client.Helpers import Registry, Operations
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB
from DIRAC.WorkloadManagementSystem.DB.PilotAgentsDB import PilotAgentsDB
from DIRAC import gMonitor
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.Core.Security import Properties
from DIRAC.Core.Utilities.DictCache import DictCache
from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus
DEBUG = 0
gMutex = threading.Semaphore()
gTaskQueues = {}
gJobDB = False
gJobLoggingDB = False
gTaskQueueDB = False
gPilotAgentsDB = False
def initializeMatcherHandler( serviceInfo ):
""" Matcher Service initialization
"""
global gJobDB
global gJobLoggingDB
global gTaskQueueDB
global gPilotAgentsDB
# Create JobDB object and initialize its tables.
gJobDB = JobDB()
res = gJobDB._checkTable()
if not res[ 'OK' ]:
return res
# Create JobLoggingDB object and initialize its tables.
gJobLoggingDB = JobLoggingDB()
res = gJobLoggingDB._checkTable()
if not res[ 'OK' ]:
return res
gTaskQueueDB = TaskQueueDB()
# Create PilotAgentsDB object and initialize its tables.
gPilotAgentsDB = PilotAgentsDB()
res = gPilotAgentsDB._checkTable()
if not res[ 'OK' ]:
return res
gMonitor.registerActivity( 'matchTime', "Job matching time",
'Matching', "secs" , gMonitor.OP_MEAN, 300 )
gMonitor.registerActivity( 'matchesDone', "Job Match Request",
'Matching', "matches" , gMonitor.OP_RATE, 300 )
gMonitor.registerActivity( 'matchesOK', "Matched jobs",
'Matching', "matches" , gMonitor.OP_RATE, 300 )
gMonitor.registerActivity( 'numTQs', "Number of Task Queues",
'Matching', "tqsk queues" , gMonitor.OP_MEAN, 300 )
gTaskQueueDB.recalculateTQSharesForAll()
gThreadScheduler.addPeriodicTask( 120, gTaskQueueDB.recalculateTQSharesForAll )
gThreadScheduler.addPeriodicTask( 60, sendNumTaskQueues )
sendNumTaskQueues()
return S_OK()
def sendNumTaskQueues():
result = gTaskQueueDB.getNumTaskQueues()
if result[ 'OK' ]:
gMonitor.addMark( 'numTQs', result[ 'Value' ] )
else:
gLogger.error( "Cannot get the number of task queues", result[ 'Message' ] )
class Limiter:
__csDictCache = DictCache()
__condCache = DictCache()
__delayMem = {}
def __init__( self, opsHelper ):
""" Constructor
"""
self.__runningLimitSection = "JobScheduling/RunningLimit"
self.__matchingDelaySection = "JobScheduling/MatchingDelay"
self.__opsHelper = opsHelper
def checkJobLimit( self ):
return self.__opsHelper.getValue( "JobScheduling/CheckJobLimits", True )
def checkMatchingDelay( self ):
return self.__opsHelper.getValue( "JobScheduling/CheckMatchingDelay", True )
def getNegativeCond( self ):
""" Get negative condition for ALL sites
"""
orCond = Limiter.__condCache.get( "GLOBAL" )
if orCond:
return orCond
negCond = {}
#Run Limit
result = self.__opsHelper.getSections( self.__runningLimitSection )
sites = []
if result[ 'OK' ]:
sites = result[ 'Value' ]
for siteName in sites:
result = self.__getRunningCondition( siteName )
if not result[ 'OK' ]:
continue
data = result[ 'Value' ]
if data:
negCond[ siteName ] = data
#Delay limit
result = self.__opsHelper.getSections( self.__matchingDelaySection )
sites = []
if result[ 'OK' ]:
sites = result[ 'Value' ]
for siteName in sites:
result = self.__getDelayCondition( siteName )
if not result[ 'OK' ]:
continue
data = result[ 'Value' ]
if not data:
continue
if siteName in negCond:
negCond[ siteName ] = self.__mergeCond( negCond[ siteName ], data )
else:
negCond[ siteName ] = data
orCond = []
for siteName in negCond:
negCond[ siteName ][ 'Site' ] = siteName
orCond.append( negCond[ siteName ] )
Limiter.__condCache.add( "GLOBAL", 10, orCond )
return orCond
def getNegativeCondForSite( self, siteName ):
""" Generate a negative query based on the limits set on the site
"""
# Check if Limits are imposed onto the site
negativeCond = {}
if self.checkJobLimit():
result = self.__getRunningCondition( siteName )
if result['OK']:
negativeCond = result['Value']
gLogger.verbose( 'Negative conditions for site %s after checking limits are: %s' % ( siteName, str( negativeCond ) ) )
if self.checkMatchingDelay():
result = self.__getDelayCondition( siteName )
if result['OK']:
delayCond = result['Value']
gLogger.verbose( 'Negative conditions for site %s after delay checking are: %s' % ( siteName, str( delayCond ) ) )
negativeCond = self.__mergeCond( negativeCond, delayCond )
if negativeCond:
gLogger.info( 'Negative conditions for site %s are: %s' % ( siteName, str( negativeCond ) ) )
return negativeCond
def __mergeCond( self, negCond, addCond ):
""" Merge two negative dicts
"""
#Merge both negative dicts
for attr in addCond:
if attr not in negCond:
negCond[ attr ] = []
for value in addCond[ attr ]:
if value not in negCond[ attr ]:
negCond[ attr ].append( value )
return negCond
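  # Illustration of the merge (pure dict logic, hypothetical site name):
  #   __mergeCond( { 'JobType' : [ 'Merge' ] },
  #                { 'JobType' : [ 'MCGen' ], 'Site' : [ 'LCG.CERN.ch' ] } )
  # returns { 'JobType' : [ 'Merge', 'MCGen' ], 'Site' : [ 'LCG.CERN.ch' ] }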
def __extractCSData( self, section ):
""" Extract limiting information from the CS in the form:
{ 'JobType' : { 'Merge' : 20, 'MCGen' : 1000 } }
"""
stuffDict = Limiter.__csDictCache.get( section )
if stuffDict:
return S_OK( stuffDict )
result = self.__opsHelper.getSections( section )
if not result['OK']:
return result
attribs = result['Value']
stuffDict = {}
for attName in attribs:
result = self.__opsHelper.getOptionsDict( "%s/%s" % ( section, attName ) )
if not result[ 'OK' ]:
return result
attLimits = result[ 'Value' ]
try:
attLimits = dict( [ ( k, int( attLimits[k] ) ) for k in attLimits ] )
except Exception, excp:
errMsg = "%s/%s has to contain numbers: %s" % ( section, attName, str( excp ) )
gLogger.error( errMsg )
return S_ERROR( errMsg )
stuffDict[ attName ] = attLimits
Limiter.__csDictCache.add( section, 300, stuffDict )
return S_OK( stuffDict )
def __getRunningCondition( self, siteName ):
""" Get extra conditions allowing site throttling
"""
siteSection = "%s/%s" % ( self.__runningLimitSection, siteName )
result = self.__extractCSData( siteSection )
if not result['OK']:
return result
limitsDict = result[ 'Value' ]
#limitsDict is something like { 'JobType' : { 'Merge' : 20, 'MCGen' : 1000 } }
if not limitsDict:
return S_OK( {} )
    # Check if the site is exceeding the given limits
negCond = {}
for attName in limitsDict:
if attName not in gJobDB.jobAttributeNames:
gLogger.error( "Attribute %s does not exist. Check the job limits" % attName )
continue
cK = "Running:%s:%s" % ( siteName, attName )
data = self.__condCache.get( cK )
if not data:
result = gJobDB.getCounters( 'Jobs', [ attName ], { 'Site' : siteName, 'Status' : [ 'Running', 'Matched', 'Stalled' ] } )
if not result[ 'OK' ]:
return result
data = result[ 'Value' ]
data = dict( [ ( k[0][ attName ], k[1] ) for k in data ] )
self.__condCache.add( cK, 10, data )
for attValue in limitsDict[ attName ]:
limit = limitsDict[ attName ][ attValue ]
running = data.get( attValue, 0 )
if running >= limit:
gLogger.verbose( 'Job Limit imposed at %s on %s/%s=%d,'
' %d jobs already deployed' % ( siteName, attName, attValue, limit, running ) )
if attName not in negCond:
negCond[ attName ] = []
negCond[ attName ].append( attValue )
#negCond is something like : {'JobType': ['Merge']}
return S_OK( negCond )
def updateDelayCounters( self, siteName, jid ):
#Get the info from the CS
siteSection = "%s/%s" % ( self.__matchingDelaySection, siteName )
result = self.__extractCSData( siteSection )
if not result['OK']:
return result
delayDict = result[ 'Value' ]
    #delayDict is something like { 'JobType' : { 'Merge' : 20, 'MCGen' : 1000 } }
if not delayDict:
return S_OK()
attNames = []
for attName in delayDict:
if attName not in gJobDB.jobAttributeNames:
gLogger.error( "Attribute %s does not exist in the JobDB. Please fix it!" % attName )
else:
attNames.append( attName )
result = gJobDB.getJobAttributes( jid, attNames )
if not result[ 'OK' ]:
gLogger.error( "While retrieving attributes coming from %s: %s" % ( siteSection, result[ 'Message' ] ) )
return result
atts = result[ 'Value' ]
#Create the DictCache if not there
if siteName not in Limiter.__delayMem:
Limiter.__delayMem[ siteName ] = DictCache()
#Update the counters
delayCounter = Limiter.__delayMem[ siteName ]
for attName in atts:
attValue = atts[ attName ]
if attValue in delayDict[ attName ]:
delayTime = delayDict[ attName ][ attValue ]
gLogger.notice( "Adding delay for %s/%s=%s of %s secs" % ( siteName, attName,
attValue, delayTime ) )
delayCounter.add( ( attName, attValue ), delayTime )
return S_OK()
def __getDelayCondition( self, siteName ):
""" Get extra conditions allowing matching delay
"""
if siteName not in Limiter.__delayMem:
return S_OK( {} )
lastRun = Limiter.__delayMem[ siteName ].getKeys()
negCond = {}
for attName, attValue in lastRun:
if attName not in negCond:
negCond[ attName ] = []
negCond[ attName ].append( attValue )
return S_OK( negCond )
#####
#
# End of Limiter
#
#####
class MatcherHandler( RequestHandler ):
__opsCache = {}
def initialize( self ):
self.__opsHelper = self.__getOpsHelper()
self.__limiter = Limiter( self.__opsHelper )
self.__siteStatus = SiteStatus()
def __getOpsHelper( self, setup = False, vo = False ):
if not setup:
setup = self.srv_getClientSetup()
if not vo:
vo = Registry.getVOForGroup( self.getRemoteCredentials()[ 'group' ] )
cKey = ( vo, setup )
if cKey not in MatcherHandler.__opsCache:
MatcherHandler.__opsCache[ cKey ] = Operations.Operations( vo = vo, setup = setup )
return MatcherHandler.__opsCache[ cKey ]
def __processResourceDescription( self, resourceDescription ):
# Check and form the resource description dictionary
resourceDict = {}
if type( resourceDescription ) in StringTypes:
classAdAgent = ClassAd( resourceDescription )
if not classAdAgent.isOK():
return S_ERROR( 'Illegal Resource JDL' )
gLogger.verbose( classAdAgent.asJDL() )
for name in gTaskQueueDB.getSingleValueTQDefFields():
if classAdAgent.lookupAttribute( name ):
if name == 'CPUTime':
resourceDict[name] = classAdAgent.getAttributeInt( name )
else:
resourceDict[name] = classAdAgent.getAttributeString( name )
for name in gTaskQueueDB.getMultiValueMatchFields():
if classAdAgent.lookupAttribute( name ):
if name == 'SubmitPool':
resourceDict[name] = classAdAgent.getListFromExpression( name )
else:
resourceDict[name] = classAdAgent.getAttributeString( name )
# Check if a JobID is requested
if classAdAgent.lookupAttribute( 'JobID' ):
resourceDict['JobID'] = classAdAgent.getAttributeInt( 'JobID' )
for k in ( 'DIRACVersion', 'ReleaseVersion', 'ReleaseProject', 'VirtualOrganization' ):
if classAdAgent.lookupAttribute( k ):
resourceDict[ k ] = classAdAgent.getAttributeString( k )
else:
for name in gTaskQueueDB.getSingleValueTQDefFields():
if resourceDescription.has_key( name ):
resourceDict[name] = resourceDescription[name]
for name in gTaskQueueDB.getMultiValueMatchFields():
if resourceDescription.has_key( name ):
resourceDict[name] = resourceDescription[name]
if resourceDescription.has_key( 'JobID' ):
resourceDict['JobID'] = resourceDescription['JobID']
for k in ( 'DIRACVersion', 'ReleaseVersion', 'ReleaseProject', 'VirtualOrganization',
'PilotReference', 'PilotInfoReportedFlag', 'PilotBenchmark', 'LHCbPlatform' ):
if k in resourceDescription:
resourceDict[ k ] = resourceDescription[ k ]
return resourceDict
def selectJob( self, resourceDescription ):
""" Main job selection function to find the highest priority job
matching the resource capacity
"""
startTime = time.time()
resourceDict = self.__processResourceDescription( resourceDescription )
credDict = self.getRemoteCredentials()
#Check credentials if not generic pilot
if Properties.GENERIC_PILOT in credDict[ 'properties' ]:
#You can only match groups in the same VO
vo = Registry.getVOForGroup( credDict[ 'group' ] )
result = Registry.getGroupsForVO( vo )
if result[ 'OK' ]:
resourceDict[ 'OwnerGroup' ] = result[ 'Value' ]
else:
#If it's a private pilot, the DN has to be the same
if Properties.PILOT in credDict[ 'properties' ]:
gLogger.notice( "Setting the resource DN to the credentials DN" )
resourceDict[ 'OwnerDN' ] = credDict[ 'DN' ]
      #If it's job sharing, the group has to be the same; just check that the
      # DN (if any) belongs to the same group
elif Properties.JOB_SHARING in credDict[ 'properties' ]:
resourceDict[ 'OwnerGroup' ] = credDict[ 'group' ]
gLogger.notice( "Setting the resource group to the credentials group" )
if 'OwnerDN' in resourceDict and resourceDict[ 'OwnerDN' ] != credDict[ 'DN' ]:
ownerDN = resourceDict[ 'OwnerDN' ]
result = Registry.getGroupsForDN( resourceDict[ 'OwnerDN' ] )
if not result[ 'OK' ] or credDict[ 'group' ] not in result[ 'Value' ]:
#DN is not in the same group! bad boy.
gLogger.notice( "You cannot request jobs from DN %s. It does not belong to your group!" % ownerDN )
resourceDict[ 'OwnerDN' ] = credDict[ 'DN' ]
#Nothing special, group and DN have to be the same
else:
resourceDict[ 'OwnerDN' ] = credDict[ 'DN' ]
resourceDict[ 'OwnerGroup' ] = credDict[ 'group' ]
# Check the pilot DIRAC version
if self.__opsHelper.getValue( "Pilot/CheckVersion", True ):
if 'ReleaseVersion' not in resourceDict:
        if 'DIRACVersion' not in resourceDict:
return S_ERROR( 'Version check requested and not provided by Pilot' )
else:
pilotVersion = resourceDict['DIRACVersion']
else:
pilotVersion = resourceDict['ReleaseVersion']
validVersions = self.__opsHelper.getValue( "Pilot/Version", [] )
if validVersions and pilotVersion not in validVersions:
        return S_ERROR( 'Pilot version %s does not match the production versions ( %s )' % \
                        ( pilotVersion, ",".join( validVersions ) ) )
#Check project if requested
validProject = self.__opsHelper.getValue( "Pilot/Project", "" )
if validProject:
if 'ReleaseProject' not in resourceDict:
return S_ERROR( "Version check requested but expected project %s not received" % validProject )
if resourceDict[ 'ReleaseProject' ] != validProject:
return S_ERROR( "Version check requested but expected project %s != received %s" % ( validProject,
resourceDict[ 'ReleaseProject' ] ) )
# Update pilot information
pilotInfoReported = False
pilotReference = resourceDict.get( 'PilotReference', '' )
if pilotReference:
if "PilotInfoReportedFlag" in resourceDict and not resourceDict['PilotInfoReportedFlag']:
gridCE = resourceDict.get( 'GridCE', 'Unknown' )
site = resourceDict.get( 'Site', 'Unknown' )
        benchmark = resourceDict.get( 'PilotBenchmark', 0.0 )
gLogger.verbose('Reporting pilot info for %s: gridCE=%s, site=%s, benchmark=%f' % (pilotReference,gridCE,site,benchmark) )
result = gPilotAgentsDB.setPilotStatus( pilotReference, status = 'Running',
gridSite = site,
destination = gridCE,
benchmark = benchmark )
if result['OK']:
pilotInfoReported = True
#Check the site mask
if not 'Site' in resourceDict:
return S_ERROR( 'Missing Site Name in Resource JDL' )
# Get common site mask and check the agent site
result = self.__siteStatus.getUsableSites( 'ComputingAccess' )
if not result['OK']:
return S_ERROR( 'Internal error: can not get site mask' )
usableSites = result['Value']
siteName = resourceDict['Site']
    if siteName not in usableSites:
# if 'GridCE' not in resourceDict:
# return S_ERROR( 'Site not in mask and GridCE not specified' )
# Even if the site is banned, if it defines a CE, it must be able to check it
# del resourceDict['Site']
# Banned site can only take Test jobs
resourceDict['JobType'] = 'Test'
resourceDict['Setup'] = self.serviceInfoDict['clientSetup']
gLogger.verbose( "Resource description:" )
for key in resourceDict:
gLogger.verbose( "%s : %s" % ( key.rjust( 20 ), resourceDict[ key ] ) )
negativeCond = self.__limiter.getNegativeCondForSite( siteName )
result = gTaskQueueDB.matchAndGetJob( resourceDict, negativeCond = negativeCond )
if DEBUG:
print result
if not result['OK']:
return result
result = result['Value']
if not result['matchFound']:
return S_ERROR( 'No match found' )
jobID = result['jobId']
resAtt = gJobDB.getJobAttributes( jobID, ['OwnerDN', 'OwnerGroup', 'Status'] )
if not resAtt['OK']:
return S_ERROR( 'Could not retrieve job attributes' )
if not resAtt['Value']:
return S_ERROR( 'No attributes returned for job' )
if not resAtt['Value']['Status'] == 'Waiting':
gLogger.error( 'Job matched by the TQ is not in Waiting state', str( jobID ) )
result = gTaskQueueDB.deleteJob( jobID )
if not result[ 'OK' ]:
return result
return S_ERROR( "Job %s is not in Waiting state" % str( jobID ) )
attNames = ['Status','MinorStatus','ApplicationStatus','Site']
attValues = ['Matched','Assigned','Unknown',siteName]
result = gJobDB.setJobAttributes( jobID, attNames, attValues )
# result = gJobDB.setJobStatus( jobID, status = 'Matched', minor = 'Assigned' )
result = gJobLoggingDB.addLoggingRecord( jobID,
status = 'Matched',
minor = 'Assigned',
source = 'Matcher' )
result = gJobDB.getJobJDL( jobID )
if not result['OK']:
return S_ERROR( 'Failed to get the job JDL' )
resultDict = {}
resultDict['JDL'] = result['Value']
resultDict['JobID'] = jobID
matchTime = time.time() - startTime
gLogger.info( "Match time: [%s]" % str( matchTime ) )
gMonitor.addMark( "matchTime", matchTime )
# Get some extra stuff into the response returned
resOpt = gJobDB.getJobOptParameters( jobID )
if resOpt['OK']:
for key, value in resOpt['Value'].items():
resultDict[key] = value
resAtt = gJobDB.getJobAttributes( jobID, ['OwnerDN', 'OwnerGroup'] )
if not resAtt['OK']:
return S_ERROR( 'Could not retrieve job attributes' )
if not resAtt['Value']:
return S_ERROR( 'No attributes returned for job' )
if self.__opsHelper.getValue( "JobScheduling/CheckMatchingDelay", True ):
self.__limiter.updateDelayCounters( siteName, jobID )
# Report pilot-job association
if pilotReference:
result = gPilotAgentsDB.setCurrentJobID( pilotReference, jobID )
result = gPilotAgentsDB.setJobForPilot( jobID, pilotReference, updateStatus=False )
resultDict['DN'] = resAtt['Value']['OwnerDN']
resultDict['Group'] = resAtt['Value']['OwnerGroup']
resultDict['PilotInfoReportedFlag'] = pilotInfoReported
return S_OK( resultDict )
##############################################################################
types_requestJob = [ [StringType, DictType] ]
def export_requestJob( self, resourceDescription ):
""" Serve a job to the request of an agent which is the highest priority
one matching the agent's site capacity
"""
result = self.selectJob( resourceDescription )
gMonitor.addMark( "matchesDone" )
if result[ 'OK' ]:
gMonitor.addMark( "matchesOK" )
return result
##############################################################################
types_getActiveTaskQueues = []
def export_getActiveTaskQueues( self ):
""" Return all task queues
"""
return gTaskQueueDB.retrieveTaskQueues()
##############################################################################
types_getMatchingTaskQueues = [ DictType ]
def export_getMatchingTaskQueues( self, resourceDict ):
""" Return all task queues
"""
if 'Site' in resourceDict and type( resourceDict[ 'Site' ] ) in StringTypes:
negativeCond = self.__limiter.getNegativeCondForSite( resourceDict[ 'Site' ] )
else:
negativeCond = self.__limiter.getNegativeCond()
return gTaskQueueDB.retrieveTaskQueuesThatMatch( resourceDict, negativeCond = negativeCond )
##############################################################################
types_matchAndGetTaskQueue = [ DictType ]
def export_matchAndGetTaskQueue( self, resourceDict ):
""" Return matching task queues
"""
return gTaskQueueDB.matchAndGetTaskQueue( resourceDict )
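##############################################################################
# Client-side usage sketch (illustrative only): assumes a standard DIRAC
# client installation, a valid proxy, and that this handler is deployed under
# the conventional 'WorkloadManagement/Matcher' service path.
#
#   from DIRAC.Core.DISET.RPCClient import RPCClient
#   matcher = RPCClient( 'WorkloadManagement/Matcher' )
#   result = matcher.requestJob( { 'Site' : 'LCG.CERN.ch', 'CPUTime' : 86400 } )
#   if result[ 'OK' ]:
#     print result[ 'Value' ][ 'JobID' ]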
|
avedaee/DIRAC
|
WorkloadManagementSystem/Service/MatcherHandler.py
|
Python
|
gpl-3.0
| 23,221
|
[
"DIRAC"
] |
d6157bbb785dd08a075bc70a5c08d1f4b35ace7b44df0d7d4b9b15e41ed0eb55
|
#!/usr/bin/env python
import sys, getopt
import glob, os
# MergeHash can maybe go on the hour queue
JobParams = {
'CreateHash': {
'outfile': """CreateHash_Job.q""",
'header': ["""#BSUB -J CreateHash""","""#BSUB -o PROJECT_HOME/Logs/CreateHash-Out.out""","""#BSUB -e PROJECT_HOME/Logs/CreateHash-Err.err""","""#BSUB -q week""","""#BSUB -W 23:58"""],
'body': ["""python LSA/create_hash.py -i PROJECT_HOME/original_reads/ -o PROJECT_HOME/hashed_reads/ -k 33 -s 31"""]},
'HashReads': {
'outfile': """HashReads_ArrayJob.q""",
'array': ["""original_reads/""","""*.fastq.*"""],
'header': ["""#BSUB -J HashReads[1-""","""#BSUB -o PROJECT_HOME/Logs/HashReads-Out-%I.out""","""#BSUB -e PROJECT_HOME/Logs/HashReads-Err-%I.err""","""#BSUB -q hour""","""#BSUB -W 3:56""","""#BSUB -M 8"""],
 # add -z option to omit reverse complementing
'body': ["""sleep $(($LSB_JOBINDEX % 60))""","""python LSA/hash_fastq_reads.py -r ${LSB_JOBINDEX} -i PROJECT_HOME/original_reads/ -o PROJECT_HOME/hashed_reads/"""]},
'MergeHash': {
'outfile': """MergeHash_ArrayJob.q""",
'array': ["""original_reads/""","""*.fastq""",5],
'header': ["""#BSUB -J MergeHash[1-""","""#BSUB -o PROJECT_HOME/Logs/MergeHash-Out-%I.out""","""#BSUB -e PROJECT_HOME/Logs/MergeHash-Err-%I.err""","""#BSUB -q week""","""#BSUB -W 53:58""","""#BSUB -R 'rusage[mem=4]'""","""#BSUB -M 8"""],
'body': ["""sleep $(($LSB_JOBINDEX % 60))""","""python LSA/merge_hashq_files.py -r ${LSB_JOBINDEX} -i PROJECT_HOME/hashed_reads/ -o PROJECT_HOME/hashed_reads/"""]},
'CombineFractions': {
'outfile': """CombineFractions_ArrayJob.q""",
'array': ["""original_reads/""","""*.fastq""",1],
'header': ["""#BSUB -J CombineFractions[1-""","""#BSUB -o PROJECT_HOME/Logs/CombineFractions-Out-%I.out""","""#BSUB -e PROJECT_HOME/Logs/CombineFractions-Err-%I.err""","""#BSUB -q week""","""#BSUB -W 23:58""","""#BSUB -R 'rusage[mem=8]'""","""#BSUB -M 20"""],
'body': ["""sleep $(($LSB_JOBINDEX % 60))""","""python LSA/merge_hashq_fractions.py -r ${LSB_JOBINDEX} -i PROJECT_HOME/hashed_reads/ -o PROJECT_HOME/hashed_reads/"""]},
'GlobalWeights': {
'outfile': """GlobalWeights_Job.q""",
'header': ["""#BSUB -J GlobalWeights""","""#BSUB -o PROJECT_HOME/Logs/GlobalWeights-Out.out""","""#BSUB -e PROJECT_HOME/Logs/GlobalWeights-Err.err""","""#BSUB -q week""","""#BSUB -W 71:10""","""#BSUB -R 'rusage[mem=25]'""","""#BSUB -M 75"""],
'body': ["""python LSA/tfidf_corpus.py -i PROJECT_HOME/hashed_reads/ -o PROJECT_HOME/cluster_vectors/"""]},
'KmerCorpus': {
'outfile': """KmerCorpus_ArrayJob.q""",
'array': ["""hashed_reads/""","""*.count.hash"""],
'header': ["""#BSUB -J KmerCorpus[1-""","""#BSUB -o PROJECT_HOME/Logs/KmerCorpus-Out-%I.out""","""#BSUB -e PROJECT_HOME/Logs/KmerCorpus-Err-%I.err""","""#BSUB -q hour""","""#BSUB -W 3:58""","""#BSUB -R 'rusage[mem=32]'""","""#BSUB -M 45"""],
'body': ["""sleep $(($LSB_JOBINDEX % 60))""","""python LSA/kmer_corpus.py -r ${LSB_JOBINDEX} -i PROJECT_HOME/hashed_reads/ -o PROJECT_HOME/cluster_vectors/"""]},
'KmerLSI': {
'outfile': """KmerLSI_Job.q""",
'header': ["""#BSUB -J KmerLSI""","""#BSUB -o PROJECT_HOME/Logs/KmerLSI-Out.out""","""#BSUB -e PROJECT_HOME/Logs/KmerLSI-Err.err""","""#BSUB -q week""","""#BSUB -n 6""","""#BSUB -R 'rusage[mem=4] span[hosts=1]'""","""#BSUB -M 10""","""python -m Pyro4.naming -n 0.0.0.0 > PROJECT_HOME/Logs/nameserver.log 2>&1 &""","""P1=$!""","""python -m gensim.models.lsi_worker > PROJECT_HOME/Logs/worker1.log 2>&1 &""","""P2=$!""","""python -m gensim.models.lsi_worker > PROJECT_HOME/Logs/worker2.log 2>&1 &""","""P3=$!""","""python -m gensim.models.lsi_worker > PROJECT_HOME/Logs/worker3.log 2>&1 &""","""P4=$!""","""python -m gensim.models.lsi_worker > PROJECT_HOME/Logs/worker4.log 2>&1 &""","""P5=$!""","""python -m gensim.models.lsi_worker > PROJECT_HOME/Logs/worker5.log 2>&1 &""","""P6=$!""","""python -m gensim.models.lsi_dispatcher > PROJECT_HOME/Logs/dispatcher.log 2>&1 &""","""P7=$!"""],
'body': ["""python LSA/kmer_lsi.py -i PROJECT_HOME/hashed_reads/ -o PROJECT_HOME/cluster_vectors/""","""kill $P1 $P2 $P3 $P4 $P5 $P6 $P7"""]},
'KmerClusterIndex': {
'outfile': """KmerClusterIndex_Job.q""",
'header': ["""#BSUB -J KmerClusterIndex""","""#BSUB -o PROJECT_HOME/Logs/KmerClusterIndex-Out.out""","""#BSUB -e PROJECT_HOME/Logs/KmerClusterIndex-Err.err""","""#BSUB -q week""","""#BSUB -R 'rusage[mem=1]'""","""#BSUB -M 35"""],
# adjust cluster thresh (-t) as necessary
'body': ["""python LSA/kmer_cluster_index.py -i PROJECT_HOME/hashed_reads/ -o PROJECT_HOME/cluster_vectors/ -t 0.7""","""python PBS_Scripts/create_jobs.py -j KmerClusterParts -i ./""","""X=`sed -n 1p hashed_reads/hashParts.txt`""","""sed -i 's/%parts%/$X/g' PBS_Scripts/KmerClusterParts_ArrayJob.q""","""python PBS_Scripts/create_jobs.py -j PBS_Scripts/KmerClusterMerge -i ./""","""X=`sed -n 1p cluster_vectors/numClusters.txt`""","""sed -i 's/%clusters%/$X/g' PBS_Scripts/KmerClusterMerge_ArrayJob.q"""]},
'KmerClusterParts': {
'outfile': """KmerClusterParts_ArrayJob.q""",
# number of tasks is 2**hash_size/10**6 + 1
#'array': ["""hashed_reads/""","""*.hashq.*"""],
'header': ["""#BSUB -J KmerClusterParts[1-%parts%]""","""#BSUB -o PROJECT_HOME/Logs/KmerClusterParts-Out-%I.out""","""#BSUB -e PROJECT_HOME/Logs/KmerClusterParts-Err-%I.err""","""#BSUB -q hour""","""#BSUB -W 3:59""","""#BSUB -R 'rusage[mem=1:argon_io=3]'""","""#BSUB -M 4"""],
###!!!
# adjust cluster thresh (-t) as necessary - probably same as Index step (maybe slightly higher)
###!!!
'body': ["""sleep $(($LSB_JOBINDEX % 60))""","""python LSA/kmer_cluster_part.py -r ${LSB_JOBINDEX} -i PROJECT_HOME/hashed_reads/ -o PROJECT_HOME/cluster_vectors/ -t 0.7"""]},
'KmerClusterMerge': {
'outfile': """KmerClusterMerge_ArrayJob.q""",
# number of tasks is number of clusters
#'array': ["""hashed_reads/""","""*.hashq.*"""],
'header': ["""#BSUB -J KmerClusterMerge[1-%clusters%]""","""#BSUB -o PROJECT_HOME/Logs/KmerClusterMerge-Out-%I.out""","""#BSUB -e PROJECT_HOME/Logs/KmerClusterMerge-Err-%I.err""","""#BSUB -q hour""","""#BSUB -W 3:59""","""#BSUB -R 'rusage[mem=1]'""","""#BSUB -M 8"""],
'body': ["""sleep $(($LSB_JOBINDEX % 60))""","""python LSA/kmer_cluster_merge.py -r ${LSB_JOBINDEX} -i PROJECT_HOME/cluster_vectors/ -o PROJECT_HOME/cluster_vectors/"""]},
'KmerClusterCols': {
'outfile': """KmerClusterCols_Job.q""",
'header': ["""#BSUB -J KmerClusterCols""","""#BSUB -o PROJECT_HOME/Logs/KmerClusterCols-Out.out""","""#BSUB -e PROJECT_HOME/Logs/KmerClusterCols-Err.err""","""#BSUB -q flower""","""#BSUB -W 71:58""","""#BSUB -R 'rusage[mem=40]'""","""#BSUB -M 70"""],
'body': ["""python LSA/kmer_cluster_cols.py -i PROJECT_HOME/hashed_reads/ -o PROJECT_HOME/cluster_vectors/"""]},
'ReadPartitions': {
'outfile': """ReadPartitions_ArrayJob.q""",
'array': ["""hashed_reads/""","""*.hashq.*"""],
# MAKE SURE TO SET TMP FILE LOCATION
'header': ["""#BSUB -J ReadPartitions[1-""","""#BSUB -o PROJECT_HOME/Logs/ReadPartitions-Out-%I.out""","""#BSUB -e PROJECT_HOME/Logs/ReadPartitions-Err-%I.err""","""#BSUB -q week""","""#BSUB -W 45:10""","""#BSUB -R 'rusage[mem=3:argon_io=3]'""","""#BSUB -M 20"""],
'body': ["""sleep $(($LSB_JOBINDEX % 60))""","""python LSA/write_partition_parts.py -r ${LSB_JOBINDEX} -i PROJECT_HOME/hashed_reads/ -o PROJECT_HOME/cluster_vectors/ -t TMPDIR"""]},
'MergeIntermediatePartitions': {
'outfile': """MergeIntermediatePartitions_ArrayJob.q""",
'array': ["""cluster_vectors/""","""*.cluster.npy"""],
'header': ["""#BSUB -J MergeIntermediatePartitions[1-""","""#BSUB -o PROJECT_HOME/Logs/MergeIntermediatePartitions-Out-%I.out""","""#BSUB -e PROJECT_HOME/Logs/MergeIntermediatePartitions-Err-%I.err""","""#BSUB -q hour""","""#BSUB -W 1:55""","""#BSUB -M 2""","""#BSUB -R 'rusage[argon_io=3]'"""],
'body': ["""sleep $(($LSB_JOBINDEX % 60))""","""python LSA/merge_partition_parts.py -r ${LSB_JOBINDEX} -i PROJECT_HOME/cluster_vectors/ -o PROJECT_HOME/read_partitions/"""]},
# Check to make sure there are no files remaining in cluster_vectors/PARTITION_NUM/
'SplitPairs': {
'outfile': """SplitPairs_ArrayJob.q""",
'array': ["""cluster_vectors/""","""*.cluster.npy"""],
'header': ["""#BSUB -J SplitPairs[1-""","""#BSUB -o PROJECT_HOME/Logs/SplitPairs-Out-%I.out""","""#BSUB -e PROJECT_HOME/Logs/SplitPairs-Err-%I.err""","""#BSUB -q hour""","""#BSUB -W 3:59""","""#BSUB -R 'rusage[argon_io=3]'""","""#BSUB -M 8"""],
'body': ["""sleep $(($LSB_JOBINDEX % 60))""","""python LSA/split_read_pairs.py -r ${LSB_JOBINDEX} -i PROJECT_HOME/read_partitions/ -o PROJECT_HOME/read_partitions/"""]},
'PhylerClassify': {
'outfile': """PhylerClassify_ArrayJob.q""",
'array': ["""cluster_vectors/""","""*.cluster.npy"""],
'header': ["""#BSUB -J PhylerClassify[1-""","""#BSUB -o PROJECT_HOME/Logs/PhylerClassify-Out-%I.out""","""#BSUB -e PROJECT_HOME/Logs/PhylerClassify-Err-%I.err""","""#BSUB -q hour""","""#BSUB -W 3:55""","""#BSUB -M 4""","""source /broad/software/scripts/useuse""","""reuse BLAST"""],
'body': ["""sleep $(($LSB_JOBINDEX % 60))""","""python misc/phyler_classify.py -r ${LSB_JOBINDEX} -i PROJECT_HOME/read_partitions/ -o PROJECT_HOME/phyler/"""]},
'PhylerIdentify': {
'outfile': """PhylerIdentify_ArrayJob.q""",
'array': ["""cluster_vectors/""","""*.cluster.npy"""],
'header': ["""#BSUB -J PhylerIdentify[1-""","""#BSUB -o PROJECT_HOME/Logs/PhylerIdentify-Out-%I.out""","""#BSUB -e PROJECT_HOME/Logs/PhylerIdentify-Err-%I.err""","""#BSUB -q hour""","""#BSUB -W 3:55""","""#BSUB -M 2"""],
'body': ["""sleep $(($LSB_JOBINDEX % 60))""","""python misc/phyler_identify.py -r ${LSB_JOBINDEX} -i PROJECT_HOME/read_partitions/ -o PROJECT_HOME/phyler/"""]},
'PhylerSummary': {
'outfile': """PhylerSummary_Job.q""",
'header': ["""#BSUB -J PhylerSummary""","""#BSUB -o PROJECT_HOME/Logs/PhylerSummary-Out.out""","""#BSUB -e PROJECT_HOME/Logs/PhylerSummary-Err.err""","""#BSUB -q hour""","""#BSUB -W 1:55""","""#BSUB -M 2"""],
'body': ["""python misc/phyler_summary.py -i PROJECT_HOME/phyler/"""]}
}
CommonElements = {
'header': ["""#!/bin/bash"""],
'body': ["""echo Date: `date`""","""t1=`date +%s`"""],
'footer': ["""[ $? -eq 0 ] || echo 'JOB FAILURE: $?'""","""echo Date: `date`""","""t2=`date +%s`""","""tdiff=`echo 'scale=3;('$t2'-'$t1')/3600' | bc`""","""echo 'Total time: '$tdiff' hours'"""]
}
help_message = 'usage example: python create_jobs.py -j HashReads -i /project/home/'
if __name__ == "__main__":
job = 'none specified'
try:
opts, args = getopt.getopt(sys.argv[1:],'hj:i:',["--jobname","inputdir="])
	except getopt.GetoptError:
print help_message
sys.exit(2)
for opt, arg in opts:
if opt in ('-h','--help'):
print help_message
sys.exit()
elif opt in ('-j',"--jobname"):
job = arg
elif opt in ('-i','--inputdir'):
inputdir = arg
if inputdir[-1] != '/':
inputdir += '/'
try:
params = JobParams[job]
	except KeyError:
print job+' is not a known job.'
print 'known jobs:',JobParams.keys()
print help_message
sys.exit(2)
	if params.get('array', None) is not None:
FP = glob.glob(os.path.join(inputdir+params['array'][0],params['array'][1]))
if len(params['array']) == 3:
FP = [fp[fp.rfind('/')+1:] for fp in FP]
if params['array'][2] == -1:
suffix = params['array'][1].replace('*','').replace('.','')
FP = set([fp[:fp.index(suffix)] for fp in FP])
else:
FP = set([fp[:fp.index('.')] for fp in FP])
FP = [None]*len(FP)*abs(params['array'][2])
array_size = str(len(FP))
params['header'][0] += array_size+']'
print job+' array size will be '+array_size
f = open(inputdir+'PBS_Scripts/'+params['outfile'],'w')
f.write('\n'.join(CommonElements['header']) + '\n')
f.write('\n'.join(params['header']).replace('PROJECT_HOME/',inputdir) + '\n')
f.write('\n'.join(CommonElements['body']) + '\n')
f.write('\n'.join(params['body']).replace('PROJECT_HOME/',inputdir) + '\n')
f.write('\n'.join(CommonElements['footer']) +'\n')
f.close()
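# For illustration (hypothetical project root /project/home/), running
#   python create_jobs.py -j GlobalWeights -i /project/home/
# writes /project/home/PBS_Scripts/GlobalWeights_Job.q, which begins with the
# common header followed by the job-specific BSUB directives:
#   #!/bin/bash
#   #BSUB -J GlobalWeights
#   #BSUB -o /project/home/Logs/GlobalWeights-Out.out
#   #BSUB -e /project/home/Logs/GlobalWeights-Err.err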
|
hurwitzlab/LSA-pipeline
|
PBS_Scripts/create_jobs.py
|
Python
|
mit
| 11,911
|
[
"BLAST"
] |
381f5cedf1e3b870774514f3f006374a40a0cbea49adea9da8f8fd9a485af6af
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
import glob
class SspaceStandard(Package):
"""SSPACE standard is a stand-alone program for scaffolding pre-assembled
    contigs using NGS paired-read data.
Note: A manual download is required for SSPACE-Standard.
Spack will search your current directory for the download file.
Alternatively, add this file to a mirror so that Spack can find it.
For instructions on how to set up a mirror, see
http://spack.readthedocs.io/en/latest/mirrors.html"""
homepage = "https://www.baseclear.com/genomics/bioinformatics/basetools/SSPACE"
url = "file://{0}/41SSPACE-STANDARD-3.0_linux-x86_64.tar.gz".format(os.getcwd())
manual_download = True
version('3.0', '7e171b4861b9d514e80aafc3d9cdf554')
depends_on('perl+threads', type=('build', 'run'))
depends_on('perl-perl4-corelibs', type=('build', 'run'))
def install(self, spec, prefix):
rootscript = 'SSPACE_Standard_v{0}.pl'.format(self.version)
scripts = [rootscript]
scripts.extend(glob.glob('tools/*.pl'))
scripts.extend(glob.glob('bwa/*.pl'))
for s in scripts:
filter_file('/usr/bin/perl', '/usr/bin/env perl',
s, string=True)
filter_file('require "getopts.pl";', 'use Getopt::Std;',
s, string=True)
filter_file('&Getopts(', 'getopts(', s, string=True)
install_tree('bin', prefix.bin)
install_tree('bowtie', prefix.bowtie)
install_tree('bwa', prefix.bwa)
install_tree('dotlib', prefix.dotlib)
install_tree('tools', prefix.tools)
install(rootscript, prefix)
def setup_run_environment(self, env):
env.set('SSPACE_HOME', self.prefix)
env.prepend_path('PATH', self.prefix)
|
iulian787/spack
|
var/spack/repos/builtin/packages/sspace-standard/package.py
|
Python
|
lgpl-2.1
| 2,015
|
[
"BWA",
"Bowtie"
] |
a407c2b2494dfc8a7b598f5e679b41d4c12ef3b014b9e2893760bc566e289d9d
|
#!/usr/bin/python
# file: mdoc.py
# author: Brian Fulkerson and Andrea Vedaldi
# description: MDoc main
# Copyright (C) 2007-12 Andrea Vedaldi and Brian Fulkerson.
# All rights reserved.
#
# This file is part of the VLFeat library and is made available under
# the terms of the BSD license (see the COPYING file).
import sys, os, re, shutil
import subprocess, signal
from wikidoc import wikidoc
from formatter import Formatter
from optparse import OptionParser
excludeRegexList = []
format = 'html'
verb = 0
sitexml = ""
usage = """usage: %prog [options] <basedir> <docdir>
Takes all .m files in basedir and its subdirectories and converts
them to html documentation, placing the results in docdir."""
parser = OptionParser(usage=usage)
parser.add_option(
"-f", "--format",
dest = "format",
default = "html",
action = "store",
help = "specify the output format (html, wiki, web)",
metavar = "STRING")
parser.add_option(
"-x", "--exclude",
dest = "excludeList",
action = "append",
type = "string",
help = "exclude files matching the specified regexp")
parser.add_option(
"-v", "--verbose",
dest = "verb",
default = False,
action = "store_true",
help = "print debug information")
parser.add_option(
"-t", "--helptoc",
dest = "helptoc",
default = False,
action = "store_true",
help = "create helptoc.xml")
parser.add_option(
"", "--helptoc-toolbox-name",
dest = "helptoc_toolbox_name",
default = "Example",
action = "store",
type = "string",
help = "helptoc.xml: Toolbox Name")
# --------------------------------------------------------------------
def runcmd(cmd):
# --------------------------------------------------------------------
"""
runcmd(CMD) runs the command CMD. The function waits for the
command to complete and correctly react to Ctrl-C by stopping the
process and raising an exception.
"""
try:
p = subprocess.Popen(cmd, shell=True)
sts = os.waitpid(p.pid, 0)
except (KeyboardInterrupt, SystemExit):
os.kill(p.pid, signal.SIGKILL)
raise
# --------------------------------------------------------------------
class MFile:
# --------------------------------------------------------------------
"""
MFile('sub/file.m') represents a MATLAB M-File.
"""
def __init__(self, basedir, dirname, name):
funcname = os.path.splitext(name)[0]
self.funcname = funcname #.upper()
self.path = os.path.join(basedir, dirname, name)
self.mdocname = funcname.replace(os.path.sep, '_')
self.webname = funcname.replace(os.path.sep, '.')
self.htmlname = self.mdocname + '.html'
self.wikiname = 'MDoc_' + (os.path.join(dirname, funcname)
.upper().replace(os.path.sep, '_'))
self.prev = None
self.next = None
self.node = None
def getId (self, format='html'):
if format == 'html':
return self.htmlname
elif format == 'web':
return self.webname
elif format == 'wiki':
return self.wikiname
def getRef (self, format='html'):
if format == 'html':
return self.htmlname
elif format == 'web':
return '%pathto:' + self.webname + ';'
elif format == 'wiki':
return self.wikiname
def __cmp__(self, other):
return cmp(self.webname, other.webname)
def __str__(self):
str = "MFile: %s\n" % (self.funcname)
str += " path : %s\n" % (self.path)
str += " mdocname: %s\n" % (self.mdocname)
str += " htmlname: %s\n" % (self.htmlname)
str += " wikiname: %s\n" % (self.wikiname)
return str
# --------------------------------------------------------------------
class Node:
# --------------------------------------------------------------------
"""
    A Node N represents a node in the toolbox hierarchy. A node is a
directory in the toolbox hierarchy and contains both M-files and
other sub-directories.
"""
def __init__(self, dirname):
self.dirname = dirname
self.children = []
self.mfiles = []
def addChildNode(self, node):
"Add a child node (toolbox subdirectory) to this node"
self.children.append(node)
def addMFile(self, mfile):
"Add a MATLAB M-File to this node"
self.mfiles.append(mfile)
mfile.node = self
def toIndexPage(self, format='html', depth=1):
"Converts the node hierarchy rooted here into an index."
page = ""
if format == 'html' or format == 'web':
if len(self.mfiles) > 0:
page += "<b>%s</b>" % (self.dirname.upper())
page += "<ul>\n"
for m in self.mfiles:
page += "<li>"
page += "<b><a href='%s'>%s</a></b>" % (m.getRef(format),
m.funcname)
page += " %s" % (m.brief)
page += "</li>"
page += "</ul>\n"
elif format == 'wiki':
if len(self.mfiles) > 0:
if depth > 1:
page += "=== %s ===\n" % (self.dirname.upper())
for m in self.mfiles:
page += "* [[%s|%s]]" % (m.getRef(format), m.funcname)
page += " %s\n" % (m.brief)
elif format == 'helptoc':
for m in self.mfiles:
page += "<tocitem target='%s'>%s</tocitem>\n" % (m.getRef('html'),
m.funcname)
else:
assert False
for n in self.children:
page += n.toIndexPage(format, depth+1)
return page
def toIndexXML(self):
xml = ""
for m in self.mfiles:
dirname = m.node.dirname.upper()
if len(dirname) > 0:
xml += \
"<page id='%s' name='%s' title='%s - %s' hide='yes'>" \
"<div class='mdoc'>" \
"<include src='%s'/></div></page>\n" % (m.getId('web'), m.funcname,
dirname,
m.funcname, m.htmlname)
else:
xml += \
"<page id='%s' name='%s' title='%s' hide='yes'>" \
"<div class='mdoc'>" \
"<include src='%s'/></div></page>\n" % (m.getId('web'), m.funcname,
m.funcname, m.htmlname)
for n in self.children:
xml += n.toIndexXML() ;
return xml
def __str__(self):
s = "Node: %s\n" % self.dirname
for m in self.mfiles:
s += m.__str__()
for n in self.children:
s += n.__str__()
return s
# --------------------------------------------------------------------
def depth_first(node):
# --------------------------------------------------------------------
"""
depth_first(NODE) is a generator that implements a depth first
visit of the node hierarchy rooted at NODE.
"""
yield node
for n in node.children:
for m in depth_first(n):
yield m
return
# --------------------------------------------------------------------
def extract(path):
# --------------------------------------------------------------------
"""
(BODY, FUNC, BRIEF) = extract(PATH) extracts the comment BODY, the
function name FUNC and the brief description BRIEF from the MATLAB
M-file located at PATH.
"""
body = []
func = ""
brief = ""
seenfunction = False
seenpercent = False
for l in open(path):
# Remove whitespace and newline
line = l.strip().lstrip()
if line.startswith('%'): seenpercent = True
if line.startswith('function'):
seenfunction = True
continue
if not line.startswith('%'):
if (seenfunction and seenpercent) or not seenfunction:
break
else:
continue
# remove leading `%' character
line = line[1:] #
body.append('%s\n' % line)
# Extract header from body
if len(body) > 0:
head = body[0]
body = body[1:]
match = re.match(r"^\s*(\w+)\s*(\S.*)\n$", head)
func = match.group(1)
brief = match.group(2)
return (body, func, brief)
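# Illustration (hypothetical M-file contents):
#   function y = vl_demo(x)
#   % VL_DEMO  One-line brief description
#   %   Longer help text...
# extract() would return FUNC = 'VL_DEMO', BRIEF = 'One-line brief
# description', and BODY = the remaining help lines with the leading '%'
# stripped.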
# --------------------------------------------------------------------
def xscan(baseDir, subDir=''):
# --------------------------------------------------------------------
"""
    NODE = xscan(BASEDIR) recursively scans the directory BASEDIR and
    constructs the toolbox hierarchy rooted at NODE.
"""
node = Node(subDir)
dir = os.listdir(os.path.join(baseDir, subDir))
fileNames = [f for f in dir if os.path.isfile(
os.path.join(baseDir, subDir, f))]
subSubDirs = [s for s in dir if os.path.isdir (
os.path.join(baseDir, subDir, s))]
fileNames.sort()
# Scan M-FileNames
for fileName in fileNames:
# only m-files
if not os.path.splitext(fileName)[1] == '.m':
continue
# skip if in the exclude list
exclude = False
for rx in excludeRegexList:
fileRelPath = os.path.join(subDir, fileName)
mo = rx.match(fileRelPath)
if mo and (mo.end() - mo.start() == len(fileRelPath)):
if verb:
print "mdoc: excluding ''%s''." % fileRelPath
exclude = True
if exclude: continue
node.addMFile(MFile(baseDir, subDir, fileName))
# Scan sub-directories
for s in subSubDirs:
        node.addChildNode(xscan(baseDir, os.path.join(subDir, s)))
return node
# --------------------------------------------------------------------
def breadCrumb(m):
# --------------------------------------------------------------------
breadcrumb = "<ul class='breadcrumb'>"
if format == 'web':
breadcrumb += "<li><a href='%pathto:mdoc;'>Index</a></li>"
else:
breadcrumb += "<li><a href='index.html'>Index</a></li>"
if m.prev: breadcrumb += "<li><a href='%s'>Prev</a></li>" % m.prev.getRef(format)
if m.next: breadcrumb += "<li><a href='%s'>Next</a></li>" % m.next.getRef(format)
breadcrumb += "</ul>"
#breadcrumb += "<span class='path'>%s</span>" % m.node.dirname.upper()
return breadcrumb
# --------------------------------------------------------------------
if __name__ == '__main__':
# --------------------------------------------------------------------
#
# Parse comand line options
#
(options, args) = parser.parse_args()
if options.verb: verb = 1
format = options.format
helptoc = options.helptoc
    if options.excludeList:
        print options.excludeList
        for ex in options.excludeList:
            rx = re.compile(ex)
            excludeRegexList.append(rx)
if len(args) != 2:
parser.print_help()
sys.exit(2)
basedir = args[0]
docdir = args[1]
if not basedir.endswith('/'): basedir = basedir + "/"
    if not docdir.endswith('/'): docdir = docdir + "/"
if verb:
print "mdoc: search path: %s" % basedir
print "mdoc: output path: %s" % docdir
print "mdoc: output format: %s" % format
#
# Search for mfiles
#
toolbox = xscan(basedir)
#
# Extract dictionaries of links and M-Files
#
linkdict = {}
mfiles = {}
prev = None
next = None
for n in depth_first(toolbox):
for m in n.mfiles:
if prev:
prev.next = m
m.prev = prev
prev = m
func = m.funcname.upper()
mfiles[func] = m
linkdict[func] = m.getRef(format)
if verb:
print "mdoc: num mfiles: %d" % (len(mfiles))
# Create output directory
if not os.access(docdir, os.F_OK):
os.makedirs(docdir)
# ----------------------------------------------------------------
# Extract comment block and run formatter
# ----------------------------------------------------------------
for (func, m) in mfiles.items():
if format == 'wiki':
outname = m.wikiname
elif format == 'html':
outname = m.htmlname
elif format == 'web':
outname = m.htmlname
if verb:
print "mdoc: generating %s from %s" % (outname, m.path)
# extract comment block from file
(lines, func, brief) = extract(m.path)
m.brief = brief
# Run formatter
content = ""
if len(lines) > 0:
if format == 'wiki' :
formatter = Formatter(lines, linkdict, 'wiki')
else:
formatter = Formatter(lines, linkdict, 'a')
content = formatter.toDOM().toxml("UTF-8")
content = content[content.find('?>')+2:]
# add decorations
if not format == 'wiki':
content = breadCrumb(m) + content
if format == 'web':
content = "<group>\n" + content + "</group>\n"
# save the result to an html file
if format == 'wiki':
f = open(os.path.join(docdir, m.wikiname), 'w')
else:
f = open(os.path.join(docdir, m.htmlname), 'w')
f.write(content)
f.close()
# ----------------------------------------------------------------
# Make index page
# ----------------------------------------------------------------
page = ""
if format == 'html':
pagename = 'index.html'
page += toolbox.toIndexPage('html')
elif format == 'web':
pagename = 'mdoc.html'
page += '<group>\n' + toolbox.toIndexPage('web') + '</group>\n'
elif format =='wiki' :
pagename = 'MDoc'
page = "== Documentation ==\n"
page += toolbox.toIndexPage('wiki')
f = open(os.path.join(docdir, pagename), 'w')
f.write(page)
f.close()
if format == 'web':
f = open(os.path.join(docdir, "mdoc.xml"), 'w')
f.write("<group>"+toolbox.toIndexXML()+"</group>\n")
f.close()
# ----------------------------------------------------------------
# Make helptoc.xml
# ----------------------------------------------------------------
if helptoc:
page = """<?xml version='1.0' encoding="utf-8"?>
<toc version="2.0">
<tocitem target="../index.html">%s
<tocitem target="%s" image="HelpIcon.FUNCTION">Functions
""" % (options.helptoc_toolbox_name, pagename)
page += toolbox.toIndexPage('helptoc')
page += """
</tocitem>
</tocitem>
</toc>
"""
f = open(os.path.join(docdir, "helptoc.xml"), 'w')
f.write(page)
f.close()
# ----------------------------------------------------------------
# Checkin files to wiki
# ----------------------------------------------------------------
def towiki(docdir, pagename):
pagenamewiki = pagename + '.wiki'
runcmd("cd %s ; mvs update %s" % (docdir, pagenamewiki))
if verb:
print "mdoc: converting", pagename, "to", pagenamewiki
wikidoc(os.path.join(docdir, pagenamewiki),
os.path.join(docdir, pagename))
runcmd("cd %s ; mvs commit -M -m 'Documentation update' %s" % (docdir, pagenamewiki))
if format == 'wiki' :
try:
towiki(docdir, pagename)
except (KeyboardInterrupt, SystemExit):
sys.exit(1)
for (func, m) in mfiles.items():
try:
towiki(docdir, m.wikiname)
except (KeyboardInterrupt, SystemExit):
sys.exit(1)
|
silencej/pollenTubeProc
|
segPollen/vlfeat/docsrc/mdoc.py
|
Python
|
gpl-3.0
| 16,193
|
[
"Brian",
"VisIt"
] |
1b9863fb80b6daa7555932c44988167947550b797f7d6d1e3c8136809554379c
|
"""Functions to plot M/EEG data on topo (one axes per channel)
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
import warnings
from itertools import cycle
from functools import partial
import numpy as np
from ..io.pick import channel_type, pick_types
from ..fixes import normalize_colors
from ..utils import _clean_names, deprecated
from ..defaults import _handle_default
from .utils import (_check_delayed_ssp, COLORS, _draw_proj_checkbox,
add_background_image)
def iter_topography(info, layout=None, on_pick=None, fig=None,
fig_facecolor='k', axis_facecolor='k',
axis_spinecolor='k', layout_scale=None):
""" Create iterator over channel positions
This function returns a generator that unpacks into
a series of matplotlib axis objects and data / channel
indices, both corresponding to the sensor positions
of the related layout passed or inferred from the channel info.
    `iter_topography`, hence, makes it convenient to realize custom
    topography plots.
Parameters
----------
info : instance of mne.io.meas_info.Info
The measurement info.
layout : instance of mne.layout.Layout | None
The layout to use. If None, layout will be guessed
on_pick : callable | None
The callback function to be invoked on clicking one
of the axes. Is supposed to instantiate the following
API: `function(axis, channel_index)`
fig : matplotlib.figure.Figure | None
The figure object to be considered. If None, a new
figure will be created.
fig_facecolor : str | obj
The figure face color. Defaults to black.
axis_facecolor : str | obj
The axis face color. Defaults to black.
axis_spinecolor : str | obj
The axis spine color. Defaults to black. In other words,
the color of the axis' edge lines.
    layout_scale : float | None
Scaling factor for adjusting the relative size of the layout
on the canvas. If None, nothing will be scaled.
Returns
-------
A generator that can be unpacked into
ax : matplotlib.axis.Axis
The current axis of the topo plot.
ch_dx : int
The related channel index.
"""
import matplotlib.pyplot as plt
if fig is None:
fig = plt.figure()
fig.set_facecolor(fig_facecolor)
if layout is None:
from ..channels import find_layout
layout = find_layout(info)
if on_pick is not None:
callback = partial(_plot_topo_onpick, show_func=on_pick)
fig.canvas.mpl_connect('button_press_event', callback)
pos = layout.pos.copy()
if layout_scale:
pos[:, :2] *= layout_scale
ch_names = _clean_names(info['ch_names'])
iter_ch = [(x, y) for x, y in enumerate(layout.names) if y in ch_names]
for idx, name in iter_ch:
ax = plt.axes(pos[idx])
ax.patch.set_facecolor(axis_facecolor)
plt.setp(list(ax.spines.values()), color=axis_spinecolor)
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.setp(ax.get_xticklines(), visible=False)
plt.setp(ax.get_yticklines(), visible=False)
ch_idx = ch_names.index(name)
vars(ax)['_mne_ch_name'] = name
vars(ax)['_mne_ch_idx'] = ch_idx
vars(ax)['_mne_ax_face_color'] = axis_facecolor
yield ax, ch_idx
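# Usage sketch (hypothetical `info` and `data`, where `data` is an
# (n_channels, n_times) array whose rows correspond to the layout's
# channels):
#
#     import matplotlib.pyplot as plt
#     for ax, ch_idx in iter_topography(info, fig_facecolor='w',
#                                       axis_facecolor='w',
#                                       axis_spinecolor='w'):
#         ax.plot(data[ch_idx], color='r')
#     plt.show()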
def _plot_topo(info=None, times=None, show_func=None, layout=None,
decim=None, vmin=None, vmax=None, ylim=None, colorbar=None,
border='none', axis_facecolor='k', fig_facecolor='k',
cmap='RdBu_r', layout_scale=None, title=None, x_label=None,
y_label=None, vline=None, font_color='w'):
"""Helper function to plot on sensor layout"""
import matplotlib.pyplot as plt
# prepare callbacks
tmin, tmax = times[[0, -1]]
on_pick = partial(show_func, tmin=tmin, tmax=tmax, vmin=vmin,
vmax=vmax, ylim=ylim, x_label=x_label,
y_label=y_label, colorbar=colorbar)
fig = plt.figure()
if colorbar:
norm = normalize_colors(vmin=vmin, vmax=vmax)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array(np.linspace(vmin, vmax))
ax = plt.axes([0.015, 0.025, 1.05, .8], axisbg=fig_facecolor)
cb = fig.colorbar(sm, ax=ax)
cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
plt.setp(cb_yticks, color=font_color)
ax.axis('off')
my_topo_plot = iter_topography(info, layout=layout, on_pick=on_pick,
fig=fig, layout_scale=layout_scale,
axis_spinecolor=border,
axis_facecolor=axis_facecolor,
fig_facecolor=fig_facecolor)
for ax, ch_idx in my_topo_plot:
if layout.kind == 'Vectorview-all' and ylim is not None:
this_type = {'mag': 0, 'grad': 1}[channel_type(info, ch_idx)]
ylim_ = [v[this_type] if _check_vlim(v) else v for v in ylim]
else:
ylim_ = ylim
show_func(ax, ch_idx, tmin=tmin, tmax=tmax, vmin=vmin,
vmax=vmax, ylim=ylim_)
if ylim_ and not any(v is None for v in ylim_):
plt.ylim(*ylim_)
if title is not None:
plt.figtext(0.03, 0.9, title, color=font_color, fontsize=19)
return fig
def _plot_topo_onpick(event, show_func=None, colorbar=False):
"""Onpick callback that shows a single channel in a new figure"""
# make sure that the swipe gesture in OS-X doesn't open many figures
orig_ax = event.inaxes
if event.inaxes is None:
return
import matplotlib.pyplot as plt
try:
ch_idx = orig_ax._mne_ch_idx
face_color = orig_ax._mne_ax_face_color
fig, ax = plt.subplots(1)
plt.title(orig_ax._mne_ch_name)
ax.set_axis_bgcolor(face_color)
# allow custom function to override parameters
show_func(plt, ch_idx)
except Exception as err:
# matplotlib silently ignores exceptions in event handlers, so we
# print the error here to know what went wrong before re-raising
print(err)
raise
def _imshow_tfr(ax, ch_idx, tmin, tmax, vmin, vmax, onselect, ylim=None,
tfr=None, freq=None, vline=None, x_label=None, y_label=None,
colorbar=False, picker=True, cmap='RdBu_r', title=None):
""" Aux function to show time-freq map on topo """
import matplotlib.pyplot as plt
from matplotlib.widgets import RectangleSelector
extent = (tmin, tmax, freq[0], freq[-1])
img = ax.imshow(tfr[ch_idx], extent=extent, aspect="auto", origin="lower",
vmin=vmin, vmax=vmax, picker=picker, cmap=cmap)
if isinstance(ax, plt.Axes):
if x_label is not None:
ax.set_xlabel(x_label)
if y_label is not None:
ax.set_ylabel(y_label)
else:
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
plt.ylabel(y_label)
if colorbar:
plt.colorbar(mappable=img)
if title:
plt.title(title)
if not isinstance(ax, plt.Axes):
ax = plt.gca()
# keep a reference on the axes: without it the selector would be
# garbage-collected and the callback would silently stop working
ax.RS = RectangleSelector(ax, onselect=onselect)
def _plot_timeseries(ax, ch_idx, tmin, tmax, vmin, vmax, ylim, data, color,
times, vline=None, x_label=None, y_label=None,
colorbar=False):
""" Aux function to show time series on topo """
import matplotlib.pyplot as plt
picker_flag = False
for data_, color_ in zip(data, color):
if not picker_flag:
# use large tol for picker so we can click anywhere in the axes
ax.plot(times, data_[ch_idx], color_, picker=1e9)
picker_flag = True
else:
ax.plot(times, data_[ch_idx], color_)
if vline:
for x in vline:
plt.axvline(x, color='w', linewidth=0.5)
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
plt.ylabel(y_label)
if colorbar:
plt.colorbar()
def _check_vlim(vlim):
"""AUX function"""
return not np.isscalar(vlim) and vlim is not None
@deprecated("It will be removed in version 0.11. "
"Please use evoked.plot_topo or viz.evoked.plot_evoked_topo "
"for list of evoked instead.")
def plot_topo(evoked, layout=None, layout_scale=0.945, color=None,
border='none', ylim=None, scalings=None, title=None, proj=False,
vline=[0.0], fig_facecolor='k', fig_background=None,
axis_facecolor='k', font_color='w', show=True):
"""Plot 2D topography of evoked responses.
Clicking on the plot of an individual sensor opens a new figure showing
the evoked response for the selected sensor.
Parameters
----------
evoked : list of Evoked | Evoked
The evoked response to plot.
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
color : list of color objects | color object | None
Everything matplotlib accepts to specify colors. If not list-like,
the color specified will be repeated. If None, colors are
automatically drawn.
border : str
matplotlib borders style to be used for each sensor plot.
ylim : dict | None
ylim for plots. The value determines the upper and lower subplot
limits. e.g. ylim = dict(eeg=[-200e-6, 200e-6]). Valid keys are eeg,
mag, grad, misc. If None, the ylim parameter for each channel is
determined by the maximum absolute peak.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If None,
defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
title : str
Title of the figure.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
vline : list of floats | None
The values at which to show a vertical line.
fig_facecolor : str | obj
The figure face color. Defaults to black.
fig_background : None | numpy ndarray
A background image for the figure. This must work with a call to
plt.imshow. Defaults to None.
axis_facecolor : str | obj
The face color to be used for each sensor plot. Defaults to black.
font_color : str | obj
The color of text in the colorbar and title. Defaults to white.
show : bool
Show figure if True.
Returns
-------
fig : Instance of matplotlib.figure.Figure
Images of evoked responses at sensor locations
"""
return _plot_evoked_topo(evoked=evoked, layout=layout,
layout_scale=layout_scale, color=color,
border=border, ylim=ylim, scalings=scalings,
title=title, proj=proj, vline=vline,
fig_facecolor=fig_facecolor,
fig_background=fig_background,
axis_facecolor=axis_facecolor,
font_color=font_color, show=show)
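# Migration sketch for the deprecation above (a hedged example; assumes
# `evoked` is an mne.Evoked instance):
#
#     evoked.plot_topo(layout=layout)  # preferred
#     # instead of the deprecated module-level plot_topo(evoked, layout=layout)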
def _plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None,
border='none', ylim=None, scalings=None, title=None,
proj=False, vline=[0.0], fig_facecolor='k',
fig_background=None, axis_facecolor='k', font_color='w',
show=True):
"""Plot 2D topography of evoked responses.
Clicking on the plot of an individual sensor opens a new figure showing
the evoked response for the selected sensor.
Parameters
----------
evoked : list of Evoked | Evoked
The evoked response to plot.
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
color : list of color objects | color object | None
Everything matplotlib accepts to specify colors. If not list-like,
the color specified will be repeated. If None, colors are
automatically drawn.
border : str
matplotlib borders style to be used for each sensor plot.
ylim : dict | None
ylim for plots. The value determines the upper and lower subplot
limits. e.g. ylim = dict(eeg=[-200e-6, 200e-6]). Valid keys are eeg,
mag, grad, misc. If None, the ylim parameter for each channel is
determined by the maximum absolute peak.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If None,
defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
title : str
Title of the figure.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
vline : list of floats | None
The values at which to show a vertical line.
fig_facecolor : str | obj
The figure face color. Defaults to black.
fig_background : None | numpy ndarray
A background image for the figure. This must work with a call to
plt.imshow. Defaults to None.
axis_facecolor : str | obj
The face color to be used for each sensor plot. Defaults to black.
font_color : str | obj
The color of text in the colorbar and title. Defaults to white.
show : bool
Show figure if True.
Returns
-------
fig : Instance of matplotlib.figure.Figure
Images of evoked responses at sensor locations
"""
import matplotlib.pyplot as plt
if not isinstance(evoked, (tuple, list)):
evoked = [evoked]
if isinstance(color, (tuple, list)):
if len(color) != len(evoked):
raise ValueError('Lists of evoked objects and colors'
' must have the same length')
elif color is None:
colors = ['w'] + COLORS
stop = (slice(len(evoked)) if len(evoked) < len(colors)
else slice(len(colors)))
color = cycle(colors[stop])
if len(evoked) > len(colors):
warnings.warn('More evoked objects than colors available. '
'You should pass a list of unique colors.')
else:
color = cycle([color])
times = evoked[0].times
if not all((e.times == times).all() for e in evoked):
raise ValueError('All evoked.times must be the same')
info = evoked[0].info
ch_names = evoked[0].ch_names
if not all(e.ch_names == ch_names for e in evoked):
raise ValueError('All evoked.picks must be the same')
ch_names = _clean_names(ch_names)
if layout is None:
from ..channels.layout import find_layout
layout = find_layout(info)
# XXX. at the moment we are committed to 1- / 2-sensor-types layouts
chs_in_layout = set(layout.names) & set(ch_names)
types_used = set(channel_type(info, ch_names.index(ch))
for ch in chs_in_layout)
# remove possible reference meg channels; note set(['ref_meg']), since
# set('ref_meg') would be a set of single characters
types_used = set.difference(types_used, set(['ref_meg']))
# one check for all vendors
meg_types = set(('mag', 'grad'))
is_meg = len(set.intersection(types_used, meg_types)) > 0
if is_meg:
types_used = list(types_used)[::-1] # -> restore kwarg order
picks = [pick_types(info, meg=kk, ref_meg=False, exclude=[])
for kk in types_used]
else:
types_used_kwargs = dict((t, True) for t in types_used)
picks = [pick_types(info, meg=False, exclude=[], **types_used_kwargs)]
assert isinstance(picks, list) and len(types_used) == len(picks)
scalings = _handle_default('scalings', scalings)
evoked = [e.copy() for e in evoked]
for e in evoked:
for pick, t in zip(picks, types_used):
e.data[pick] = e.data[pick] * scalings[t]
if proj is True and all(e.proj is not True for e in evoked):
evoked = [e.apply_proj() for e in evoked]
elif proj == 'interactive': # let it fail early.
for e in evoked:
_check_delayed_ssp(e)
if ylim is None:
def set_ylim(x):
return np.abs(x).max()
ylim_ = [set_ylim([e.data[t] for e in evoked]) for t in picks]
ymax = np.array(ylim_)
ylim_ = (-ymax, ymax)
elif isinstance(ylim, dict):
ylim_ = _handle_default('ylim', ylim)
ylim_ = [ylim_[kk] for kk in types_used]
# extra unpack to avoid bug #1700
if len(ylim_) == 1:
ylim_ = ylim_[0]
else:
ylim_ = list(zip(*[np.array(yl) for yl in ylim_]))  # list: re-iterated per axes
else:
raise ValueError('ylim must be None or a dict')
plot_fun = partial(_plot_timeseries, data=[e.data for e in evoked],
color=color, times=times, vline=vline)
fig = _plot_topo(info=info, times=times, show_func=plot_fun, layout=layout,
decim=1, colorbar=False, ylim=ylim_, cmap=None,
layout_scale=layout_scale, border=border,
fig_facecolor=fig_facecolor, font_color=font_color,
axis_facecolor=axis_facecolor,
title=title, x_label='Time (s)', vline=vline)
if fig_background is not None:
add_background_image(fig, fig_background)
if proj == 'interactive':
for e in evoked:
_check_delayed_ssp(e)
params = dict(evokeds=evoked, times=times,
plot_update_proj_callback=_plot_update_evoked_topo,
projs=evoked[0].info['projs'], fig=fig)
_draw_proj_checkbox(None, params)
if show:
plt.show()
return fig
def _plot_update_evoked_topo(params, bools):
"""Helper function to update topo sensor plots"""
evokeds, times, fig = [params[k] for k in ('evokeds', 'times', 'fig')]
projs = [proj for ii, proj in enumerate(params['projs'])
if ii in np.where(bools)[0]]
params['proj_bools'] = bools
evokeds = [e.copy() for e in evokeds]
for e in evokeds:
e.info['projs'] = []
e.add_proj(projs)
e.apply_proj()
# make sure to only modify the time courses, not the ticks
axes = fig.get_axes()
n_lines = len(axes[0].lines)
n_diff = len(evokeds) - n_lines
ax_slice = slice(abs(n_diff)) if n_diff < 0 else slice(n_lines)
for ax in axes:
lines = ax.lines[ax_slice]
for line, evoked in zip(lines, evokeds):
line.set_data(times, evoked.data[ax._mne_ch_idx])
fig.canvas.draw()
def _erfimage_imshow(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None,
data=None, epochs=None, sigma=None,
order=None, scalings=None, vline=None,
x_label=None, y_label=None, colorbar=False,
cmap='RdBu_r'):
"""Aux function to plot erfimage on sensor topography"""
from scipy import ndimage
import matplotlib.pyplot as plt
this_data = data[:, ch_idx, :].copy()
ch_type = channel_type(epochs.info, ch_idx)
if ch_type not in scalings:
raise KeyError('%s channel type not in scalings' % ch_type)
this_data *= scalings[ch_type]
if callable(order):
order = order(epochs.times, this_data)
if order is not None:
this_data = this_data[order]
if sigma > 0.:
this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
ax.imshow(this_data, extent=[tmin, tmax, 0, len(data)], aspect='auto',
origin='lower', vmin=vmin, vmax=vmax, picker=True,
cmap=cmap, interpolation='nearest')
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
plt.ylabel(y_label)
if colorbar:
plt.colorbar()
def plot_topo_image_epochs(epochs, layout=None, sigma=0., vmin=None,
vmax=None, colorbar=True, order=None, cmap='RdBu_r',
layout_scale=.95, title=None, scalings=None,
border='none', fig_facecolor='k', font_color='w',
show=True):
"""Plot Event Related Potential / Fields image on topographies
Parameters
----------
epochs : instance of Epochs
The epochs.
layout : instance of Layout
System specific sensor positions.
sigma : float
The standard deviation of the Gaussian smoothing to apply along
the epoch axis of the image. If 0., no smoothing is applied.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
colorbar : bool
Whether to display a colorbar.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it is an array of int, its length should equal
the number of good epochs. If it is a callable, it is passed the
times vector and the data as a 2d array
(data.shape[1] == len(times)) and must return the new order.
cmap : instance of matplotlib.colors.Colormap | str
Colors to be mapped to the values. Defaults to 'RdBu_r'.
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
title : str
Title of the figure.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If
None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
border : str
matplotlib borders style to be used for each sensor plot.
fig_facecolor : str | obj
The figure face color. Defaults to black.
font_color : str | obj
The color of tick labels in the colorbar. Defaults to white.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
import matplotlib.pyplot as plt
scalings = _handle_default('scalings', scalings)
data = epochs.get_data()
if vmin is None:
vmin = data.min()
if vmax is None:
vmax = data.max()
if layout is None:
from ..channels.layout import find_layout
layout = find_layout(epochs.info)
erf_imshow = partial(_erfimage_imshow, scalings=scalings, order=order,
data=data, epochs=epochs, sigma=sigma,
cmap=cmap)
fig = _plot_topo(info=epochs.info, times=epochs.times,
show_func=erf_imshow, layout=layout, decim=1,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title,
fig_facecolor=fig_facecolor,
font_color=font_color, border=border,
x_label='Time (s)', y_label='Epoch')
if show:
plt.show()
return fig
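# Usage sketch (hedged; assumes `epochs` is an mne.Epochs instance):
#
#     fig = plot_topo_image_epochs(epochs, sigma=0.5, colorbar=True,
#                                  title='ERF images')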
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
|
python-packages/mne-python-0.10/mne/viz/topo.py
|
Python
|
bsd-3-clause
| 23,928
|
[
"Gaussian"
] |
fcdf77576856ceebd8c458186360863088ce63b8c1725f0c57d6b8ed76167770
|
# Copyright: (c) 2013, James Cammarata <jcammarata@ansible.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
import re
import shutil
import sys
import time
import yaml
from jinja2 import Environment, FileSystemLoader
import ansible.constants as C
from ansible import context
from ansible.cli import CLI
from ansible.cli.arguments import optparse_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.login import GalaxyLogin
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.token import GalaxyToken
from ansible.module_utils._text import to_native, to_text
from ansible.playbook.role.requirement import RoleRequirement
from ansible.utils.display import Display
display = Display()
class GalaxyCLI(CLI):
'''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
VALID_ACTIONS = frozenset(("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup"))
def __init__(self, args):
self.api = None
self.galaxy = None
super(GalaxyCLI, self).__init__(args)
def set_action(self):
super(GalaxyCLI, self).set_action()
# specific to actions
if self.action == "delete":
self.parser.set_usage("usage: %prog delete [options] github_user github_repo")
self.parser.set_description("Removes the role from Galaxy. It does not remove or alter the actual GitHub repository.")
elif self.action == "import":
self.parser.set_usage("usage: %prog import [options] github_user github_repo")
self.parser.set_description("Import a role.")
self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help='Don\'t wait for import results.')
self.parser.add_option('--branch', dest='reference',
help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
self.parser.add_option('--role-name', dest='role_name', help='The name the role should have, if different than the repo name')
self.parser.add_option('--status', dest='check_status', action='store_true', default=False,
help='Check the status of the most recent import request for given github_user/github_repo.')
elif self.action == "info":
self.parser.set_usage("usage: %prog info [options] role_name[,version]")
self.parser.set_description("View more details about a specific role.")
elif self.action == "init":
self.parser.set_usage("usage: %prog init [options] role_name")
self.parser.set_description("Initialize new role with the base structure of a role.")
self.parser.add_option('--init-path', dest='init_path', default="./",
help='The path in which the skeleton role will be created. The default is the current working directory.')
self.parser.add_option('--type', dest='role_type', action='store', default='default',
help="Initialize using an alternate role type. Valid types include: 'container', 'apb' and 'network'.")
self.parser.add_option('--role-skeleton', dest='role_skeleton', default=C.GALAXY_ROLE_SKELETON,
help='The path to a role skeleton that the new role should be based upon.')
elif self.action == "install":
self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
self.parser.set_description("Install Roles from file(s), URL(s) or tar file(s)")
self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help='Ignore errors and continue with the next specified role.')
self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies')
self.parser.add_option('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported')
self.parser.add_option('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
default=False, help='Use tar instead of the scm archive option when packaging the role')
elif self.action == "remove":
self.parser.set_usage("usage: %prog remove role1 role2 ...")
self.parser.set_description("Delete a role from roles_path.")
elif self.action == "list":
self.parser.set_usage("usage: %prog list [role_name]")
self.parser.set_description("Show the name and version of each role installed in the roles_path.")
elif self.action == "login":
self.parser.set_usage("usage: %prog login [options]")
self.parser.set_description("Login to api.github.com server in order to use ansible-galaxy sub command such as 'import', 'delete' and 'setup'.")
self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.')
elif self.action == "search":
self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] "
"[--author username]")
self.parser.add_option('--platforms', dest='platforms', help='list of OS platforms to filter by')
self.parser.add_option('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
self.parser.add_option('--author', dest='author', help='GitHub username')
self.parser.set_description("Search the Galaxy database by tags, platforms, author and multiple keywords.")
elif self.action == "setup":
self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret")
self.parser.add_option('--remove', dest='remove_id', default=None,
help='Remove the integration matching the provided ID value. Use --list to see ID values.')
self.parser.add_option('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.')
self.parser.set_description("Manage the integration between Galaxy and the given source.")
# options that apply to more than one action
if self.action in ['init', 'info']:
self.parser.add_option('--offline', dest='offline', default=False, action='store_true', help="Don't query the galaxy API when creating roles")
if self.action not in ("delete", "import", "init", "login", "setup"):
# NOTE: while the option type=str, the default is a list, and the
# callback will set the value to a list.
self.parser.add_option('-p', '--roles-path', dest='roles_path', action="callback", callback=opt_help.unfrack_paths, default=C.DEFAULT_ROLES_PATH,
help='The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg'
' file (/etc/ansible/roles if not configured)', type='str')
if self.action in ("init", "install"):
self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role')
def init_parser(self):
''' create an options parser for bin/ansible '''
super(GalaxyCLI, self).init_parser(
usage="usage: %%prog [%s] [--help] [options] ..." % "|".join(sorted(self.VALID_ACTIONS)),
epilog="\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]),
desc="Perform various Role related operations.",
)
# common
self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination')
self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS,
help='Ignore SSL certificate validation errors.')
self.set_action()
def post_process_args(self, options, args):
options, args = super(GalaxyCLI, self).post_process_args(options, args)
display.verbosity = options.verbosity
return options, args
def run(self):
super(GalaxyCLI, self).run()
self.galaxy = Galaxy()
self.api = GalaxyAPI(self.galaxy)
self.execute()
@staticmethod
def exit_without_ignore(rc=1):
"""
Exits with the specified return code unless the
option --ignore-errors was specified
"""
if not context.CLIARGS['ignore_errors']:
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
@staticmethod
def _display_role_info(role_info):
text = [u"", u"Role: %s" % to_text(role_info['name'])]
text.append(u"\tdescription: %s" % role_info.get('description', ''))
for k in sorted(role_info.keys()):
if k in GalaxyCLI.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text.append(u"\t%s:" % (k))
for key in sorted(role_info[k].keys()):
if key in GalaxyCLI.SKIP_INFO_KEYS:
continue
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
return u'\n'.join(text)
############################
# execute actions
############################
def execute_init(self):
"""
creates the skeleton framework of a role that complies with the galaxy metadata format.
"""
init_path = context.CLIARGS['init_path']
force = context.CLIARGS['force']
role_skeleton = context.CLIARGS['role_skeleton']
role_name = context.CLIARGS['args'][0].strip() if context.CLIARGS['args'] else None
if not role_name:
raise AnsibleOptionsError("- no role name specified for init")
role_path = os.path.join(init_path, role_name)
if os.path.exists(role_path):
if os.path.isfile(role_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path)
elif not force:
raise AnsibleError("- the directory %s already exists."
"you can use --force to re-initialize this directory,\n"
"however it will reset any main.yml files that may have\n"
"been modified there already." % role_path)
inject_data = dict(
role_name=role_name,
author='your name',
description='your description',
company='your company (optional)',
license='license (GPLv2, CC-BY, etc)',
issue_tracker_url='http://example.com/issue/tracker',
min_ansible_version='2.4',
role_type=context.CLIARGS['role_type']
)
# create role directory
if not os.path.exists(role_path):
os.makedirs(role_path)
if role_skeleton is not None:
skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
else:
role_skeleton = self.galaxy.default_role_skeleton_path
skeleton_ignore_expressions = ['^.*/.git_keep$']
role_skeleton = os.path.expanduser(role_skeleton)
skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
template_env = Environment(loader=FileSystemLoader(role_skeleton))
for root, dirs, files in os.walk(role_skeleton, topdown=True):
rel_root = os.path.relpath(root, role_skeleton)
in_templates_dir = rel_root.split(os.sep, 1)[0] == 'templates'
dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]
for f in files:
filename, ext = os.path.splitext(f)
if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
continue
elif ext == ".j2" and not in_templates_dir:
src_template = os.path.join(rel_root, f)
dest_file = os.path.join(role_path, rel_root, filename)
template_env.get_template(src_template).stream(inject_data).dump(dest_file)
else:
f_rel_path = os.path.relpath(os.path.join(root, f), role_skeleton)
shutil.copyfile(os.path.join(root, f), os.path.join(role_path, f_rel_path))
for d in dirs:
dir_path = os.path.join(role_path, rel_root, d)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
display.display("- %s was created successfully" % role_name)
def execute_info(self):
"""
prints out detailed information about an installed role as well as info available from the galaxy API.
"""
if not context.CLIARGS['args']:
# the user needs to specify a role
raise AnsibleOptionsError("- you must specify a user/role name")
roles_path = context.CLIARGS['roles_path']
data = ''
for role in context.CLIARGS['args']:
role_info = {'path': roles_path}
gr = GalaxyRole(self.galaxy, role)
install_info = gr.install_info
if install_info:
if 'version' in install_info:
install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
remote_data = False
if not context.CLIARGS['offline']:
remote_data = self.api.lookup_role_by_name(role, False)
if remote_data:
role_info.update(remote_data)
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
role_spec = req.role_yaml_parse({'role': role})
if role_spec:
role_info.update(role_spec)
data = self._display_role_info(role_info)
# FIXME: This is broken in both 1.9 and 2.0 as
# _display_role_info() always returns something
if not data:
data = u"\n- the role %s was not found" % role
self.pager(data)
def execute_install(self):
"""
uses the args list of roles to be installed, unless -r/--role-file was specified. The list of
roles can be a name (which will be downloaded via the galaxy API and github), or it can be a local .tar.gz file.
"""
role_file = context.CLIARGS['role_file']
if not context.CLIARGS['args'] and role_file is None:
# the user needs to specify one of either --role-file or specify a single user/role name
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
no_deps = context.CLIARGS['no_deps']
force = context.CLIARGS['force']
roles_left = []
if role_file:
try:
f = open(role_file, 'r')
if role_file.endswith('.yaml') or role_file.endswith('.yml'):
try:
required_roles = yaml.safe_load(f.read())
except Exception as e:
raise AnsibleError("Unable to load data from the requirements file: %s" % role_file)
if required_roles is None:
raise AnsibleError("No roles found in file: %s" % role_file)
for role in required_roles:
if "include" not in role:
role = RoleRequirement.role_yaml_parse(role)
display.vvv("found role %s in yaml file" % str(role))
if "name" not in role and "scm" not in role:
raise AnsibleError("Must specify name or src for role")
roles_left.append(GalaxyRole(self.galaxy, **role))
else:
with open(role["include"]) as f_include:
try:
roles_left += [
GalaxyRole(self.galaxy, **r) for r in
(RoleRequirement.role_yaml_parse(i) for i in yaml.safe_load(f_include))
]
except Exception as e:
msg = "Unable to load data from the include requirements file: %s %s"
raise AnsibleError(msg % (role_file, e))
else:
raise AnsibleError("Invalid role requirements file")
f.close()
except (IOError, OSError) as e:
raise AnsibleError('Unable to open %s: %s' % (role_file, to_native(e)))
else:
# roles were specified directly, so we'll just go out grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in context.CLIARGS['args']:
role = RoleRequirement.role_yaml_parse(rname.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
for role in roles_left:
# when a role file and role names were both given, only process matching roles
if role_file and context.CLIARGS['args'] and role.name not in context.CLIARGS['args']:
display.vvv('Skipping role %s' % role.name)
continue
display.vvv('Processing role %s ' % role.name)
# query the galaxy API for the role data
if role.install_info is not None:
if role.install_info['version'] != role.version or force:
if force:
display.display('- changing role %s from %s to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
role.remove()
else:
display.warning('- %s (%s) is already installed - use --force to change version to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
continue
else:
if not force:
display.display('- %s is already installed, skipping.' % str(role))
continue
try:
installed = role.install()
except AnsibleError as e:
display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e)))
self.exit_without_ignore()
continue
# install dependencies, if we want them
if not no_deps and installed:
if not role.metadata:
display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
else:
role_dependencies = role.metadata.get('dependencies') or []
for dep in role_dependencies:
display.debug('Installing dep %s' % dep)
dep_req = RoleRequirement()
dep_info = dep_req.role_yaml_parse(dep)
dep_role = GalaxyRole(self.galaxy, **dep_info)
if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
# we know we can skip this, as it's not going to
# be found on galaxy.ansible.com
continue
if dep_role.install_info is None:
if dep_role not in roles_left:
display.display('- adding dependency: %s' % str(dep_role))
roles_left.append(dep_role)
else:
display.display('- dependency %s already pending installation.' % dep_role.name)
else:
if dep_role.install_info['version'] != dep_role.version:
display.warning('- dependency %s from role %s differs from already installed version (%s), skipping' %
(str(dep_role), role.name, dep_role.install_info['version']))
else:
display.display('- dependency %s is already installed, skipping.' % dep_role.name)
if not installed:
display.warning("- %s was NOT installed successfully." % role.name)
self.exit_without_ignore()
return 0
def execute_remove(self):
"""
removes the list of roles passed as arguments from the local system.
"""
if not context.CLIARGS['args']:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in context.CLIARGS['args']:
role = GalaxyRole(self.galaxy, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, to_native(e)))
return 0
def execute_list(self):
"""
lists the roles installed on the local system or matches a single role passed as an argument.
"""
if len(context.CLIARGS['args']) > 1:
raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list")
def _display_role(gr):
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
display.display("- %s, %s" % (gr.name, version))
if context.CLIARGS['args']:
# show the requested role, if it exists
name = context.CLIARGS['args'][0]
gr = GalaxyRole(self.galaxy, name)
if gr.metadata:
display.display('# %s' % os.path.dirname(gr.path))
_display_role(gr)
else:
display.display("- the role %s was not found" % name)
else:
# show all valid roles in the roles_path directory
roles_path = context.CLIARGS['roles_path']
path_found = False
warnings = []
for path in roles_path:
role_path = os.path.expanduser(path)
if not os.path.exists(role_path):
warnings.append("- the configured path %s does not exist." % role_path)
continue
elif not os.path.isdir(role_path):
warnings.append("- the configured path %s, exists, but it is not a directory." % role_path)
continue
display.display('# %s' % role_path)
path_files = os.listdir(role_path)
path_found = True
for path_file in path_files:
gr = GalaxyRole(self.galaxy, path_file, path=path)
if gr.metadata:
_display_role(gr)
for w in warnings:
display.warning(w)
if not path_found:
raise AnsibleOptionsError("- None of the provided paths was usable. Please specify a valid path with --roles-path")
return 0
def execute_search(self):
''' searches for roles on the Ansible Galaxy server'''
page_size = 1000
search = None
if context.CLIARGS['args']:
search = '+'.join(context.CLIARGS['args'])
if not search and not context.CLIARGS['platforms'] and not context.CLIARGS['galaxy_tags'] and not context.CLIARGS['author']:
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
response = self.api.search_roles(search, platforms=context.CLIARGS['platforms'],
tags=context.CLIARGS['galaxy_tags'], author=context.CLIARGS['author'], page_size=page_size)
if response['count'] == 0:
display.display("No roles match your search.", color=C.COLOR_ERROR)
return True
data = [u'']
if response['count'] > page_size:
data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
else:
data.append(u"Found %d roles matching your search:" % response['count'])
max_len = []
for role in response['results']:
max_len.append(len(role['username'] + '.' + role['name']))
name_len = max(max_len)
format_str = u" %%-%ds %%s" % name_len
data.append(u'')
data.append(format_str % (u"Name", u"Description"))
data.append(format_str % (u"----", u"-----------"))
for role in response['results']:
data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
data = u'\n'.join(data)
self.pager(data)
return True
def execute_login(self):
"""
verify user's identity via GitHub and retrieve an auth token from Ansible Galaxy.
"""
# Authenticate with github and retrieve a token
if context.CLIARGS['token'] is None:
if C.GALAXY_TOKEN:
github_token = C.GALAXY_TOKEN
else:
login = GalaxyLogin(self.galaxy)
github_token = login.create_github_token()
else:
github_token = context.CLIARGS['token']
galaxy_response = self.api.authenticate(github_token)
if context.CLIARGS['token'] is None and C.GALAXY_TOKEN is None:
# Remove the token we created
login.remove_github_token()
# Store the Galaxy token
token = GalaxyToken()
token.set(galaxy_response['token'])
display.display("Successfully logged into Galaxy as %s" % galaxy_response['username'])
return 0
def execute_import(self):
""" used to import a role into Ansible Galaxy """
colors = {
'INFO': 'normal',
'WARNING': C.COLOR_WARN,
'ERROR': C.COLOR_ERROR,
'SUCCESS': C.COLOR_OK,
'FAILED': C.COLOR_ERROR,
}
if len(context.CLIARGS['args']) < 2:
raise AnsibleError("Expected a github_username and github_repository. Use --help.")
github_user = to_text(context.CLIARGS['args'][0], errors='surrogate_or_strict')
github_repo = to_text(context.CLIARGS['args'][1], errors='surrogate_or_strict')
if context.CLIARGS['check_status']:
task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
else:
# Submit an import request
task = self.api.create_import_task(github_user, github_repo,
reference=context.CLIARGS['reference'],
role_name=context.CLIARGS['role_name'])
if len(task) > 1:
# found multiple roles associated with github_user/github_repo
display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
color='yellow')
display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
for t in task:
display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
color=C.COLOR_CHANGED)
return 0
# found a single role as expected
display.display("Successfully submitted import request %d" % task[0]['id'])
if not context.CLIARGS['wait']:
display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))
if context.CLIARGS['check_status'] or context.CLIARGS['wait']:
# Get the status of the import
msg_list = []
finished = False
while not finished:
task = self.api.get_import_task(task_id=task[0]['id'])
for msg in task[0]['summary_fields']['task_messages']:
if msg['id'] not in msg_list:
display.display(msg['message_text'], color=colors[msg['message_type']])
msg_list.append(msg['id'])
if task[0]['state'] in ['SUCCESS', 'FAILED']:
finished = True
else:
time.sleep(10)
return 0
def execute_setup(self):
""" Setup an integration from Github or Travis for Ansible Galaxy roles"""
if context.CLIARGS['setup_list']:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
display.display("---------- ---------- ----------", color=C.COLOR_OK)
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']), color=C.COLOR_OK)
return 0
if context.CLIARGS['remove_id']:
# Remove a secret
self.api.remove_secret(context.CLIARGS['remove_id'])
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
return 0
if len(context.CLIARGS['args']) < 4:
raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret")
source = context.CLIARGS['args'][0]
github_user = context.CLIARGS['args'][1]
github_repo = context.CLIARGS['args'][2]
secret = context.CLIARGS['args'][3]
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
def execute_delete(self):
""" Delete a role from Ansible Galaxy. """
if len(context.CLIARGS['args']) < 2:
raise AnsibleError("Missing one or more arguments. Expected: github_user github_repo")
github_user = context.CLIARGS['args'][0]
github_repo = context.CLIARGS['args'][1]
resp = self.api.delete_role(github_user, github_repo)
if len(resp['deleted_roles']) > 1:
display.display("Deleted the following roles:")
display.display("ID User Name")
display.display("------ --------------- ----------")
for role in resp['deleted_roles']:
display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))
display.display(resp['status'])
return True
|
dlazz/ansible
|
lib/ansible/cli/galaxy.py
|
Python
|
gpl-3.0
| 32,953
|
[
"Galaxy"
] |
eb750a8ae5fc9b533ca8da8a0adf9ae8eadb978f54a5b1daebf0c53b1f4e3ccd
|
# Developer: SpatialDev
# Company: Spatial Development International
# For more information, please visit the link below:
# Website: https://lpdaac.usgs.gov/products/modis_products_table/land_cover/yearly_l3_global_500_m/mcd12q1
# --------------- Imports -------------------------------------
# standard library
import sys
import os
# Add the ETLBaseModule directory location to the Python system path in order to import the shared base modules
sys.path.append("SERVIR PATH TO ETL ON DISK\\ETL\\ETLScripts\\ETLBaseModules\\")
# third-party
import arcpy
# ETL framework
from etl_controller import ETLController
from land_etl_delegate import LandETLDelegate
from arcpy_land_etl_core import LandLoader, LandTransformer, LandExtractor, LandMetaDataTransformer, LandExtractValidator
# ETL utils
from etl_utils import ETLDebugLogger, ETLExceptionManager, ExceptionManager
from arcpy_utils import RasterCatalog, FileGeoDatabase
# custom modules
from arcpy_land_raster_dataset import LandCoverRasterDataset
# --------------- ETL ---------------------------------------------------------------------------------------------------
def createRasterCatalog(land_cover_year_to_process, land_cover_output_basepath):
# configure raster catalog object -------------------------------------
raster_catalog = RasterCatalog(land_cover_output_basepath, str('LandCover' + land_cover_year_to_process), {
'datetime_field':'datetime',
'datetime_field_format':'%m-%d-%Y %I:%M:%S %p',
'datetime_sql_cast':"date"
})
# custom fields -------------------------------------
arcpy.AddField_management(raster_catalog.fullpath, raster_catalog.options['datetime_field'], 'DATE', '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, 'datetime_string', 'TEXT', '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, 'land_cover_type', 'TEXT', '', '', 25)
# Land Cover specific meta-data fields -------------------------------------
arcpy.AddField_management(raster_catalog.fullpath, 'DistributedFileName', 'TEXT', '', '', 100)
arcpy.AddField_management(raster_catalog.fullpath, 'DTDVersion', 'DOUBLE', '', '', 10)
arcpy.AddField_management(raster_catalog.fullpath, 'DataCenterId', 'TEXT', '', '', 50)
arcpy.AddField_management(raster_catalog.fullpath, 'GranuleUR', 'TEXT', '', '', 50)
arcpy.AddField_management(raster_catalog.fullpath, 'DbID', 'TEXT', '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, 'InsertTime', 'TEXT', '', '', 50)
arcpy.AddField_management(raster_catalog.fullpath, 'LastUpdate', 'TEXT', '', '', 50)
arcpy.AddField_management(raster_catalog.fullpath, 'ShortName', 'TEXT', '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, 'VersionID', 'DOUBLE', '', '', 5)
arcpy.AddField_management(raster_catalog.fullpath, 'FileSize', 'DOUBLE', '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, 'ChecksumType', 'TEXT', '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, 'Checksum', 'DOUBLE', '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, 'ChecksumOrigin', 'TEXT', '', '', 50)
arcpy.AddField_management(raster_catalog.fullpath, 'SizeMBECSDataGranule', 'DOUBLE', '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, 'ReprocessingPlanned', 'TEXT', '', '', 250)
arcpy.AddField_management(raster_catalog.fullpath, 'ReprocessingActual', 'TEXT', '', '', 250)
arcpy.AddField_management(raster_catalog.fullpath, 'LocalGranuleID', 'TEXT', '', '', 100)
arcpy.AddField_management(raster_catalog.fullpath, 'DayNightFlag', 'TEXT', '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, 'ProductionDateTime', 'TEXT', '', '', 50)
arcpy.AddField_management(raster_catalog.fullpath, 'LocalVersionID', "TEXT", '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, 'PGEVersion', 'TEXT', '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, 'RangeEndingTime', 'TEXT', '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, 'RangeEndingDate', 'DATE', '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, 'RangeBeginningTime', 'TEXT', '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, "RangeBeginningDate", 'DATE', '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, 'l1', 'TEXT', '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, 'l2', "TEXT", '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, 'l3', 'TEXT', '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, 'l4', "TEXT", '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, "ParameterName", 'TEXT', '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, "QAPercentMissingData", 'DOUBLE', '', '', 10)
arcpy.AddField_management(raster_catalog.fullpath, "QAPercentOutofBoundsData", 'DOUBLE', '', '', 10)
arcpy.AddField_management(raster_catalog.fullpath, "QAPercentInterpolatedData", 'DOUBLE', '', '', 10)
arcpy.AddField_management(raster_catalog.fullpath, "AutomaticQualityFlag", 'TEXT', '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, "AutomaticQualityFlagExplanation", 'TEXT', '', '', 350)
arcpy.AddField_management(raster_catalog.fullpath, "OperationalQualityFlag", 'TEXT', '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, "OperationalQualityFlagExplanation", 'TEXT', '', '', 350)
arcpy.AddField_management(raster_catalog.fullpath, "ScienceQualityFlag", 'TEXT', '', '', 25)
arcpy.AddField_management(raster_catalog.fullpath, "ScienceQualityFlagExplanation", 'TEXT', '', '', 350)
arcpy.AddField_management(raster_catalog.fullpath, "PlatformShortName", 'TEXT', '', '', 50)
arcpy.AddField_management(raster_catalog.fullpath, "InstrumentShortName", 'TEXT', '', '', 50)
arcpy.AddField_management(raster_catalog.fullpath, "SensorShortName", 'TEXT', '', '', 50)
arcpy.AddField_management(raster_catalog.fullpath, "PSAName", 'TEXT', '', '', 250)
arcpy.AddField_management(raster_catalog.fullpath, "PSAValue", 'TEXT', '', '', 250)
arcpy.AddField_management(raster_catalog.fullpath, "InputPointer", 'TEXT', '', '', 500)
return raster_catalog
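# A more compact, table-driven alternative to the AddField_management calls
# above (a sketch with the same behavior; field list abbreviated):
#
#   META_FIELDS = [('DistributedFileName', 'TEXT', 100),
#                  ('DTDVersion', 'DOUBLE', 10),
#                  # ... remaining (name, type, length) triples ...
#                  ('InputPointer', 'TEXT', 500)]
#   for name, field_type, length in META_FIELDS:
#       arcpy.AddField_management(raster_catalog.fullpath, name,
#                                 field_type, '', '', length)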
def executeETL(raster_catalog, land_cover_year_to_process):
# initialize utility objects -------------------------------------
debug_log_output_directory = os.path.join(sys.path[0], "Land_logs")
etl_debug_logger = ETLDebugLogger(debug_log_output_directory, "Land", {
"debug_log_archive_days":7
})
update_debug_log = etl_debug_logger.updateDebugLog # retrieve a reference to the debug logger function
etl_exception_manager = ETLExceptionManager(sys.path[0], "Land_exception_reports", {
"create_immediate_exception_reports":True
})
# initialize core ETL objects -------------------------------------
arcpy_land_extract_validator = LandExtractValidator({
"raster_catalog":raster_catalog,
"ftp_file_name_field":"DistributedFileName",
"debug_logger":update_debug_log
})
arcpy_land_extractor = LandExtractor({
"target_file_extn":'hdf',
"ftp_options": {
# NOTE: the source of the collected HDFs may need to be updated
"ftp_host":'e4ftl01.cr.usgs.gov',
"ftp_user":'anonymous',
"ftp_pswrd":'anonymous'
},
"debug_logger":update_debug_log
})
scratch_fgdb_fullpath = FileGeoDatabase(sys.path[0], 'scratch.gdb').fullpath
arcpy_land_transformer = LandTransformer({
"output_file_geodatabase":scratch_fgdb_fullpath,
"debug_logger":update_debug_log
})
land_meta_data_transformer = LandMetaDataTransformer(
{"debug_logger":update_debug_log},
decoratee=arcpy_land_transformer
)
arcpy_land_loader = LandLoader({
"raster_catalog":raster_catalog,
"CopyRaster_management_config":{
'config_keyword':'',
'background_value':'',
'nodata_value':'',
'onebit_to_eightbit':'',
'colormap_to_RGB':'',
'pixel_type':''
},
"debug_logger":update_debug_log
})
etl_controller = ETLController(sys.path[0], "LandCover_ETL", {
"remove_etl_workspace_on_finish":False
})
land_etl_delegate = LandETLDelegate({
"ftp_dirs":['/MOTA/MCD12Q1.005/'+land_cover_year_to_process+'.01.01/'],
"ftp_file_meta_extn":'xml',
"all_or_none_for_success":True,
"debug_logger":update_debug_log,
'exception_handler':etl_exception_manager.handleException
})
# set ETLDelegate object properties-------------------------------------
land_etl_delegate.setExtractValidator(arcpy_land_extract_validator)
land_etl_delegate.setExtractor(arcpy_land_extractor)
land_etl_delegate.setTransformer(land_meta_data_transformer)
land_etl_delegate.setLoader(arcpy_land_loader)
land_etl_delegate.setETLController(etl_controller)
# execute the ETL operation -------------------------------------
successful_new_run = land_etl_delegate.startETLProcess()
# perform post-ETL operations -------------------------------------
etl_exception_manager.finalizeExceptionXMLLog()
etl_debug_logger.deleteOutdatedDebugLogs()
return successful_new_run
def createLandCoverDataset(land_cover_year_to_process, land_cover_basepath, raster_catalog_fullpath):
# create debug logger instance -------------------------------------
debug_log_output_directory = os.path.join(sys.path[0], "Land_Raster_debug_logs")
land_raster_debug_logger = ETLDebugLogger(debug_log_output_directory, "LandRaster", {
"debug_log_archive_days":7
})
# land cover raster data set config -------------------------------------
output_raster_dataset = str("LandCover_Type1_" + land_cover_year_to_process)
output_dataset_fullpath = os.path.join(land_cover_basepath, output_raster_dataset)
land_raster_dataset_options = {
"land_cover_type":"LC1",
"land_cover_type_field":"land_cover_type",
"RasterCatalogToRasterDataset_management_config": {
"mosaic_type":'',
"colormap":"",
"order_by_field":'',
"ascending":'',
"Pixel_type":'',
"ColorBalancing":'',
"matchingMethod":'',
"ReferenceRaster":'',
"OID":''
},
# land cover classification config -------------------------------------
"land_cover_description_field":"igbp_class",
"land_type_value_field":"Value",
"land_type_description_dict": {
0:"Water",
1:"Evergreen Needleleaf forest",
2:"Evergreen Broadleaf forest",
3:"Deciduous Needleleaf forest",
4:"Deciduous Broadleaf forest",
5:"Mixed forest",
6:"Closed shrublands",
7:"Open shrublands",
8:"Woody savannas",
9:"Savannas",
10:"Grasslands",
11:"Permanent wetlands",
12:"Croplands",
13:"Urban and built-up",
14:"Cropland/Natural vegetation mosaic",
15:"Snow and ice",
16:"Barren or sparsely vegetated"
},
# re-project raster dataset config -------------------------------------
"gp_env_extent":arcpy.Extent(-20037507.2295943, -19971868.8804086, 20037507.2295943, 19971868.8804086),
"out_coor_system" :'PROJCS["WGS_1984_Web_Mercator_Auxiliary_Sphere",GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137.0,298.257223563]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Mercator_Auxiliary_Sphere"],PARAMETER["False_Easting",0.0],PARAMETER["False_Northing",0.0],PARAMETER["Central_Meridian",0.0],PARAMETER["Standard_Parallel_1",0.0],PARAMETER["Auxiliary_Sphere_Type",0.0],UNIT["Meter",1.0],AUTHORITY["EPSG",3857]]',
"reprojected_raster_dataset":os.path.join(land_cover_basepath, output_raster_dataset+"_WM"),
"debug_logger":land_raster_debug_logger.updateDebugLog
}
# create a LandCoverRasterDataset instance, set debug logger and start the creation process -------------------------------------
land_raster_dataset = LandCoverRasterDataset(output_dataset_fullpath, raster_catalog_fullpath, land_raster_dataset_options)
land_raster_dataset.createLandRasterDataset()
# --------------- ETL MAIN ---------------------------------------------------------------------------------------------------
def main():
# select the land cover year to process; this will be reflected throughout the entire module's config
land_cover_year_to_process = "200X"
# output location for the raster catalogs and each land cover raster dataset, creates the FileGeoDatabase if it does not exist
land_cover_fgdb = FileGeoDatabase(sys.path[0], "LandCover.gdb")
# get reference to raster catalog
raster_catalog = createRasterCatalog(land_cover_year_to_process, land_cover_fgdb.fullpath)
# execute the main ETL operation
successful_new_run = executeETL(raster_catalog, land_cover_year_to_process)
if successful_new_run:
# if successful_new_run, then create the land cover type raster dataset
createLandCoverDataset(land_cover_year_to_process, land_cover_fgdb.fullpath, raster_catalog.fullpath)
# method called upon module execution to start the ETL process
main()
|
SERVIR/ReferenceNode_ETL
|
sources/land/arcpy_land_etl_main.py
|
Python
|
apache-2.0
| 14,497
|
[
"VisIt"
] |
591373d5c057cc8693e54ed7295a268845ad0f21be772d8b67948ed7669a6238
|
"""
====================================================
Imputing missing values before building an estimator
====================================================
Missing values can be replaced by the mean, the median or the most frequent
value using the basic :class:`~sklearn.impute.SimpleImputer`.
In this example we will investigate different imputation techniques:
- imputation by the constant value 0
- imputation by the mean value of each feature combined with a missing-ness
indicator auxiliary variable
- k nearest neighbor imputation
- iterative imputation
We will use two datasets: the Diabetes dataset, which consists of 10 feature
variables collected from diabetes patients with the aim of predicting disease
progression, and the California Housing dataset, for which the target is the
median house value for California districts.
Neither of these datasets has missing values, so we will remove some
values to create new versions with artificially missing data. The performance
of :class:`~sklearn.ensemble.RandomForestRegressor` on the full original
dataset is then compared with its performance on the altered datasets with
the artificially missing values imputed using different techniques.
"""
print(__doc__)
# Authors: Maria Telenczuk <https://github.com/maikia>
# License: BSD 3 clause
# %%
# Download the data and make missing values sets
################################################
#
# First we download the two datasets. The Diabetes dataset is shipped with
# scikit-learn. It has 442 entries, each with 10 features. The California
# Housing dataset is much larger, with 20640 entries and 8 features, and needs
# to be downloaded. We will only use the first 400 entries for the sake of
# speeding up the calculations, but feel free to use the whole dataset.
#
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.datasets import load_diabetes
rng = np.random.RandomState(42)
X_diabetes, y_diabetes = load_diabetes(return_X_y=True)
X_california, y_california = fetch_california_housing(return_X_y=True)
X_california = X_california[:400]
y_california = y_california[:400]
def add_missing_values(X_full, y_full):
n_samples, n_features = X_full.shape
# Add missing values to 75% of the rows
missing_rate = 0.75
n_missing_samples = int(n_samples * missing_rate)
missing_samples = np.zeros(n_samples, dtype=bool)
missing_samples[: n_missing_samples] = True
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
X_missing = X_full.copy()
X_missing[missing_samples, missing_features] = np.nan
y_missing = y_full.copy()
return X_missing, y_missing
X_miss_california, y_miss_california = add_missing_values(
X_california, y_california)
X_miss_diabetes, y_miss_diabetes = add_missing_values(
X_diabetes, y_diabetes)
# %%
# Impute the missing data and score
# #################################
# Now we will write a function which will score the results on the differently
# imputed data. Let's look at each imputer separately:
#
rng = np.random.RandomState(0)
from sklearn.ensemble import RandomForestRegressor
# To use the experimental IterativeImputer, we need to explicitly ask for it:
from sklearn.experimental import enable_iterative_imputer # noqa
from sklearn.impute import SimpleImputer, KNNImputer, IterativeImputer
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
N_SPLITS = 5
regressor = RandomForestRegressor(random_state=0)
# %%
# Missing information
# -------------------
# In addition to imputing the missing values, the imputers have an
# `add_indicator` parameter that marks the values that were missing, which
# might carry some information.
#
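# A quick standalone illustration of ``add_indicator`` (the pipelines below
# set it on each imputer): the transformed output gains one binary column
# per feature that contained missing values.
demo_imputer = SimpleImputer(strategy='mean', add_indicator=True)
print(demo_imputer.fit_transform([[1.0, np.nan], [3.0, 4.0]]))
# -> [[1. 4. 1.]
#     [3. 4. 0.]]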
def get_scores_for_imputer(imputer, X_missing, y_missing):
estimator = make_pipeline(imputer, regressor)
impute_scores = cross_val_score(estimator, X_missing, y_missing,
scoring='neg_mean_squared_error',
cv=N_SPLITS)
return impute_scores
# NB: label order must match the score indices filled below
# (zero -> 1, kNN -> 2, mean -> 3, iterative -> 4)
x_labels = ['Full data',
'Zero imputation',
'KNN Imputation',
'Mean Imputation',
'Iterative Imputation']
mses_california = np.zeros(5)
stds_california = np.zeros(5)
mses_diabetes = np.zeros(5)
stds_diabetes = np.zeros(5)
# %%
# Estimate the score
# ------------------
# First, we want to estimate the score on the original data:
#
def get_full_score(X_full, y_full):
full_scores = cross_val_score(regressor, X_full, y_full,
scoring='neg_mean_squared_error',
cv=N_SPLITS)
return full_scores.mean(), full_scores.std()
mses_california[0], stds_california[0] = get_full_score(X_california,
y_california)
mses_diabetes[0], stds_diabetes[0] = get_full_score(X_diabetes, y_diabetes)
# %%
# Replace missing values by 0
# ---------------------------
#
# Now we will estimate the score on the data where the missing values are
# replaced by 0:
#
def get_impute_zero_score(X_missing, y_missing):
imputer = SimpleImputer(missing_values=np.nan, add_indicator=True,
strategy='constant', fill_value=0)
zero_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing)
return zero_impute_scores.mean(), zero_impute_scores.std()
mses_california[1], stds_california[1] = get_impute_zero_score(
X_miss_california, y_miss_california)
mses_diabetes[1], stds_diabetes[1] = get_impute_zero_score(X_miss_diabetes,
y_miss_diabetes)
# %%
# kNN-imputation of the missing values
# ------------------------------------
#
# :class:`~sklearn.impute.KNNImputer` imputes missing values using the weighted
# or unweighted mean of the desired number of nearest neighbors.
def get_impute_knn_score(X_missing, y_missing):
imputer = KNNImputer(missing_values=np.nan, add_indicator=True)
knn_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing)
return knn_impute_scores.mean(), knn_impute_scores.std()
mses_california[2], stds_california[2] = get_impute_knn_score(
X_miss_california, y_miss_california)
mses_diabetes[2], stds_diabetes[2] = get_impute_knn_score(X_miss_diabetes,
y_miss_diabetes)
# %%
# Impute missing values with mean
# -------------------------------
#
def get_impute_mean(X_missing, y_missing):
imputer = SimpleImputer(missing_values=np.nan, strategy="mean",
add_indicator=True)
mean_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing)
return mean_impute_scores.mean(), mean_impute_scores.std()
mses_california[3], stds_california[3] = get_impute_mean(X_miss_california,
y_miss_california)
mses_diabetes[3], stds_diabetes[3] = get_impute_mean(X_miss_diabetes,
y_miss_diabetes)
# %%
# Iterative imputation of the missing values
# ------------------------------------------
#
# Another option is the :class:`~sklearn.impute.IterativeImputer`. This uses
# round-robin linear regression, modeling each feature with missing values as a
# function of other features, in turn.
# The version implemented assumes Gaussian (output) variables. If your features
# are obviously non-normal, consider transforming them to look more normal
# to potentially improve performance.
#
def get_impute_iterative(X_missing, y_missing):
imputer = IterativeImputer(missing_values=np.nan, add_indicator=True,
random_state=0, n_nearest_features=5,
sample_posterior=True)
iterative_impute_scores = get_scores_for_imputer(imputer,
X_missing,
y_missing)
return iterative_impute_scores.mean(), iterative_impute_scores.std()
mses_california[4], stds_california[4] = get_impute_iterative(
X_miss_california, y_miss_california)
mses_diabetes[4], stds_diabetes[4] = get_impute_iterative(X_miss_diabetes,
y_miss_diabetes)
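# %%
# A minimal sketch (an assumption, not run above): if the features were
# clearly non-normal, they could be mapped toward a Gaussian before the
# iterative imputation, e.g. with
# :class:`~sklearn.preprocessing.QuantileTransformer`:
#
# from sklearn.preprocessing import QuantileTransformer
# imputer = make_pipeline(
#     QuantileTransformer(output_distribution='normal', random_state=0),
#     IterativeImputer(missing_values=np.nan, add_indicator=True,
#                      random_state=0, n_nearest_features=5,
#                      sample_posterior=True))
#
# The scores returned by ``cross_val_score`` are negative MSE, so flip the
# sign before plotting: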
mses_diabetes = mses_diabetes * -1
mses_california = mses_california * -1
# %%
# Plot the results
# ################
#
# Finally we are going to visualize the score:
#
import matplotlib.pyplot as plt
n_bars = len(mses_diabetes)
xval = np.arange(n_bars)
colors = ['r', 'g', 'b', 'orange', 'black']
# plot diabetes results
plt.figure(figsize=(12, 6))
ax1 = plt.subplot(121)
for j in xval:
ax1.barh(j, mses_diabetes[j], xerr=stds_diabetes[j],
color=colors[j], alpha=0.6, align='center')
ax1.set_title('Imputation Techniques with Diabetes Data')
ax1.set_xlim(left=np.min(mses_diabetes) * 0.9,
right=np.max(mses_diabetes) * 1.1)
ax1.set_yticks(xval)
ax1.set_xlabel('MSE')
ax1.invert_yaxis()
ax1.set_yticklabels(x_labels)
# plot california dataset results
ax2 = plt.subplot(122)
for j in xval:
ax2.barh(j, mses_california[j], xerr=stds_california[j],
color=colors[j], alpha=0.6, align='center')
ax2.set_title('Imputation Techniques with California Data')
ax2.set_yticks(xval)
ax2.set_xlabel('MSE')
ax2.invert_yaxis()
ax2.set_yticklabels([''] * n_bars)
plt.show()
# You can also try different techniques. For instance, the median is a more
# robust estimator for data with high magnitude variables which could dominate
# results (otherwise known as a 'long tail').
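# %%
# A minimal sketch of scoring median imputation with the same helper used
# above (not included in the plots):
def get_impute_median(X_missing, y_missing):
    imputer = SimpleImputer(missing_values=np.nan, strategy="median",
                            add_indicator=True)
    median_impute_scores = get_scores_for_imputer(imputer, X_missing,
                                                  y_missing)
    return median_impute_scores.mean(), median_impute_scores.std()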
|
xuewei4d/scikit-learn
|
examples/impute/plot_missing_values.py
|
Python
|
bsd-3-clause
| 9,687
|
[
"Gaussian"
] |
d9342b62ecfea850acf9232e9cc3cefdb9a518dc9e3e629c23bda6137ee45700
|
import os, sys, datetime, string
import numpy as np
from netCDF4 import Dataset
import numpy.ma as ma
import matplotlib.pyplot as plt
from pylab import *
import mpl_util
import time
__author__ = 'Trond Kristiansen'
__email__ = 'trond.kristiansen@imr.no'
__created__ = datetime.datetime(2012, 12, 3)
__modified__ = datetime.datetime(2014, 11, 10)
__version__ = "1.0"
__status__ = "Development, 03.12.2012, 10.11.2014"
def getCORTADtime():
base="http://data.nodc.noaa.gov/thredds/dodsC/cortad/Version5/"
file1="cortadv5_FilledSST.nc"
filename1=base+file1
cdf1=Dataset(filename1)
print "Time: Extrating timedata from openDAP: %s"%(filename1)
time=np.squeeze(cdf1.variables["time"][:])
cdf1.close()
return time
def openCoRTAD(maxTries,delay):
""" Info on the different tiles used to identify a region is found here:
http://www.nodc.noaa.gov/SatelliteData/Cortad/TileMap.jpg"""
base="http://data.nodc.noaa.gov/thredds/dodsC/cortad/Version5/"
#base="http://data.nodc.noaa.gov/cortad/Version5/"
file1="cortadv5_FilledSST.nc"
hostname="data.nodc.noaa.gov"
# response = os.system("ping -c 1 " + hostname)
#while maxTries >0:
# maxTries -= 1
# if maxTries<0:
# print "Check for network connection with openDAP server. Tries %s times without successful connection!"
# sys.exit()
# if response == 0:
# print "NOAA is up: %s"%(response)
filename1=base+file1
cdf1=Dataset(filename1)
return cdf1
# else:
# time.sleep (delay)
# continue
def extractCoRTADLongLat(maxTries,delay,minLon,maxLon,minLat,maxLat):
"""Routine that extracts the longitude and latitudes for the
combination of tiles. This is only necessary to do once so it is separated
from the extraction of SST."""
cdf1 = openCoRTAD(maxTries,delay)
longitude=np.squeeze(cdf1.variables["lon"][:])
latitude=np.squeeze(cdf1.variables["lat"][:])
cdf1.close();
""" We have to flip this array so that we have increasing latitude
values required by np.interp function. This means we also have to
flip the input SST array"""
# latitude=np.flipud(latitude)
lons,lats=np.meshgrid(longitude,latitude)
print "Full data range:"
print "Long min: %s Long max: %s"%(longitude.min(),longitude.max())
print "Lat min: %s Lat max: %s"%(latitude.min(),latitude.max())
print "Find indexes for SST that define the grid domain"
res = findSubsetIndices(minLat,maxLat,minLon,maxLon,latitude,longitude)
# Note that at this point the 2D arrays are flipped in order so [lat,lon]
print "Wanted: %3.3f %3.3f"%(minLon,minLat)
print 'Index corresponds to longitude [%3.3f] and latitude [%3.3f]'%(longitude[res[0]],latitude[res[2]])
print "2D test: %3.3f %3.3f\n"%(lons[res[2],res[0]],lats[res[2],res[0]])
print "Wanted: %3.3f %3.3f"%(minLon,maxLat)
print 'Index corresponds to longitude [%3.3f] and latitude [%3.3f]'%(longitude[res[0]],latitude[res[3]])
print "2D test: %3.3f %3.3f\n"%(lons[res[3],res[0]],lats[res[3],res[0]])
print "Wanted: %3.3f %3.3f"%(maxLon,minLat)
print 'Index corresponds to longitude [%3.3f] and latitude [%3.3f]'%(longitude[res[1]],latitude[res[2]])
print "2D test: %3.3f %3.3f\n"%(lons[res[2],res[1]],lats[res[2],res[1]])
print "Wanted: %3.3f %3.3f"%(maxLon,maxLat)
print 'Index corresponds to longitude [%3.3f] and latitude [%3.3f]'%(longitude[res[1]],latitude[res[3]])
print "2D test: %3.3f %3.3f\n"%(lons[res[3],res[1]],lats[res[3],res[1]])
print "Extracted longitude-latitude for CoRTAD region"
print "------------------------------\n"
return longitude, latitude, lons, lats, res
def findSubsetIndices(min_lat,max_lat,min_lon,max_lon,lats,lons):
"""Array to store the results returned from the function"""
	res=np.zeros((4),dtype=np.int64)
minLon=min_lon; maxLon=max_lon
distances1 = []; distances2 = []
indices=[]; index=1
for point in lats:
s1 = max_lat-point # (vector subtract)
s2 = min_lat-point # (vector subtract)
distances1.append((np.dot(s1, s1), point, index))
distances2.append((np.dot(s2, s2), point, index-1))
index=index+1
distances1.sort()
distances2.sort()
indices.append(distances1[0])
indices.append(distances2[0])
distances1 = []; distances2 = []; index=1
for point in lons:
s1 = maxLon-point # (vector subtract)
s2 = minLon-point # (vector subtract)
distances1.append((np.dot(s1, s1), point, index))
distances2.append((np.dot(s2, s2), point, index-1))
index=index+1
distances1.sort()
distances2.sort()
indices.append(distances1[0])
indices.append(distances2[0])
""" Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices"""
minJ=int(indices[1][2])
maxJ=int(indices[0][2])
minI=int(indices[3][2])
maxI=int(indices[2][2])
res[0]=minI; res[1]=maxI; res[2]=minJ; res[3]=maxJ;
return res
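def demoFindSubsetIndices():
	"""Minimal usage sketch (hypothetical, regularly spaced coordinates):
	locate the index box bounding a region of interest."""
	exampleLats=np.arange(-89.875,90.0,0.25)
	exampleLons=np.arange(-179.875,180.0,0.25)
	res=findSubsetIndices(50.0,62.0,-4.0,10.0,exampleLats,exampleLons)
	print "Lon index range: %s-%s Lat index range: %s-%s"%(int(res[0]),int(res[1]),int(res[2]),int(res[3]))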
def extractCORTADSST(name,t,cdf,indexes):
# Routine that extracts the SST values for the specific tiles and time-period (t)
filledSST=cdf.variables["FilledSST"][t,indexes[3]:indexes[2],indexes[0]:indexes[1]]
offset=cdf.variables["FilledSST"].__getattribute__('add_offset')
# Kelvin to Celsius
filledSST = filledSST-offset
#filledMaskedSST=np.where(filledSST > 270, filledSST - offset, filledSST)
""" Scale and offset is autmoatically detected and edited by netcdf, but
we need to mask the values that are not filled."""
#filledMaskedSST_final=ma.masked_less(filledMaskedSST,-2.)
#filledMaskedSST_final=ma.masked_greater(filledMaskedSST,25.)
print "Min and max of SST: %s - %s"%(filledSST.min(),filledSST.max())
print "------------------------------\n"
return filledSST
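def main():
	"""Hedged sketch of a driver: the original main() is not shipped with
	this file, so this minimal version (region and time index are
	hypothetical) simply wires together the routines above."""
	maxTries=3; delay=30
	longitude,latitude,lons,lats,res = extractCoRTADLongLat(maxTries,delay,-4.0,10.0,50.0,62.0)
	cdf = openCoRTAD(maxTries,delay)
	indexes=[int(i) for i in res] # defensive: slice indices must be integers
	sst = extractCORTADSST("NorthSea",0,cdf,indexes)
	cdf.close()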
if __name__ == "__main__":
main()
|
trondkr/romstools
|
CreateObsFileIS4DVAR/getCortad.py
|
Python
|
mit
| 6,008
|
[
"NetCDF"
] |
446c7ff569452e9ad0c4a8505751815b0f018b772e05c7fe5903dcf3e0e96813
|
# -*- coding: utf-8 -*-
# Generic Base Interface for Infrared based interfaces. Handles loading and conversion of IR Codes
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import List
from typing import Union
from ORCA.interfaces.BaseInterface import cBaseInterFace
from ORCA.interfaces.BaseInterfaceSettings import cBaseInterFaceSettings
from ORCA.Action import cAction
from ORCA.utils.FileName import cFileName
'''
<root>
<repositorymanager>
<entry>
<name>Generic Infrared Interface</name>
<description language='English'>Base Interface Class for Infrared based Interfaces</description>
<description language='German'>Basis Schnittstelle für Infrarot Schnittstellen</description>
<author>Carsten Thielepape</author>
<version>5.0.4</version>
<minorcaversion>5.0.4</minorcaversion>
<sources>
<source>
<local>$var(APPLICATIONPATH)/interfaces/generic_infrared</local>
<sourcefile>$var(REPOSITORYWWWPATH)/interfaces/generic_infrared.zip</sourcefile>
<targetpath>interfaces</targetpath>
</source>
</sources>
<skipfiles>
</skipfiles>
</entry>
</repositorymanager>
</root>
'''
class cInterface(cBaseInterFace):
class cInterFaceSettings(cBaseInterFaceSettings):
def __init__(self,oInterFace:cBaseInterFace):
super().__init__(oInterFace)
def ReadAction(self,oAction:cAction) -> None:
super().ReadAction(oAction)
oAction.uCCF_Code = oAction.dActionPars.get(u'cmd_ccf',u'')
            oAction.uRepeatCount = oAction.dActionPars.get(u'repeatcount',u'1')
def GetConfigCodesetList(self) -> List[str]:
aRet:List[str]=super().GetConfigCodesetList()
# adjust the codeset path to load infrared generic formats
uTmpName:str=self.uObjectName
self.uObjectName="infrared_ccf"
aRet+=super().GetConfigCodesetList()
self.uObjectName=uTmpName
return aRet
def FindCodesetFile(self, uFNCodeset:str) -> Union[cFileName,None]:
uRet: Union[cFileName, None]
uRet = super().FindCodesetFile(uFNCodeset)
if uRet is None:
uTmpName=self.uObjectName
self.uObjectName="infrared_ccf"
uRet = super().FindCodesetFile(uFNCodeset)
self.uObjectName=uTmpName
return uRet
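# A framework-free sketch of the fallback pattern used above in
# GetConfigCodesetList/FindCodesetFile (illustrative only, not ORCA API):
# the object name is temporarily swapped to "infrared_ccf" so the
# parent-class lookup also searches the generic CCF codesets.
#
# def LookupWithFallback(oObject,fnLookup):
#     uSaved = oObject.uObjectName
#     oResult = fnLookup()
#     if oResult is None:
#         oObject.uObjectName = "infrared_ccf"
#         oResult = fnLookup()
#     oObject.uObjectName = uSaved
#     return oResult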
|
thica/ORCA-Remote
|
src/interfaces/generic_infrared/interface.py
|
Python
|
gpl-3.0
| 3,274
|
[
"ORCA"
] |
9fac01c46b906d21292f43300c3e9dff4f37f76dc6f755faf8d7ac31e84f19fc
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
from pymatgen.analysis.chemenv.utils.scripts_utils import strategies_class_lookup
from os.path import expanduser, exists
from os import makedirs
import json
from six.moves import input
from pymatgen import SETTINGS
"""
This module contains the classes for configuration of the chemenv package.
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
class ChemEnvConfig():
"""
Class used to store the configuration of the chemenv package :
- Materials project access
- ICSD database access
- Default options (strategies, ...)
"""
DEFAULT_PACKAGE_OPTIONS = {'default_strategy': {'strategy': 'SimplestChemenvStrategy',
'strategy_options': {'distance_cutoff': strategies_class_lookup['SimplestChemenvStrategy'].DEFAULT_DISTANCE_CUTOFF,
'angle_cutoff': strategies_class_lookup['SimplestChemenvStrategy'].DEFAULT_ANGLE_CUTOFF,
'additional_condition': strategies_class_lookup['SimplestChemenvStrategy'].DEFAULT_ADDITIONAL_CONDITION,
'continuous_symmetry_measure_cutoff': strategies_class_lookup['SimplestChemenvStrategy'].DEFAULT_CONTINUOUS_SYMMETRY_MEASURE_CUTOFF}},
'default_max_distance_factor': 1.5
}
def __init__(self, package_options=None):
if SETTINGS.get("PMG_MAPI_KEY", "") != "":
self.materials_project_configuration = SETTINGS.get("PMG_MAPI_KEY", "")
else:
self.materials_project_configuration = None
if package_options is None:
self.package_options = self.DEFAULT_PACKAGE_OPTIONS
else:
self.package_options = package_options
def setup(self):
while True:
print('\n=> Configuration of the ChemEnv package <=')
print('Current configuration :')
if self.has_materials_project_access:
print(' - Access to materials project is configured (add test ?)')
else:
print(' - No access to materials project')
print(' - Package options :')
for key, val in self.package_options.items():
print(' {} : {}'.format(str(key), str(val)))
print('\nChoose in the following :')
print(' <1> + <ENTER> : configuration of the package options (strategy, ...)')
print(' <q> + <ENTER> : quit without saving configuration')
test = input(' <S> + <ENTER> : save configuration and quit\n ... ')
if test == '1':
self.setup_package_options()
elif test == 'q':
break
elif test == 'S':
config_file = self.save()
break
else:
print(' ... wrong key, try again ...')
print('')
if test == 'S':
print('Configuration has been saved to file "{}"'.format(config_file))
@property
def has_materials_project_access(self):
return self.materials_project_configuration is not None
def setup_package_options(self):
self.package_options = self.DEFAULT_PACKAGE_OPTIONS
print('Choose between the following strategies : ')
strategies = list(strategies_class_lookup.keys())
for istrategy, strategy in enumerate(strategies):
print(' <{}> : {}'.format(str(istrategy + 1), strategy))
test = input(' ... ')
self.package_options['default_strategy'] = {'strategy': strategies[int(test) - 1], 'strategy_options': {}}
strategy_class = strategies_class_lookup[strategies[int(test) - 1]]
if len(strategy_class.STRATEGY_OPTIONS) > 0:
for option, option_dict in strategy_class.STRATEGY_OPTIONS.items():
while True:
print(' => Enter value for option "{}" '
'(<ENTER> for default = {})\n'.format(option,
str(option_dict['default'])))
print(' Valid options are :\n')
print(' {}'.format(option_dict['type'].allowed_values))
test = input(' Your choice : ')
if test == '':
self.package_options['default_strategy']['strategy_options'][option] = option_dict['type'](strategy_class.STRATEGY_OPTIONS[option]['default'])
break
try:
self.package_options['default_strategy']['strategy_options'][option] = option_dict['type'](test)
break
except ValueError:
print('Wrong input for option {}'.format(option))
def package_options_description(self):
out = 'Package options :\n'
out += ' - Maximum distance factor : {:.4f}\n'.format(self.package_options['default_max_distance_factor'])
out += ' - Default strategy is "{}" :\n'.format(self.package_options['default_strategy']['strategy'])
strategy_class = strategies_class_lookup[self.package_options['default_strategy']['strategy']]
out += '{}\n'.format(strategy_class.STRATEGY_DESCRIPTION)
out += ' with options :\n'
for option, option_dict in strategy_class.STRATEGY_OPTIONS.items():
out += ' - {} : {}\n'.format(option,
self.package_options['default_strategy']['strategy_options'][option])
return out
def save(self, root_dir=None):
if root_dir is None:
home = expanduser("~")
root_dir = '{}/.chemenv'.format(home)
if not exists(root_dir):
makedirs(root_dir)
config_dict = {'package_options': self.package_options}
config_file = '{}/config.json'.format(root_dir)
if exists(config_file):
test = input('Overwrite existing configuration ? (<Y> + <ENTER> to confirm)')
if test != 'Y':
print('Configuration not saved')
return config_file
        with open(config_file, 'w') as f:
            json.dump(config_dict, f)
print('Configuration saved')
return config_file
@classmethod
def auto_load(cls, root_dir=None):
if root_dir is None:
home = expanduser("~")
root_dir = '{}/.chemenv'.format(home)
config_file = '{}/config.json'.format(root_dir)
        try:
            with open(config_file, 'r') as f:
                config_dict = json.load(f)
return ChemEnvConfig(package_options=config_dict['package_options'])
except IOError:
print('Unable to load configuration from file "{}" ...'.format(config_file))
print(' ... loading default configuration')
return ChemEnvConfig()
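if __name__ == '__main__':
    # Minimal usage sketch (an assumption, not part of the module proper):
    # load any saved configuration, falling back to the defaults, and print
    # the resulting package options.
    config = ChemEnvConfig.auto_load()
    print(config.package_options_description())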
|
czhengsci/pymatgen
|
pymatgen/analysis/chemenv/utils/chemenv_config.py
|
Python
|
mit
| 7,410
|
[
"pymatgen"
] |
b9c81140d2eecd51597229589fc9dc4b1532735fd5d1ace7dc943d9096916d92
|
# -*- coding: utf-8 -*-
"""
Strongly connected components.
"""
# Copyright (C) 2004-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
__authors__ = "\n".join(['Eben Kenah',
                         'Aric Hagberg (hagberg@lanl.gov)',
                         'Christopher Ellison',
                         'Ben Edwards (bedwards@cs.unm.edu)'])
__all__ = ['number_strongly_connected_components',
'strongly_connected_components',
'strongly_connected_component_subgraphs',
'is_strongly_connected',
'strongly_connected_components_recursive',
'kosaraju_strongly_connected_components',
'condensation']
def strongly_connected_components(G):
"""Return nodes in strongly connected components of graph.
Parameters
----------
G : NetworkX Graph
       A directed graph.
Returns
-------
comp : list of lists
A list of nodes for each component of G.
The list is ordered from largest connected component to smallest.
See Also
--------
connected_components
Notes
-----
Uses Tarjan's algorithm with Nuutila's modifications.
Nonrecursive version of algorithm.
References
----------
.. [1] Depth-first search and linear graph algorithms, R. Tarjan
SIAM Journal of Computing 1(2):146-160, (1972).
.. [2] On finding the strongly connected components in a directed graph.
E. Nuutila and E. Soisalon-Soinen
       Information Processing Letters 49(1): 9-14, (1994).
"""
preorder={}
lowlink={}
scc_found={}
scc_queue = []
scc_list=[]
i=0 # Preorder counter
for source in G:
if source not in scc_found:
queue=[source]
while queue:
v=queue[-1]
if v not in preorder:
i=i+1
preorder[v]=i
done=1
v_nbrs=G[v]
for w in v_nbrs:
if w not in preorder:
queue.append(w)
done=0
break
if done==1:
lowlink[v]=preorder[v]
for w in v_nbrs:
if w not in scc_found:
if preorder[w]>preorder[v]:
lowlink[v]=min([lowlink[v],lowlink[w]])
else:
lowlink[v]=min([lowlink[v],preorder[w]])
queue.pop()
if lowlink[v]==preorder[v]:
scc_found[v]=True
scc=[v]
while scc_queue and preorder[scc_queue[-1]]>preorder[v]:
k=scc_queue.pop()
scc_found[k]=True
scc.append(k)
scc_list.append(scc)
else:
scc_queue.append(v)
scc_list.sort(key=len,reverse=True)
return scc_list
def kosaraju_strongly_connected_components(G,source=None):
"""Return nodes in strongly connected components of graph.
Parameters
----------
G : NetworkX Graph
       A directed graph.
Returns
-------
comp : list of lists
A list of nodes for each component of G.
The list is ordered from largest connected component to smallest.
See Also
--------
connected_components
Notes
-----
Uses Kosaraju's algorithm.
"""
components=[]
G=G.reverse(copy=False)
post=list(nx.dfs_postorder_nodes(G,source=source))
G=G.reverse(copy=False)
seen={}
while post:
r=post.pop()
if r in seen:
continue
c=nx.dfs_preorder_nodes(G,r)
new=[v for v in c if v not in seen]
seen.update([(u,True) for u in new])
components.append(new)
components.sort(key=len,reverse=True)
return components
def strongly_connected_components_recursive(G):
"""Return nodes in strongly connected components of graph.
Recursive version of algorithm.
Parameters
----------
G : NetworkX Graph
       A directed graph.
Returns
-------
comp : list of lists
A list of nodes for each component of G.
The list is ordered from largest connected component to smallest.
See Also
--------
connected_components
Notes
-----
Uses Tarjan's algorithm with Nuutila's modifications.
References
----------
.. [1] Depth-first search and linear graph algorithms, R. Tarjan
SIAM Journal of Computing 1(2):146-160, (1972).
.. [2] On finding the strongly connected components in a directed graph.
E. Nuutila and E. Soisalon-Soinen
       Information Processing Letters 49(1): 9-14, (1994).
"""
def visit(v,cnt):
root[v]=cnt
visited[v]=cnt
cnt+=1
stack.append(v)
for w in G[v]:
if w not in visited: visit(w,cnt)
if w not in component:
root[v]=min(root[v],root[w])
if root[v]==visited[v]:
component[v]=root[v]
tmpc=[v] # hold nodes in this component
while stack[-1]!=v:
w=stack.pop()
component[w]=root[v]
tmpc.append(w)
stack.remove(v)
scc.append(tmpc) # add to scc list
scc=[]
visited={}
component={}
root={}
cnt=0
stack=[]
for source in G:
if source not in visited:
visit(source,cnt)
scc.sort(key=len,reverse=True)
return scc
def strongly_connected_component_subgraphs(G):
"""Return strongly connected components as subgraphs.
Parameters
----------
G : NetworkX Graph
A graph.
Returns
-------
glist : list
A list of graphs, one for each strongly connected component of G.
See Also
--------
connected_component_subgraphs
Notes
-----
The list is ordered from largest strongly connected component to smallest.
Graph, node, and edge attributes are copied to the subgraphs.
"""
cc=strongly_connected_components(G)
graph_list=[]
for c in cc:
graph_list.append(G.subgraph(c).copy())
return graph_list
def number_strongly_connected_components(G):
"""Return number of strongly connected components in graph.
Parameters
----------
G : NetworkX graph
A directed graph.
Returns
-------
n : integer
Number of strongly connected components
See Also
--------
connected_components
Notes
-----
For directed graphs only.
"""
return len(strongly_connected_components(G))
def is_strongly_connected(G):
"""Test directed graph for strong connectivity.
Parameters
----------
G : NetworkX Graph
A directed graph.
Returns
-------
connected : bool
True if the graph is strongly connected, False otherwise.
See Also
--------
strongly_connected_components
Notes
-----
For directed graphs only.
"""
if not G.is_directed():
raise nx.NetworkXError("""Not allowed for undirected graph G.
See is_connected() for connectivity test.""")
if len(G)==0:
raise nx.NetworkXPointlessConcept(
"""Connectivity is undefined for the null graph.""")
return len(strongly_connected_components(G)[0])==len(G)
def condensation(G, scc=None):
"""Returns the condensation of G.
The condensation of G is the graph with each of the strongly connected
components contracted into a single node.
Parameters
----------
G : NetworkX DiGraph
A directed graph.
scc: list (optional, default=None)
A list of strongly connected components. If provided, the elements in
`scc` must partition the nodes in `G`. If not provided, it will be
calculated as scc=nx.strongly_connected_components(G).
Returns
-------
C : NetworkX DiGraph
The condensation of G. The node labels are integers corresponding
to the index of the component in the list of strongly connected
components.
Notes
-----
After contracting all strongly connected components to a single node,
the resulting graph is a directed acyclic graph.
"""
if scc is None:
scc = nx.strongly_connected_components(G)
mapping = {}
C = nx.DiGraph()
for i,component in enumerate(scc):
for n in component:
mapping[n] = i
C.add_nodes_from(range(len(scc)))
for u,v in G.edges():
if mapping[u] != mapping[v]:
C.add_edge(mapping[u],mapping[v])
return C
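if __name__ == '__main__':
    # Minimal usage sketch: two 2-cycles joined by a one-way edge give two
    # strongly connected components; contracting them yields a 2-node DAG.
    G = nx.DiGraph([(1, 2), (2, 1), (2, 3), (3, 4), (4, 3)])
    comps = strongly_connected_components(G)
    print(sorted(sorted(c) for c in comps))  # [[1, 2], [3, 4]]
    C = condensation(G, scc=comps)
    print(C.edges())  # a single edge between the two contracted components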
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/networkx/algorithms/components/strongly_connected.py
|
Python
|
agpl-3.0
| 8,874
|
[
"VisIt"
] |
05fd753161e24732e43cc4061b937811ba8986257f59ebcb1e80ea9a8b4d03aa
|
## ## ## qepy.py v.0.1.2
## ## ## Created: 06/12/2014 - KDP
import os
import commands
import numpy as np
import cPickle as pickle
from inspect import ismethod
from subprocess import Popen, PIPE, call
from shutil import rmtree, copytree, copy2
from os.path import isdir
import runlist as runl # parameter list for running job
import pwxlist as pwxl # parameter lists of pwx
import phxlist as phxl # parameter lists of phx
import q2rlist as q2rl # parameter lists of q2r
from qepyrc import * # configuration file
from qepy_exceptions import * # exceptions definitions
class pwx:
"""
qepy.pwx()
A Quantum Espresso pw.x instance
"""
def __init__(self, qedir, **kwargs):
self.quote_control_params = {}
self.control_params = {}
self.paren_system_params = {}
self.system_params = {}
self.electrons_params = {}
self.paren_electrons_params = {}
self.ions_params = {}
self.cell_params = {}
self.atomic_species_params = {}
self.atomic_positions_params = {}
self.k_points_params = {}
self.cell_parameters_params = {}
self.constraints_params = {}
self.occupations_params = {}
self.atomic_forces_params = {}
self.run_params = {}
self.parallel_params = {}
for key in pwxl.quote_control_keys:
self.quote_control_params[key] = None
for key in pwxl.control_keys:
self.control_params[key] = None
for key in pwxl.paren_system_keys:
self.paren_system_params[key] = None
for key in pwxl.system_keys:
self.system_params[key] = None
for key in pwxl.electrons_keys:
self.electrons_params[key] = None
for key in pwxl.paren_electrons_keys:
self.paren_electrons_params[key] = None
for key in pwxl.ions_keys:
self.ions_params[key] = None
for key in pwxl.cell_keys:
self.cell_params[key] = None
for key in pwxl.atomic_species_keys:
self.atomic_species_params[key] = None
for key in pwxl.atomic_positions_keys:
self.atomic_positions_params[key] = None
for key in pwxl.k_points_keys:
self.k_points_params[key] = None
for key in pwxl.cell_parameters_keys:
self.cell_parameters_params[key] = None
for key in pwxl.constraints_keys:
self.constraints_params[key] = None
for key in pwxl.occupations_keys:
self.occupations_params[key] = None
for key in pwxl.atomic_forces_keys:
self.atomic_forces_params[key] = None
for key in runl.run_keys:
self.run_params[key] = None
for key in runl.parallel_keys:
self.parallel_params[key] = None
for key in QEPYRC:
if self.run_params.has_key(key):
self.run_params[key] = QEPYRC[key]
# Load any previous settings
qedir = str(qedir).rstrip('/')
if isdir(qedir):
self._load_object(qedir + '/.qepy.pkl')
# Set new directory settings
usrHome = os.path.expanduser('~')
self.qedir = qedir.replace('~', usrHome)
self.cwd = os.getcwd()
if self.quote_control_params['title'] is not None:
self.title = self.quote_control_params['title']
self._set(**kwargs)
def __enter__(self):
"""
On enter, if directory does not exist, create it.
Change into directory.
Syntax:
with pwx() as calc:
try:
calc.my_command()
except (QepyException):
do something
"""
## Create Paths
qedir = str(self.qedir).rstrip('/')
outdir = self.quote_control_params['outdir'].strip(' \'\"\t\n\r/.')
if qedir.replace('./', '') != os.getcwd().rstrip('/').split('/')[-1]: # Check if already in target directory
if not isdir(qedir):
os.mkdir('{0}'.format(qedir))
os.mkdir('{0}/{1}'.format(qedir, outdir))
os.chdir(qedir)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
On exit, change back to original directory.
"""
self._save_object('./.qepy.pkl')
os.chdir(self.cwd)
		return False # Allows exception to propagate out
def _set(self, **kwargs):
"""
Set kwargs parameters to those defined by pwxlist.
"""
for key in kwargs:
if self.quote_control_params.has_key(key):
self.quote_control_params[key] = '\'{0}\''.format(kwargs[key].strip('\'\" '))
elif self.control_params.has_key(key):
self.control_params[key] = kwargs[key]
elif self.paren_system_params.has_key(key):
self.paren_system_params[key] = kwargs[key]
elif self.system_params.has_key(key):
self.system_params[key] = kwargs[key]
elif self.electrons_params.has_key(key):
self.electrons_params[key] = kwargs[key]
elif self.paren_electrons_params.has_key(key):
self.paren_electrons_params[key] = kwargs[key]
elif self.ions_params.has_key(key):
self.ions_params[key] = kwargs[key]
elif self.cell_params.has_key(key):
self.cell_params[key] = kwargs[key]
elif self.atomic_species_params.has_key(key):
self.atomic_species_params[key] = kwargs[key]
elif self.atomic_positions_params.has_key(key):
self.atomic_positions_params[key] = kwargs[key]
elif self.k_points_params.has_key(key):
self.k_points_params[key] = kwargs[key]
elif self.cell_parameters_params.has_key(key):
self.cell_parameters_params[key] = kwargs[key]
elif self.constraints_params.has_key(key):
self.constraints_params[key] = kwargs[key]
elif self.occupations_params.has_key(key):
self.occupations_params[key] = kwargs[key]
elif self.atomic_forces_params.has_key(key):
self.atomic_forces_params[key] = kwargs[key]
elif self.run_params.has_key(key):
self.run_params[key] = kwargs[key]
elif self.parallel_params.has_key(key):
self.parallel_params[key] = kwargs[key]
else:
raise TypeError('Parameter not defined: '+ key)
self.title = self.quote_control_params['title']
## ## ## JOB MANAGEMENT FUNCTIONS ## ## ##
def _job_in_queue(self):
if not os.path.isfile('jobid'):
return False
else:
# Get jobid
jobid = open('jobid').readline().strip()
# See if jobid is in the queue
jobids_in_queue = commands.getoutput('qselect').split('\n')
if jobid in jobids_in_queue:
status, output = commands.getstatusoutput('qstat {0}'.format(jobid))
if status == 0:
lines = output.split('\n')
fields = lines[2].split()
job_status = fields[4]
if job_status == 'C':
return False
else:
return True
else:
return False
def _local(self, **kwargs):
"""
Run the calculation through command line
"""
inFileName = self.title.strip('\'\"') + '.in'
outFileName = self.title.strip('\'\"') + '.out'
command = ''
		if self.run_params['ppn'] != 1: # allow mpirun if multiple cores requested
command += 'mpirun -np {0} '.format(self.run_params['ppn'])
command += self.run_params['pw.x']
for key, value in self.parallel_params.items():
if value is not None:
command += ' -{0} {1} '.format(str(key), str(value))
command += ' < {0} > {1}'.format(inFileName, outFileName)
p = Popen([command], stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
		p.wait() # wait for command to finish
out, err = p.communicate()
if err != '':
raise Exception(err)
def _load_object(self, fname):
"""
If save object exists, then load and set attributes.
"""
if os.path.isfile(fname):
with open(fname, 'rb') as input:
old_object = pickle.load(input)
for i in dir(old_object):
if not i.startswith('__') and not ismethod(getattr(old_object,i)):
setattr(self, i, getattr(old_object, i))
def _save_object(self, fname):
"""
		Save object attributes so the instance can be loaded later.
"""
with open(fname, 'wb') as output:
pickle.dump(self, output, -1)
## ## ## USER ACCESS FUNCTIONS ## ## ##
def calculate(self, **kwargs):
inFile = self.title.strip('\'\"') + '.in'
outFile = self.title.strip('\'\"') + '.out'
if not os.path.isfile(inFile):
## Create input file
self._qeControl()
self._qeSystem()
self._qeElectrons()
self._qeIons()
self._qeCellParams()
self._qeAtomicSpecies()
self._qeAtomicPositions()
self._qeKpoints()
self._qeCellParameters()
self._qeOccupations()
self._qeAtomicForces()
for key in kwargs:
if self.run_params.has_key(key):
self.run_params[key] = kwargs[key]
elif self.parallel_params.has_key(key):
self.parallel_params[key] = kwargs[key]
else:
raise TypeError('Parameter not defined: '+ key)
		if self.run_params['jobname'] is None:
self.run_params['jobname'] = self.title.strip('\'\"')
## Submit/Run job
if os.path.isfile('CRASH'):
raise QepyCrash('Quantum Espresso has crashed')
if self._job_in_queue(): # If in queue, exit
raise QepyRunning()
elif os.path.isfile(outFile):
if self._check_complete(): #if already done, exit
pass
else:
raise QepyNotComplete('Job not found in queue, energy not found in out file')
elif os.path.isfile('jobid'): # jobid file already exists
raise QepyNotComplete('Found jobid file, but not running and no output file')
else: # If not in queue and not done, run
if self.run_params['mode'] == 'queue':
_qeSubmission(self, 'pwx')
p = Popen(['qsub', 'pwxrun.sh'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if out == '' or err != '':
raise Exception(err)
else:
f = open('jobid','w')
f.write(out)
f.close()
raise QepySubmitted(out)
elif self.run_params['mode'] == 'local':
self._local(**kwargs)
def _check_complete(self):
fileName = self.title.strip('\'\"') + '.out'
outFile = open(str(fileName), 'r')
found = False
for line in outFile:
if 'JOB DONE' in line:
found = True
break
return found
def _energy(self):
fileName = self.title.strip('\'\"') + '.out'
outFile = open(str(fileName), 'r')
while True:
myString = outFile.readline()
if myString.find('!') > -1:
outFile.close()
return float(myString.split()[4])
elif myString == '':
outFile.close()
break
return False
#-- END _energy --#
def _forces(self):
fileName = self.title.strip('\'\"') + '.out'
forces = []
if type(self.control_params['tprnfor']) is not str:
print("Forces not calculated. Set 'tprnfor' = '.true.' in input file to calculate")
return False
if self.control_params['tprnfor'].strip().lower() == '.true.':
with open(str(fileName), 'r') as outFile:
while True:
myString = outFile.readline().lower()
if myString.find('forces acting on atoms') > -1:
while True:
forceString = outFile.readline().lower()
if forceString.find('atom') > -1:
forces.append([forceString.split()[6],
forceString.split()[7],
forceString.split()[8]])
elif forceString.find('total force') > -1:
forces.append(forceString.split()[3])
return np.array(forces)
elif forceString == '':
break
elif myString == '':
break
return False
else:
print("Forces not calculated. Set tprnfor == '.true.' in input file to calculate.")
return False
#-- END _forces --#
def _stress_pressure(self):
fileName = self.title.strip('\'\"') + '.out'
stress = []
pressure = []
totalP = None
if not isinstance(self.control_params['tstress'], str):
print("Stress not calculated. set 'tstress' = '.true.' in input file")
return False
if self.control_params['tstress'].strip().lower() == '.true.':
with open(str(fileName), 'r') as outFile:
while True:
myString = outFile.readline().lower()
if myString.find('entering subroutine stress') > -1:
while True:
spString = outFile.readline().lower()
if spString.find('total stress') > -1:
totalP = spString.split()[5]
for i in range(3):
spString = outFile.readline().lower()
stress.append([spString.split()[0],
spString.split()[1],
spString.split()[2]])
pressure.append([spString.split()[3],
spString.split()[4],
spString.split()[5]])
return np.array(stress),np.array(pressure),totalP
elif spString == '':
break
elif myString == '':
break
return False
else:
print("Pressure not calculated. Set tstress == '.true.' in input file to calculate.")
return False
#-- END _stress_pressure --#
def _structure(self):
fileName = self.title.strip('\'\"') + '.out'
alat = 0.0
latVec = np.zeros((3, 3))
pos = np.zeros((self.system_params['nat'], 3))
with open(str(fileName), 'r') as outFile:
while True:
myString = outFile.readline()
if myString.find('CELL_PARAMETERS') > -1:
myString = myString.split()[2] # Get alat
alat = float(myString.strip('() \t'))
myString = outFile.readline().lower().strip().split()
latVec[0,:] = float(myString[0]), float(myString[1]), float(myString[2])
myString = outFile.readline().lower().split()
latVec[1,:] = float(myString[0]), float(myString[1]), float(myString[2])
myString = outFile.readline().lower().split()
latVec[2,:] = float(myString[0]), float(myString[1]), float(myString[2])
elif myString.find('ATOMIC_POSITIONS') > -1:
for i in range(self.system_params['nat']):
myString = outFile.readline().lower().strip().split()
pos[i,:] = float(myString[1]), float(myString[2]), float(myString[3])
elif myString == '':
break
return alat, latVec, pos
#-- END _structure --#
def get_stress_pressure(self, type='all'):
'''
Returns the pressure from the output file. Requires tstress to be \'.true.\'
To return only stress or pressure set type='stress' or 'pressure'
'''
if os.path.isfile('CRASH'):
raise QepyCrash('Quantum Espresso has crashed')
elif not os.path.isfile('jobid'):
raise QepyNotComplete('Job not submitted')
elif self._job_in_queue():
raise QepyNotComplete('Job still in queue')
elif not self._check_complete():
raise QepyException('Unknown error')
else:
output = self._stress_pressure()
			if output is False:
raise QepyException('Unknown error')
if type.lower() == 'all':
return output[0],output[1],output[2]
elif type.lower() == 'stress':
return output[0]
elif type.lower() == 'pressure':
return output[1],output[2]
else:
print("Error. Set type = 'all', 'stress', or 'pressure'")
return False
def get_forces(self):
'''
Returns the forces from the output file. Requires tprnfor to be \'.true.\'
'''
if os.path.isfile('CRASH'):
raise QepyCrash('Quantum Espresso has crashed')
elif not os.path.isfile('jobid'):
raise QepyNotComplete('Job not submitted')
elif self._job_in_queue():
raise QepyNotComplete('Job still in queue')
elif not self._check_complete():
raise QepyException('Unknown error')
else:
forces = self._forces()
return forces
def get_energy(self):
'''
Returns the energy from the output file.
'''
if os.path.isfile('CRASH'):
raise QepyCrash('Quantum Espresso has crashed')
elif not os.path.isfile('jobid'):
raise QepyNotComplete('Job not submitted')
elif self._job_in_queue():
raise QepyNotComplete('Job still in queue')
elif not self._check_complete():
raise QepyException('Unknown error')
else:
energy = self._energy()
return energy
def get_structure(self):
'''
Returns the relaxed structure from the output file.
'''
if self.quote_control_params['calculation'].strip('\'\" ').lower() \
!= 'relax' and \
self.quote_control_params['calculation'].strip('\'\" ').lower() \
!= 'vc-relax':
raise QepyException('Calculation must be either \'relax\' or \'vc-relax\'')
if os.path.isfile('CRASH'):
raise QepyCrash('Quantum Espresso has crashed')
elif not os.path.isfile('jobid'):
raise QepyNotComplete('Job not submitted')
elif self._job_in_queue():
raise QepyNotComplete('Job still in queue')
elif not self._check_complete():
raise QepyException('Unknown error')
else:
alat, latVec, pos = self._structure()
return alat, latVec, pos
def _qeControl(self):
fileName = self.title.strip('\'\"') + '.in'
inFile = open(str(fileName), 'a')
inFile.write('# This file generated by qepy\n')
inFile.write('# For more info, visit github.com/kparrish/qepy\n')
inFile.write(' &control' + '\n')
for key, val in self.quote_control_params.items():
if val is not None:
inFile.write(' {0:s}={1:s},\n'.format(str(key), str(val)))
for key, val in self.control_params.items():
if val is not None:
inFile.write(' {0:s}={1:s},\n'.format(str(key), str(val)))
inFile.write(' /' + '\n')
inFile.close()
#-- END _qeControl --#
def _qeSystem(self):
fileName = self.title.strip('\'\"') + '.in'
inFile = open(str(fileName), 'a')
inFile.write(' &system' + '\n')
for key, val in self.paren_system_params.items():
if val is not None:
if not isinstance(val[0], list):
					if len(val) != 2:
raise ValueError('Value for {0} must be len 2 or a list of items of len 2'.format(key))
					elif key == 'celldm': # Have celldm default to an angstrom input
inFile.write(' {0}({1})={2},\n'.format(str(key), str(val[0]), str(val[1]/0.5291772108)))
else:
inFile.write(' {0}({1})={2},\n'.format(str(key), str(val[0]), str(val[1])))
else:
for line in range(len(val)):
						if len(val[line]) != 2:
raise ValueError('Value for {0} must be len 2 or a list of items of len 2'.format(key))
						elif key == 'celldm': # Have celldm default to an angstrom input
inFile.write(' {0}({1})={2},\n'.format(str(key), str(val[line][0]), str(val[line][1]/0.5291772108)))
else:
inFile.write(' {0}({1})={2},\n'.format(str(key), str(val[line][0]), str(val[line][1])))
for key, val in self.system_params.items():
if val is not None:
inFile.write(' {0}={1},\n'.format(str(key), str(val)))
inFile.write(' /' + '\n')
inFile.close()
#-- END _qeSystem --#
def _qeElectrons(self):
fileName = self.title.strip('\'\"') + '.in'
inFile = open(str(fileName), 'a')
inFile.write(' &electrons' + '\n')
for key, val in self.electrons_params.items():
if val is not None:
inFile.write(' {0}={1},\n'.format(str(key), str(val)))
for key, val in self.paren_electrons_params.items():
if val is not None:
				if len(val) != 2:
raise ValueError('Value for {0} must be a list or tuple of length 2'.format(key))
else:
inFile.write(' {0}({1})={2},\n'.format(str(key), str(val[0]), str(val[1])))
inFile.write(' /' + '\n')
inFile.close()
#-- END _qeElectrons --#
def _qeIons(self):
fileName = self.title.strip('\'\"') + '.in'
inFile = open(str(fileName), 'a')
inFile.write(' &ions' + '\n')
for key, val in self.ions_params.items():
if val is not None:
inFile.write(' {0}={1},\n'.format(str(key), str(val)))
inFile.write(' /' + '\n')
inFile.close()
#-- END _qeIons --#
def _qeCellParams(self):
fileName = self.title.strip('\'\"') + '.in'
inFile = open(str(fileName), 'a')
inFile.write(' &cell' + '\n')
for key, val in self.cell_params.items():
if val is not None:
inFile.write(' {0}={1},\n'.format(str(key), str(val)))
inFile.write(' /' + '\n')
inFile.close()
#-- END qeCellParams --#
def _qeAtomicSpecies(self):
fileName = self.title.strip('\'\"') + '.in'
inFile = open(str(fileName), 'a')
inFile.write('ATOMIC_SPECIES\n')
asp = self.atomic_species_params['atomic_species']
if not isinstance(asp[0], list):
if len(asp) != 3:
raise ValueError('\'atomic_species\' must be len 3 or list of items of len 3')
else:
inFile.write(' {0} {1} {2}\n'.format(str(asp[0]), str(asp[1]), str(asp[2])))
else:
for line in range(len(asp)):
if len(asp[line]) != 3:
raise ValueError('\'atomic_species\' must be len 3 or list of items of len 3')
else:
inFile.write(' {0} {1} {2}\n'.format(str(asp[line][0]), str(asp[line][1]), str(asp[line][2])))
inFile.close()
#-- END _qeAtomicSpecies --#
def _qeAtomicPositions(self):
fileName = self.title.strip('\'\"') + '.in'
inFile = open(str(fileName), 'a')
ap = self.atomic_positions_params['atomic_positions']
if ap.lower() != 'alat' and ap.lower() != 'bohr' and \
ap.lower() != 'angstrom' and ap.lower() != 'crystal':
raise ValueError('\'atomic_positions\' must be either \'alat\', \'bohr\', \'angstrom\', or \'crystal\'')
inFile.write('ATOMIC_POSITIONS {0}\n'.format(str(ap)))
apl = self.atomic_positions_params['atomic_positions_list']
if not isinstance(apl[0], list) and not isinstance(apl[0], np.ndarray):
if len(apl) != 4 and len(apl) != 7:
raise ValueError('\'atomic_species_list\' must be len 4 or 7 or list of items of len 4 or 7')
elif len(apl) == 4:
inFile.write(' {0} {1} {2} {3}\n'.format(apl[0], apl[1], apl[2], apl[3]))
else:
inFile.write(' {0} {1} {2} {3} {4} {5} {6}\n'.format(apl[0], apl[1], apl[2], apl[3], apl[4], apl[5], apl[6]))
else:
for line in range(len(apl)):
if len(apl[line]) != 4 and len(apl[line]) !=7:
raise ValueError('\'atomic_species_list\' must be len 4 or 7 or list of items of len 4 or 7')
elif len(apl[line]) == 4:
inFile.write(' {0} {1} {2} {3}\n'.format(apl[line][0], apl[line][1], apl[line][2], apl[line][3]))
else:
inFile.write(' {0} {1} {2} {3} {4} {5} {6}\n'.format(apl[line][0], apl[line][1], apl[line][2], apl[line][3], apl[line][4], apl[line][5], apl[line][6]))
inFile.close()
#-- END _qeAtomicPositions --#
def _qeKpoints(self):
fileName = self.title.strip('\'\"') + '.in'
inFile = open(str(fileName), 'a')
kpt = self.k_points_params['k_points']
inFile.write('K_POINTS {0}\n'.format(kpt))
if kpt.lower() == 'tpiba' or kpt.lower() == 'crystal' or \
kpt.lower() == 'tpiba_b' or kpt.lower() == 'crystal_b' or \
kpt.lower() == 'tpiba_c' or kpt.lower() == 'crystal_c':
kpl = self.k_points_params['k_points_list']
inFile.write(' ')
for line in range(len(kpl)):
for item in range(len(kpl[line])):
inFile.write('{0} '.format(kpl[line][item]))
inFile.write('\n')
elif kpt.lower() == 'automatic':
kpl = self.k_points_params['k_points_list']
inFile.write(' ')
for item in range(len(kpl)):
inFile.write('{0} '.format(kpl[item]))
inFile.write('\n')
elif kpt.lower() != 'gamma':
			raise ValueError('\'k_points\' must be either tpiba | crystal | tpiba_b | crystal_b | tpiba_c | crystal_c | automatic | gamma')
inFile.close()
#-- END _qeKpoints --#
def _qeCellParameters(self):
fileName = self.title.strip('\'\"') + '.in'
inFile = open(str(fileName), 'a')
if self.cell_parameters_params['units'] != None:
inFile.write('CELL_PARAMETERS {{ {0} }}\n'.format(self.cell_parameters_params['units']))
if self.cell_parameters_params['v1'] is not None and \
self.cell_parameters_params['v2'] is not None and \
self.cell_parameters_params['v3'] is not None:
v1 = self.cell_parameters_params['v1']
v2 = self.cell_parameters_params['v2']
v3 = self.cell_parameters_params['v3']
if len(v1) != 3 or len(v2) != 3 or len(v3) != 3:
raise ValueError('v1, v2, and v3 must have dimensions of 3')
inFile.write(' {0:s} {1:s} {2:s}\n'.format(str(v1[0]), str(v1[1]), str(v1[2])))
inFile.write(' {0:s} {1:s} {2:s}\n'.format(str(v2[0]), str(v2[1]), str(v2[2])))
inFile.write(' {0:s} {1:s} {2:s}\n'.format(str(v3[0]), str(v3[1]), str(v3[2])))
inFile.close()
#-- END qeCellParameters --#
def _qeConstraints(self):
fileName = self.title.strip('\'\"') + '.in'
inFile = open(str(fileName), 'a')
use = False
for key, val in self.constraints_params.items():
if val is not None:
use = True
if use:
inFile.write('CONSTRAINTS\n')
inFile.write(' {0}'.format(str(self.constraints_params['nconstr'])))
if self.constraints_params['constr_tol'] is not None:
inFile.write(' {0}'.format(str(self.constraints_params['constr_tol'])))
inFile.write('\n')
constr = self.constraints_params['constr']
inFile.write(' ')
if not isinstance(constr[0], list):
for i in range(len(constr)):
inFile.write('{0} '.format(str(constr[i])))
inFile.write('\n')
else:
for i in range(len(constr)):
for j in range(len(constr[i])):
inFile.write('{0} '.format(str(constr[i][j])))
inFile.write('\n')
inFile.close()
#-- END _qeConstraints --#
def _qeOccupations(self):
fileName = self.title.strip('\'\"') + '.in'
inFile = open(str(fileName), 'a')
use = False
for key, val in self.occupations_params.items():
if val is not None:
use = True
if use:
inFile.write('OCCUPATIONS\n')
for key, val in self.occupations_params.items():
if val is not None:
if not isinstance(val[0], list):
if len(val) > 10:
							raise ValueError(key + ' must have len <= 10!')
inFile.write(' ')
for i in range(len(val)):
inFile.write('{0} '.format(val[i]))
inFile.write('\n')
else:
for i in range(len(val)):
if len(val[i]) > 10:
								raise ValueError(key + ' nested list items must have len <= 10!')
inFile.write(' ')
for j in range(len(val[i])):
inFile.write('{0} '.format(val[i][j]))
inFile.write('\n')
inFile.close()
#-- END _qeOccupations --#
def _qeAtomicForces(self):
fileName = self.title.strip('\'\"') + '.in'
inFile = open(str(fileName), 'a')
use = False
for key, val in self.atomic_forces_params.items():
if val is not None:
use = True
if use:
inFile.write('ATOMIC_FORCES\n')
for key, val in self.atomic_forces_params.items():
if val is not None:
if not isinstance(val[0], list):
if len(val) != 4:
raise ValueError(key + ' must have len of 4!')
inFile.write(' ')
for i in range(len(val)):
inFile.write('{0} '.format(val[i]))
inFile.write('\n')
else:
for i in range(len(val)):
if len(val[i]) != 4:
raise ValueError(key + ' must have len of 4!')
inFile.write(' ')
for j in range(len(val[i])):
inFile.write('{0} '.format(val[i][j]))
inFile.write('\n')
inFile.close()
#-- END _qeAtomicForces --#
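## ## ## USAGE SKETCH (pwx) ## ## ##
# Minimal usage sketch (parameter values are hypothetical, not a verified
# input deck): run pw.x locally in ./si_scf and read back the total energy.
#
# with pwx('./si_scf', title='si_scf', calculation='scf',
#          outdir='./out', mode='local') as calc:
#     try:
#         calc.calculate()
#         print calc.get_energy()
#     except QepySubmitted:
#         pass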
class phx:
"""
qepy.phx()
A Quantum Espresso ph.x instance
"""
def __init__(self, qedir, title=None, PWX=None, **kwargs):
usrHome = os.path.expanduser('~')
self.qedir = qedir.replace('~', usrHome)
self.cwd = os.getcwd()
self.title = title
self.quote_inputph_params = {}
self.inputph_params = {}
self.paren_inputph_params = {}
self.run_params = {}
self.parallel_params = {}
for key in phxl.inputph_keys:
self.inputph_params[key] = None
for key in phxl.quote_inputph_keys:
self.quote_inputph_params[key] = None
for key in phxl.paren_inputph_keys:
self.paren_inputph_params[key] = None
for key in runl.run_keys:
self.run_params[key] = None
for key in runl.parallel_keys:
self.parallel_params[key] = None
for key in QEPYRC:
if self.run_params.has_key(key):
self.run_params[key] = QEPYRC[key]
# Get dependent pwx object
if PWX is not None:
if not isinstance(PWX, pwx):
raise TypeError('PWX must be of type pwx')
else:
self.pwx = PWX
else:
self.pwx = None
self._set(**kwargs)
def __enter__(self):
"""
On enter, if directory does not exist, create it.
Change into directory.
Syntax:
with phx() as calc:
try:
calc.my_command()
except (QepyException):
do something
"""
## Create Paths
qedir = str(self.qedir).rstrip('/')
outdir = self.quote_inputph_params['outdir'].strip(' \'\"\t\n\r/.')
if qedir.replace('./', '') != os.getcwd().rstrip('/').split('/')[-1]: # Check if already in target directory
if not isdir(qedir):
os.mkdir('{0}'.format(qedir))
os.mkdir('{0}/{1}'.format(qedir, outdir))
os.chdir(qedir)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
On exit, change back to original directory.
"""
os.chdir(self.cwd)
		return False # Allows exception to propagate out
def _set(self, **kwargs):
"""
Set kwargs parameters to those defined by pwxlist.
"""
for key in kwargs:
if self.quote_inputph_params.has_key(key):
self.quote_inputph_params[key] = '\'{0}\''.format(kwargs[key].strip('\'\" '))
elif self.inputph_params.has_key(key):
self.inputph_params[key] = kwargs[key]
elif self.paren_inputph_params.has_key(key):
self.paren_inputph_params[key] = kwargs[key]
elif self.run_params.has_key(key):
self.run_params[key] = kwargs[key]
elif self.parallel_params.has_key(key):
self.parallel_params[key] = kwargs[key]
else:
raise TypeError('Parameter not defined: '+ key)
def calculate(self, **kwargs):
inFile = self.title.strip('\'\"') + '.in'
outFile = self.title.strip('\'\"') + '.out'
if not os.path.isfile(inFile):
## Create input file
self._qeInputph()
for key in kwargs:
if self.run_params.has_key(key):
self.run_params[key] = kwargs[key]
elif self.parallel_params.has_key(key):
self.parallel_params[key] = kwargs[key]
else:
raise TypeError('Parameter not defined: '+ key)
		if self.run_params['jobname'] is None:
self.run_params['jobname'] = self.title.strip('\'\"')
## Submit/Run job
if self._job_in_queue(): # If in queue, exit
raise QepyRunning()
elif os.path.isfile(outFile):
if self._check_complete():
pass
else:
raise QepyNotComplete('Job not found in queue, energy not found in out file')
elif os.path.isfile('jobidPH'): # jobidPH file already exists
raise QepyNotComplete('Found jobidPH file, but not running and no output file')
else: # If not in queue and not done, run
if self.run_params['mode'] == 'queue':
_qeSubmission(self, 'phx')
if self.pwx is not None: # Dependent on pwx
pwxSuccessfulSubmit = False
try:
self.pwx.calculate() # Submit pwx
except(QepySubmitted):
pwxSuccessfulSubmit = True
pass
if pwxSuccessfulSubmit is not True:
raise QepyException('ERROR: Dependent pwx did not successfully submit to queue.')
pwxfile = open('jobid') # Read jobid file
pwxsub = pwxfile.read().strip('\n\t ')
pwxfile.close()
p = Popen(['qsub -W depend=afterok:'+ pwxsub + ' phxrun.sh'], shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
else: # NOT dependent on pwx
p = Popen(['qsub', 'phxrun.sh'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if out == '' or err != '':
raise Exception(err)
else:
f = open('jobidPH','w')
f.write(out)
f.close()
raise QepySubmitted(out)
elif self.run_params['mode'] == 'local':
if self.pwx is None: # Local run cannot have dependent pwx
self._local(**kwargs)
else:
raise QepyException('ERROR: Cannot run phx in mode=\'local\' while passing a dependent pwx object.')
def _job_in_queue(self):
if not os.path.isfile('jobidPH'):
return False
else:
# Get jobidPH
jobidPH = open('jobidPH').readline().strip()
# See if jobidPH is in the queue
jobidPHs_in_queue = commands.getoutput('qselect').split('\n')
if jobidPH in jobidPHs_in_queue:
status, output = commands.getstatusoutput('qstat {0}'.format(jobidPH))
if status == 0:
lines = output.split('\n')
fields = lines[2].split()
job_status = fields[4]
if job_status == 'C':
return False
else:
return True
else:
return False
def _local(self, **kwargs):
"""
Run the calculation through command line
"""
inFileName = self.title.strip('\'\"') + '.in'
outFileName = self.title.strip('\'\"') + '.out'
command = ''
		if self.run_params['ppn'] != 1: # allow mpirun if multiple cores requested
command += 'mpirun -np {0} '.format(self.run_params['ppn'])
command += self.run_params['ph.x']
for key, value in self.parallel_params.items():
if value is not None:
command += ' -{0} {1} '.format(str(key), str(value))
command += ' < {0} > {1}'.format(inFileName, outFileName)
p = Popen([command], stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
		p.wait() # wait for command to finish
out, err = p.communicate()
if err != '':
raise Exception(err)
def _check_complete(self):
fileName = self.title.strip('\'\"') + '.out'
outFile = open(str(fileName), 'r')
found = False
for line in outFile:
if 'JOB DONE' in line:
found = True
break
return found
def _qeInputph(self):
fileName = self.title.strip('\'\"') + '.in'
inFile = open(str(fileName), 'a')
inFile.write('# This file generated by qepy\n')
inFile.write('# For more info, visit github.com/kparrish/qepy\n')
inFile.write(' &INPUTPH' + '\n')
for key, val in self.quote_inputph_params.items():
if val is not None:
inFile.write(' {0:s}={1:s},\n'.format(str(key), str(val)))
for key, val in self.inputph_params.items():
if val is not None:
inFile.write(' {0:s}={1:s},\n'.format(str(key), str(val)))
for key, val in self.paren_inputph_params.items():
if val is not None:
if not isinstance(val[0], list):
					if len(val) != 2:
raise ValueError('Value for {0} must be a list or tuple of length 2'.format(key))
else:
inFile.write(' {0}({1})={2},\n'.format(str(key), str(val[0]), str(val[1])))
else:
for i in range(len(val)):
						if len(val[i]) != 2:
raise ValueError('Value for {0} must be a list or tuple of length 2'.format(key))
else:
inFile.write(' {0}({1})={2},\n'.format(str(key), str(val[i][0]), str(val[i][1])))
inFile.write(' /' + '\n')
inFile.close()
#-- END _qeInputph --#
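## ## ## USAGE SKETCH (phx) ## ## ##
# Minimal usage sketch (names and parameters are hypothetical): queue a
# ph.x job that starts only after its dependent pwx job completes.
#
# scf = pwx('./si', title='si_scf', calculation='scf', outdir='./out')
# with phx('./si', title='si_ph', PWX=scf, outdir='./out',
#          mode='queue') as ph_calc:
#     try:
#         ph_calc.calculate()
#     except QepySubmitted:
#         pass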
class q2r:
"""
qepy.q2r()
A Quantum Espresso q2r.x instance
"""
def __init__(self, qedir, title=None, PHX=None, **kwargs):
usrHome = os.path.expanduser('~')
self.qedir = qedir.replace('~', usrHome)
self.cwd = os.getcwd()
self.title = title
self.quote_input_params = {}
self.run_params = {}
self.parallel_params = {}
for key in q2rl.quote_input_keys:
self.quote_input_params[key] = None
for key in runl.run_keys:
self.run_params[key] = None
for key in runl.parallel_keys:
self.parallel_params[key] = None
for key in QEPYRC:
if self.run_params.has_key(key):
self.run_params[key] = QEPYRC[key]
# Get dependent pwx object
if PHX is not None:
if not isinstance(PHX, phx):
raise TypeError('PHX must be of type phx')
else:
self.phx = PHX
else:
self.phx = None
self._set(**kwargs)
def __enter__(self):
"""
On enter, if directory does not exist, create it.
Change into directory.
Syntax:
with q2r() as calc:
try:
calc.my_command()
except (QepyException):
do something
"""
## Create Paths
qedir = str(self.qedir).rstrip('/')
if qedir.replace('./', '') != os.getcwd().rstrip('/').split('/')[-1]: # Check if already in target directory
if not isdir(qedir):
os.mkdir('{0}'.format(qedir))
os.chdir(qedir)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
On exit, change back to original directory.
"""
os.chdir(self.cwd)
return False # Allows exception to propagate out
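# Illustrative usage sketch (not part of the original module): submitting a
# q2r run that depends on an existing phx instance `ph`; the directory,
# title and 'mode' keyword are hypothetical.
#
#     with q2r('~/calc/si', title='si_q2r', PHX=ph) as calc:
#         try:
#             calc.calculate(mode='queue')
#         except QepySubmitted:
#             pass  # handed to the scheduler; re-run later to poll status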
def _set(self, **kwargs):
"""
Set kwargs parameters to those defined by q2rlist.
"""
for key in kwargs:
if self.quote_input_params.has_key(key):
self.quote_input_params[key] = '\'{0}\''.format(kwargs[key].strip('\'\" '))
elif self.run_params.has_key(key):
self.run_params[key] = kwargs[key]
elif self.parallel_params.has_key(key):
self.parallel_params[key] = kwargs[key]
else:
raise TypeError('Parameter not defined: '+ key)
def calculate(self, **kwargs):
inFile = self.title.strip('\'\"') + '.in'
outFile = self.title.strip('\'\"') + '.out'
if not os.path.isfile(inFile):
## Create input file
self._qeInput()
for key in kwargs:
if self.run_params.has_key(key):
self.run_params[key] = kwargs[key]
elif self.parallel_params.has_key(key):
self.parallel_params[key] = kwargs[key]
else:
raise TypeError('Parameter not defined: '+ key)
if self.run_params['jobname'] is None:
self.run_params['jobname'] = self.title.strip('\'\"')
## Submit/Run job
if self._job_in_queue(): # If in queue, exit
raise QepyRunning()
elif os.path.isfile(outFile):
if self._check_complete():
pass
else:
raise QepyNotComplete('Job not found in queue, JOB DONE marker not found in out file')
elif os.path.isfile('jobidQ2R'): # jobidQ2R file already exists
raise QepyNotComplete('Found jobidQ2R file, but not running and no output file')
else: # If not in queue and not done, run
if self.run_params['mode'] == 'queue':
_qeSubmission(self, 'q2r')
if self.phx is not None: # Dependent on pwx
phxSuccessfulSubmit = False
try:
self.phx.calculate() # Submit pwx
except(QepySubmitted):
phxSuccessfulSubmit = True
pass
if phxSuccessfulSubmit is not True:
raise QepyException('ERROR: Dependent phx did not successfully submit to queue.')
phxfile = open('jobidPH') # Read jobid file
phxsub = phxfile.read().strip('\n\t ')
phxfile.close()
p = Popen(['qsub -W depend=afterok:'+ phxsub + ' q2rrun.sh'], shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
else: # NOT dependent on phx
p = Popen(['qsub', 'q2rrun.sh'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
if out == '' or err != '':
raise Exception(err)
else:
f = open('jobidQ2R','w')
f.write(out)
f.close()
raise QepySubmitted(out)
elif self.run_params['mode'] == 'local':
if self.phx is None: # Local run cannot have dependent phx
self._local(**kwargs)
else:
raise QepyException('ERROR: Cannot run q2r in mode=\'local\' while passing a dependent phx object.')
def _job_in_queue(self):
if not os.path.isfile('jobidQ2R'):
return False
else:
# Get jobidQ2R
jobidQ2R = open('jobidQ2R').readline().strip()
# See if jobidQ2R is in the queue
jobidQ2Rs_in_queue = commands.getoutput('qselect').split('\n')
if jobidQ2R in jobidQ2Rs_in_queue:
status, output = commands.getstatusoutput('qstat {0}'.format(jobidQ2R))
if status == 0:
lines = output.split('\n')
fields = lines[2].split()
job_status = fields[4]
if job_status == 'C':
return False
else:
return True
else:
return False
def _local(self, **kwargs):
"""
Run the calculation through command line
"""
inFileName = self.title.strip('\'\"') + '.in'
outFileName = self.title.strip('\'\"') + '.out'
command = ''
if self.run_params['ppn'] != 1: # allow mpirun if multiple cores requested
command += 'mpirun -np {0} '.format(self.run_params['ppn'])
command += self.run_params['q2r.x']
for key, value in self.parallel_params.items():
if value is not None:
command += ' -{0} {1} '.format(str(key), str(value))
command += ' < {0} > {1}'.format(inFileName, outFileName)
p = Popen([command], stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
out, err = p.communicate() # waits for the command to finish; avoids the pipe-buffer deadlock p.wait() can cause
if err != '':
raise Exception(err)
def _check_complete(self):
fileName = self.title.strip('\'\"') + '.out'
found = False
with open(fileName, 'r') as outFile: # context manager closes the file
for line in outFile:
if 'JOB DONE' in line:
found = True
break
return found
def _qeInput(self):
fileName = self.title.strip('\'\"') + '.in'
inFile = open(str(fileName), 'a')
inFile.write('# This file generated by qepy\n')
inFile.write('# For more info, visit github.com/kparrish/qepy\n')
inFile.write(' &input' + '\n')
for key, val in self.quote_input_params.items():
if val is not None:
inFile.write(' {0:s}={1:s},\n'.format(str(key), str(val)))
inFile.write(' /' + '\n')
inFile.close()
#-- END _qeInput --#
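# For reference, a minimal file produced by _qeInput() looks like this
# (the key/value pairs are illustrative, taken from quote_input_params):
#
#     # This file generated by qepy
#     # For more info, visit github.com/kparrish/qepy
#      &input
#      fildyn='si.dyn',
#      flfrc='si.fc',
#      /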
def _qeSubmission(self, executable):
inFileName = self.title.strip('\'\"') + '.in'
outFileName = self.title.strip('\'\"') + '.out'
if executable == 'pwx':
runFile = open('pwxrun.sh', 'w')
elif executable == 'phx':
runFile = open('phxrun.sh', 'w')
elif executable == 'q2r':
runFile = open('q2rrun.sh', 'w')
runFile.write('#!/bin/bash' + '\n')
runFile.write('#PBS {0}\n'.format(str(self.run_params['options'])))
runFile.write('#PBS -l nodes={0}:ppn={1}\n'.format(str(self.run_params['nodes']), self.run_params['ppn']))
runFile.write('#PBS -l walltime={0}:00:00\n'.format(str(self.run_params['walltime'])))
runFile.write('#PBS -l mem={0}\n'.format(str(self.run_params['mem'])))
runFile.write('#PBS -l vmem={0}\n'.format(str(self.run_params['vmem'])))
runFile.write('#PBS -N {0}\n'.format(str(self.run_params['jobname'])))
runFile.write('cd $PBS_O_WORKDIR\n')
runFile.write('pwd\n')
runFile.write('\n')
if executable == 'pwx':
runFile.write('mpirun -np {0} {1} '.format(str(self.run_params['ppn']), str(self.run_params['pw.x'])))
elif executable == 'phx':
runFile.write('mpirun -np {0} {1} '.format(str(self.run_params['ppn']), str(self.run_params['ph.x'])))
elif executable == 'q2r':
runFile.write('mpirun -np {0} {1} '.format(str(self.run_params['ppn']), str(self.run_params['q2r.x'])))
for key, value in self.parallel_params.items():
if value is not None:
runFile.write('-{0} {1} '.format(str(key), str(value)))
runFile.write('< {0} > {1}\n'.format(str(inFileName), str(outFileName)))
runFile.close()
#-- END _qeSubmission --#
|
kparrish/qepy
|
qepy/qepy.py
|
Python
|
gpl-2.0
| 41,365
|
[
"CRYSTAL",
"Quantum ESPRESSO",
"VisIt"
] |
6e56cac78a5665f8751b8037a898bf06880aa7e34887791ec896e20201acf7e2
|
# -*- coding: utf-8 -*-
from setuptools import setup
def local_version(version):
# https://github.com/pypa/setuptools_scm/issues/342
return ""
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="mpcontribs-io",
author="Patrick Huck",
author_email="phuck@lbl.gov",
description="MPContribs I/O Library",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/materialsproject/MPContribs/tree/master/mpcontribs-io",
packages=["mpcontribs.io.core", "mpcontribs.io.archie"],
install_requires=["archieml", "ipython", "pandas", "plotly", "pymatgen"],
license="MIT",
zip_safe=False,
include_package_data=True,
use_scm_version={
"root": "..",
"relative_to": __file__,
"local_scheme": local_version,
},
setup_requires=["setuptools_scm"],
)
|
materialsproject/MPContribs
|
mpcontribs-io/setup.py
|
Python
|
mit
| 907
|
[
"pymatgen"
] |
0de6c62f037aab796b87c2924c0a9d49b75c08092d1e425ae4981e0b89506ff9
|
# -*- coding:Utf-8 -*-
"""
.. currentmodule:: pylayers.network.network
Node Class
==========
.. autosummary::
:toctree: generated/
Node.__init__
Node.randomMAC
Network Class
==============
.. autosummary::
:toctree: generated/
Network.__init__
Network.__repr__
Network creation
----------------
.. autosummary::
:toctree: generated/
Network.add_devices
Network.create
Network._get_edges_typ
Network._get_grp
Network._get_llink
Network._get_wstd
Network._get_SubNet
Network._connect
Network._init_PN
Network attributes queries
--------------------------
.. autosummary::
:toctree: generated/
Network.get_pos
Network.get_orient
Network.get_pos_est
Network.overview
Network.haspe
Network.pp
Network update
--------------
.. autosummary::
:toctree: generated/
Network.update_pos
Network.update_orient
Network.update_edges
Network.update_PN
Network.compute_LDPs
Network.update_LDPs
Network Utilities
-----------------
.. autosummary::
:toctree: generated/
Network.perm
Network.combi
Network.Gen_tuple
Network.dist_edge
Network.show
Network save
------------
.. autosummary::
:toctree: generated/
Network.csv_save
Network.init_save
Network.mat_save
Network.txt_save
Network.loc_save
Network.pyray_save
Network.loc_save
Network.ini_save
PNetwork Class
==============
SimPy Process compliant version of the Network class
.. autosummary::
:toctree: generated/
PNetwork.__init__
PNetwork.run
"""
#####################################################################
#This file is part of Network.
#Network is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#Network is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with Network. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------
#authors :
#Nicolas AMIOT : nicolas.amiot@univ-rennes1.fr
#Bernard UGUEN : bernard.uguen@univ-rennes1.fr
#####################################################################
import numpy as np
import scipy as sp
import networkx as nx
import itertools
import random # needed by Node.randomMAC() below
import pickle as pk
import pkgutil
from copy import deepcopy
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
#import Tkinter as tk
import ConfigParser
import copy
import pdb
#from PyLayers.Network.Node import Node
import pylayers.util.pyutil as pyu
from pylayers.network.emsolver import EMSolver
from pylayers.network.show import ShowNet,ShowTable
#from pylayers.util.pymysqldb import Database
import pylayers.util.pyutil as pyu
from pylayers.util.project import *
from pylayers.util.utilnet import str2bool
import time
import pylayers.util.pyutil as pyu
from SimPy.SimulationRT import Process,hold
import pprint
import select
import sys
try:
from mayavi import mlab
from tvtk.tools import visual
except:
print 'mayavi not installed'
# How to take into account 1 specific key specifique for 1 MultiGraph
# MULTIGRAPH !!!! G.add_edge(10,11,key='wifi',attr_dict=dict(Pr=0,TOA=10))
class Node(PyLayers,nx.MultiGraph):
""" Class Node
inherit from networkx.MultiGraph()
Attributes
----------
Id : float/hex/str/...
node Id
p : np.array
True position
t : time.time()
Tag time
wstd : list
available wstd of the node
PN : Network.Network
Personal vision of the Network
pos : Dictionary
mapping from Node.Node to networkx node positions
Methods
-------
randomMAC() : generate a random MAC address
"""
def __init__(self,**kwargs):
nx.MultiGraph.__init__(self)
defaults = { 'ID':0,
'name':'',
'p':np.array(()),
't':0.,
'pe':np.array(()),
'te':0.,
'wstd':[],
'epwr':{},
'sens':{},
'typ':'ag',
'grp':'',
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
# Personal Network init
self.ID = kwargs['ID']
self.PN = Network(owner=self.ID,PN=True)
self.PN.add_node(self.ID,dict(ID=kwargs['ID'],
name=kwargs['name'],
pe=kwargs['pe'],
te=kwargs['te'],
wstd=kwargs['wstd'],
epwr=kwargs['epwr'],
sens=kwargs['sens'],
typ=kwargs['typ'],
))
# Network init
self.add_node(self.ID,dict(ID=kwargs['ID'],
name=kwargs['name'],
PN=self.PN,
p=kwargs['p'],
pe=self.PN.node[self.ID]['pe'],
t=kwargs['t'],
wstd=kwargs['wstd'],
epwr=kwargs['epwr'],
sens=kwargs['sens'],
typ=kwargs['typ'],
grp=kwargs['grp']))
self.p = self.node[self.ID]['p']
self.pe = self.PN.node[self.ID]['pe']
self.t = self.node[self.ID]['t']
self.wstd = self.node[self.ID]['wstd']
self.epwr = self.node[self.ID]['epwr']
self.sens = self.node[self.ID]['sens']
def randomMAC(self):
""" Generate a random MAC address
Returns
-------
macadress : string
"""
mac = [ 0x00, 0x16, 0x3e,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
return ':'.join(map(lambda x: "%02x" % x, mac))
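# Illustrative output (random values; 00:16:3e is the Xen OUI used above):
#     >>> Node().randomMAC() # doctest: +SKIP
#     '00:16:3e:5a:0f:c3'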
class Network(PyLayers,nx.MultiDiGraph):
""" Network class
inherits from networkx.MultiDiGraph()
Attributes
----------
wstd : dictionary
keys = wstd
value = list of nodes id
wstde : dictionary
keys = wstd
value = list of edges id
SubNet : dictionary
keys = wstd
value = Subgraph of the given wstd
pos : dictionary
keys = node id
value = node position
Methods
-------
_get_wstd(self) : Get wstd from nodes of the network
_connect(self) : Connect each node from a wireless standard
create(self) : compute get_wstd(),get_pos() and connect()
update_LDP(self,n1,n2,wstd,LDP=None,value=[]) : update Location Dependent Parameter
compute_LDP(self,wstd) : compute the LDP value thanks to an ElectroMag Solver
update_pos(self,n,p=np.array) : update node (or node list) position
get_pos(self,wstd=None) : get node positions
pp(self) : pretty print all edge information and LDP values on stdout
show(wstd=None,legend=True) : display the network, for all wstd or a given one
"""
def __init__(self,owner='sim',EMS=EMSolver(),PN=False):
""" object constructor
Parameters
----------
owner : string
'sim' (default) or the ID of the owning node
EMS : EMSolver
PN : Boolean
personal network activation
"""
nx.MultiDiGraph.__init__(self)
self.owner=owner
self.wstd={}
self.LDP = ['TOA','Pr']
self.SubNet={}
self.grp={}
self.EMS=EMS
self.coll_plot={}
self.pos={}
self.mat={}
self.links={}
self.relinks={}
self.idx = 0
self.lidx = 0
self.isPN=PN
def __repr__(self):
if not self.isPN:
s = 'Network information\n*******************\n'
s = s + 'number of nodes: ' + str(len(self.nodes())) +'\n'
title = '{0:7} | {1:15} |{2:7} | {3:4} | {4:17} | {5:10} | {6:10} '.format('ID', 'name', 'group', 'type', 'position (x,y,z)','antenna', 'wstd')
s = s + title + '\n' + '-'*len(title) + '\n'
subnet = self.SubNet.keys()
for sn in subnet:
for n in self.SubNet[sn].nodes():
# for compliance with simulnet and simultraj
# to be merged
try:
wstd = self.node[n]['wstd'].keys()
except:
wstd = self.node[n]['wstd']
try:
ant = self.node[n]['ant']['antenna']._filename.split('.')[0]
except:
ant=''
s = s + '{0:7} | {1:15} |{2:7} | {3:4} | {4:5.2f} {5:5.2f} {6:5.2f} | {7:10} | {8:10} '\
.format(self.node[n]['ID'][:7], self.node[n]['name'][:15],
self.node[n]['grp'][:7], self.node[n]['typ'][:4], self.node[n]['p'][0],
self.node[n]['p'][1],self.node[n]['p'][2],ant,wstd[:10]) + '\n'
# try:
# s = s + 'node ID: ' + str(self.node[n]['ID']) + '\n'
# except:
# s = s + 'node ID: ' + str(n) + '\n'
# try :
# s = s + 'wstd: ' + str(self.node[n]['wstd'].keys()) + '\n'
# except:
# s = s + 'wstd: ' + str(self.node[n]['wstd']) + '\n'
# try:
# s = s + 'grp: ' + str(self.node[n]['grp']) + '\n'
# except:
# s = s + 'type: ' + str(self.node[n]['typ']) + '\n'
# try:
# s = s + 'pos: ' + str(self.node[n]['p']) + '\n'
# except:
# pass
# s = s + '\n'
# # typ = nx.get_node_attributes(self,'typ').values()
# # nodes = np.array(nx.get_node_attributes(self,'typ').items())
# # nb_ag = len(np.where(nodes=='ag')[0])
# # nb_ap = len(np.where(nodes=='ap')[0])
# # pag=np.where(nodes=='ag')
# # pap=np.where(nodes=='ap')
# # s = s + '\n' + str(nb_ag) + ' Mobile Agents\n -------------\n'
# # s = s + 'Agents IDs : ' + str([nodes[i,0] for i in pag[0]]) +'\n'
# # s = s + '\n' + str(nb_ap) + ' Access points\n -------------\n'
# # s = s + 'number of access point : ' + '\n'
# # s = s + 'access points IDs : ' + str([nodes[i,0] for i in pap[0]]) +'\n'
# # if len(self.SubNet.keys()) != 0 :
# # s = s + '\n\nSubNetworks :' +str(self.SubNet.keys()) + '\n===========\n'
# # for sub in self.SubNet.keys():
# # s = s + '\t'+ sub + '\n' + self.SubNet[sub].__repr__() + '\n'
else:
s = 'Personal Network of node ' +str(self.owner)+ ' information\n***************************************\n'
s = s + '{0:7} |{1:20} | {2:5} | {3:7}| {4:7}| {5:7}| {6:7}| {7:7}| {8:10}|'.format('peer','wstd', 'TOA','std TOA','tTOA', 'Pr', 'std Pr', 'tPr','visibility')
for e1,e2 in self.edges():
for r in self.edge[e1][e2].keys():
TOA = self.edge[e1][e2][r]['TOA'][0]
stdTOA = self.edge[e1][e2][r]['TOA'][1]
pr = self.edge[e1][e2][r]['Pr'][0]
stdpr = self.edge[e1][e2][r]['Pr'][1]
try :
tTOA = self.edge[e1][e2][r]['tTOA']
except:
tTOA = 'nan'
try :
tpr = self.edge[e1][e2][r]['tPr']
except:
tpr = 'nan'
vis = self.edge[e1][e2][r]['vis']
np.set_printoptions(precision=3)
s = s + '\n' + '{0:7} |{1:20} | {2:5.2f} | {3:7.2f}| {4:7}| {5:7.2f}| {6:7.2f}| {7:7}| {8:10}|'.format(e2 ,r ,TOA ,stdTOA ,tTOA ,pr , stdpr ,tpr, vis)
return s
def add_devices(self, dev, p=[], grp=''):
""" add devices to the current network
Parameters
----------
dev : list
list of Devices
p : ndarray (Ndev x 3)
np.array of devices positions
grp : string
name of the group of devices
"""
if not isinstance(dev,list):
dev=[dev]
if len(p) == 0: # no positions supplied
p = np.nan*np.zeros((len(dev),3))
elif len(p.shape) == 1:
p = p.reshape(1,3)
if (p.shape[0] != len(dev)):
raise AttributeError('number of devices != nb pos')
# check if unique ID (in dev and in network ) else raise error
ids = [d.ID for d in dev]
for d in dev:
if d.ID in self:
raise AttributeError('Devices must have a different ID')
# determine node type
#
# ap : access point
# ag : agent
#
if 'ap' in grp:
typ = 'ap'
else :
typ = 'ag'
[d.__dict__.update({'p': p[ud, :],
'T': np.eye(3),
'grp':grp,
'typ':typ,
'dev':d,
}) for ud, d in enumerate(dev)]
#
# self.add_nodes_from([(d.ID, ldic[ud]) for ud,d in enumerate(dev)])
#
self.add_nodes_from([(d.ID, d.__dict__) for d in dev])
# create personal network
for ud, d in enumerate(dev):
self.node[d.ID]['PN']= Network(owner=d.ID, PN=True)
self.node[d.ID]['PN'].add_nodes_from([(d.ID,d.__dict__)])
# get wireless standard
self._get_wstd()
# for d in dev:
# for s in d.wstd.keys():
# try:
# self.wstd[s]
# if d.ID not in self.wstd[s]:
# self.wstd[s].append(d.ID)
# except :
# self.wstd[s]=[d.ID]
def perm(self,iterable,r,key,d=dict()):
""" calculate permutation
Notes
-----
combi = itertools.permutation(iterable,r) adapted
This is an adapted version of itertools.permutations to
comply with the networkx.add_edges_from method.
itertools.permutations(range(4), 3) --> 012 013 021 023 031 032 102 103 ...
self.perm([10,11],2,'wifi') --> (10, 11, 'wifi', {'Pr': [], 'TOA': []})
(11, 10, 'wifi', {'Pr': [], 'TOA': []})
Parameters
----------
iterable : list
list of node
r : int
number of nodes gathered in the output tuple (always set to 2!)
Returns
--------
out : tuple(node_list,r,wstd,d):
node_list : list of node1
r : gather r node in the tuple
wstd : the specified wstd
d : dictionary of wstd attributes
Examples
--------
>>> from pylayers.network.network import *
>>> N=Network()
>>> l= [0,1,2]
>>> key='toto'
>>> d=dict(key1=1,key2=2)
>>> perm=N.perm(l,2,key,d)
>>> perm.next()
(0, 1, 'toto', {'Pr': [], 'TOA': [], 'key1': 1, 'key2': 2})
>>> perm.next()
(0, 2, 'toto', {'Pr': [], 'TOA': [], 'key1': 1, 'key2': 2})
>>> perm.next()
(1, 0, 'toto', {'Pr': [], 'TOA': [], 'key1': 1, 'key2': 2})
>>> perm.next()
(1, 2, 'toto', {'Pr': [], 'TOA': [], 'key1': 1, 'key2': 2})
>>> perm.next()
(2, 0, 'toto', {'Pr': [], 'TOA': [], 'key1': 1, 'key2': 2})
>>> perm.next()
(2, 1, 'toto', {'Pr': [], 'TOA': [], 'key1': 1, 'key2': 2})
"""
# for l in self.LDP:
# d[l]=[]
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
if r > n:
return
indices = range(n)
cycles = range(n, n-r, -1)
yield tuple((pool[indices[0]],pool[indices[1]],key,d))
while n:
for i in reversed(range(r)):
cycles[i] -= 1
if cycles[i] == 0:
indices[i:] = indices[i+1:] + indices[i:i+1]
cycles[i] = n - i
else:
j = cycles[i]
indices[i], indices[-j] = indices[-j], indices[i]
yield tuple((pool[indices[0]],pool[indices[1]],key,d))
break
else:
return
def combi(self,iterable,r,key,d=dict()):
""" calculate combination
Notes
-----
combi = itertools.combination(iterable,r) adapted
This is an adapted version of itertools.combinations in order
to comply with the networkx.add_edges_from method.
itertools.combinations('ABCD', 2) --> AB AC AD BC BD CD
itertools.combinations(range(4), 3) --> 012 013 023 123
self.combi([10,11,12],2,'wifi') --> (10, 11, 'wifi', {'Pr': [], 'TOA': []})
(10, 12, 'wifi', {'Pr': [], 'TOA': []})
(11, 12, 'wifi', {'Pr': [], 'TOA': []})
Parameters
----------
iterable : list
list of node
r : int
number of nodes gathered in the output tuple (always set to 2!)
d : dict
Returns
-------
out : tuple(node_list,r,wstd,d):
node_list : list of node1
r : gather r node in the tuple
wstd : the specified wstd
d : dictionary of wstd attributes
Examples
--------
>>> from pylayers.network.network import *
>>> N=Network()
>>> l= [0,1,2,3,4]
>>> key='toto'
>>> d=dict(key1=1,key2=2)
>>> comb=N.combi(l,2,key,d)
>>> comb.next()
(0, 1, 'toto', {'Pr': [], 'TOA': [], 'key1': 1, 'key2': 2})
>>> comb.next()
(0, 2, 'toto', {'Pr': [], 'TOA': [], 'key1': 1, 'key2': 2})
"""
# for l in self.LDP:
# d[l]=[]
pool = iterable
n = len(pool)
if r > n:
return
indices = range(r)
yield tuple((pool[indices[0]],pool[indices[1]],key,d))
while True:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i+1, r):
indices[j] = indices[j-1] + 1
yield tuple((pool[indices[0]],pool[indices[1]],key,d))
def Gen_tuple(self,gene,wstd,var):
""" generate a specific tuple
Parameters
----------
gene : tuple(x,y) iterator
wstd : str
var : list
len(var) = len(gene)
Yield
-----
tuple : (gene[i][0],gene[i][1],wstd,var[i]) for iteration i
Examples
--------
>>> from pylayers.network.network import *
>>> N=Network()
>>> tup = zip(range(5),range(5))
>>> tup
[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
>>> g = iter(tup)
>>> wstd='string wstd'
>>> var=[10,11,12,13,14]
>>> T=N.Gen_tuple(g,wstd,var)
>>> T.next()
(0, 0, 'string wstd', 10)
>>> T.next()
(1, 1, 'string wstd', 11)
"""
gvar=iter(var)
while True:
G=gene.next()
Gvar=gvar.next()
yield(tuple((G[0],G[1],wstd,Gvar)))
def _get_wstd(self):
""" get wireless standards from nodes of the network
wstd argument specifies which wireless standard to append to the network.
If None, all wireless standards are appended.
Examples
--------
>>> from pylayers.network.network import *
>>> N = Network()
>>> for i in range(3):
...     no = Node(ID=i, wstd=['wifi', 'bt'])
...     N.add_nodes_from(no.nodes(data=True))
>>> N._get_wstd()
>>> N.wstd
{'bt': [0, 1, 2], 'wifi': [0, 1, 2]}
"""
# if Rat !='None' :
for no in self.nodes():
for r in self.node[no]['wstd']:
try:
self.wstd[r].append(no)
except :
self.wstd[r]=[no]
# else :
# for no in self.nodes():
# for r in self.node[no]['wstd']:
# try:
# self.wstd[r].extend(no)
# except :
# self.wstd[r]=[no]
# uniquify results
for ws in self.wstd.keys():
self.wstd[ws] = {}.fromkeys(self.wstd[ws]).keys()
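# Note (added for clarity): {}.fromkeys(seq).keys() is a Python 2 idiom for
# de-duplicating a list without preserving order, e.g.
# {}.fromkeys([0, 1, 0, 2]).keys() -> [0, 1, 2] (ordering not guaranteed).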
def update_edges(self, d , wstd, nodes=[]):
""" update edges information for a given wstd
Parameters
----------
d: dict :
dictionnary of information to be updated
wstd : list | dict
list of wstd where d has to be modified
nodes : list
list of nodes where information has to be applied
raise error if nodes in the list are not in wstd
"""
if isinstance(wstd,dict):
wstd = wstd.keys()
elif not isinstance(wstd, list):
wstd = [wstd]
for w in wstd:
if nodes == []:
edges=self.perm(self.wstd[w], 2, w, d=d)
else:
nin = [n in self.wstd[w] for n in nodes]
# raise an error if some nodes are not in the wstd
# (no error is raised if none of the nodes is in the wstd)
if sum(nin) != len(nodes) and (sum(nin) != 0):
unin = np.where(np.array(nin) == False)[0]
raise AttributeError(str(np.array(nodes)[unin]) +' are not in ' + w)
else :
edges=self.perm(nodes, 2, w, d=d)
try:
self.SubNet[w].add_edges_from(edges)
except:
self.add_edges_from(edges)
def _connect(self):
""" connect nodes
This method
1) Connect all nodes from the network sharing the same wstd
2) Create the associated SubNetworks
3) Create lists of links : self.links and self.relinks
"""
edge_dict={}
for l in self.LDP:
edge_dict[l]=np.array((np.nan, np.nan))
edge_dict['vis'] = False
for wstd in self.wstd.keys():
self.update_edges(edge_dict,wstd)
self._get_SubNet(wstd)
# update edge type information
self._get_edges_typ()
# create lists of links
self._get_llinks()
def _get_llinks(self):
""" get list of links from the Network
Notes
-----
Fill self.links and self.relinks
"""
for wstd in self.wstd.keys():
self.links[wstd]=[]
self.relinks[wstd]=[]
for i in itertools.combinations(self.wstd[wstd],2):
self.links[wstd].append([i[0],i[1],self.edge[i[0]][i[1]][wstd]['typ']])
# if self.node[i[0]]['grp'] == self.node[i[1]]['grp']\
# and (self.node[i[0]]['typ'] != 'ag'\
# or self.node[i[0]]['typ'] != 'ag'):
# self.links[wstd].append([i,'OB'])
# else :
# nx.set_edge_attributes(self,i,{'typ':'OffB'})
# self.links[wstd].append([i,'OffB'])
self.relinks[wstd]=[[i[1],i[0],i[2]] for i in self.links[wstd]]
def _get_edges_typ(self):
""" apply specific type on edges
Notes
-----
types are :
OB : On body
when the two nodes of a link are
on the same agent
and belong to the same group
B2B : Body to Body
when the two nodes of a link
belong to 2 different agents
B2I : Body to Infrastructure
when a link connects
an agent and an access point
I2I : Infrastructure to Infrastructure
when a link connects
2 access points
"""
d = {}
for n in self.SubNet:
for e in self.SubNet[n].edges():
e0 = self.node[e[0]]
e1 = self.node[e[1]]
if e0['typ'] == e1['typ'] == 'ag':
if e0['grp'] == e1['grp']:
self.update_edges({'typ': 'OB'}, n, e)
else :
self.update_edges({'typ': 'B2B'}, n, e)
elif e0['typ'] == e1['typ'] == 'ap':
# if e0['grp'] == e1['grp']:
self.update_edges({'typ': 'I2I'}, n, e)
# print str(e0['ID']),str(e1['ID']),'I2I'
else:
self.update_edges({'typ': 'B2I'}, n, e)
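# Link typing at a glance (summary of the rules implemented above):
#     typ(e0)  typ(e1)  same grp  ->  link typ
#     ag       ag       yes       ->  'OB'
#     ag       ag       no        ->  'B2B'
#     ap       ap       any       ->  'I2I'
#     ag/ap    ap/ag    any       ->  'B2I'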
def _get_grp(self):
"""
get group of the nodes of a network
"""
for n in self.nodes():
grp = self.node[n]['grp']
if grp not in self.grp.keys():
self.grp[grp] = []
if n not in self.grp[grp]:
self.grp[grp].extend([n])
def _get_SubNet(self,wstd=[]):
"""
get SubNetworks of a network
Warnings
--------
ALWAYS use self._get_wstd() BEFORE !
Parameters
----------
wstd : specify which SubNet to create
Examples
--------
>>> from pylayers.network.network import *
>>> N = Network()
>>> for i in range(2):
...     no = Node(ID=i, wstd=['wifi', 'bt'])
...     N.add_nodes_from(no.nodes(data=True))
>>> no = Node(ID=2, wstd=['wifi'])
>>> N.add_nodes_from(no.nodes(data=True))
>>> N._get_wstd() # VERY IMPORTANT
>>> N._get_SubNet()
>>> N.SubNet['bt'].nodes()
[0, 1]
>>> N.SubNet['wifi'].nodes()
[0, 1, 2]
"""
if wstd == []:
for wstd in self.wstd:
# creating all SubNetworks
self.SubNet[wstd]= self.subgraph(self.wstd[wstd])
# remove information from previous subnetwork (because subgraph copy the whole edge information)
ek = self.SubNet[wstd].edges(keys=True)
for e in ek :
if e[2] != wstd:
self.SubNet[wstd].remove_edge(e[0],e[1],e[2])
for n in self.SubNet[wstd].nodes():
try:
self.SubNet[wstd].node[n]['epwr']=self.SubNet[wstd].node[n]['epwr']
self.SubNet[wstd].node[n]['sens']=self.SubNet[wstd].node[n]['sens']
except:
pass
elif wstd in self.wstd:
# creating SubNetworks
self.SubNet[wstd]= self.subgraph(self.wstd[wstd])
# remove information from previous subnetwork (because subgraph copy the whole edge information)
for k in self.wstd.keys():
if k != wstd:
try:
self.SubNet[wstd].remove_edges_from(self.SubNet[k].edges(keys=True))
except :
pass
for n in self.SubNet[wstd].nodes():
try:
self.SubNet[wstd].node[n]['epwr']=self.SubNet[wstd].node[n]['epwr']
self.SubNet[wstd].node[n]['sens']=self.SubNet[wstd].node[n]['sens']
except:
pass
else :
raise AttributeError('invalid wstd name')
def _init_PN(self):
"""
Initialize personal networks
"""
for wstd, subnet in self.SubNet.iteritems():
for n in subnet.nodes():
for nn in subnet.nodes():
if nn != n:
try:
if wstd not in subnet.node[n]['PN'].node[nn]['wstd']:
subnet.node[n]['PN'].node[nn]['wstd'].append(wstd)
except:
subnet.node[n]['PN'].add_node(nn,attr_dict=dict(wstd=[wstd],pe=np.array(()),te=0.),typ=subnet.node[nn]['typ'])
Z= subnet.edges(n,keys=True,data=True)
subnet.node[n]['PN'].add_edges_from(Z)
def create(self):
""" create the network
This method computes :
* _get_wstd()
* _get_grp()
* _connect()
* _init_PN
"""
self._get_wstd()
self._get_grp()
self._connect()
self._init_PN()
def update_PN(self):
""" update personnal network
"""
####################################################################################
# a first iteration is required to correctly initialize the Personal Networks' SubNets
for wstd in self.wstd.iterkeys():
self.compute_LDPs(wstd)
for n in self.nodes():
self.node[n]['PN']._get_wstd()
self.node[n]['PN']._get_SubNet()
# Add access point position in each personal network (PN)
[self.node[n]['PN'].node[n2].update({'pe':self.node[n2]['p']}) for n2 in self.node[n]['PN'].node.iterkeys() if self.node[n]['PN'].node[n2]['typ'] == 'ap']
####################################################################################
# def visibility(func):
# def wrapper(*args, **kwargs):
# a = list(args)
# pdb.set_trace()
# print 'decorator',a
# return func(*args, **kwargs)
# return wrapper
def dist_edge(self,e,dp):
""" compute the Euclidean length of each edge
Parameters
----------
e : list of edges (node-ID pairs)
dp : dict mapping node ID to position (np.array)
"""
return(np.array([np.sqrt(np.sum((dp[i[0]]-dp[i[1]])**2)) for i in e]))
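# e.g. (illustrative): N.dist_edge([('a', 'b')],
#     {'a': np.array((0., 0.)), 'b': np.array((3., 4.))}) -> array([ 5.])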
def update_LDPs(self,ln,wstd,lD):
"""Set a value between 2 nodes (n1 and n2) for a specific LDP from a wstd
This method update : * The network edges
* The personal network (PN) of both n1 and n2
Parameters
----------
n1 : node ID
n2 : node ID
wstd : string
A specific wstd which exist in the network ( if not , raises an error)
ln : list
list of nodes
lD : list of dictionnary:
[ {LDP1_1:[value , std],LDP2_1:[value , std] } , {LDPL_N:[value , std],LDPL_N:[value , std] } ] for N nodes and L LDPS
.. toto::
Check if LDP value is compliant with the LDP
"""
self.SubNet[wstd].add_edges_from(self.Gen_tuple(ln,wstd,lD))
def compute_LDPs(self,wstd):
"""compute edge LDP
Parameters
----------
wstd : string
A specific wstd which exists in the network ( if not , raises an error)
"""
# value : list : [LDP value , LDP standard deviation]
# method : ElectroMagnetic Solver method ( 'direct', 'Multiwall', 'PyRay'
p=nx.get_node_attributes(self.SubNet[wstd],'p')
epwr=nx.get_node_attributes(self.SubNet[wstd],'epwr')
sens=nx.get_node_attributes(self.SubNet[wstd],'sens')
e=self.links[wstd]#self.SubNet[wstd].edges()
re=self.relinks[wstd] # reverse link aka other direction of link
lp,lt, d, v= self.EMS.solve(p,e,'all',wstd,epwr,sens)
lD=[{'Pr':lp[i],'TOA':lt[np.mod(i,len(e))] ,'d':d[np.mod(i,len(e))],'vis':v[i]} for i in range(len(d))]
self.update_LDPs(iter(e+re),wstd,lD)
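# Note (added for clarity): e + re lists every link twice, once per
# direction, hence the np.mod(i, len(e)) indexing above: both directions of
# a link share the same TOA and distance, while Pr and visibility are kept
# per direction (a reading based on the layout of EMS.solve()'s output).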
def update_orient(self, n, T, now=0.):
"""
Update Orientation(s) of a Device(s)/node(s)
Parameters
----------
n : float/string (or a list of)
node ID (Nn x 3)
T : np.array ( or a list of )
node orientation (Nn x 3 x 3)
Todo
----
update the orientation of the antenna in the ACS (for now only DCS is updated)
"""
if (isinstance(T,np.ndarray)) or (isinstance(n,list) and isinstance(T,list) ):
# Transform inputs into lists
if not(isinstance(n,list)):
n=[n]
T=[T]
if len(n) == len(T):
d=dict(zip(n,T)) # transform data to be compliant with nx.set_node_attributes
nowd=dict(zip(n,[now]*len(n)))
else :
raise TypeError('n and T must have the same length')
# update position
nx.set_node_attributes(self,'T',d)
# update time of ground truth position
nx.set_node_attributes(self,'t',nowd)
else :
raise TypeError('n and T must be either: a key and a np.ndarray, or 2 lists')
def update_pos(self, n, p, now=0., p_pe='p'):
"""
Update Position(s) of Device(s)/node(s)
Parameters
----------
n : float/string (or a list of)
node ID
p : np.array ( or a list of )
node position
Todo
----
update the position of the antenna in the ACS (for now only DCS is updated)
"""
if (isinstance(p,np.ndarray)) or (isinstance(n,list) and isinstance(p,list) ):
# Transform inputs into lists
if not(isinstance(n,list)):
n=[n]
p=[p]
if len(n) == len(p):
d=dict(zip(n,p)) # transform data to be compliant with nx.set_node_attributes
nowd=dict(zip(n,[now]*len(n)))
else :
raise TypeError('n and p must have the same length')
# update position
nx.set_node_attributes(self,p_pe,d)
# update time of ground truth position
if p_pe=='p':
nx.set_node_attributes(self,'t',nowd)
else :
raise TypeError('n and p must be either: a key and a np.ndarray, or 2 lists')
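# Usage sketch (node IDs and coordinates illustrative):
#     N.update_pos('1', np.array((0., 0., 1.5)), now=0.1)
#     N.update_pos(['1', '2'],
#                  [np.array((0., 0., 1.5)), np.array((2., 0., 1.5))],
#                  now=0.1)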
def update_dis(self):
p = self.get_pos()
e = self.edges()
lp = np.array([np.array((p[e[i][0]],p[e[i][1]])) for i in range(len(e))])
d = np.sqrt(np.sum((lp[:,0]-lp[:,1])**2,axis=1))
[self.edge[ve[0]][ve[1]].update({'d':d[ie]}) for ie,ve in enumerate(self.edges())]
def get_orient(self,wstd=None):
""" get node orientations
Parameters
----------
wstd : specify a wstd to display node orientation.
If None, all wstd are displayed
Returns
-------
dictionary : key : node ID
value : np.array node position
"""
if wstd == None:
return nx.get_node_attributes(self,'T')
else :
try:
return nx.get_node_attributes(self.SubNet[wstd],'T')
except:
raise AttributeError('invalid wstd name')
def get_pos(self,wstd=None):
""" get node positions
Parameters
----------
wstd : specify a wstd to display node position. If None, all wstd are return
Returns
-------
dictionary : key : node ID
value : np.array node position
"""
if wstd == None:
if self.node[self.nodes()[0]].has_key('p'):
return nx.get_node_attributes(self,'p')
else :
return nx.get_node_attributes(self,'pe')
else :
try:
if self.SubNet[wstd].node[self.SubNet[wstd].nodes()[0]].has_key('p'):
return nx.get_node_attributes(self.SubNet[wstd],'p')
else :
return nx.get_node_attributes(self.SubNet[wstd],'pe')
except:
raise AttributeError('invalid wstd name')
def get_pos_est(self,wstd=None):
""" get node estimated positions ( only available in PN network)
Parameters
----------
wstd : specify a wstd to display node position. If None, all wstd are displayed
Returns
-------
dictionary : key : node ID
value : np.array node position
"""
if wstd == None:
return nx.get_node_attributes(self,'pe')
else :
try:
return nx.get_node_attributes(self.SubNet[wstd],'pe')
except:
raise AttributeError('invalid wstd name')
def haspe(self,n):
""" Test if a node has an estimated point pe key
Parameters
----------
n : int
node number
Returns
-------
Boolean : True if node n has a pe key
"""
try:
return self.node[n]['pe'].any()
except:
return False
def overview(self):
""" overview of the network
Returns
-------
O : dict
"""
O={}
for sn in self.SubNet.iteritems():
for ldp in self.LDP:
try:
O[sn[0]].update({ldp:nx.get_edge_attributes(sn[1],ldp)})
except:
O[sn[0]]={ldp:nx.get_edge_attributes(sn[1],ldp)}
return (O)
def pp(self):
""" pretty print information
OBSOLETE
Print information on edges connection and LDPs values and accuracy
"""
for wstd in self.wstd.keys():
print '-'*30
print wstd
print('{0:10} | {1:5} | {2:5} | {3:5} | {4:5} | {5:5} |'.format('Node link','TOA ','TOA std', 'Pr','Pr std', 'distance' ))
print '-'*30
T=nx.get_edge_attributes(self.SubNet[wstd],'TOA')
P=nx.get_edge_attributes(self.SubNet[wstd],'Pr')
D=nx.get_edge_attributes(self.SubNet[wstd],'d')
for i in self.SubNet[wstd].edges(): # loop over all links
print('{0:10} | {1:1.4} | {2:7.4} | {3:1.4} | {4:7.4} | {5:7.4} |'.format(i,T[i][0],T[i][1],P[i][0],P[i][1],D[i]))
def show(self,**kwargs):
"""
Show the network
Parameters
----------
wstd : specify a wstd to display. If None, all wstd are displayed
legend : Bool. Toggle display edge legend
ion : interactive mode for matplotlib
info : plot information on edges
fig : plt.figure() to plot
ax : plt.figure.ax to plot
name : figure name
"""
# parse kwargs (defaults chosen to match the documented behaviour);
# without this the names used below were undefined (NameError)
wstd = kwargs.get('wstd', None)
legend = kwargs.get('legend', True)
ion = kwargs.get('ion', False)
info = kwargs.get('info', False)
fig = kwargs.get('fig', None)
ax = kwargs.get('ax', None)
name = kwargs.get('name', 'network')
C = ConfigParser.ConfigParser()
C.read(pyu.getlong('show.ini', 'ini'))
color = ['r', 'g', 'b', 'm', 'y', 'c']*5
style = ['-']*10
wstdcolor = {k:color[uk] for uk, k in enumerate(self.SubNet.keys())}
wstdes = {k:style[uk] for uk, k in enumerate(self.SubNet.keys())}
# stdcolor = dict(C.items('wstdcolor'))
# wstdes = dict(C.items('wstdestyle'))
if wstd == None:
rloop = self.wstd.keys()
else :
if isinstance(wstd,list):
rloop = wstd
elif isinstance(wstd,str) :
rloop=[wstd]
else :
raise AttributeError('Arg must be a string or a string list')
if fig==None:
fig = plt.figure()
ax=fig.add_subplot(111)
elif ax== None:
ax=fig.add_subplot(111)
else:
plt.figure(name)
ax.axis('scaled')
try:
self.coll_plot['node'][1]=[]
self.coll_plot['label'][1]=[]
self.coll_plot['edge'][1]=[]
Cl=[]
except:
self.coll_plot['node']=[[]]
self.coll_plot['node'].append([])
self.coll_plot['label']=[[]]
self.coll_plot['label'].append([])
self.coll_plot['edge']=[[]]
self.coll_plot['edge'].append([])
Cl=[]
for ii,rl in enumerate(rloop):
pos = self.get_pos(rl)
pos = {k:v[:2] for k,v in pos.items()}
self.coll_plot['node'][1].append(nx.draw_networkx_nodes(
self,
pos=pos,
nodelist=self.SubNet[rl].nodes(),
node_size=100.,
node_color='r',
ax=ax))
Cl=nx.draw_networkx_labels(self.SubNet[rl],
pos=pos,
font_size=10,
ax=ax)
self.coll_plot['label'][1].extend(Cl.values())
self.coll_plot['edge'][1].append((nx.draw_networkx_edges(
self,
pos=pos,
edgelist=self.SubNet[rl].edges(),
arrows=False,
width=2.,
alpha=0.9,
edge_color=wstdcolor[rl],
style=wstdes[rl],
ax=ax)))
if legend:
ax.legend((self.coll_plot['edge'][1]),(rloop),loc=3)
if info :
L=nx.get_edge_attributes(self,'TOA')
if ion:
try:
[jj.remove() for jj in self.coll_plot['node'][0]]
[jj.remove() for jj in self.coll_plot['edge'][0] if jj != None]
[jj.remove() for jj in self.coll_plot['label'][0]]
except:
pass
plt.draw()
self.coll_plot['node'][0]=self.coll_plot['node'][1]
self.coll_plot['edge'][0]=self.coll_plot['edge'][1]
self.coll_plot['label'][0]=self.coll_plot['label'][1]
return fig, ax
def _show3(self, wstd=None,newfig=False):
""" Mayavi _show3
Parameters
----------
wstd : list
list of wireless standards
"""
color = ['r', 'g', 'b', 'm', 'y', 'c']*5
wstdcolor = {k:color[uk] for uk, k in enumerate(self.SubNet.keys())}
cold = pyu.coldict()
if not newfig:
f = mlab.gcf()
if wstd == None:
rloop = self.wstd.keys()
else :
if isinstance(wstd,list):
rloop = wstd
elif isinstance(wstd,str) :
rloop=[wstd]
else :
raise AttributeError('Arg must be a string or a string list')
for ii,rl in enumerate(rloop):
pos = self.get_pos(rl)
posv = pos.values()
mp = dict(zip(pos.keys(),range(len(pos.keys()))))
edg = self.SubNet[rl].edges()
connect = [(mp[e[0]],mp[e[1]]) for e in edg]
posv = np.array(posv)
pts = mlab.points3d(posv[:,0], posv[:,1], posv[:,2],
scale_factor=0.01, resolution=10)
pts.mlab_source.dataset.lines = np.array(connect)
tube = mlab.pipeline.tube(pts, tube_radius=0.01)
colhex = cold[wstdcolor[rl]]
col = tuple(pyu.rgb(colhex)/255.)
mlab.pipeline.surface(tube, color=col)
def csv_save(self,filename,S):
""" save node positions into csv file
Parameters
----------
filename : string
name of the csv file
S : Simulation
Scipy.Simulation object
"""
pos = np.array(nx.get_node_attributes(self,'p').values())
pos = np.hstack((pos,np.zeros((len(self.nodes()),1)))) # promote to 3D
pos = pos.reshape((1,len(self.nodes())*3))
filecsv = pyu.getlong(filename,pstruc['DIRNETSAVE'])+'.csv'
#file=open('../save_data/' +filename +'.csv','a')
file = open(filecsv,'a')
file.write(str(S.now()) +',')
np.savetxt(file,pos,delimiter=',')
file.close()
def init_save(self,height=1.5):
"""
initialize the per-node save files (.ini)
Parameters
----------
height : float
default node height written to the files
"""
pos=nx.get_node_attributes(self,'p').items()
AP=[]
AG=[]
api=1
loc=False
method = []
# get methods for localization
simcfg = ConfigParser.ConfigParser()
simcfg.read(pyu.getlong('simulnet.ini','ini'))
save =eval(simcfg.get('Save','save'))
if 'loc' in save:
loc = True
method = eval(simcfg.get('Localization','method'))
## find Agent and Acces point
for i in range(len(pos)):
if self.node[pos[i][0]]['typ'] =='ap':
AP.append(pos[i][0])
if not os.path.isfile(pyu.getlong(str(pos[i][0]) + '.ini',pstruc['DIRNETSAVE'])):
file=open(pyu.getlong(str(pos[i][0]) + '.ini',pstruc['DIRNETSAVE']),'w')
config = ConfigParser.ConfigParser()
config.add_section('coordinates')
# config.set('coordinates',str(api), str(pos[i][1][0]) + ' ' + str(pos[i][1][1]) + ' '+str(height))
config.set('coordinates','1', str(pos[i][1][0]) + ' ' + str(pos[i][1][1]) + ' '+str(height))
api=api+1
config.write(file)
file.close()
else:
AG.append(pos[i][0])
config = ConfigParser.ConfigParser()
if not os.path.isfile(pyu.getlong(str(pos[i][0]) + '.ini',pstruc['DIRNETSAVE'])):
file=open(pyu.getlong(str(pos[i][0]) + '.ini',pstruc['DIRNETSAVE']),'w')
config.add_section('coordinates')
if loc :
if 'geo' in method:
config.add_section('geo_est')
if 'alg' in method:
config.add_section('alg_est')
# if the simulation has already been run with localization, this
# ensures that the localization sections are created
else :
file=open(pyu.getlong(str(pos[i][0]) + '.ini',pstruc['DIRNETSAVE']),'w')
config.read(pyu.getlong(str(pos[i][0]) + '.ini',pstruc['DIRNETSAVE']))
if 'coordinates' not in config.sections():
config.add_section('coordinates')
if 'geo_est' not in config.sections() and 'geo' in method:
config.add_section('geo_est')
if 'alg_est' not in config.sections() and 'alg' in method:
config.add_section('alg_est')
config.write(file)
file.close()
if 'pyray' in save :
file2=open(pyu.getlong('pyray.ini',pstruc['DIRNETSAVE']),'w')
config = ConfigParser.ConfigParser()
config.add_section('nodes')
config.add_section('layout')
config.add_section('simulation')
config.set('nodes','AG',str(AG))
config.set('nodes','AP',str(AP))
config.set('simulation','updatetime',str(simcfg.get('Network','network_update_time')))
config.set('layout','layoutname',str(simcfg.get('Layout','filename')))
config.write(file2)
file2.close()
if 'loc' in save :
file2=open(pyu.getlong('loc.ini',pstruc['DIRNETSAVE']),'w')
config = ConfigParser.ConfigParser()
config.add_section('nodes')
config.add_section('simulation')
config.set('nodes','AG',str(AG))
config.set('nodes','AP',str(AP))
config.set('simulation','loc_updatetime',str(simcfg.get('Localization','localization_update_time')))
config.set('simulation','method',str(simcfg.get('Localization','method')))
config.set('simulation','duration',str(simcfg.get('Simulation','duration')))
config.write(file2)
file2.close()
return method
def mat_save(self,S):
"""
DEPRECATED
REPLACED BY pylayers.util.save
DEPRECATED
save node positions into a matlab structure file
Parameters
----------
filename : string
name of the mat file
S : Simulation
Scipy.Simulation object
"""
pos=nx.get_node_attributes(self,'p').items()
for i in range(len(pos)):
if not 'BS' in pos[i][0]:
try:
self.mat[pos[i][0]]['pos']=np.vstack((self.mat[pos[i][0]]['pos'],pos[i][1]))
self.mat[pos[i][0]]['time']=np.vstack((self.mat[pos[i][0]]['time'],S.now()))
except:
self.mat[pos[i][0]]={}
self.mat[pos[i][0]]['pos']=pos[i][1]
self.mat[pos[i][0]]['time']=np.array(S.now())
else :
try:
self.mat[pos[i][0]]['pos']=pos[i][1]
except:
self.mat[pos[i][0]]={}
self.mat[pos[i][0]]['pos']=pos[i][1]
sp.io.savemat(pyu.getlong('mat.mat','save_data'),self.mat)
# def sql_save(self,S):
# """
# save network state into mysqldatabase
# Attributes:
# ----------
#
# S : Simulation
# Scipy.Simulation object
# """
# self.db.writenet(self,S.now())
def txt_save(self,S):
"""
DEPRECATED
REPLACED BY pylayers.util.save
DEPRECATED
save network state into mysqldatabase
Parameters
----------
S : Simulation
Scipy.Simulation object
"""
pyu.writenet(self,S)
def loc_save(self,S):
"""
DEPRECATED
REPLACED BY pylayers.util.save
DEPRECATED
save txt
node ID , True pos x , True pos y , est pos x , est pos y , timestamp
Parameters
----------
S : Simulation
Scipy.Simulation object
"""
pos=nx.get_node_attributes(self,'p')
pe=nx.get_node_attributes(self,'pe_alg')
typ = nx.get_node_attributes(self,'typ')
if self.idx == 0:
entete = 'NodeID, True Position x, True Position y, Est Position x, Est Position y, Timestamp\n'
file=open(os.path.join(basename,pstruc['DIRNETSAVE'],'simulation.txt'),'write')
file.write(entete)
file.close()
try:
file=open(os.path.join(basename,pstruc['DIRNETSAVE'],'simulation.txt'),'a')
for n in self.nodes():
if typ[n] != 'ap':
data = n + ',' + str(pos[n][0]) + ',' + str(pos[n][1]) + ',' + str(pe[n][0][0]) + ',' + str(pe[n][0][1]) + ',' +pyu.timestamp(S.now()) +',\n'
file.write(data)
file.close()
self.idx = self.idx +1
except:
pass
# def dual_save(self,S):
# """
# DEPRECATED
# REPLACED BY pylayers.util.save
# DEPRECATED
# save txt
# Parameters
# ----------
# S : Simulation
# Scipy.Simulation object
# """
# pos=nx.get_node_attributes(self,'p')
# pclust = nx.get_node_attributes(self,'pe_clust')
# typ = nx.get_node_attributes(self,'typ')
# if self.idx == 0:
# entete = 'Timestamp, True Position x, True Position y, Est Position1 x, Est Position1 y,Est Position2 x, Est Position2 y\n'
# file=open(basename+'/' + pstruc['DIRNETSAVE'] +'/pos.txt','write')
# file.write(entete)
# file.close()
# file2=open(basename+'/' + pstruc['DIRNETSAVE'] +'/rsslink.txt','write')
# entete2 = 'Timestamp, link, linkid, Pr, distance\n'
# file2.write(entete2)
# file2.close()
# file3=open(basename+'/' + pstruc['DIRNETSAVE'] +'/anchorposition.txt','write')
# data3 = 'node,pos x, pos y\n'
# file3.write(data3)
# for n in self.nodes():
# data3= n + ',' + str(self.node[n]['p'][0]) + ',' + str(self.node[n]['p'][1]) + '\n'
# file3.write(data3)
# file3.close()
# file4=open(basename+'/' + pstruc['DIRNETSAVE'] +'/toa.txt','w')
# entete4 = 'Timestamp, typ, toaid, toa,distance\n'
# file4.write(entete4)
# file4.close()
# try:
# file=open(basename+'/' + pstruc['DIRNETSAVE'] +'/pos.txt','a')
# file2=open(basename+'/' + pstruc['DIRNETSAVE'] +'/rsslink.txt','a')
# file4=open(basename+'/' + pstruc['DIRNETSAVE'] +'/toa.txt','a')
# for n in self.nodes():
# if n == '1':
# data = pyu.timestamp(S.now()) +','+ str(pos[n][0]) + ',' + str(pos[n][1]) + ',' + str(pclust[n][0,0]) + ',' + str(pclust[n][0,1]) + ',' + str(pclust[n][1,0]) + ',' + str(pclust[n][1,1]) +'\n'
# for e in self.edge[n].keys():
# if e != '6' and e !='7':
# try:
# data2 = data2 +',link,' + str(e) + ',' + str(self.edge[n][e]['rat1']['Pr'][0]) +',' + str(np.sqrt(np.sum((pos[n]-pos[e])**2)))
# except:
# data2 = pyu.timestamp(S.now()) + ',link,' + str(e) + ',' + str(self.edge[n][e]['rat1']['Pr'][0]) +',' + str(np.sqrt(np.sum((pos[n]-pos[e])**2)))
# else :
# try:
# data4 = data4 +',toa,' + str(e) + ',' + str(self.edge[n][e]['rat1']['TOA'][0]) +',' + str(np.sqrt(np.sum((pos[n]-pos[e])**2)))
# except:
# data4 = pyu.timestamp(S.now()) + ',toa,' + str(e) + ',' + str(self.edge[n][e]['rat1']['TOA'][0]) +',' +str(np.sqrt(np.sum((pos[n]-pos[e])**2)))
# data2=data2 + '\n'
# data4=data4 + '\n'
# file.write(data)
# file2.write(data2)
# file4.write(data4)
# file.close()
# file2.close()
# file4.close()
# self.idx = self.idx +1
# except:
# pass
def pyray_save(self,S):
"""
save node positions into ini file, compliant with pyray standard
Parameters
----------
filename : string
name of the pyray file
S : Simulation
Scipy.Simulation object
"""
assert len(self.SubNet.keys()) == 1 , NameError('when network.ini_save() \
is used , only 1 wstd must be involved in the Network.\
Please modify agent.ini')
height= 1.5
pos=nx.get_node_attributes(self,'p').items()
### create ini files
if self.idx == 0:
self.init_save(height=height)
### save agent positions
for i in range(len(pos)):
if self.node[pos[i][0]]['typ'] !='ap':
config = ConfigParser.ConfigParser()
config.read(pyu.getlong(str(pos[i][0]) + '.ini',pstruc['DIRNETSAVE']))
config.set('coordinates',str(self.idx+1),value = str(pos[i][1][0]) + ' ' + str(pos[i][1][1]) + ' '+str(height))
file=open(pyu.getlong(str(pos[i][0]) + '.ini',pstruc['DIRNETSAVE']),'w')
config.write(file)
file.close()
def loc_save(self,S,node='all',p=False):
"""
save node estimated positions into ini file,
Parameters
----------
S : Simulation
Scipy.Simulation object
"""
if node == 'all':
node = self.nodes()
elif not isinstance(node,list):
node = [node]
height=1.5
### create ini files
if self.lidx == 0:
self.init_save(height=height)
pe_alg = nx.get_node_attributes(self,'pe_alg')
pe_geo = nx.get_node_attributes(self,'pe_geo')
p = nx.get_node_attributes(self,'p')
### save agent positions estimations
for n in node:
if self.node[n]['typ'] !='ap':
config = ConfigParser.ConfigParser()
config.read(pyu.getlong(str(n[0]) + '.ini',pstruc['DIRNETSAVE']))
if pe_alg != {} :
config.set('alg_est',str(self.idx+1),value = str(pe_alg[n[0]][0]) + ' ' + str(pe_alg[n[0]][1]) + ' '+str(height))
if pe_geo != {} :
config.set('geo_est',str(self.idx+1),value = str(pe_geo[n[0]][0]) + ' ' + str(pe_geo[n[0]][1]) + ' '+str(height))
if p:
config.set('coordinates',str(self.idx+1),value = str(p[n[0]][0]) + ' ' + str(p[n[0]][1]) + ' '+str(height))
file=open(pyu.getlong(str(n[0]) + '.ini',pstruc['DIRNETSAVE']),'w')
config.write(file)
file.close()
self.lidx=self.lidx+1
def ini_save(self,S,filename='simulnet_data.ini',height=1.5):
"""
----------
DEPRECATED
----------
Save an .ini file of node position .
Only links which involve mobile nodes (typ 'ag') are kept.
The produced init file is filled as follow:
[timestamp]
nodeID1_nodeID2 = x1,y1,z1,x2,y2,z2
nodeID2_nodeID4 = x2,y2,z2,x4,y4,z4
....
Parameters
----------
S : Simulation
Scipy.Simulation object
filename : string
name of the saved ini file
height : float
height of the nodes
"""
assert len(self.SubNet.keys()) == 1 , NameError('when network.ini_save() \
is used , only 1 wstd must be involved in the Network.\
Please modify agent.ini')
if self.idx == 0:
file=open(pyu.getlong(filename ,'output'),'w')
else:
file=open(pyu.getlong(filename ,'output'),'a')
config = ConfigParser.ConfigParser()
timestamp = pyu.timestamp(S.now())
config.add_section(timestamp)
for e in self.edges():
if not ((self.node[e[0][0]]['typ'] == 'ap') and (self.node[e[1][0]]['typ'] == 'ap')):
key=str(e[0]) +'_' +str(e[1])
value1 = str(self.node[e[0][0]]['p'][0])+ ',' +str(self.node[e[0][0]]['p'][1])+','+str(height)
value2 = str(self.node[e[1][0]]['p'][0])+ ',' +str(self.node[e[1][0]]['p'][1])+','+str(height)
config.set(timestamp, key, value1 + ' , ' + value2)
config.write(file)
file.close()
self.idx=self.idx+1
#class PN(nx.MultiDiGraph):
# def __init__(self,N):
# nx.MultiDiGraph.__init__(self)
# self.add_nodes_from(N)
# pdb.set_trace()
# self.add_edges_from( (u,v,key,deepcopy(datadict))
# for u,nbrs in self.adjacency_iter()
# for v,keydict in nbrs.items()
# for key,datadict in keydict.items() )
# pdb.set_trace()
# self.node=N.node
class PNetwork(Process):
"""
Process version of the Network class
"""
def __init__(self,**args):
defaults={'net':Network(),
'L':[],
'net_updt_time':0.001,
'sim':None,
'show_sg':False,
'disp_inf':False,
'save':[]}
## initialize attributes
for key, value in defaults.items():
if args.has_key(key):
setattr(self, key, args[key])
else:
setattr(self, key, value)
args[key]=value
self.args=args
Process.__init__(self,name='PNetwork',sim=self.sim)
self.cpt=self.sim.now()
self.filename='pos'
if 'mysql' in self.save:
config = ConfigParser.ConfigParser()
config.read(pyu.getlong('simulnet.ini','ini'))
sql_opt = dict(config.items('Mysql'))
self.net.db = Database(sql_opt['host'],sql_opt['user'],sql_opt['passwd'],sql_opt['dbname'])
def run(self):
####################################################################################
# a first iteration is required to correctly initialize the Personal Networks' SubNets
for wstd in self.net.wstd.iterkeys():
self.net.compute_LDPs(wstd)
for n in self.net.nodes():
self.net.node[n]['PN']._get_wstd()
self.net.node[n]['PN']._get_SubNet()
# Add access point position in each personal network (PN)
[self.net.node[n]['PN'].node[n2].update({'pe':self.net.node[n2]['p']}) for n2 in self.net.node[n]['PN'].node.iterkeys() if self.net.node[n]['PN'].node[n2]['typ'] == 'ap']
####################################################################################
self.pos=self.net.get_pos()
if 'csv' in self.save:
nbnodes = len(self.net.nodes())
entete = 'time'
inode=self.net.nodes_iter()
for i in inode:
entete = entete +',x'+str(i) +',y'+str(i)+',z'+str(i)
entete=entete +'\n'
filecsv = pyu.getlong(self.filename,pstruc['DIRNETSAVE'])+'.csv'
#file=open('../save_data/' +self.filename +'.csv','w')
file = open(filecsv,'w')
file.write(entete)
file.close()
while True:
############### compute LDP
for wstd in self.net.wstd.iterkeys():
self.net.compute_LDPs(wstd)
if self.show_sg:
############### compute Signature (Sg)
tx=self.net.node.keys()[0]
rx=self.net.node.keys()[1]
Sg=self.net.compute_Sg(tx,rx)
############## Show
if self.show_sg:
self.net.show_sig(Sg,tx,rx,ion=True,fig=fig,ax=ax)
if self.disp_inf:
self.net.pp()
# ############# save network
# REPLACED BY A SAVE PROCESS
if 'csv' in self.save:
self.net.csv_save(self.filename,self.sim)
# if 'pyray' in self.save:
# self.net.pyray_save(self.sim)
# if 'matlab' in self.save:
# self.net.mat_save(self.sim)
# if 'msql' in self.save:
# self.net.sql_save(self.sim)
# if 'txt' in self.save:
# self.net.txt_save(self.sim)
# if 'ini' in self.save:
# self.net.ini_save(self.sim)
# if 'loc' in self.save:
# self.net.loc_save(self.sim)
# if 'dual' in self.save:
# self.net.dual_save(self.sim)
self.net.pos=self.net.get_pos()
if self.sim.verbose:
print 'network updated @',self.sim.now()
self.net.idx=self.net.idx+1
yield hold, self, self.net_updt_time
|
buguen/pylayers
|
pylayers/network/network.py
|
Python
|
lgpl-3.0
| 64,459
|
[
"Mayavi"
] |
c7e1a0bf2666b500f13afe8a55ad7944458ef098c73769681e47b19b68228d1f
|
# Copyright (C) 2012 Swift Navigation Inc.
#
# This source is subject to the license found in the file 'LICENSE' which must
# be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
"""
The :mod:`peregrine.acquisition` module contains classes and functions related
to satellite acquisition.
"""
import numpy as np
import pyfftw
import cPickle
import defaults
from include.generateCAcode import caCodes
import logging
logger = logging.getLogger(__name__)
DEFAULT_WISDOM_FILE = "fftw_wisdom"
"""The default filename used for FFTW wisdom files."""
DEFAULT_THRESHOLD = 20.0
"""The default correlation power to consider an acquisition successful."""
# Import progressbar if it is available.
_progressbar_available = True
try:
import progressbar
except ImportError:
_progressbar_available = False
class Acquisition:
"""
Functions for performing satellite acquisitions on a set of samples.
The :class:`Acquisition` class pre-computes and stores a number of variables
given the sample data and its parameters so that repeated acquisitions using
the same parameters and samples (but different codes and carrier frequencies)
are performed as efficiently as possible.
Parameters
----------
samples : :class:`numpy.ndarray` or `None`
Array of samples to use for acquisition. Can be `None` but in this case
`init_samples` *must* be called with an array of samples before any other
acquisition functions are used.
sampling_freq : float, optional
The sampling frequency of the samples in Hz.
IF : float, optional
The receiver intermediate frequency used when capturing the samples.
samples_per_code : float, optional
The number of samples corresponding to one code length.
code_length : int, optional
The number of chips in the chipping code.
offsets : int, optional
Offsets, in units of code length (1ms), to use when performing long
integrations to avoid clobbering by nav bit edges.
If None, will try to figure out some suitable ones.
wisdom_file : string or `None`, optional
The filename from which to load and save FFTW `Wisdom
<http://www.fftw.org/doc/Words-of-Wisdom_002dSaving-Plans.html>`_,
pre-calculated data about how to most efficiently perform the FFT
operations required on the current hardware. Using FFTW wisdom greatly
reduces the time required to perform an acquisition. If `wisdom_file` is
`None` then no wisdom file is loaded or saved.
"""
def __init__(self,
samples,
sampling_freq=defaults.sampling_freq,
IF=defaults.IF,
samples_per_code=defaults.samples_per_code,
code_length=defaults.code_length,
n_codes_integrate=4,
offsets = None,
wisdom_file=DEFAULT_WISDOM_FILE):
self.sampling_freq = sampling_freq
self.IF = IF
self.samples_per_code = int(round(samples_per_code))
self.n_integrate = n_codes_integrate * self.samples_per_code
self.code_length = code_length
self.samples_per_chip = float(samples_per_code) / code_length
if offsets is None:
if n_codes_integrate <= 10:
offsets = [0, self.n_integrate]
elif n_codes_integrate <= 13:
offsets = [0, 2*(n_codes_integrate - 10)*self.samples_per_code,
self.n_integrate]
elif n_codes_integrate <= 15:
offsets = [0, (n_codes_integrate - 10) * self.samples_per_code,
2*(n_codes_integrate - 10) * self.samples_per_code,
self.n_integrate]
else:
raise ValueError("Integration interval too long to guess nav-declobber "
+ "offsets. Specify them or generalize the technique.")
self.offsets = offsets
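# e.g. (illustrative numbers) with samples_per_code = 16368 and
# n_codes_integrate = 12, the guessed offsets are
# [0, 2*(12-10)*16368, 12*16368] = [0, 65472, 196416], the intent being
# that at least one integration window avoids a nav-bit edge.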
# Try to load saved FFTW wisdom.
if wisdom_file is not None:
try:
self.load_wisdom(wisdom_file)
except IOError:
logger.warning("Couldn't open FFTW wisdom file, "
"the first run might take longer than usual.")
if samples is not None:
self.init_samples(samples)
# Setup acquisition:
# Allocate aligned arrays for the code FFT.
self.code = pyfftw.n_byte_align_empty((self.n_integrate), 16,
dtype=np.complex128)
self.code_ft = pyfftw.n_byte_align_empty((self.n_integrate), 16,
dtype=np.complex128)
# Create an FFTW transform which will execute the code FFT.
self.code_fft = pyfftw.FFTW(self.code, self.code_ft)
# Allocate aligned arrays for the inverse FFT.
self.corr_ft = pyfftw.n_byte_align_empty((self.n_integrate), 16,
dtype=np.complex128)
self.corr = pyfftw.n_byte_align_empty((self.n_integrate), 16,
dtype=np.complex128)
# Setup FFTW transforms for inverse FFT.
self.corr_ifft = pyfftw.FFTW(self.corr_ft, self.corr,
direction='FFTW_BACKWARD')
# Save FFTW wisdom for later
if wisdom_file is not None:
self.save_wisdom(wisdom_file)
def init_samples(self, samples):
"""
Update the samples used for acquisition.
This function pre-calculates some values that are used later in
acquisition. This function can be called to replace the samples used for
acquisition with another set having the same sampling frequency, IF etc.
.. warning:: If no samples were provided when the class was instantiated
then this method *must* be called before calling any other
acquisition functions.
Parameters
----------
samples : :class:`numpy.ndarray`
Array of samples to use for acquisition.
"""
self.samples = samples
# Create some short sets of data to correlate with
self.short_samples = [samples[off:(off + self.n_integrate)]
for off in self.offsets]
# Pre-compute Fourier transforms of the short signals
self.short_samples_ft = [np.fft.fft(samps) for samps in self.short_samples]
def interpolate(self, S_0, S_1, S_2, interpolation='gaussian'):
"""
Use interpolation to refine an FFT frequency estimate.
.. image:: /_static/interpolation_diagram.png
:align: center
:alt: Interpolation diagram
For an FFT bin spacing of :math:`\delta f`, the input frequency is
estimated as:
.. math:: f_{in} \\approx \delta f (k + \Delta)
Where :math:`k` is the FFT bin with the maximum magnitude and
:math:`\Delta \in [-\\frac{1}{2}, \\frac{1}{2}]` is a correction found by
interpolation.
**Parabolic interpolation:**
.. math:: \Delta = \\frac{1}{2} \\frac{S[k+1] - S[k-1]}{2S[k] - S[k-1] - S[k+1]}
Where :math:`S[n]` is the magnitude of FFT bin :math:`n`.
**Gaussian interpolation:**
.. math:: \Delta = \\frac{1}{2} \\frac{\ln(S[k+1]) - \ln(S[k-1])}{2\ln(S[k]) - \ln(S[k-1]) - \ln(S[k+1])}
The Gaussian interpolation method gives better results, especially when
used with a Gaussian window function, at the expense of computational
complexity. See [1]_ for detailed comparison.
Parameters
----------
S_0 : float
:math:`S[k-1]`, i.e. the magnitude of the FFT bin one before the maximum.
S_1 : float
:math:`S[k]`, i.e. the magnitude of the maximum FFT bin.
S_2 : float
:math:`S[k+1]`, i.e. the magnitude of the FFT bin one after the maximum.
interpolation : {'gaussian', 'parabolic', 'none'}, optional
The interpolation method to use. Defaults to 'gaussian'.
Returns
-------
out : float
The fractional number of FFT bins :math:`\Delta` that the interpolated
maximum is from the maximum point :math:`S[k]`.
References
----------
.. [1] Gasior, M. et al., "Improving FFT frequency measurement resolution
by parabolic and Gaussian spectrum interpolation" AIP Conf.Proc. 732
(2004) 276-285 `CERN-AB-2004-023-BDI
<http://cdsweb.cern.ch/record/738182>`_
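Examples
--------
A worked parabolic example with illustrative magnitudes
``S_0 = 4.0``, ``S_1 = 10.0``, ``S_2 = 8.0``::
delta = 0.5 * (8.0 - 4.0) / (2*10.0 - 4.0 - 8.0)  # = 0.25
i.e. the interpolated peak lies a quarter of a bin above bin :math:`k`.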
"""
if interpolation == 'parabolic':
# Parabolic interpolation.
return 0.5 * (S_2 - S_0) / (2*S_1 - S_0 - S_2)
elif interpolation == 'gaussian':
# Gaussian interpolation.
ln_S_0 = np.log(S_0)
ln_S_1 = np.log(S_1)
ln_S_2 = np.log(S_2)
return 0.5 * (ln_S_2 - ln_S_0) / (2*ln_S_1 - ln_S_0 - ln_S_2)
elif interpolation == 'none':
return 0
else:
raise ValueError("Unknown interpolation mode '%s'" % interpolation)
def acquire(self, code, freqs, progress_callback=None):
"""
Perform an acquisition with a given code.
Perform a code-phase parallel acquisition with a given code over a set of
carrier frequencies.
Parameters
----------
code : :class:`numpy.ndarray`, shape(`code_length`,)
A numpy array containing the code to acquire. Should contain one element
per chip with value +/- 1.
freqs : iterable
A list of carrier frequencies in Hz to search over.
progress_callback : callable or `None`, optional
A function that is called to report on the progress of the acquisition.
Can be `None`. The function should have the following signature::
progress_callback(current_step_number, total_number_of_steps)
Returns
-------
out : :class:`numpy.ndarray`, shape(len(`freqs`), `samples_per_code`)
2D array containing correlation powers at different frequencies and code
phases. Code phase axis is in samples from zero to `samples_per_code`.
"""
# Allocate array to hold results.
results = np.empty((len(self.offsets), len(freqs), self.samples_per_code))
# Upsample the code to our sampling frequency.
code_indices = np.arange(1.0, self.n_integrate + 1.0) / \
self.samples_per_chip
code_indices = np.remainder(np.asarray(code_indices, np.int), self.code_length)
self.code[:] = code[code_indices]
# Find the conjugate Fourier transform of the code which will be used to
# perform the correlation.
self.code_fft.execute()
code_ft_conj = np.conj(self.code_ft)
acq_mag = []
for n, freq in enumerate(freqs):
# Report on our progress
if progress_callback:
progress_callback(n + 1, len(freqs))
# Shift the signal in the frequency domain to remove the carrier
# i.e. mix down to baseband.
shift = int(round(float(freq) * len(self.short_samples_ft[0]) /
self.sampling_freq))
# Search over the possible nav bit offset intervals
for offset_i in range(len(self.offsets)):
short_samples_ft_bb = np.append(self.short_samples_ft[offset_i][shift:],
self.short_samples_ft[offset_i][:shift])
# Multiplication in frequency <-> correlation in time.
self.corr_ft[:] = short_samples_ft_bb * code_ft_conj
# Perform inverse Fourier transform to obtain correlation results.
self.corr_ifft.execute()
acq_mag = np.abs(self.corr[:self.samples_per_code])
results[offset_i][n] = np.square(acq_mag)
# Choose the nav-bit-declobber sample interval with the best correlation
max_indices = np.unravel_index(results.argmax(), results.shape)
return results[max_indices[0]]
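# A minimal, self-contained NumPy sketch (illustrative values, not part of
# the pipeline) of the identity used above: circular correlation in time is
# the inverse FFT of the signal spectrum times the conjugate code spectrum.
#
# x = np.array([1.0, 2.0, 3.0, 4.0])
# c = np.array([1.0, -1.0, 1.0, -1.0])
# via_fft = np.fft.ifft(np.fft.fft(x) * np.conj(np.fft.fft(c))).real
# direct = [np.dot(x, np.roll(c, k)) for k in range(4)]
# np.allclose(via_fft, direct)  # -> True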
def find_peak(self, freqs, results, interpolation='gaussian'):
"""
Find the peak within a set of acquisition results.
Finds the point in the acquisition results array with the greatest
correlation power and determines the code phase and carrier frequency
corresponding to that point. The Signal-to-Noise Ratio (SNR) of the peak is
also estimated.
Parameters
----------
freqs : iterable
List of frequencies mapping the results frequency index to a value in Hz.
results : :class:`numpy.ndarray`, shape(len(`freqs`), `samples_per_code`)
2D array containing correlation powers at different frequencies and code
phases. Code phase axis is in samples from zero to `samples_per_code`.
Returns
-------
out : (float, float, float)
| The tuple
| `(code_phase, carrier_freq, SNR)`
| Where `code_phase` is in chips, `carrier_freq` is in Hz and `SNR` is
(currently) in arbitrary units.
"""
# Find the results index of the maximum.
freq_index, cp_samples = np.unravel_index(results.argmax(),
results.shape)
if 0 < freq_index < len(freqs) - 1:
delta = self.interpolate(
results[freq_index-1][cp_samples],
results[freq_index][cp_samples],
results[freq_index+1][cp_samples],
interpolation
)
if delta > 0:
freq = freqs[freq_index] + (freqs[freq_index+1] - freqs[freq_index]) * delta
else:
freq = freqs[freq_index] - (freqs[freq_index-1] - freqs[freq_index]) * delta
else:
freq = freqs[freq_index]
code_phase = float(cp_samples) / self.samples_per_chip
# Calculate SNR for the peak.
snr = np.max(results) / np.mean(results)
return (code_phase, freq, snr)
def acquisition(self,
prns=range(32),
doppler_priors=None,
doppler_search=7000,
doppler_step=None,
threshold=DEFAULT_THRESHOLD,
show_progress=True,
multi=True
):
"""
Perform an acquisition for a given list of PRNs.
Perform an acquisition for a given list of PRNs across a range of Doppler
frequencies.
This function returns :class:`AcquisitionResult` objects containing the
location of the acquisition peak for PRNs that have an acquisition
Signal-to-Noise ratio (SNR) greater than `threshold`.
This calls `acquire` to find the precise code phase and a carrier frequency
estimate to within `doppler_step` Hz and then uses interpolation to refine
the carrier frequency estimate.
Parameters
----------
prns : iterable, optional
List of PRNs to acquire. Default: 0..31 (0-indexed)
doppler_priors : list of float, optional
List of expected Doppler frequencies in Hz (one per PRN). The search will
be centered about these. If `None`, the search is centered on 0 Hz for
all PRNs.
doppler_search : float, optional
Maximum frequency offset from the Doppler prior to search, in Hz.
Default: 7000
doppler_step : float, optional
Doppler frequency step to use when performing the coarse Doppler
frequency search.
threshold : float, optional
Threshold SNR value for a satellite to be considered acquired.
show_progress : bool, optional
When `True` a progress bar will be printed showing acquisition status and
estimated time remaining.
multi : bool, optional
When `True` the per-PRN acquisitions are run in parallel using
:func:`peregrine.parallel_processing.parmap`.
Returns
-------
out : [AcquisitionResult]
A list of :class:`AcquisitionResult` objects, one per PRN in `prns`.
"""
logger.info("Acquisition starting")
from peregrine.parallel_processing import parmap
# If the Doppler step is not specified, compute it from the coarse
# acquisition length.
if doppler_step is None:
# TODO: Work out the best frequency bin spacing.
# This is slightly sub-optimal if power is split between two bins,
# perhaps you could peak fit or look at pairs of bins to get true peak
# magnitude.
doppler_step = self.sampling_freq / self.n_integrate
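# For example (illustrative GPS L1 figures, not necessarily the defaults):
# with sampling_freq = 16.368e6 Hz and n_integrate = 4 * 16368 samples,
# doppler_step = 16.368e6 / 65472 = 250 Hz.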
if doppler_priors is None:
doppler_priors = np.zeros_like(prns)
# If progressbar is not available, disable show_progress.
if show_progress and not _progressbar_available:
show_progress = False
logger.warning("show_progress = True but progressbar module not found.")
# Setup our progress bar if we need it
if show_progress and not multi:
widgets = [' Acquisition ',
progressbar.Attribute('prn', '(PRN: %02d)', '(PRN --)'), ' ',
progressbar.Percentage(), ' ',
progressbar.ETA(), ' ',
progressbar.Bar()]
pbar = progressbar.ProgressBar(widgets=widgets,
maxval=len(prns) *
(2 * doppler_search / doppler_step + 1))
pbar.start()
else:
pbar = None
def do_acq(n):
prn = prns[n]
doppler_prior = doppler_priors[n]
freqs = np.arange(doppler_prior - doppler_search,
doppler_prior + doppler_search, doppler_step) + self.IF
if pbar:
def progress_callback(freq_num, num_freqs):
pbar.update(n*len(freqs) + freq_num, attr={'prn': prn + 1})
else:
progress_callback = None
coarse_results = self.acquire(caCodes[prn], freqs,
progress_callback=progress_callback)
code_phase, carr_freq, snr = self.find_peak(freqs, coarse_results,
interpolation='gaussian')
# If the result is above the threshold, then we have acquired the
# satellite.
status = '-'
if (snr > threshold):
status = 'A'
# Save properties of the detected satellite signal
acq_result = AcquisitionResult(prn,
carr_freq,
carr_freq - self.IF,
code_phase,
snr,
status)
# If the acquisition was successful, log it
if (snr > threshold):
logger.debug("Acquired %s" % acq_result)
return acq_result
if multi:
acq_results = parmap(do_acq, range(len(prns)), show_progress=show_progress)
else:
acq_results = map(do_acq, range(len(prns)))
# Acquisition is finished
# Stop printing progress bar
if pbar:
pbar.finish()
logger.info("Acquisition finished")
acquired_prns = [ar.prn + 1 for ar in acq_results if ar.status == 'A']
logger.info("Acquired %d satellites, PRNs: %s.",
len(acquired_prns), acquired_prns)
return acq_results
def load_wisdom(self, wisdom_file=DEFAULT_WISDOM_FILE):
"""Load saved FFTW wisdom from file."""
with open(wisdom_file, 'rb') as f:
wisdom = cPickle.load(f)
pyfftw.import_wisdom(wisdom)
def save_wisdom(self, wisdom_file=DEFAULT_WISDOM_FILE):
"""Save FFTW wisdom to file."""
with open(wisdom_file, 'wb') as f:
cPickle.dump(pyfftw.export_wisdom(), f, protocol=cPickle.HIGHEST_PROTOCOL)
class AcquisitionResult:
"""
Stores the acquisition parameters of a single satellite.
Parameters
----------
prn : int
PRN of the satellite.
carr_freq : float
Carrier frequency in Hz.
doppler : float
Doppler frequency in Hz,
i.e. `carr_freq` - receiver intermediate frequency.
code_phase : float
Code phase in chips.
snr : float
Signal-to-Noise Ratio.
status : {'A', '-'}
The acquisition status of the satellite:
* `'A'` : The satellite has been successfully acquired.
* `'-'` : The acquisition was not successful, the SNR was below the
acquisition threshold.
"""
__slots__ = ('prn', 'carr_freq', 'doppler', 'code_phase', 'snr', 'status')
def __init__(self, prn, carr_freq, doppler, code_phase, snr, status):
self.prn = prn
self.snr = snr
self.carr_freq = carr_freq
self.doppler = doppler
self.code_phase = code_phase
self.status = status
def __str__(self):
return "PRN %2d SNR %6.2f @ CP %6.1f, %+8.2f Hz %s" % \
(self.prn + 1, self.snr, self.code_phase, self.doppler, self.status)
def __repr__(self):
return "<AcquisitionResult %s>" % self.__str__()
def save_acq_results(filename, acq_results):
"""
Save a set of acquisition results to a file.
Parameters
----------
filename : string
Filename to save acquisition results to.
acq_results : [:class:`AcquisitionResult`]
List of :class:`AcquisitionResult` objects to save.
"""
with open(filename, 'wb') as f:
cPickle.dump(acq_results, f, protocol=cPickle.HIGHEST_PROTOCOL)
def load_acq_results(filename):
"""
Load a set of acquisition results from a file.
Parameters
----------
filename : string
Filename to load acquisition results from.
Returns
-------
acq_results : [:class:`AcquisitionResult`]
List of :class:`AcquisitionResult` objects loaded from the file.
"""
with open(filename, 'rb') as f:
return cPickle.load(f)
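# Minimal round-trip sketch (hypothetical filename and values):
# results = [AcquisitionResult(0, 4.1e6, 1.5e3, 512.3, 25.0, 'A')]
# save_acq_results('acq_results.pickle', results)
# assert load_acq_results('acq_results.pickle')[0].prn == 0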
def print_scores(acq_results, pred, pred_dopp=None):
if pred_dopp is None:
pred_dopp = np.zeros_like(pred)
print "PRN\tPred'd\tAcq'd\tError\tSNR"
n_match = 0
worst_dopp_err = 0
sum_dopp_err = 0
sum_abs_dopp_err = 0
for i, prn in enumerate(pred):
print "%2d\t%+6.0f" % (prn + 1, pred_dopp[i]),
if acq_results[i].status == 'A':
n_match += 1
dopp_err = acq_results[i].doppler - pred_dopp[i]
sum_dopp_err += dopp_err
sum_abs_dopp_err += abs(dopp_err)
if abs(dopp_err) > abs(worst_dopp_err):
worst_dopp_err = dopp_err
print "\t%+6.0f\t%+5.0f\t%5.1f" % (
acq_results[i].doppler, dopp_err, acq_results[i].snr)
else:
print
print "Found %d of %d, mean doppler error = %+5.0f Hz, mean abs err = %4.0f Hz, worst = %+5.0f Hz"\
% (n_match, len(pred),
sum_dopp_err/max(1, n_match), sum_abs_dopp_err/max(1, n_match), worst_dopp_err)
|
henryhallam/peregrine
|
peregrine/acquisition.py
|
Python
|
gpl-3.0
| 21,587
|
[
"Gaussian"
] |
fc1a3797ff6d548e1e01a8cbb826818954365c296e521be98f26d6c0c55af721
|
"""
Locals computes the value of locals()
"""
from pythran.passmanager import ModuleAnalysis
import pythran.metadata as md
import ast
class Locals(ModuleAnalysis):
"""
Statically compute the value of locals() before each statement
Yields a dictionary binding every node to the set of variable names defined
*before* this node.
The following snippet illustrates its behavior:
>>> import ast
>>> from pythran import passmanager
>>> pm = passmanager.PassManager('test')
>>> code = '''
... def b(n):
... m = n + 1
... def b(n):
... return n + 1
... return b(m)'''
>>> tree = ast.parse(code)
>>> l = pm.gather(Locals, tree)
>>> l[tree.body[0].body[0]]
set(['n'])
>>> l[tree.body[0].body[1]]
set(['b', 'm', 'n'])
"""
def __init__(self):
self.result = dict()
self.locals = set()
self.nesting = 0
super(Locals, self).__init__()
def generic_visit(self, node):
super(Locals, self).generic_visit(node)
if node not in self.result:
self.result[node] = self.result[self.expr_parent]
def store_and_visit(self, node):
self.expr_parent = node
self.result[node] = self.locals.copy()
self.generic_visit(node)
def visit_Module(self, node):
self.expr_parent = node
self.result[node] = self.locals
map(self.visit, node.body)
def visit_FunctionDef(self, node):
# special case for nested functions
if self.nesting:
self.locals.add(node.name)
self.nesting += 1
self.expr_parent = node
self.result[node] = self.locals.copy()
parent_locals = self.locals.copy()
map(self.visit, node.args.defaults)
self.locals.update(arg.id for arg in node.args.args)
map(self.visit, node.body)
self.locals = parent_locals
self.nesting -= 1
def visit_Assign(self, node):
self.expr_parent = node
self.result[node] = self.locals.copy()
md.visit(self, node)
self.visit(node.value)
self.locals.update(t.id for t in node.targets
if isinstance(t, ast.Name))
map(self.visit, node.targets)
def visit_For(self, node):
self.expr_parent = node
self.result[node] = self.locals.copy()
md.visit(self, node)
self.visit(node.iter)
self.locals.add(node.target.id)
map(self.visit, node.body)
map(self.visit, node.orelse)
def visit_Import(self, node):
self.result[node] = self.locals.copy()
self.locals.update(alias.name for alias in node.names)
def visit_ImportFrom(self, node):
self.result[node] = self.locals.copy()
self.locals.update(alias.name for alias in node.names)
def visit_ExceptHandler(self, node):
self.expr_parent = node
self.result[node] = self.locals.copy()
if node.name:
self.locals.add(node.name.id)
node.type and self.visit(node.type)
map(self.visit, node.body)
# statements that do not define a new variable
visit_Return = store_and_visit
visit_Yield = store_and_visit
visit_TryExcept = store_and_visit
visit_AugAssign = store_and_visit
visit_Print = store_and_visit
visit_While = store_and_visit
visit_If = store_and_visit
visit_Raise = store_and_visit
visit_Assert = store_and_visit
visit_Expr = store_and_visit
visit_Pass = store_and_visit
visit_Break = store_and_visit
visit_Continue = store_and_visit
|
hainm/pythran
|
pythran/analyses/locals_analysis.py
|
Python
|
bsd-3-clause
| 3,598
|
[
"VisIt"
] |
0076391b6056838a2708462217f174a638f79817b8be21fafa0d789c4d39fa39
|
# Copyright 2020 The gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Simple Stubs."""
# TODO(https://github.com/grpc/grpc/issues/21965): Run under setuptools.
import os
_MAXIMUM_CHANNELS = 10
os.environ["GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS"] = "1"
os.environ["GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM"] = str(_MAXIMUM_CHANNELS)
import contextlib
import datetime
import inspect
import logging
import unittest
import sys
import time
from typing import Callable, Optional
from tests.unit import test_common
import grpc
import grpc.experimental
_REQUEST = b"0000"
_CACHE_EPOCHS = 8
_CACHE_TRIALS = 6
_SERVER_RESPONSE_COUNT = 10
_CLIENT_REQUEST_COUNT = _SERVER_RESPONSE_COUNT
_STRESS_EPOCHS = _MAXIMUM_CHANNELS * 10
_UNARY_UNARY = "/test/UnaryUnary"
_UNARY_STREAM = "/test/UnaryStream"
_STREAM_UNARY = "/test/StreamUnary"
_STREAM_STREAM = "/test/StreamStream"
def _unary_unary_handler(request, context):
return request
def _unary_stream_handler(request, context):
for _ in range(_SERVER_RESPONSE_COUNT):
yield request
def _stream_unary_handler(request_iterator, context):
request = None
for single_request in request_iterator:
request = single_request
return request
def _stream_stream_handler(request_iterator, context):
for request in request_iterator:
yield request
class _GenericHandler(grpc.GenericRpcHandler):
def service(self, handler_call_details):
if handler_call_details.method == _UNARY_UNARY:
return grpc.unary_unary_rpc_method_handler(_unary_unary_handler)
elif handler_call_details.method == _UNARY_STREAM:
return grpc.unary_stream_rpc_method_handler(_unary_stream_handler)
elif handler_call_details.method == _STREAM_UNARY:
return grpc.stream_unary_rpc_method_handler(_stream_unary_handler)
elif handler_call_details.method == _STREAM_STREAM:
return grpc.stream_stream_rpc_method_handler(_stream_stream_handler)
else:
raise NotImplementedError()
def _time_invocation(to_time: Callable[[], None]) -> datetime.timedelta:
start = datetime.datetime.now()
to_time()
return datetime.datetime.now() - start
@contextlib.contextmanager
def _server(credentials: Optional[grpc.ServerCredentials]):
try:
server = test_common.test_server()
target = '[::]:0'
if credentials is None:
port = server.add_insecure_port(target)
else:
port = server.add_secure_port(target, credentials)
server.add_generic_rpc_handlers((_GenericHandler(),))
server.start()
yield port
finally:
server.stop(None)
class SimpleStubsTest(unittest.TestCase):
def assert_cached(self, to_check: Callable[[str], None]) -> None:
"""Asserts that a function caches intermediate data/state.
To be specific, given a function whose caching behavior is
deterministic in the value of a supplied string, this function asserts
that, on average, subsequent invocations of the function for a specific
string are faster than first invocations with that same string.
Args:
to_check: A function returning nothing, that caches values based on
an arbitrary supplied string.
"""
initial_runs = []
cached_runs = []
for epoch in range(_CACHE_EPOCHS):
runs = []
text = str(epoch)
for trial in range(_CACHE_TRIALS):
runs.append(_time_invocation(lambda: to_check(text)))
initial_runs.append(runs[0])
cached_runs.extend(runs[1:])
average_cold = sum((run for run in initial_runs),
datetime.timedelta()) / len(initial_runs)
average_warm = sum((run for run in cached_runs),
datetime.timedelta()) / len(cached_runs)
self.assertLess(average_warm, average_cold)
def assert_eventually(self,
predicate: Callable[[], bool],
*,
timeout: Optional[datetime.timedelta] = None,
message: Optional[Callable[[], str]] = None) -> None:
message = message or (lambda: "Proposition did not evaluate to true")
timeout = timeout or datetime.timedelta(seconds=10)
end = datetime.datetime.now() + timeout
while datetime.datetime.now() < end:
if predicate():
break
time.sleep(0.5)
else:
self.fail(message() + " after " + str(timeout))
def test_unary_unary_insecure(self):
with _server(None) as port:
target = f'localhost:{port}'
response = grpc.experimental.unary_unary(
_REQUEST,
target,
_UNARY_UNARY,
channel_credentials=grpc.experimental.
insecure_channel_credentials())
self.assertEqual(_REQUEST, response)
def test_unary_unary_secure(self):
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
response = grpc.experimental.unary_unary(
_REQUEST,
target,
_UNARY_UNARY,
channel_credentials=grpc.local_channel_credentials())
self.assertEqual(_REQUEST, response)
def test_channels_cached(self):
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
test_name = inspect.stack()[0][3]
args = (_REQUEST, target, _UNARY_UNARY)
kwargs = {"channel_credentials": grpc.local_channel_credentials()}
def _invoke(seed: str):
run_kwargs = dict(kwargs)
run_kwargs["options"] = ((test_name + seed, ""),)
grpc.experimental.unary_unary(*args, **run_kwargs)
self.assert_cached(_invoke)
def test_channels_evicted(self):
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
response = grpc.experimental.unary_unary(
_REQUEST,
target,
_UNARY_UNARY,
channel_credentials=grpc.local_channel_credentials())
self.assert_eventually(
lambda: grpc._simple_stubs.ChannelCache.get(
)._test_only_channel_count() == 0,
message=lambda:
f"{grpc._simple_stubs.ChannelCache.get()._test_only_channel_count()} remain"
)
def test_total_channels_enforced(self):
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
for i in range(_STRESS_EPOCHS):
# Ensure we get a new channel each time.
options = (("foo", str(i)),)
# Send messages at full blast.
grpc.experimental.unary_unary(
_REQUEST,
target,
_UNARY_UNARY,
options=options,
channel_credentials=grpc.local_channel_credentials())
self.assert_eventually(
lambda: grpc._simple_stubs.ChannelCache.get(
)._test_only_channel_count() <= _MAXIMUM_CHANNELS + 1,
message=lambda:
f"{grpc._simple_stubs.ChannelCache.get()._test_only_channel_count()} channels remain"
)
def test_unary_stream(self):
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
for response in grpc.experimental.unary_stream(
_REQUEST,
target,
_UNARY_STREAM,
channel_credentials=grpc.local_channel_credentials()):
self.assertEqual(_REQUEST, response)
def test_stream_unary(self):
def request_iter():
for _ in range(_CLIENT_REQUEST_COUNT):
yield _REQUEST
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
response = grpc.experimental.stream_unary(
request_iter(),
target,
_STREAM_UNARY,
channel_credentials=grpc.local_channel_credentials())
self.assertEqual(_REQUEST, response)
def test_stream_stream(self):
def request_iter():
for _ in range(_CLIENT_REQUEST_COUNT):
yield _REQUEST
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
for response in grpc.experimental.stream_stream(
request_iter(),
target,
_STREAM_STREAM,
channel_credentials=grpc.local_channel_credentials()):
self.assertEqual(_REQUEST, response)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
unittest.main(verbosity=2)
|
firebase/grpc-SwiftPM
|
src/python/grpcio_tests/tests_py3_only/unit/_simple_stubs_test.py
|
Python
|
apache-2.0
| 9,641
|
[
"BLAST"
] |
6ea62d039e5007a5a9800b674b11a5cee4483a191ec69e330c051072f550ca5f
|
'''
This is a scaled-down version of clingo-dl showcasing how to implement a
propagator for difference logic.
'''
from typing import List, Sequence, Optional, MutableMapping, Tuple, Set, cast
import heapq
import sys
from clingo import ast
from clingo.symbol import Function, Number, Symbol, SymbolType, Tuple_
from clingo.theory_atoms import TheoryTerm, TheoryTermType
from clingo.solving import Model
from clingo.propagator import Assignment, PropagateControl, PropagateInit, Propagator
from clingo.application import clingo_main, Application, ApplicationOptions
from clingo.control import Control
from clingo import SolveResult, parse_term
from clingo.ast import parse_files, AST, ProgramBuilder, Transformer
Node = Symbol # pylint: disable=invalid-name
Weight = int
Level = int
Edge = Tuple[Node, Node]
WeightedEdge = Tuple[Node, Node, Weight]
MapNodeWeight = MutableMapping[Node, Weight]
THEORY = """
#theory dl{
diff_term {
- : 3, unary;
** : 2, binary, right;
* : 1, binary, left;
/ : 1, binary, left;
\\ : 1, binary, left;
+ : 0, binary, left;
- : 0, binary, left
};
&diff/1 : diff_term, {<=}, diff_term, any
}.
"""
_BOP = {"+": lambda a, b: a + b,
"-": lambda a, b: a - b,
"*": lambda a, b: a * b,
"**": lambda a, b: a ** b,
"\\": lambda a, b: a % b,
"/": lambda a, b: a // b}
def _evaluate(term: TheoryTerm) -> Symbol:
'''
Evaluates the operators in a theory term in the same fashion as clingo
evaluates its arithmetic functions.
'''
# tuples
if term.type == TheoryTermType.Tuple:
return Tuple_([_evaluate(x) for x in term.arguments])
# functions and arithmetic operations
if term.type == TheoryTermType.Function:
# binary operations
if term.name in _BOP and len(term.arguments) == 2:
term_a = _evaluate(term.arguments[0])
term_b = _evaluate(term.arguments[1])
if term_a.type != SymbolType.Number or term_b.type != SymbolType.Number:
raise RuntimeError("Invalid Binary Operation")
if term.name in ("/", "\\") and term_b.number == 0:
raise RuntimeError("Division by Zero")
return Number(_BOP[term.name](term_a.number, term_b.number))
# unary operations
if term.name == "-" and len(term.arguments) == 1:
term_a = _evaluate(term.arguments[0])
if term_a.type == SymbolType.Number:
return Number(-term_a.number)
if term_a.type == SymbolType.Function and term_a.name:
return Function(term_a.name, term_a.arguments, not term_a.positive)
raise RuntimeError("Invalid Unary Operation")
# functions
return Function(term.name, [_evaluate(x) for x in term.arguments])
# constants
if term.type == TheoryTermType.Symbol:
return Function(term.name)
# numbers
if term.type == TheoryTermType.Number:
return Number(term.number)
raise RuntimeError("Invalid Syntax")
class HeadBodyTransformer(Transformer):
'''
Transformer to tag head and body occurrences of `&diff` atoms.
'''
def visit_Literal(self, lit: AST, in_lit: bool = False) -> AST:
'''
Visit literal; any theory atom in a literal is a body literal.
'''
return lit.update(**self.visit_children(lit, True))
def visit_TheoryAtom(self, atom: AST, in_lit: bool = False) -> AST:
'''
Visit theory atom and tag as given by in_lit.
'''
# pylint: disable=invalid-name,no-self-use
term = atom.term
if term.name == "diff" and not term.arguments:
loc = "body" if in_lit else "head"
atom.term = ast.Function(
term.location,
term.name,
[ast.Function(term.location, loc, [], False)], False)
return atom
class Graph:
'''
This class captures a graph with weighted edges that can be extended
incrementally.
Adding an edge triggers a cycle check that will report negative cycles.
'''
_potential: MapNodeWeight
_graph: MutableMapping[Node, MapNodeWeight]
_gamma: MapNodeWeight
_last_edges: MutableMapping[Node, WeightedEdge]
_previous_edge: MutableMapping[Level, MutableMapping[Edge, Weight]]
_previous_potential: MutableMapping[Level, MapNodeWeight]
def __init__(self):
self._potential = {} # {node: potential}
self._graph = {} # {node: {node : weight}}
self._gamma = {} # {node: gamma}
self._last_edges = {} # {node: edge}
self._previous_edge = {} # {level: {(node, node): weight}}
self._previous_potential = {} # {level: {node: potential}}
@staticmethod
def _set(level, key, val, previous, get_current):
p = previous.setdefault(level, {})
c, k = get_current(key)
if key not in p:
p[key] = c[k] if k in c else None
c[k] = val
@staticmethod
def _reset(level, previous, get_current):
if level in previous:
for key, val in previous[level].items():
c, k = get_current(key)
if val is None:
del c[k]
else:
c[k] = val
del previous[level]
def _reset_edge(self, level: Level):
self._reset(level, self._previous_edge, lambda key: (self._graph[key[0]], key[1]))
def _reset_potential(self, level: Level):
self._reset(level, self._previous_potential, lambda key: (self._potential, key))
def _set_edge(self, level: Level, key: Edge, val: Weight):
self._set(level, key, val, self._previous_edge, lambda key: (self._graph[key[0]], key[1]))
def _set_potential(self, level: Level, key: Node, val: Weight):
self._set(level, key, val, self._previous_potential, lambda key: (self._potential, key))
def add_edge(self, level: Level, edge: WeightedEdge) -> Optional[List[WeightedEdge]]:
'''
Add an edge to the graph and return a negative cycle (if there is one).
'''
u, v, d = edge
# If edge already exists from u to v with lower weight, new edge is redundant
if u in self._graph and v in self._graph[u] and self._graph[u][v] <= d:
return None
# Initialize potential and graph
if u not in self._potential:
self._set_potential(level, u, 0)
if v not in self._potential:
self._set_potential(level, v, 0)
self._graph.setdefault(u, {})
self._graph.setdefault(v, {})
changed: Set[Node] = set() # Set of nodes for which potential has been changed
min_gamma: List[Tuple[Weight, Node]] = []
# Update potential change induced by new edge, 0 for other nodes
self._gamma[u] = 0
self._gamma[v] = self._potential[u] + d - self._potential[v]
if self._gamma[v] < 0:
heapq.heappush(min_gamma, (self._gamma[v], v))
self._last_edges[v] = (u, v, d)
# Propagate negative potential change
while len(min_gamma) > 0 and self._gamma[u] == 0:
_, s = heapq.heappop(min_gamma)
if s not in changed:
self._set_potential(level, s, self._potential[s] + self._gamma[s])
self._gamma[s] = 0
changed.add(s)
for t in self._graph[s]:
if t not in changed:
gamma_t = self._potential[s] + self._graph[s][t] - self._potential[t]
if gamma_t < self._gamma[t]:
self._gamma[t] = gamma_t
heapq.heappush(min_gamma, (gamma_t, t))
self._last_edges[t] = (s, t, self._graph[s][t])
cycle = None
# Check if there is a negative cycle
if self._gamma[u] < 0:
cycle = []
x, y, c = self._last_edges[v]
cycle.append((x, y, c))
while v != x:
x, y, c = self._last_edges[x]
cycle.append((x, y, c))
else:
self._set_edge(level, (u, v), d)
# Ensure that all gamma values are zero
self._gamma[v] = 0
while len(min_gamma) > 0:
_, s = heapq.heappop(min_gamma)
self._gamma[s] = 0
return cycle
def get_assignment(self) -> List[Tuple[Node, Weight]]:
'''
Get the current assignment to integer variables.
'''
zero = Number(0)
adjust = self._potential[zero] if zero in self._potential else 0
return [(node, adjust - self._potential[node]) for node in self._potential if node != zero]
def backtrack(self, level):
'''
Backtrack the given level.
'''
self._reset_edge(level)
self._reset_potential(level)
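# Minimal standalone sketch of Graph (illustrative; nodes are clingo
# symbols, here Numbers, and 0 is the decision level):
#
# g = Graph()
# assert g.add_edge(0, (Number(1), Number(2), 5)) is None
# # adding 2 -> 1 with weight -6 closes a cycle of total weight -1:
# cycle = g.add_edge(0, (Number(2), Number(1), -6))
# # cycle == [(Number(2), Number(1), -6), (Number(1), Number(2), 5)]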
class DLPropagator(Propagator):
'''
A propagator for difference constraints.
'''
_l2e: MutableMapping[int, List[WeightedEdge]]
_e2l: MutableMapping[WeightedEdge, List[int]]
_states: List[Graph]
def __init__(self):
self._l2e = {} # {literal: [(node, node, weight)]}
self._e2l = {} # {(node, node, weight): [literal]}
self._states = [] # [Graph]
def _add_edge(self, init: PropagateInit, lit: int, u: Node, v: Node, w: Weight):
edge = (u, v, w)
self._l2e.setdefault(lit, []).append(edge)
self._e2l.setdefault(edge, []).append(lit)
init.add_watch(lit)
def init(self, init: PropagateInit):
'''
Initialize the propagator extracting difference constraints from the
theory data.
'''
for atom in init.theory_atoms:
term = atom.term
if term.name == "diff" and len(term.arguments) == 1:
assert atom.guard is not None
u = _evaluate(atom.elements[0].terms[0].arguments[0])
v = _evaluate(atom.elements[0].terms[0].arguments[1])
w = _evaluate(atom.guard[1]).number
lit = init.solver_literal(atom.literal)
self._add_edge(init, lit, u, v, w)
if term.arguments[0].name == "body":
self._add_edge(init, -lit, v, u, -w - 1)
def propagate(self, control: PropagateControl, changes: Sequence[int]):
'''
Add edges that became true to the graph to check for negative cycles.
'''
state = self._state(control.thread_id)
level = control.assignment.decision_level
for lit in changes:
for edge in self._l2e[lit]:
cycle = state.add_edge(level, edge)
if cycle is not None:
c = [self._literal(control, e) for e in cycle]
if control.add_nogood(c):
control.propagate()
return
def undo(self, thread_id: int, assign: Assignment, changes: Sequence[int]):
'''
Backtrack the last decision level propagated.
'''
# pylint: disable=unused-argument
self._state(thread_id).backtrack(assign.decision_level)
def on_model(self, model: Model):
'''
This function should be called when a model has been found to extend it
with the integer variable assignments.
'''
assignment = self._state(model.thread_id).get_assignment()
model.extend([Function("dl", [var, Number(value)]) for var, value in assignment])
def _state(self, thread_id: int) -> Graph:
while len(self._states) <= thread_id:
self._states.append(Graph())
return self._states[thread_id]
def _literal(self, control, edge):
for lit in self._e2l[edge]:
if control.assignment.is_true(lit):
return lit
raise RuntimeError('must not happen')
class DLApp(Application):
'''
Application extending clingo with difference constraints.
'''
program_name: str = "clingo-dl"
version: str = "1.0"
_propagator: DLPropagator
_minimize: Optional[Symbol]
_bound: Optional[int]
def __init__(self):
self._propagator = DLPropagator()
self._minimize = None
self._bound = None
def _parse_minimize(self, val):
var = parse_term(val)
if var.type == SymbolType.Number:
return False
self._minimize = var
return True
def register_options(self, options: ApplicationOptions):
'''
Register application options.
'''
group = 'Clingo.DL Options'
options.add(group, 'minimize-variable', 'Minimize the given variable', self._parse_minimize, argument="<var>")
def _read(self, path: str):
if path == "-":
return sys.stdin.read()
with open(path) as file_:
return file_.read()
def _rewrite(self, ctl: Control, files: Sequence[str]):
with ProgramBuilder(ctl) as bld:
hbt = HeadBodyTransformer()
parse_files(
files,
lambda stm: bld.add(cast(AST, hbt.visit(stm))))
def _on_model(self, model: Model):
self._propagator.on_model(model)
for symbol in model.symbols(theory=True):
if symbol.match("dl", 2):
n, v = symbol.arguments
if n == self._minimize:
self._bound = v.number
break
def main(self, ctl: Control, files: Sequence[str]):
'''
Register the difference constraint propagator, and then ground and
solve.
'''
ctl.register_propagator(self._propagator)
ctl.add("base", [], THEORY)
if not files:
files = ["-"]
self._rewrite(ctl, files)
ctl.ground([("base", [])])
if self._minimize is None:
ctl.solve(on_model=self._propagator.on_model)
else:
ctl.add("bound", ["b", "v"], "&diff(head) { v-0 } <= b.")
while cast(SolveResult, ctl.solve(on_model=self._on_model)).satisfiable:
print("Found new bound: {}".format(self._bound))
if self._bound is None:
break
ctl.ground([("bound", [Number(cast(int, self._bound) - 1), self._minimize])])
if self._bound is not None:
print("Optimum found")
if __name__ == "__main__":
sys.exit(int(clingo_main(DLApp(), sys.argv[1:])))
|
potassco/clingo
|
examples/clingo/dl/app.py
|
Python
|
mit
| 14,545
|
[
"VisIt"
] |
ec3fc5082ef6b39aa7633e19fec79f025a1fd09c3de8b118264e976a42e82088
|
# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.fileformats.netcdf._load_cube` function."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import netCDF4
import numpy as np
from iris.coords import DimCoord
import iris.fileformats.cf
from iris.fileformats.netcdf import _load_cube
from iris.tests import mock
class TestCoordAttributes(tests.IrisTest):
@staticmethod
def _patcher(engine, cf, cf_group):
coordinates = []
for coord in cf_group:
engine.cube.add_aux_coord(coord)
coordinates.append((coord, coord.name()))
engine.provides['coordinates'] = coordinates
def setUp(self):
this = 'iris.fileformats.netcdf._assert_case_specific_facts'
patch = mock.patch(this, side_effect=self._patcher)
patch.start()
self.addCleanup(patch.stop)
self.engine = mock.Mock()
self.filename = 'DUMMY'
self.flag_masks = mock.sentinel.flag_masks
self.flag_meanings = mock.sentinel.flag_meanings
self.flag_values = mock.sentinel.flag_values
self.valid_range = mock.sentinel.valid_range
self.valid_min = mock.sentinel.valid_min
self.valid_max = mock.sentinel.valid_max
def _make(self, names, attrs):
coords = [DimCoord(i, long_name=name) for i, name in enumerate(names)]
cf_group = {}
for name, cf_attrs in zip(names, attrs):
cf_attrs_unused = mock.Mock(return_value=cf_attrs)
cf_group[name] = mock.Mock(cf_attrs_unused=cf_attrs_unused)
cf = mock.Mock(cf_group=cf_group)
cf_var = mock.MagicMock(spec=iris.fileformats.cf.CFVariable,
dtype=np.dtype('i4'),
cf_data=mock.Mock(_FillValue=None),
cf_name='DUMMY_VAR',
cf_group=coords,
shape=(1,))
return cf, cf_var
def test_flag_pass_thru(self):
items = [('masks', 'flag_masks', self.flag_masks),
('meanings', 'flag_meanings', self.flag_meanings),
('values', 'flag_values', self.flag_values)]
for name, attr, value in items:
names = [name]
attrs = [[(attr, value)]]
cf, cf_var = self._make(names, attrs)
cube = _load_cube(self.engine, cf, cf_var, self.filename)
self.assertEqual(len(cube.coords(name)), 1)
coord = cube.coord(name)
self.assertEqual(len(coord.attributes), 1)
self.assertEqual(list(coord.attributes.keys()), [attr])
self.assertEqual(list(coord.attributes.values()), [value])
def test_flag_pass_thru_multi(self):
names = ['masks', 'meanings', 'values']
attrs = [[('flag_masks', self.flag_masks),
('wibble', 'wibble')],
[('flag_meanings', self.flag_meanings),
('add_offset', 'add_offset')],
[('flag_values', self.flag_values)],
[('valid_range', self.valid_range)],
[('valid_min', self.valid_min)],
[('valid_max', self.valid_max)]]
cf, cf_var = self._make(names, attrs)
cube = _load_cube(self.engine, cf, cf_var, self.filename)
self.assertEqual(len(cube.coords()), 3)
self.assertEqual(set([c.name() for c in cube.coords()]), set(names))
expected = [attrs[0],
[attrs[1][0]],
attrs[2],
attrs[3],
attrs[4],
attrs[5]]
for name, expect in zip(names, expected):
attributes = cube.coord(name).attributes
self.assertEqual(set(attributes.items()), set(expect))
class TestCubeAttributes(tests.IrisTest):
def setUp(self):
this = 'iris.fileformats.netcdf._assert_case_specific_facts'
patch = mock.patch(this)
patch.start()
self.addCleanup(patch.stop)
self.engine = mock.Mock()
self.cf = None
self.filename = 'DUMMY'
self.flag_masks = mock.sentinel.flag_masks
self.flag_meanings = mock.sentinel.flag_meanings
self.flag_values = mock.sentinel.flag_values
self.valid_range = mock.sentinel.valid_range
self.valid_min = mock.sentinel.valid_min
self.valid_max = mock.sentinel.valid_max
def _make(self, attrs):
cf_attrs_unused = mock.Mock(return_value=attrs)
cf_var = mock.MagicMock(spec=iris.fileformats.cf.CFVariable,
dtype=np.dtype('i4'),
cf_data=mock.Mock(_FillValue=None),
cf_name='DUMMY_VAR',
cf_group=mock.Mock(),
cf_attrs_unused=cf_attrs_unused,
shape=(1,))
return cf_var
def test_flag_pass_thru(self):
attrs = [('flag_masks', self.flag_masks),
('flag_meanings', self.flag_meanings),
('flag_values', self.flag_values)]
for key, value in attrs:
cf_var = self._make([(key, value)])
cube = _load_cube(self.engine, self.cf, cf_var, self.filename)
self.assertEqual(len(cube.attributes), 1)
self.assertEqual(list(cube.attributes.keys()), [key])
self.assertEqual(list(cube.attributes.values()), [value])
def test_flag_pass_thru_multi(self):
attrs = [('flag_masks', self.flag_masks),
('wibble', 'wobble'),
('flag_meanings', self.flag_meanings),
('add_offset', 'add_offset'),
('flag_values', self.flag_values),
('standard_name', 'air_temperature'),
('valid_range', self.valid_range),
('valid_min', self.valid_min),
('valid_max', self.valid_max)]
# Expect everything from above to be returned except 'add_offset' and
# 'standard_name' (attrs[3] and attrs[5]), which are handled separately
# during loading rather than kept as attributes.
expected = set([attrs[ind] for ind in [0, 1, 2, 4, 6, 7, 8]])
cf_var = self._make(attrs)
cube = _load_cube(self.engine, self.cf, cf_var, self.filename)
self.assertEqual(len(cube.attributes), len(expected))
self.assertEqual(set(cube.attributes.items()), expected)
if __name__ == "__main__":
tests.main()
|
LukeC92/iris
|
lib/iris/tests/unit/fileformats/netcdf/test__load_cube.py
|
Python
|
lgpl-3.0
| 7,296
|
[
"NetCDF"
] |
4b62be05ffb74aaa24781bb1e6717a3f48163abebec961489fd1673576e6aba0
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @package parse_blast.py
# @author Lea Picard
import sys, os, subprocess
from Bio import Entrez, SeqIO
from collections import defaultdict
def dict2fasta(dico):
"""
Take a dictionary whose keys are sequence IDs and whose values are
sequences, and return a FASTA-formatted string.
:param dico: a python dictionary
:type dico: dict()
:rtype: str()
:return: FASTA-formatted string
Example:
>>> dico = {"Seq1":"ATGCTGCAGTAG","Seq2":"ATGCCGATCGATG","Seq3":"ATGCTCAGTCAGTAG"}
>>> print(dict2fasta(dico))
>Seq1
ATGCTGCAGTAG
>Seq2
ATGCCGATCGATG
>Seq3
ATGCTCAGTCAGTAG
"""
txtoutput = ""
for key, value in dico.items():
txtoutput += ">%s\n%s\n" % (str(key),str(value))
return txtoutput
Entrez.email = "lea.picard@ens-lyon.fr"
Entrez.tool = "LeaPpipeline"
blastRes = "/home/lea/Documents/genes/21genes/results_test_outfmt.tsv"
# split blast results by gene
with open(blastRes, "r") as blast:
listBlastRes = blast.read().strip().split('# BLASTN 2.2.31+')
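# The input is assumed to be tabular BLAST output with comment lines
# (outfmt 7); a rough sketch of the expected layout (hypothetical values,
# data columns tab-separated):
# # Query: lcl|GENE1 description
# # Fields: query id, subject id, subject tax id, ...
# lcl|GENE1	gi|123456|ref|NM_000001.1|	9606	...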
dGene2TaxAcc = defaultdict(dict) # dict of dict d[gene][tax] = acc
dTax2Acc = {} # dict keys = taxID, values = accession for this tax
dAcc2Seq = {} # dict keys = accession, values = cds sequence for the accession
dGene2Out = {} # dict keys = gene
outDir = "/".join(blastRes.split("/")[0:-1])
geneName = ""
print(outDir)
# for each gene result in blast results
for geneRes in listBlastRes:
if geneRes == "":
continue
else:
for hit in geneRes.split("\n"):
# get gene name from results header, create output file name and initialize dGene2TaxAcc for each gene
if hit.startswith("# Query"):
geneName = hit.split(" ")[2].split("|")[1]
dGene2Out[geneName] = outDir+"/"+geneName+".fasta"
dGene2TaxAcc[geneName] = {}
# skip other lines in the header
elif (hit.startswith("#") and not hit.startswith("# Query")) or hit == "":
continue
# parse hits (get gene accession ID and taxID)
else:
cols = hit.split("\t")
geneAcc = cols[1].split("|")[3]
taxID = cols[2]
# if taxon identifier has not been encountered yet
# add it to the dGene2TaxAcc of the current gene and associate it to corresponding accession
if taxID not in dGene2TaxAcc[geneName].keys():
dGene2TaxAcc[geneName][taxID] = geneAcc
# for each gene, get accessions associated to unique taxon identifier
for gene in dGene2TaxAcc.keys():
print("%s: %i sequences" %(gene, len(dGene2TaxAcc[gene].keys())))
i = 1
# for each accession, get genbank file from Entrez
for acc in dGene2TaxAcc[gene].values():
# skip sequence LT160002.1 (script stuck otherwise)
if acc.startswith("LT"):
print("skipping problematic sequence for SAMD9L")
continue
else:
print("%s" %acc)
i = i + 1
outfile = blastRes.split(".")[0]+"_accessions_"+gene+".txt"
with open(outfile, "w") as out:
i = 1
for key in dGene2TaxAcc[gene].keys():
out.write(dGene2TaxAcc[gene][key]+"\n")
#out.write(acc+"\n")
"""
handle = Entrez.efetch(db="nucleotide", id=acc, rettype="gb", retmode="text")
record = SeqIO.read(handle, "gb")
handle.close()
# get species name and formate it so that it looks like homSap panPan etc
recordSp = record.annotations["source"].split(" ")[0].lower()[:3]+record.annotations["source"].split(" ")[1].title()[:3]
# check if CDS is described in genbank file
for feature in record.features:
if feature.type == "CDS":
# get start and stop position then CDS
start = feature.location.start
end = feature.location.end
quals = feature.qualifiers.get('db_xref')
seq = record.seq[start:end]
if not quals:
next
# check if there is a geneID (might use later for another filter of duplicate sequences)
else:
for qual in quals:
if qual.startswith("GeneID"):
geneID = qual.split(":")[-1]
else:
seq = record.seq
# fill dict[fasta sequence identifier like homSap|NM_012345] = fasta (CDS) sequence)
seqID = recordSp+"|"+acc
dAcc2Seq[seqID] = str(seq)
print(len(dAcc2Seq.keys()))
# print dAcc2Seq in a output file in a fasta format
with open(dGene2Out[gene], "w") as geneOut:
print("opened %s" %dGene2Out[gene])
geneOut.write(dict2fasta(dAcc2Seq))
# reinitialize dAcc2Seq so that sequences from other genes are not appended to the previous gene
dAcc2Seq = {}
"""
|
leapicard/pipeline
|
parse_blast.py
|
Python
|
gpl-3.0
| 4,316
|
[
"BLAST"
] |
cc7d50115771158bb110d3310a3c8b4ab1c24011c0c6445cbad622f7b7dab74b
|
# Version: 0.15
"""
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.org/project/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
First, decide on values for the following configuration variables:
* `VCS`: the version control system you use. Currently accepts "git".
* `style`: the style of version string to be produced. See "Styles" below for
details. Defaults to "pep440", which looks like
`TAG[+DISTANCE.gSHORTHASH[.dirty]]`.
* `versionfile_source`:
A project-relative pathname into which the generated version strings should
be written. This is usually a `_version.py` next to your project's main
`__init__.py` file, so it can be imported at runtime. If your project uses
`src/myproject/__init__.py`, this should be `src/myproject/_version.py`.
This file should be checked in to your VCS as usual: the copy created below
by `setup.py setup_versioneer` will include code that parses expanded VCS
keywords in generated tarballs. The 'build' and 'sdist' commands will
replace it with a copy that has just the calculated version string.
This must be set even if your project does not have any modules (and will
therefore never import `_version.py`), since "setup.py sdist" -based trees
still need somewhere to record the pre-calculated version strings. Anywhere
in the source tree should do. If there is a `__init__.py` next to your
`_version.py`, the `setup.py setup_versioneer` command (described below)
will append some `__version__`-setting assignments, if they aren't already
present.
* `versionfile_build`:
Like `versionfile_source`, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
then you will probably have `versionfile_build='myproject/_version.py'` and
`versionfile_source='src/myproject/_version.py'`.
If this is set to None, then `setup.py build` will not attempt to rewrite
any `_version.py` in the built tree. If your project does not have any
libraries (e.g. if it only builds a script), then you should use
`versionfile_build = None` and override `distutils.command.build_scripts`
to explicitly insert a copy of `versioneer.get_version()` into your
generated script.
* `tag_prefix`:
a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
If your tags look like 'myproject-1.2.0', then you should use
tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
should be an empty string.
* `parentdir_prefix`:
an optional string, frequently the same as tag_prefix, which appears at the
start of all unpacked tarball filenames. If your tarball unpacks into
'myproject-1.2.0', this should be 'myproject-'. To disable this feature,
just omit the field from your `setup.cfg`.
This tool provides one script, named `versioneer`. That script has one mode,
"install", which writes a copy of `versioneer.py` into the current directory
and runs `versioneer.py setup` to finish the installation.
To versioneer-enable your project:
* 1: Modify your `setup.cfg`, adding a section named `[versioneer]` and
populating it with the configuration values you decided earlier (note that
the option names are not case-sensitive):
````
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix = ""
parentdir_prefix = myproject-
````
* 2: Run `versioneer install`. This will do the following:
* copy `versioneer.py` into the top of your source tree
* create `_version.py` in the right place (`versionfile_source`)
* modify your `__init__.py` (if one exists next to `_version.py`) to define
`__version__` (by calling a function from `_version.py`)
* modify your `MANIFEST.in` to include both `versioneer.py` and the
generated `_version.py` in sdist tarballs
`versioneer install` will complain about any problems it finds with your
`setup.py` or `setup.cfg`. Run it multiple times until you have fixed all
the problems.
* 3: add a `import versioneer` to your setup.py, and add the following
arguments to the setup() call:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
* 4: commit these changes to your VCS. To make sure you won't forget,
`versioneer install` will mark everything it touched for addition using
`git add`. Don't forget to add `setup.py` and `setup.cfg` too.
## Post-Installation Usage
Once established, all uses of your tree from a VCS checkout should get the
current version string. All generated tarballs should include an embedded
version string (so users who unpack them will not need a VCS tool installed).
If you distribute your project through PyPI, then the release process should
boil down to two steps:
* 1: git tag 1.0
* 2: python setup.py register sdist upload
If you distribute it through github (i.e. users use github to generate
tarballs with `git archive`), the process is:
* 1: git tag 1.0
* 2: git push; git push --tags
Versioneer will report "0+untagged.NUMCOMMITS.gHASH" until your tree has at
least one tag in its history.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None.
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown" (a sketch of such a
check follows below).
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
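For example, a `setup.py` can refuse to build releases with an unknown
version (a sketch; whether to hard-fail is up to you):
````
import versioneer
_info = versioneer.get_versions()
if _info['error']:
    raise RuntimeError('cannot determine version: %s' % _info['error'])
````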
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See details.md in the Versioneer source tree for
descriptions.
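As an illustration (derived from the `render_*` functions defined later in
this file), take a checkout that is two commits past the "0.11" tag, at
commit 1076c97, with uncommitted changes:
* pep440: `0.11+2.g1076c97.dirty`
* pep440-pre: `0.11.post.dev2` (ignores dirtiness)
* pep440-post: `0.11.post2.dev0+g1076c97`
* pep440-old: `0.11.post2.dev0`
* git-describe: `0.11-2-g1076c97-dirty`
* git-describe-long: `0.11-2-g1076c97-dirty`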
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
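The same information is available programmatically: the top-level
`get_versions()` defined later in this file accepts a `verbose` flag, so a
quick check from an interactive session is:
````
import versioneer
print(versioneer.get_versions(verbose=True))
````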
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
### Upgrading to 0.15
Starting with this version, Versioneer is configured with a `[versioneer]`
section in your `setup.cfg` file. Earlier versions required the `setup.py` to
set attributes on the `versioneer` module immediately after import. The new
version will refuse to run (raising an exception during import) until you
have provided the necessary `setup.cfg` section.
In addition, the Versioneer package provides an executable named
`versioneer`, and the installation process is driven by running `versioneer
install`. In 0.14 and earlier, the executable was named
`versioneer-installer` and was run without an argument.
### Upgrading to 0.14
0.14 changes the format of the version string. 0.13 and earlier used
hyphen-separated strings like "0.11-2-g1076c97-dirty". 0.14 and beyond use a
plus-separated "local version" section, with dot-separated
components, like "0.11+2.g1076c97". PEP440-strict tools did not like the old
format, but should be ok with the new one.
### Upgrading from 0.11 to 0.12
Nothing special.
### Upgrading from 0.10 to 0.11
You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running
`setup.py setup_versioneer`. This will enable the use of additional
version-control systems (SVN, etc) in the future.
## Future Directions
This tool is designed to be easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is hereby released into the
public domain. The `_version.py` that it creates is also in the public
domain.
"""
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig(object):
pass
def get_root():
# we require that all commands are run from the project root, i.e. the
# directory that contains setup.py, setup.cfg, and versioneer.py .
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root
def get_config_from_root(root):
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg, "r") as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
pass
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
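# For example, once the git handlers below are defined, HANDLERS looks like:
#   {"git": {"get_keywords": git_get_keywords,
#            "keywords": git_versions_from_keywords,
#            "pieces_from_vcs": git_pieces_from_vcs}}
# and get_versions() dispatches through HANDLERS[cfg.VCS].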
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
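# For example, run_command(["git"], ["rev-parse", "HEAD"], cwd=root) returns
# the command's stripped stdout as a string, or None if no listed command
# could be started or the command exited non-zero.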
LONG_VERSION_PY['git'] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
def get_keywords():
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
pass
def get_config():
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
pass
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with "
"prefix '%%s'" %% (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r'\d', r)}
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
# --always --long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r'\d', r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.15) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
from warnings import catch_warnings
with catch_warnings(record=True):
import json
import sys
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
# --always --long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
class VersioneerBadRootError(Exception):
pass
def get_versions(verbose=False):
# returns dict with two keys: 'version' and 'full'
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version"}
def get_version():
return get_versions()["version"]
def get_cmdclass():
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
# sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {}
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
# we override different "sdist" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix = ""
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-time keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
|
cbertinato/pandas
|
versioneer.py
|
Python
|
bsd-3-clause
| 62,487
|
[
"Brian"
] |
1fa0c2905a517714fc8dbef75142d5cde4ddc3d129f55e9ce8c00b704432f7c3
|
#!/usr/bin/env python
import argparse
import csv
from . import orca_parser
from .copper_imidazole_analysis import CopperImidazoleAnalysis
cia = CopperImidazoleAnalysis()
parser = argparse.ArgumentParser(description="Given pathnames of ORCA output files, make a dump of certain spectroscopic parameters to a CSV file.")
parser.add_argument("--csvname", dest="csvname", metavar="<CSV output root name>", type=str, default="output.csv", help="optional name for the CSV output file")
parser.add_argument(dest="namelist", metavar="<ORCA filename>", nargs="+", type=str, default=None, help="ORCA output files")
args = parser.parse_args()
namelist = args.namelist
with open(args.csvname, 'wb') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
csvwriter.writerow(["g-tensor",
"id_copper",
"A_copper",
"euler_copper",
"nqcc_copper",
"id_nitrogen_far",
"A_nitrogen_far",
"euler_nitrogen_far",
"nqcc_nitrogen_far",
"filename"])
for name in namelist:
orcafile = orca_parser.ORCAOutputParser(name)
gtensor, giso = orcafile.return_gtensor()
id_cu = cia.copper_id(orcafile)
id_far = cia.nitrogen_far_id(orcafile)
atensor_cu = cia.hyperfine(orcafile, id_cu)
atensor_far = cia.hyperfine(orcafile, id_far)
euler_cu = cia.euler(orcafile, id_cu)
euler_far = cia.euler(orcafile, id_far)
nqi_cu, nqcc_cu, eta_cu = cia.nqi(orcafile, id_cu)
nqi_far, nqcc_far, eta_far = cia.nqi(orcafile, id_far)
csvwriter.writerow([gtensor,
id_cu,
atensor_cu,
euler_cu,
nqcc_cu,
id_far,
atensor_far,
euler_far,
nqcc_far,
name])
|
berquist/orcaparse
|
scripts/copper_imidazole_csv.py
|
Python
|
mpl-2.0
| 2,075
|
[
"ORCA"
] |
b69c3b1ecafdf2406977fbd1cd1859dd53a3113c70db4ad639cba1c9249a6108
|
#!/usr/bin/env python
# getgeneseqfromgenbank.py
# gets the name and sequence of gene(s) from a Genbank file
# Copyright (C) 2007 C. Jayakumar
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This script takes a Genbank file as input, and reads the feature list
# for any gene features. It collects information about the gene and
# the sequence of the gene and prints them out to the given outputfile.
# it prints to the output file in the following '~'-delimited format
# /gene~/note~/locus_tag~start~end~sense~sequence
# It uses the Biopython module (www.biopython.org).
# The GenBank sequence file should be in the same directory as the script.
# What you need to have:
# 1. Python 2.5.1
# 2. BioPython 1.43.2
# How to run the script:
# run 'python <script filename> <inputfile, e.g. a GenBank sequence file> <outputfile>'
# Caveats
# if the <outputfile> already exists, it is OVERWRITTEN.
# What it does
# it searches the 'features' in a GenBank file for the 'gene' feature.
# Once a 'gene' feature is found, it looks for the required information
# It also looks at the gene location and extracts the actual gene sequence
# from the whole genome sequence in the GenBank file, applying a
# reverse-complement operation when the gene is on the anti-sense strand
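# An example output line, with illustrative values (qualifier values are
# printed as lists, hence the brackets):
# ['lacZ']~['beta-galactosidase']~['b0344']~365528~366305~sense~ATGACC...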
# print the license
print '''
getgeneseqfromgenbank.py
gets the name and sequence of gene(s) from a Genbank file
Copyright (C) 2007 C. Jayakumar
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it
under certain conditions. See the GNU General Public License
for more details at <http://www.gnu.org/licenses/>.
'''
import sys
from os.path import exists
# verify that the proper number of arguments are passed-in
if len(sys.argv) != 3:
print "Usage: getgeneseqfromgenbank.py <inputfile> <outputfile>"
quit()
# verify that the input file is valid
inputfile = sys.argv[1]
if not exists(inputfile):
print "Error: %s does not exist or cannot be read." % inputfile
quit()
outputfile = sys.argv[2]
outputfilehandle = open(sys.argv[2], "w")
# print the start
print "Getting the gene information and appending to the outputfile..."
# get the sequence
from Bio import SeqIO
for seq_record in SeqIO.parse(open(inputfile, "r"), "genbank") :
genomeseq = seq_record.seq
# get the feature list
from Bio import GenBank
feature_parser = GenBank.FeatureParser()
gb_record = feature_parser.parse(open(inputfile, "r"))
# get and print to the output file, the required information of each gene
for featur in gb_record.features:
if featur.type == 'gene':
if featur.strand == 1:
geneseq = (genomeseq[featur.location.nofuzzy_start:featur.location.nofuzzy_end]).tostring()
start = featur.location.nofuzzy_start
end = featur.location.nofuzzy_end
sense = "sense"
elif featur.strand == -1:
geneseq = ((genomeseq[featur.location.nofuzzy_start:featur.location.nofuzzy_end]).reverse_complement()).tostring()
start = featur.location.nofuzzy_end
end = featur.location.nofuzzy_start
sense = "anti-sense"
if featur.qualifiers.has_key('gene'):
genename = featur.qualifiers['gene']
else:
genename = ''
if featur.qualifiers.has_key('note'):
genenote = featur.qualifiers['note']
else:
genenote = ''
if featur.qualifiers.has_key('locus_tag'):
genelocustag = featur.qualifiers['locus_tag']
else:
genelocustag = ''
# print to the output file in the following '~'-delimited format
# /gene~/note~/locus_tag~start~end~sense~sequence
# should probably use the CSV module here
print >> outputfilehandle, '%s~%s~%s~%i~%i~%s~%s' % (genename, genenote, genelocustag, start, end, sense, geneseq)
# close all open resources
outputfilehandle.close()
# print the completion
print 'Done.'
|
C-Jay-Kumar/script-tools
|
python/getgeneseqfromgenbank.py
|
Python
|
gpl-3.0
| 4,560
|
[
"Biopython"
] |
91c08f778d652af6423e8d5880f0d28ff5c872881e37bc636d2983d17a7efdd3
|
# -*- coding: utf-8 -*-
# !/bin/env python
# In this file we generate all kind of data: real one, random one, ...
import random
import math
from optimizedGPS.structure import GPSGraph
from optimizedGPS.structure import GraphMLParser
from optimizedGPS.structure import Driver
from optimizedGPS.structure import DriversGraph
def generate_grid_data(length=5, width=5, **kwargs):
"""
build a grid where the top-left node is named 'n_0_0';
from a given node, the reachable nodes are the ones to its right and below
:param length: number of nodes from left to right
:param width: number of nodes from top to bottom
:param kwargs: further options
:return: A GPSGraph instance
"""
graph = GPSGraph(name=kwargs.get('graph_name') or 'graph-%s-%s' % (length, width))
    for i in range(length):
        for j in range(width):
            source = 'n_%s_%s' % (i, j)
            graph.add_node(source)
            # add an edge to the right neighbour, unless on the last column
            if i < length - 1:
                target = 'n_%s_%s' % (i + 1, j)
                graph.add_node(target)
                graph.add_edge(source, target, distance=1.0)
            # add an edge to the bottom neighbour, unless on the last row
            if j < width - 1:
                target = 'n_%s_%s' % (i, j + 1)
                graph.add_node(target)
                graph.add_edge(source, target, distance=1.0)
return graph
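# Example: generate_grid_data(length=3, width=2) builds the six nodes
# n_0_0 .. n_2_1, with edges going right (i -> i+1) and down (j -> j+1),
# each carrying distance=1.0.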
def generate_graph_from_file(file_loc, **kwargs):
"""
Use the GraphMLParser to extract the graph from the given file
:param file_loc: path to the file
:param kwargs: further options
:return: a GPSGraph instance
"""
if file_loc.endswith('.graphml'):
return GraphMLParser().parse(file_loc, **kwargs)
return None
def generate_random_drivers(graph, total_drivers=10, av_drivers=3, seed=None):
"""
Given a graph, we add total_drivers drivers as follows:
- we take uniformly at random a source node and a reachable target node from that source
- we draw a random number of drivers from a Gaussian centered on av_drivers with variance 1
- we add these generated drivers to the graph until we reach total_drivers
- if we would get too many drivers, we cap the last batch so that we end up with exactly the number
we want
:param graph: A GPSGraph instance
:param total_drivers: int
:param av_drivers: float
:param seed: see random package
:return: None
"""
drivers_graph = DriversGraph()
random.seed(seed)
total = total_drivers
while total > 0:
# Pick a random start node and a random end node different of the start node
start = graph.get_random_node(starting_node=True, seed=seed)
end = graph.get_random_node(random_walk_start=start, seed=seed)
# add some drivers from start to end
nb = max(random.gauss(av_drivers, 1.), 0.0)
nb = int(min(nb, total))
for n in range(nb):
starting_time = random.randint(0, int(math.log(nb)) + 1)
drivers_graph.add_driver(Driver(start, end, starting_time))
total -= nb
return drivers_graph
def generate_test_graph(length=2, width=3):
"""
Generate a grid graph with the given arguments.
Always generate the same drivers, from nodes 'n_0_0', 'n_1_0' and 'n_0_1' to the final node.
:param length: see generate_grid_data
:param width: see generate_grid_data
:return: a (GPSGraph, DriversGraph) tuple
"""
graph = generate_grid_data(length=length, width=width, graph_name='graph-%s-%s-test' % (length, width))
drivers_graph = DriversGraph()
drivers_graph.add_driver(Driver('n_0_0', 'n_%s_%s' % (length - 1, width - 1), 0))
drivers_graph.add_driver(Driver('n_0_0', 'n_%s_%s' % (length - 1, width - 1), 1))
drivers_graph.add_driver(Driver('n_0_0', 'n_%s_%s' % (length - 1, width - 1), 1))
drivers_graph.add_driver(Driver('n_0_1', 'n_%s_%s' % (length - 1, width - 1), 0))
drivers_graph.add_driver(Driver('n_0_1', 'n_%s_%s' % (length - 1, width - 1), 2))
drivers_graph.add_driver(Driver('n_1_0', 'n_%s_%s' % (length - 1, width - 1), 0))
drivers_graph.add_driver(Driver('n_1_0', 'n_%s_%s' % (length - 1, width - 1), 1))
drivers_graph.add_driver(Driver('n_1_0', 'n_%s_%s' % (length - 1, width - 1), 2))
return graph, drivers_graph
def generate_grid_graph_random_driver(length=2, width=3, nb_drivers=10):
"""
Generate a grid graph with the given length and width
Generate random drivers using generate_random_drivers function
:param length: see generate_grid_data
:param width: see generate_grid_data
:param nb_drivers: see generate_random_drivers; corresponds to its total_drivers argument
:return: a (GPSGraph, DriversGraph) tuple
"""
name = 'grid-graph-%s-%s-%s' % (length, width, nb_drivers)
graph = generate_grid_data(length=length, width=width, graph_name=name)
drivers_graph = generate_random_drivers(graph, total_drivers=nb_drivers)
return graph, drivers_graph
def generate_bad_heuristic_graphs(traffic_influence=2, annex_road_congestion=0, number_drivers_group=1):
"""
:param traffic_influence: the congestion on the main road (0 -> 1)
:param annex_road_congestion: for the annex roads (0 -> 1 and 0 -> 2), the difference between
their congestion and the traffic influence
:param number_drivers_group: we have a schema for the drivers to add in order to get a bad heuristic.
We repeat this schema as many times as number_drivers_group
:return:
"""
graph = GPSGraph(name="bad_heuristic_graph:traffic_influence=%s:annex_road_length=%s"
% (traffic_influence, annex_road_congestion))
graph.add_edge("0", "1", congestion_func=lambda x: traffic_influence)
graph.add_edge("0", "2", congestion_func=lambda x: (traffic_influence + annex_road_congestion) * x + 2)
graph.add_edge("1", "3", congestion_func=lambda x: 2)
graph.add_edge("2", "3", congestion_func=lambda x: 1)
graph.add_edge("3", "2", congestion_func=lambda x: traffic_influence + annex_road_congestion)
drivers_graph = DriversGraph()
for _ in xrange(number_drivers_group):
drivers_graph.add_driver(Driver("0", "3", 0))
drivers_graph.add_driver(Driver("0", "3", 0))
drivers_graph.add_driver(Driver("0", "2", 1))
return graph, drivers_graph
|
mickael-grima/optimizedGPS
|
optimizedGPS/data/data_generator.py
|
Python
|
apache-2.0
| 6,547
|
[
"Gaussian"
] |
4a62c51574374668d06954b096da2156cf43cd68e8c990da15728b721a5da18e
|
#!/usr/bin/env python
# encoding=utf8
import urllib2
import json
import re
from bs4 import BeautifulSoup
from nltk.tokenize import sent_tokenize
from socket import error as SocketError
#
# checks whether `string` is the same word as `pluralString`, allowing for common
# plural forms; returns pluralString on a match, otherwise None
#
def equalCheckingPlurals(string, pluralString):
# only check plurals if first 3 letters match
if string[0] != pluralString[0]:
return None
if len(string) > 1 and len(pluralString) > 1 and string[1] != pluralString[1]:
return None
if len(string) > 2 and len(pluralString) > 2 and string[2] != pluralString[2]:
return None
# check all possible plurals of string
if string == pluralString or string + "s" == pluralString or string + "es" == pluralString or string[:-1] + "ies" == pluralString or string[:-1] + "ves" == pluralString:
return pluralString
return None
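# e.g. equalCheckingPlurals("berry", "berries") -> "berries" ("y" -> "ies")
#      equalCheckingPlurals("leaf", "leaves") -> "leaves" ("f" -> "ves")
#      equalCheckingPlurals("apple", "orange") -> None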
#
# checks whether `string` matches any entry in a list of plurals, allowing for
# plural forms; returns the matching entry, otherwise None
#
def inCheckingPlurals(string, pluralList):
for pluralString in pluralList:
if equalCheckingPlurals(string, pluralString):
return pluralString
return None
# arrays for labeling ingredients (categorized for the purpose of cooking, so tomato is veg, not fruit)
dairyIngredients = ['buttermilk', 'cottage', 'cream', 'creamer', 'creamy', 'creme', 'ghee', 'half-and-half',
'milk', 'yogurt']
cheeses = ['bocconcini', 'mozzarella', 'gouda', 'swiss', 'brie']
meats = ['bacon', 'beefs', 'burgers', 'chorizo', 'dogs', 'frankfurters', 'giblets', 'ham', 'lambs', 'livers',
'meatballs', 'meatloaves', 'meats', 'mignon', 'mincemeat', 'pepperonis', "pig's", 'porks',
'prosciutto', 'ribs', 'roasts', 'sausages', 'sirloin', 'tripe', 'veal', 'venison', 'kielbasas',
'liverwurst', 'wieners', 'cotechino', 'linguica', 'pastrami', 'squirrels', 'sauerbraten',
'picadillo', 'carcass', 'brains', 'mortadella', 'rounds', 'sweetbread', 'toad', 'tinga',
'embutido', 'hash', 'broil', 'brisket', 'franks', 'pigs', 'rouladen', 'chops', 'scrapple',
'barbeque', 'spareribs']
poultry = ['bologna', 'bratwursts', 'chickens', 'ducks', 'goose', 'hens', 'pollo', 'salami', 'turkey',
'pheasant', 'quail', 'turducken', 'drumettes', 'wings', 'roosters']
fish = ['albacores', 'bass', 'catfish', 'cods', 'fish', 'flounder', 'grouper', 'haddock', 'halibut', 'mahi',
'monkfish', 'salmon', 'shark', 'snapper', 'sole', 'swordfishes', 'trouts', 'tunas', 'bluefish',
'bonito', 'rockfish', 'mackerel', 'naruto', 'drum', 'marlin', 'tilapia', 'carp', 'kingfish',
'mullets', 'whitefish', 'kippers', 'torsk', 'saltfish']
seafoods = ['anchovies', 'calamaris', 'clams', 'crabs', 'crabmeat', 'crawfish', 'lobsters', 'mussels',
'oysters', 'prawns', 'scallops', 'seafood', 'shrimps', 'squids', 'snails', 'shellfish', 'caviar']
mainProteins = ['beans', 'chickpeas', 'nuts', 'seeds', 'tofu', 'whey', 'buckwheat', 'protein', 'soybeans',
'soy', 'tempeh', 'lentils', 'masoor', 'gluten', 'pine', 'falafel', 'portobello']
fruits = ['apples', 'apricots', 'bananas', 'blackberries', 'blueberries', 'cantaloupe', 'cherries', 'citrons',
'citrus', 'coconuts', 'cranberries', 'currants', 'elderberries', 'figs', 'fruitcakes', 'fruits',
'gooseberries', 'grapefruit', 'grapes', 'guava', 'honeydew', 'huckleberries', 'kiwis','kumquats',
'lemonade', 'lemons', 'limes', 'mangoes', 'marrons', 'mincemeat', 'mulberries', 'nectarines', 'oranges',
'papayas', 'peaches', 'pears', 'persimmon', 'persimmons', 'pineapples', 'plums', 'prunes', 'raisins',
'raspberries', 'slushies', 'smoothies', 'sorrel', 'strawberries', 'tangerines', 'watermelons', 'yuzu',
          'lingonberries', 'plantains', 'juniper', 'pomegranates', 'serviceberries',
'zinfandel', 'lychees', 'carambola', 'uvas']
vegetables = ['artichokes', 'arugula', 'asparagus', 'avocados', 'bamboo', 'beets', 'broccoli', 'cabbage',
'calzones', 'carrots', 'cauliflower', 'celery', 'chilis', 'chives', 'choy', 'cilantro', 'coleslaw',
'coriander', 'cucumber', 'cucumbers', 'dates', 'eggplant', 'eggplants', 'endive', 'escarole',
'galangal', 'haystacks', 'jicama', 'kale', 'kohlrabi', 'kucai', 'leeks', 'lettuce',
'mushrooms', 'okra', 'olives', 'onions', 'parsley', 'parsnips', 'peas', 'peppers', 'pickles',
'pizzas', 'potatoes', 'pumpkins', 'radishes', 'rutabagas', 'salad', 'sauerkraut', 'shallots', 'slaws',
'spinach', 'sprouts', 'squash', 'tamarind', 'taros', 'tomatillo', 'tomatillos', 'tomatoes', 'turnips',
'vegetable', 'vegetables', 'veggies', 'watercress', 'yams', 'zucchinis', 'chervil', 'daikon', 'iceberg',
'nopales', 'pimentos', 'radicchio', 'karengo', 'nori', 'succotash', 'truffle', 'chard', 'fries', 'leaves',
'browns', 'romain', 'palm', 'sorghum', 'aloo', 'haricots', 'caprese', 'salata', 'shiitake']
sugars = ['Jell-O®', 'butterscotch', 'candied', 'candy', 'caramels', 'frosting', 'fructose', 'gingersnaps',
          'glaces', 'glaze', 'glycerin', 'glycerol', 'gumdrops', 'gummi', 'honey', 'icing', 'jellybeans',
          'ladyfingers', 'licorice', 'macaroons', 'maple', 'marrons glaces', 'marshmallows', 'marzipan',
          'molasses', 'pastries', 'pectin', 'peppermints', 'pie', 'piping', 'puddings', 'puff', 'sourball',
          'sprinkles', 'sucanat', 'sugar', 'sweetener', 'syrup', 'tarts', 'toffee', 'twinkies', 'colaciones',
          'sherbet', "hershey®'s", 'candies', "confectioners'", 'fudge', 'taffy', 'pink']
sauces = ['alfredo', 'applesauce', 'chutney', 'cannoli', 'dips', 'guacamole', 'hummus', 'paste', 'spreads',
'tahini', 'tzatziki', 'denjang', 'salsa', 'sauce', 'tapenade', 'coating', 'teriyaki',
'aioli', 'checca', 'amatriciana', 'ragu', 'marinara']
condiments = ['dressing', 'jam', 'ketchup', 'marinade', 'marjoram', 'mayonnaise', 'mirin', 'mustard',
'pesto', 'relish', 'shoyu', 'tamari', 'vinaigrette', 'gochujang']
soups = ['broth', 'chowder', 'dashi', 'soup', 'stew', 'jambalaya', 'gumbo', 'gazpacho', 'goulash', 'pho',
'slumgullion', 'cioppino', 'minestrone']
nuts = ['almonds', 'butternuts', 'candlenuts', 'cashews', 'chestnuts', 'hazelnuts', 'macadamia', 'nuts',
        'peanuts', 'pecans', 'pistachios', 'walnuts']
alcoholicIngredients = ['anisette', 'beer', 'bitters', 'bourbon', 'brandy', 'cacao', 'chambord', 'champagne',
'cognac', 'eggnog', 'kirsch', 'kirschwasser', 'liqueur', 'rum', 'schnapps', 'sherry', 'ale',
'spritz', 'tequila', 'vermouth', 'vodka', 'whiskey', 'wine', 'campari', 'alcohol', 'absinthe',
'cachaca', 'liquor', 'cointreau', 'curacao', 'sake', 'sec', 'calvados', 'galliano', 'lillet',
'margaritas', 'coladas', 'negroni', 'mojitos', 'mimosas', 'bahama', 'slammer', 'sauvignon', 'chablis',
'martinis', 'tequinis', 'spritzs', 'cosmopolitan', 'hurricanes', 'sangria', 'sex', "shaggy's", 'nipples',
'stoli']
spices = ['allspice', 'anise', 'arrowroot', 'basil', 'bay', 'capers', 'caraway', 'cardamom', 'cassava',
'cayenne', 'chocolate', 'cilantro', 'cinnamon', 'cloves', 'cocoa', 'coriander', 'cumin', 'dill',
'fennel', 'flax', 'garlic', 'ginger', 'herbs', 'kalonji', 'mace', 'masala', 'miso', 'monosodium',
'nutmeg', 'oregano', 'paprika', 'pepper', 'peppercorns', 'pimento', 'poppy', 'poppyseed',
'powder','rhubarb', 'rosemary', 'saffron', 'sage', 'salt', 'savory', 'seasoning', 'sesame', 'spices',
'sunflower', 'tarragon', 'thyme', 'turmeric', 'vanilla', 'watercress', 'spearmint', 'comfort']
spicy = ['angelica', 'dijon', 'horseradish', 'jerk', 'wasabi', 'spicy']
hotPeppers = ['jalapenos', 'pepperoncinis', 'chiles']
grains = ['bagels', 'baguettes', 'barley', 'biscuits', 'bran', 'bread', 'buns', 'cereal', 'corn', 'cornbread',
'cornstarch', 'couscous', 'crackers', 'croutons', 'crusts', 'dough', 'granola', 'hominy', 'kasha',
'masa', 'matzo', 'millet', 'muffins', 'oats', 'pitas', 'popcorn', 'pretzels', 'quinoa', 'rice', 'rolls',
'shortbread', 'sourdough', 'stuffing', 'tapioca', 'toasts', 'tortillas', 'wheat', 'kaiser', 'cornmeal',
'breadcrumbs', 'graham', 'bulgur', 'farina', 'oatmeal', 'croissants', 'polenta', 'grits', 'pumpernickel',
'sago', 'seitan', 'grains', 'taters', 'risotto', 'shells', 'amarettini', 'mochi', 'cornflakes', 'pilaf',
'puppies']
pastas = ['farfalle', 'fettuccine', 'lasagnas', 'linguine', 'mac', 'macaroni', 'manicotti', 'noodles', 'pasta',
'farfel', 'vermicelli', 'tagliatelle', 'cannelloni', 'penne']
wrappedMeals = ['burritos', 'calzones', 'dumplings', 'empanadas', 'fajitas', 'hero', 'pie', 'pinwheels', 'pizzas',
'quesadillas', 'sandwiches', 'tacos', 'tourtiere', 'wontons', 'hoagie', 'pierogies', 'rarebit',
'joes', 'enchiladas', 'pierogi', 'bierrocks', 'torta', 'reuben', 'wraps', 'piroshki', 'tamales',
'bruschetta', 'antipasto', 'hamburger', 'muffuletta', 'blanket', 'runzas', 'samosas', 'sambousas',
'chalupas', 'spanakopita', 'submarine']
pastaDishes = ['casseroles', 'curry', 'lasagna', 'marzetti', 'mostaccioli', 'spaghetti', 'stroganoff', 'ziti',
'pastini', 'pastitsio', 'fideo', 'spaghettini', 'moussaka', 'tortellinis', 'tallerine', 'talerine',
'scampi', 'ravioli', 'pad', 'gnocchi', 'spaetzle', 'stromboli']
vegetableDishes = ['tabbouleh', 'kabobs', 'suey', 'frittatas', 'quiches', 'raita', 'shieldzini', 'stir',
'sukiyaki']
drinks = ['beverage', 'cider', 'coffee', 'dew™', 'drink', 'eggnog', 'epazote', 'espresso', 'gin', 'juices',
'lemonade', 'limeade', 'milk', 'rosewater', 'soda', 'tea', 'wassail', 'punch', 'shake', 'shirley',
'americano']
cookingLiquids = ['oil', 'vinegar', 'water', 'snow', 'ice']
bakingIngredients = ['ammonia', 'baking', 'eggs', 'flour', 'margarine', 'yeast', 'bisquick®']
cookingFats = ['butter', 'gelatin', 'gravy', 'lard', 'lecithin', 'ovalette', 'shortening', 'xanthan', 'suet']
extras = ['carnations', 'coloring', 'dust', 'flowers', 'lilies', 'spray', 'toppings', 'drippings', 'powdered',
'gold']
fasteners = ['sticks', 'skewers', 'toothpicks']
adhesives = ['glue']
containers = ['jars']
flavorings = ['extract', 'flavorings', 'mint', 'pandan', 'hickory', 'flavored', 'mesquite', 'wood',
'hardwood']
mixtures = ['food', 'mixes']
# words with succeeding noun ("milk" or "cake")
nonDairyMilks = ['almond', 'soy', 'coconut']
cakeTypes = ['pound', 'sponge', 'white', 'yellow', 'bunny', "'scratch'"]
#
# returns a list of labels that match word(s) in list of ingredient/recipe words
#
def getLabelsFromArray(words):
labels = set()
for word in words:
if inCheckingPlurals(word, dairyIngredients):
labels.add("dairy")
labels.add("fat and vitamins")
continue
if ("cheese" == word and "cream" not in words) or word in cheeses:
labels.add("cheese")
labels.add("dairy")
continue
if inCheckingPlurals(word, meats):
labels.add("meat")
continue
if inCheckingPlurals(word, poultry):
labels.add("poultry")
continue
if inCheckingPlurals(word, fish):
labels.add("fish")
continue
if inCheckingPlurals(word, seafoods):
labels.add("seafood")
continue
if inCheckingPlurals(word, mainProteins):
labels.add("main protein")
continue
if inCheckingPlurals(word, fruits):
labels.add("fruit")
continue
if inCheckingPlurals(word, vegetables):
labels.add("vegetable")
continue
if inCheckingPlurals(word, spices):
labels.add("spice or herb")
continue
if inCheckingPlurals(word, sauces):
labels.add("sauce")
continue
if inCheckingPlurals(word, condiments):
labels.add("condiment")
continue
if inCheckingPlurals(word, soups):
labels.add("soup")
continue
if inCheckingPlurals(word, alcoholicIngredients):
labels.add("alcoholic")
continue
if inCheckingPlurals(word, spicy):
labels.add("spicy")
continue
if inCheckingPlurals(word, hotPeppers):
labels.add("vegetable")
labels.add("spicy")
continue
if inCheckingPlurals(word, nuts):
labels.add("nut")
continue
if inCheckingPlurals(word, cookingLiquids):
labels.add("cooking liquid")
continue
if inCheckingPlurals(word, cookingFats):
labels.add("cooking fat")
continue
if inCheckingPlurals(word, bakingIngredients):
labels.add("baking ingredient")
continue
if inCheckingPlurals(word, sugars):
labels.add("sugar")
continue
if inCheckingPlurals(word, grains):
labels.add("grain")
continue
if inCheckingPlurals(word, pastas):
labels.add("pasta")
continue
if inCheckingPlurals(word, drinks):
labels.add("drink")
continue
if inCheckingPlurals(word, wrappedMeals):
labels.add("wrapped meal")
continue
if inCheckingPlurals(word, pastaDishes):
labels.add("pasta dish")
continue
if inCheckingPlurals(word, vegetableDishes):
labels.add("vegetable dish")
continue
if inCheckingPlurals(word, extras):
labels.add("recipe extra")
continue
if inCheckingPlurals(word, flavorings):
labels.add("flavoring")
continue
if inCheckingPlurals(word, mixtures):
labels.add("mixture")
continue
if inCheckingPlurals(word, fasteners):
labels.add("fastener")
continue
if inCheckingPlurals(word, adhesives):
labels.add("adhesive")
continue
if inCheckingPlurals(word, containers):
labels.add("container")
continue
# check for non dairy milks
if "milk" in words:
index = words.index("milk")
if index > 0 and words[index - 1] in nonDairyMilks:
labels.remove("dairy")
# check if "cake" actually is a type of cake
if "cake" in words:
index = words.index("cake")
if index > 0 and words[index - 1] in cakeTypes:
labels.add("sugar")
elif "cakes" in words:
index = words.index("cakes")
if index > 0 and words[index - 1] in cakeTypes:
labels.add("sugar")
# check if "non dairy" in parsed ingredient
if "dairy" in words and "dairy" in labels:
index = words.index("dairy")
if index > 0 and words[index - 1] == "non":
labels.remove("dairy")
# add "greens" but not "green" as vegetable
if "greens" in words:
labels.add("vegetable")
# add "steak" as meat only if not used with fish (ie "salmon steak")
if ("steak" in words or "steaks" in words) and "fish" not in labels:
labels.add("meat")
# chili either a pepper or soup
if "chili" in words:
index = words.index("chili")
if index+1 < len(words) and words[index+1] == "pepper":
labels.add("vegetable")
labels.add("spicy")
else:
labels.add("soup")
# check for unsweetened sugars
if "unsweetened" in words and "sugar" in labels:
labels.remove("sugar")
# check for unflavored flavorings
if "unflavored" in words and "flavoring" in labels:
labels.remove("flavoring")
return list(labels)
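#
# illustrative behavior (hypothetical input, not from the original script):
#   getLabelsFromArray(["almond", "milk"]) labels the nut and the milk, then the
#   non-dairy milk check drops "dairy", leaving {"nut", "fat and vitamins"}
#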
# arrays for labeling recipes
breakfasts = ['crepes', 'pancakes', 'waffles', 'eggs', 'beignets', 'doughnuts', 'muffins', 'stroopwaffels',
              'brunch', 'omelets']
desserts = ['cookies', 'cakes', 'brownies', 'pies', 'cobblers', 'mousses', 'puffs', 'biscottis', 'wafers', 'splits',
    'scones', 'cupcakes', 'puddings', 'snowballs', 'candys', 'cheesecakes', 'macaroons', 'fruitcakes',
'gingerbreads', 'pastries', 'fudges', 'tarts', 'tarte', 'crinkles', 'chews', 'bars', 'squares', 'twists', 'snaps',
'brittles', 'thumbprints', 'babka', 'dessert', 'twinkies', 'cannolis', 'genoise', 'stollen', 'panettone',
'tiramisu', 'tuppakaka', 'vasilopita', 'zeppoli', 'sachertorte', 'spudnuts', 'botercake', 'kolaches', 'eclairs',
'ponczki', 'popovers', 'pulla', 'injera', 'dulce', 'bibingka', 'fastnachts', 'springerle', 'spritsar', 'spruffoli',
'snickerdoodles', 'santa\'s', 'sandtarts', 'sandbakelser', 'rugelach', 'rocky', 'pralines', 'pfeffernusse',
'pavlova', 'meringue', 'melting', 'meltaways', 'listy', 'lebkuchen', 'koulourakia', 'hamantashen', 'fudgies',
'florentines', 'gods', 'bark', 'buckeyes', 'torte', 'ladyfingers', 'baumkuchen', 'kipferl', 'kake', 'mocha',
'strufoli', 'stracciatella', 'rosettes', 'pepparkakor', 'sopapillas', 'kolacky', 'kolaczki', 'velvet', 'yums',
'vaselopita', 'necklaces', 'tres', 'timbales', 'wandies', 'lizzies', 'kringles', 'meringues', 'gateau', 'flan',
'baklava', 'trifle', 'dollies', 'krumkake', 'locks', 'lamingtons', 'napoleons', 'pasties', 'penuche', 'peppernuts',
'delights', 'prusurates', 'savoiardi', 'scotcharoos', 'sandies', 'sfinge', 'sfingi', 'rainbows', 'spitzbuben',
'sponges', 'spumetti', 'streusel', 'sufganiot', 'sufganiyot', 'crumbcake', 'bliss', 'malasadas']
breads = ['bagels', 'bannock', 'biscuits', 'breads', 'brioche', 'buns', 'challahs', 'chow', 'ciabattas', 'cornbread',
          'crisps', 'croissants', 'doughs', 'focaccia', 'fougasse', 'toast', 'gingerbreads', 'hoska', 'johnnycakes',
          'kaiser', 'baguettes', 'kifli', 'crusts', 'kourabiedes', 'lefse', 'loafs', 'loaves', 'naan', 'oatmeal', 'paella',
'pan', 'paximade', 'pizzelles', 'pumpernickel', 'rolls', 'shells', 'shortbread', 'sourdoughs', 'stuffings',
'taralli', 'tortillas']
def getRecipeLabels(parsedRecipe):
labels = set(getLabelsFromArray(parsedRecipe))
for string in parsedRecipe:
if inCheckingPlurals(string, breakfasts):
labels.add("breakfast")
continue
if inCheckingPlurals(string, desserts):
labels.add("dessert")
continue
if inCheckingPlurals(string, breads):
labels.add("bread")
continue
# don't use "grain" as "label" if recipe label has "bread"
if "bread" in labels and "grain" in labels:
labels.remove("grain")
if "alcoholic" in labels:
# if recipe title includes alcohol but no other defining words, it's a drink
if len(labels) == 1:
labels.add("drink")
# if recipe title includes "non-alcoholic", it's not an alcoholic recipe
if "non-alcoholic" in parsedRecipe:
labels.remove("alcoholic")
if "vegetarian" in parsedRecipe:
if "meat" in labels:
labels.remove("meat")
if "seafood" in labels:
labels.remove("seafood")
if "fish" in labels:
labels.remove("fish")
if "poultry" in labels:
labels.remove("poultry")
return list(labels)
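#
# illustrative behavior (hypothetical input, not from the original script):
#   getRecipeLabels(["non-alcoholic", "sangria"]) first labels the title
#   "alcoholic", promotes the lone label to "drink", then strips "alcoholic",
#   so the result is ["drink"]
#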
# list of measurement units for parsing ingredient
measurementUnits = ['teaspoons','tablespoons','cups','containers','packets','bags','quarts','pounds','cans','bottles',
'pints','packages','ounces','jars','heads','gallons','drops','envelopes','bars','boxes','pinches',
'dashes','bunches','recipes','layers','slices','links','bulbs','stalks','squares','sprigs',
'fillets','pieces','legs','thighs','cubes','granules','strips','trays','leaves','loaves','halves']
#
# transform amount to cups based on amount and original unit
# (1 gallon = 16 cups, 1 quart = 4 cups, 1 pint = 2 cups,
#  1 cup = 8 ounces = 16 tablespoons = 48 teaspoons)
#
def transformToCups(amount, unit):
	if unit == "cups":
		return amount
	elif unit == "gallons":
		return amount * 16
	elif unit == "quarts":
		return amount * 4
	elif unit == "pints":
		return amount * 2
	elif unit == "ounces":
		return amount / 8
	elif unit == "tablespoons":
		return amount / 16
	elif unit == "teaspoons":
		return amount / 48
	else:
		return amount
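# illustrative conversions under the factors above (not in the original):
#   transformToCups(2, "quarts") -> 8; transformToCups(8, "ounces") -> 1.0
#   transformToCups(3, "teaspoons") -> 0.0625 (3/48, with true division)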
# strings indicating ingredient as optional (currently don't use optional boolean for anything)
# optionalStrings = ['optional', 'to taste', 'as needed', 'if desired']
# list of adjectives and participles used to describe ingredients
descriptions = ['baked', 'beaten', 'blanched', 'boiled', 'boiling', 'boned', 'breaded', 'brewed', 'broken', 'chilled',
'chopped', 'cleaned', 'coarse', 'cold', 'cooked', 'cool', 'cooled', 'cored', 'creamed', 'crisp', 'crumbled',
'crushed', 'cubed', 'cut', 'deboned', 'deseeded', 'diced', 'dissolved', 'divided', 'drained', 'dried', 'dry',
'fine', 'firm', 'fluid', 'fresh', 'frozen', 'grated', 'grilled', 'ground', 'halved', 'hard', 'hardened',
'heated', 'heavy', 'juiced', 'julienned', 'jumbo', 'large', 'lean', 'light', 'lukewarm', 'marinated',
'mashed', 'medium', 'melted', 'minced', 'near', 'opened', 'optional', 'packed', 'peeled', 'pitted', 'popped',
'pounded', 'prepared', 'pressed', 'pureed', 'quartered', 'refrigerated', 'rinsed', 'ripe', 'roasted',
'roasted', 'rolled', 'rough', 'scalded', 'scrubbed', 'seasoned', 'seeded', 'segmented', 'separated',
'shredded', 'sifted', 'skinless', 'sliced', 'slight', 'slivered', 'small', 'soaked', 'soft', 'softened',
'split', 'squeezed', 'stemmed', 'stewed', 'stiff', 'strained', 'strong', 'thawed', 'thick', 'thin', 'tied',
'toasted', 'torn', 'trimmed', 'wrapped', 'vained', 'warm', 'washed', 'weak', 'zested', 'wedged',
'skinned', 'gutted', 'browned', 'patted', 'raw', 'flaked', 'deveined', 'shelled', 'shucked', 'crumbs',
'halves', 'squares', 'zest', 'peel', 'uncooked', 'butterflied', 'unwrapped', 'unbaked', 'warmed']
# list of adverbs used before or after description
precedingAdverbs = ['well', 'very', 'super']
succeedingAdverbs = ['diagonally', 'lengthwise', 'overnight']
# list of prepositions used after ingredient name
prepositions = ['as', 'such', 'for', 'with', 'without', 'if', 'about', 'e.g.', 'in', 'into', 'at', 'until']
# only used as <something> removed, <something> reserved, <x> inches, <x> old, <some> temperature
descriptionsWithPredecessor = ['removed', 'discarded', 'reserved', 'included', 'inch', 'inches', 'old', 'temperature', 'up']
# descriptions that can be removed from ingredient, i.e. candied pineapple chunks
unnecessaryDescriptions = ['chunks', 'pieces', 'rings', 'spears']
# list of prefixes and suffixes that should be hyphenated
hyphenatedPrefixes = ['non', 'reduced', 'semi', 'low']
hyphenatedSuffixes = ['coated', 'free', 'flavored']
#
# main function
#
jsonFile = open("recipes.json", "w")
jsonFile.truncate()
outputFile = open("output.txt", "w")
outputFile.truncate()
parenthesesRegex = re.compile(r"\([^()]*\)")
# load list of all ingredients
allIngredientsFile = open("allIngredients.txt", "r")
allIngredients = allIngredientsFile.read().split("\n")
allIngredientsFile.close()
while "" in allIngredients:
allIngredients.remove("")
unlabeledIngredients = set()
unlabeledRecipes = set()
# recipes start at id ~6660 and end at id ~27000
for recipeId in range(6660, 27000):
soup = None
	try:
		url = "http://allrecipes.com/recipe/{}".format(recipeId)
		# urllib2 responses are not context managers under Python 2, so read and close explicitly
		response = urllib2.urlopen(url, timeout=1)
		soup = BeautifulSoup(response.read(), "html.parser")
		response.close()
except AttributeError as e:
outputFile.write("{0}: AttributeError".format(recipeId))
except urllib2.HTTPError as e:
outputFile.write("{0}: No recipe".format(recipeId))
outputFile.write(e.reason)
except urllib2.URLError as e:
outputFile.write("{0}: URL ERROR".format(recipeId))
outputFile.write(e.reason)
except SocketError as e:
outputFile.write("{0}: SOCKET ERROR".format(recipeId))
if soup:
titleSpan = soup.find("h1", class_="recipe-summary__h1")
servingSpan = soup.find("span", class_="servings-count")
calorieSpan = soup.find("span", class_="calorie-count")
directionObjects = soup.find_all("span", class_="recipe-directions__list--item")
ingredientObjects = soup.find_all("span", class_="recipe-ingred_txt")
footnotesSection = soup.find("section", class_="recipe-footnotes")
#
# get title
#
title = titleSpan.text
title = title.replace("Linguini", "Linguine")
title = title.replace("Genoese", "Genoise")
#
# get labels
#
		parsedTitle = title.lower().replace("(", "").replace(")", "").replace("-", " ").split(" ")
while "" in parsedTitle:
parsedTitle.remove("")
allLabels = getRecipeLabels(parsedTitle)
if len(allLabels) == 0:
unlabeledRecipes.add(title)
#
# get ingredients
#
count = len(ingredientObjects) - 3 # 2 spans with "Add all" and 1 empty
ingredients = []
for i in range(0, count):
ingredientString = ingredientObjects[i].text
# check if not ingredient, but separator
# ie "For Bread:"
if ingredientString.find("For ") == 0 or " " not in ingredientString or (":" in ingredientString and "eg:" not in ingredientString):
continue
ingredient = {}
ingredient["descriptions"] = []
# move parentheses to description
while True:
parentheses = parenthesesRegex.search(ingredientString)
if not parentheses:
break
searchString = parentheses.group()
ingredientString = ingredientString.replace(searchString, "")
ingredient["descriptions"].append(searchString[1:-1])
# remove "," and "-" then split ingredient into words
ingredientString = ingredientString.replace(","," and ")
ingredientString = ingredientString.replace("-"," ")
parsedIngredient = ingredientString.split(" ")
# remove "", caused by extra spaces
while "" in parsedIngredient:
parsedIngredient.remove("")
# move prepositions to description
for index in range(0, len(parsedIngredient)):
if parsedIngredient[index] in prepositions:
if (index + 1 < len(parsedIngredient) and parsedIngredient[index + 1] == "use") or (index > 0 and parsedIngredient[index - 1] == "bone" and parsedIngredient[index] == "in"):
continue
parsedPrepositionalPhrase = parsedIngredient[index:]
ingredient["descriptions"].append(" ".join(parsedPrepositionalPhrase))
parsedIngredient = parsedIngredient[:index]
break
#
# get ingredient amount
#
ingredient["amount"] = 0
while len(parsedIngredient) > 0:
# check if current word is number of inches, not amount
if len(parsedIngredient) > 1 and parsedIngredient[1] == "inch":
break
				# try to eval the first word: this handles digits and fractions such as "1/2"
				# caveats: "x" is not a multiplier and "%" would be parsed as modulo,
				# so such tokens fall through to the except clause below
try:
ingredient["amount"] += eval(parsedIngredient[0])
del parsedIngredient[0]
except (SyntaxError, NameError, TypeError):
break
#
# get ingredient unit
#
# check words for unit
unitString = ""
for i in range(0, len(parsedIngredient)):
pluralUnit = inCheckingPlurals(parsedIngredient[i], measurementUnits)
if pluralUnit:
unitString = pluralUnit
del parsedIngredient[i]
if i < len(parsedIngredient) and parsedIngredient[i] == "+":
while "+" in parsedIngredient:
index = parsedIngredient.index("+")
del parsedIngredient[index]
ingredient["amount"] += transformToCups(eval(parsedIngredient[index]), parsedIngredient[index+1])
del parsedIngredient[index]
del parsedIngredient[index+1]
break
# check for "cake" as unit, but only if "yeast" somewhere in ingredient
if "yeast" in parsedIngredient:
for word in parsedIngredient:
if equalCheckingPlurals(word, "cakes"):
unitString = "cakes"
parsedIngredient.remove(word)
break
# check if first word in array is "or", then ingredient has 2 possible units
if parsedIngredient[0] == "or":
pluralUnit = inCheckingPlurals(parsedIngredient[1], measurementUnits)
if pluralUnit:
unitString += " " + parsedIngredient[0] + " " + pluralUnit
parsedIngredient = parsedIngredient[2:]
# delete "of" at first index, ie "1 cup of milk" -> "1 cup milk"
if parsedIngredient[0] == "of":
del parsedIngredient[0]
ingredient["unit"] = unitString
#
# get ingredient descriptions
#
			# remove useless words (iterate over a copy, since we mutate the list)
			for word in parsedIngredient[:]:
				if word in unnecessaryDescriptions:
					parsedIngredient.remove(word)
index = 0
while index < len(parsedIngredient):
descriptionString = ""
word = parsedIngredient[index]
# search through descriptions (adjectives)
if word in descriptions:
descriptionString = word
# check previous word
if index > 0:
previousWord = parsedIngredient[index - 1]
if previousWord in precedingAdverbs or previousWord[-2:] == "ly":
descriptionString = previousWord + " " + word
parsedIngredient.remove(previousWord)
# check next word
elif index + 1 < len(parsedIngredient):
nextWord = parsedIngredient[index + 1]
if nextWord in succeedingAdverbs or nextWord[-2:] == "ly":
descriptionString = word + " " + nextWord
parsedIngredient.remove(nextWord)
# word not in descriptions, check if description with predecessor
elif word in descriptionsWithPredecessor and index > 0:
descriptionString = parsedIngredient[index - 1] + " " + word
del parsedIngredient[index - 1]
# either add description string to descriptions or check next word
if descriptionString == "":
index+=1
else:
ingredient["descriptions"].append(descriptionString)
parsedIngredient.remove(word)
# remove "and"
while "and" in parsedIngredient:
parsedIngredient.remove("and")
# remove "style"
while "style" in parsedIngredient:
parsedIngredient.remove("style")
# remove "or" if last word
if parsedIngredient[-1] == "or":
del parsedIngredient[-1]
			# replace hyphenated prefixes and suffixes, writing the result back into the
			# list (rebinding the loop variable alone would not modify parsedIngredient)
			for k in range(len(parsedIngredient)):
				word = parsedIngredient[k]
				for suffix in hyphenatedSuffixes:
					if suffix in word:
						word = word.replace(suffix, "-" + suffix)
				for prefix in hyphenatedPrefixes:
					if word.find(prefix) == 0:
						word = word.replace(prefix, prefix + "-")
				parsedIngredient[k] = word
# move various nouns to description
if "powder" in parsedIngredient and ("coffee" in parsedIngredient or "espresso" in parsedIngredient or "tea" in parsedIngredient):
parsedIngredient.remove("powder")
ingredient["descriptions"].append("unbrewed")
#
# get ingredient
#
ingredientString = " ".join(parsedIngredient)
# remove "*", add footnote to description
if "*" in ingredientString:
ingredient["descriptions"].append("* see footnote")
ingredientString = ingredientString.replace("*", "")
# standardize "-" styling
ingredientString = ingredientString.replace("- ", "-")
ingredientString = ingredientString.replace(" -", "-")
ingredientString = ingredientString.replace("Jell O", "Jell-O")
ingredientString = ingredientString.replace("half half", "half-and-half")
# remove unnecessary punctuation
ingredientString = ingredientString.replace(".", "")
ingredientString = ingredientString.replace(";", "")
# fix spelling errors
ingredientString = ingredientString.replace("linguini", "linguine")
ingredientString = ingredientString.replace("filets", "fillets")
ingredientString = ingredientString.replace("chile", "chili")
ingredientString = ingredientString.replace("chiles", "chilis")
ingredientString = ingredientString.replace("chilies", "chilis")
ingredientString = ingredientString.replace("won ton", "wonton")
ingredientString = ingredientString.replace("liquer", "liqueur")
ingredientString = ingredientString.replace("confectioners ", "confectioners' ")
ingredientString = ingredientString.replace("creme de cacao", "chocolate liquer")
ingredientString = ingredientString.replace("pepperjack", "Pepper Jack")
ingredientString = ingredientString.replace("Pepper jack", "Pepper Jack")
# standardize ingredient styling
ingredientString = ingredientString.replace("dressing mix", "dressing")
ingredientString = ingredientString.replace("salad dressing", "dressing")
ingredientString = ingredientString.replace("bourbon whiskey", "bourbon")
ingredientString = ingredientString.replace("pudding mix", "pudding")
if ingredientString == "":
outputFile.write("Bad ingredient string: {0}".format(ingredientObjects[i].text))
ingredientString = ingredientObjects[i].text
pluralString = inCheckingPlurals(ingredientString, allIngredients)
if pluralString:
ingredientString = pluralString
else:
allIngredients.append(ingredientString)
ingredient["ingredient"] = ingredientString
#
# get ingredient labels
#
ingredientString = ingredientString.replace("-flavored", "")
ingredientString = ingredientString.lower()
ingredient["labels"] = getLabelsFromArray(ingredientString.split(" "))
if len(ingredient["labels"]) == 0:
unlabeledIngredients.add(ingredient["ingredient"])
ingredients.append(ingredient)
#
# get directions
#
# get number of spans and concatenate all contents to string
count = len(directionObjects) - 1 # 1 empty span at end
directionsString = directionObjects[0].text
for i in range(1, count):
directionsString += " " + directionObjects[i].text
# use nltk to split direction string into sentences
directionsArray = sent_tokenize(directionsString)
directions = []
for i in range(0, len(directionsArray)):
direction = {}
direction["step"] = i
direction["direction"] = directionsArray[i]
directions.append(direction)
#
# get footnotes
#
footnotes = []
if footnotesSection:
for footnote in footnotesSection.find_all("li"):
footnotes.append(footnote.text)
#
# get servings
#
			servings = servingSpan.contents[0].text if servingSpan is not None else None
			if servings and servings.isdigit():
				servings = int(servings)
			else:
				servings = 0
#
# get calories
#
			calories = calorieSpan.contents[0].text if calorieSpan is not None else None
			if calories and calories.isdigit():
				calories = int(calories)
			else:
				calories = 0
# write ingredient to JSON file
jsonFile.write(json.dumps({"id": recipeId,
"name": title,
"ingredients": ingredients,
"directions": directions,
"footnotes": footnotes,
"labels": allLabels,
"servings": servings,
"calories": calories}))
jsonFile.write("\n")
print(recipeId)
# write data to files every 10 recipes
if recipeId % 10 == 0:
unlabeledRecipeFile = open("unlabeledRecipes.txt", "w")
unlabeledRecipeFile.truncate()
for string in sorted(unlabeledRecipes):
unlabeledRecipeFile.write("{0}\n".format(string))
unlabeledRecipeFile.close()
unlabeledIngredientsFile = open("unlabeledIngredients.txt", "w")
unlabeledIngredientsFile.truncate()
for string in sorted(unlabeledIngredients):
unlabeledIngredientsFile.write("{0}\n".format(string))
unlabeledIngredientsFile.close()
allIngredientsFile = open("allIngredients.txt", "w")
allIngredientsFile.truncate()
for string in sorted(allIngredients):
allIngredientsFile.write("{0}\n".format(string))
allIngredientsFile.close()
print(recipeId)
jsonFile.close()
outputFile.close()
|
kbrohkahn/kevin.broh-kahn.com
|
view-recipes/parse-recipes-p2.py
|
Python
|
apache-2.0
| 33,917
|
[
"ESPResSo"
] |
a829fc5269028f5d3fb308b37ed4aa69649fa3a5c4ed5ad3cacf997d174504d4
|
class Singleton(object):
    def __new__(cls, *a, **kwarg):
        return cls
class VisitorError(Exception):
    pass
class Visitor(Singleton):
    @classmethod
    def visit(cls, node, *a):
        """Finds and calls the appropriate function visit_<NodeName>"""
        try:
            visitor = getattr(cls, 'visit_' + node.__class__.__name__)
        except AttributeError:
            raise VisitorError("(%s): visitor not found (%s)" % (cls.__name__, node.__class__.__name__))
        return visitor(node, *a)
    __call__ = visit
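# Illustrative dispatch (names are hypothetical, not part of this module):
#   class Name(object): pass
#   class MyVisitor(Visitor):
#       @classmethod
#       def visit_Name(cls, node):
#           return "saw a Name"
#   MyVisitor.visit(Name())  # -> "saw a Name"; a missing visit_X raises VisitorError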
|
Neppord/py2py
|
py2py_lib/out/visitor.py
|
Python
|
mit
| 506
|
[
"VisIt"
] |
d061c7bc1be651cc0d5e74829217ce2ac6b7e06f644e1cdb1683e8846f4096db
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import os
import pwd
import sys
from string import ascii_letters, digits
from six import string_types
from six.moves import configparser
from ansible.parsing.splitter import unquote
from ansible.errors import AnsibleOptionsError
# copied from utils, avoid circular reference fun :)
def mk_boolean(value):
if value is None:
return False
val = str(value)
if val.lower() in [ "true", "t", "y", "1", "yes" ]:
return True
else:
return False
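# illustrative behavior (not part of the original file):
#   mk_boolean("Yes") -> True; mk_boolean("0") -> False; mk_boolean(None) -> False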
def shell_expand(path):
'''
shell_expand is needed as os.path.expanduser does not work
when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE
'''
if path:
path = os.path.expanduser(os.path.expandvars(path))
return path
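# illustrative behavior (the result depends on the environment; not in the original):
#   shell_expand('~/keys') -> '/home/<user>/keys'; shell_expand(None) -> None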
def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False, isnone=False, ispath=False):
''' return a configuration variable with casting '''
value = _get_config(p, section, key, env_var, default)
if boolean:
value = mk_boolean(value)
if value:
if integer:
value = int(value)
elif floating:
value = float(value)
elif islist:
if isinstance(value, string_types):
value = [x.strip() for x in value.split(',')]
elif isnone:
if value == "None":
value = None
elif ispath:
value = shell_expand(value)
elif isinstance(value, string_types):
value = unquote(value)
return value
def _get_config(p, section, key, env_var, default):
''' helper function for get_config '''
if env_var is not None:
value = os.environ.get(env_var, None)
if value is not None:
return value
if p is not None:
try:
return p.get(section, key, raw=True)
        except (configparser.NoSectionError, configparser.NoOptionError):
            return default
return default
def load_config_file():
    ''' Load Config File order (first found is used): ENV, CWD, HOME, /etc/ansible '''
p = configparser.ConfigParser()
path0 = os.getenv("ANSIBLE_CONFIG", None)
if path0 is not None:
path0 = os.path.expanduser(path0)
if os.path.isdir(path0):
path0 += "/ansible.cfg"
path1 = os.getcwd() + "/ansible.cfg"
path2 = os.path.expanduser("~/.ansible.cfg")
path3 = "/etc/ansible/ansible.cfg"
for path in [path0, path1, path2, path3]:
if path is not None and os.path.exists(path):
try:
p.read(path)
except configparser.Error as e:
raise AnsibleOptionsError("Error reading config file: \n{0}".format(e))
return p, path
return None, ''
p, CONFIG_FILE = load_config_file()
active_user = pwd.getpwuid(os.geteuid())[0]
# check all of these extensions when looking for yaml files for things like
# group variables -- really anything we can load
YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ]
# sections in config file
DEFAULTS='defaults'
DEPRECATED_HOST_LIST = get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts', ispath=True)
# generally configurable things
DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True)
DEFAULT_HOST_LIST = get_config(p, DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', DEPRECATED_HOST_LIST, ispath=True)
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None, ispath=True)
DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', ispath=True)
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, '*')
DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, integer=True)
DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '')
DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', 'en_US.UTF-8')
DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, integer=True)
DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, integer=True)
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True)
DEFAULT_PRIVATE_KEY_FILE = get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None, ispath=True)
DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True)
DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True)
DEFAULT_VAULT_PASSWORD_FILE = get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None, ispath=True)
DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart')
DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', False, boolean=True)
DEFAULT_SFTP_BATCH_MODE = get_config(p, 'ssh_connection', 'sftp_batch_mode', 'ANSIBLE_SFTP_BATCH_MODE', True, boolean=True)
DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}')
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True)
DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
DEFAULT_PRIVATE_ROLE_VARS = get_config(p, DEFAULTS, 'private_role_vars', 'ANSIBLE_PRIVATE_ROLE_VARS', False, boolean=True)
DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None)
DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh')
DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower()
DEFAULT_LOG_PATH = get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '', ispath=True)
DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True)
DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE', ["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], islist=True)
# disclosure
DEFAULT_NO_LOG = get_config(p, DEFAULTS, 'no_log', 'ANSIBLE_NO_LOG', False, boolean=True)
DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', True, boolean=True)
# selinux
DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs', islist=True)
### PRIVILEGE ESCALATION ###
# Backwards Compat
DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, boolean=True)
DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root')
DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', None)
DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', None)
DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True)
DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True)
DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', None)
DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H')
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True)
# Become
BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': '', 'doas': 'Permission denied'} #FIXME: deal with i18n
BECOME_MISSING_STRINGS = {'sudo': 'sorry, a password is required to run sudo', 'su': '', 'pbrun': '', 'pfexec': '', 'runas': '', 'doas': 'Authorization required'} #FIXME: deal with i18n
BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas','doas']
BECOME_ALLOW_SAME_USER = get_config(p, 'privilege_escalation', 'become_allow_same_user', 'ANSIBLE_BECOME_ALLOW_SAME_USER', False, boolean=True)
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True)
DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root')
DEFAULT_BECOME_EXE = get_config(p, 'privilege_escalation', 'become_exe', 'ANSIBLE_BECOME_EXE', None)
DEFAULT_BECOME_FLAGS = get_config(p, 'privilege_escalation', 'become_flags', 'ANSIBLE_BECOME_FLAGS', None)
DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True)
# PLUGINS
DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apt, yum, pkgng, zypper, dnf", islist=True)
# paths
DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action:/usr/share/ansible/plugins/action', ispath=True)
DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache', ispath=True)
DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback', ispath=True)
DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '~/.ansible/plugins/connection:/usr/share/ansible/plugins/connection', ispath=True)
DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup', ispath=True)
DEFAULT_INVENTORY_PLUGIN_PATH = get_config(p, DEFAULTS, 'inventory_plugins', 'ANSIBLE_INVENTORY_PLUGINS', '~/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory', ispath=True)
DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars:/usr/share/ansible/plugins/vars', ispath=True)
DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter:/usr/share/ansible/plugins/filter', ispath=True)
DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS', '~/.ansible/plugins/test:/usr/share/ansible/plugins/test', ispath=True)
DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default')
# cache
CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts')
CACHE_PLUGIN_TIMEOUT = get_config(p, DEFAULTS, 'fact_caching_timeout', 'ANSIBLE_CACHE_PLUGIN_TIMEOUT', 24 * 60 * 60, integer=True)
# Display
ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, boolean=True)
ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, boolean=True)
ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, boolean=True)
DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, boolean=True)
DEFAULT_UNDEFINED_VAR_BEHAVIOR = get_config(p, DEFAULTS, 'error_on_undefined_vars', 'ANSIBLE_ERROR_ON_UNDEFINED_VARS', True, boolean=True)
HOST_KEY_CHECKING = get_config(p, DEFAULTS, 'host_key_checking', 'ANSIBLE_HOST_KEY_CHECKING', True, boolean=True)
SYSTEM_WARNINGS = get_config(p, DEFAULTS, 'system_warnings', 'ANSIBLE_SYSTEM_WARNINGS', True, boolean=True)
DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings', 'ANSIBLE_DEPRECATION_WARNINGS', True, boolean=True)
DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True)
COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', True, boolean=True)
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True)
DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', 'ANSIBLE_CALLBACK_WHITELIST', [], islist=True)
RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/', ispath=True)
DEFAULT_NULL_REPRESENTATION = get_config(p, DEFAULTS, 'null_representation', 'ANSIBLE_NULL_REPRESENTATION', None, isnone=True)
# CONNECTION RELATED
ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', '-o ControlMaster=auto -o ControlPersist=60s')
ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r")
ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True)
ANSIBLE_SSH_RETRIES = get_config(p, 'ssh_connection', 'retries', 'ANSIBLE_SSH_RETRIES', 0, integer=True)
PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True)
# obsolete -- will be formally removed
ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True)
ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True)
ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, integer=True)
ACCELERATE_CONNECT_TIMEOUT = get_config(p, 'accelerate', 'accelerate_connect_timeout', 'ACCELERATE_CONNECT_TIMEOUT', 1.0, floating=True)
ACCELERATE_DAEMON_TIMEOUT = get_config(p, 'accelerate', 'accelerate_daemon_timeout', 'ACCELERATE_DAEMON_TIMEOUT', 30, integer=True)
ACCELERATE_KEYS_DIR = get_config(p, 'accelerate', 'accelerate_keys_dir', 'ACCELERATE_KEYS_DIR', '~/.fireball.keys')
ACCELERATE_KEYS_DIR_PERMS = get_config(p, 'accelerate', 'accelerate_keys_dir_perms', 'ACCELERATE_KEYS_DIR_PERMS', '700')
ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_file_perms', 'ACCELERATE_KEYS_FILE_PERMS', '600')
ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True)
PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True)
# galaxy related
DEFAULT_GALAXY_URI = get_config(p, 'galaxy', 'server_uri', 'ANSIBLE_GALAXY_SERVER_URI', 'https://galaxy.ansible.com')
# this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated
GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', islist=True)
# characters included in auto-generated passwords
DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_"
# non-configurable things
MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script']
MODULE_NO_JSON = ['command', 'shell', 'raw']
DEFAULT_BECOME_PASS = None
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
DEFAULT_SU_PASS = None
VAULT_VERSION_MIN = 1.0
VAULT_VERSION_MAX = 1.0
MAX_FILE_SIZE_FOR_DIFF = 1*1024*1024
TREE_DIR = None
LOCALHOST = frozenset(['127.0.0.1', 'localhost', '::1'])
|
shawnsi/ansible
|
lib/ansible/constants.py
|
Python
|
gpl-3.0
| 17,940
|
[
"Galaxy"
] |
445cda414858ffae10eef746ece27acd135f56ee8e38efb421a47cdc5d38e364
|
#!/usr/bin/env python
# Python module for simulated annealing - anneal.py - v1.0 - 2 Sep 2009
#
# Copyright (c) 2009, Richard J. Wagner <wagnerr@umich.edu>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
#
# checks whether the first argument matches any string in the list, allowing for plural forms;
# returns the matched list entry, or None
#
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
This module performs simulated annealing to find a state of a system that
minimizes its energy.
An example program demonstrates simulated annealing with a traveling
salesman problem to find the shortest route to visit the twenty largest
cities in the United States.
Notes:
Matt Perry 6/24 : Changed to slicing lists instead of deepcopy-ing them.
e.g. state = prevState[:] instead of state = deepcopy(prevState)
Huge performance enhancement (~5-10x faster)
Should be identical behavior if the items in the state list are immutable.
(immutable objects include integers and strings so should be safe)
"""
# How to optimize a system with simulated annealing:
#
# 1) Define a format for describing the state of the system.
#
# 2) Define a function to calculate the energy of a state.
#
# 3) Define a function to make a random change to a state.
#
# 4) Choose a maximum temperature, minimum temperature, and number of steps.
#
# 5) Set the annealer to work with your state and functions.
#
# 6) Study the variation in energy with temperature and duration to find a
# productive annealing schedule.
#
# Or,
#
# 4) Run the automatic annealer which will attempt to choose reasonable values
# for maximum and minimum temperatures and then anneal for the allotted time.
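# A minimal sketch of steps 1-5 above (names and values are illustrative, not
# part of this module; `random` is the numpy RNG imported below):
#
#	state = [1, -2, 3, -4]                     # 1) state: a list of numbers
#	def energy(state):                         # 2) energy of a state
#		return sum(state)
#	def move(state):                           # 3) random change to a state
#		i = random.randint(0, len(state))      #    numpy randint excludes the bound
#		state[i] = -state[i]
#	annealer = Annealer(energy, move)          # 5) set the annealer to work
#	best, e = annealer.anneal(state, 10.0, 0.01, 50000, updates=10)   # 4) schedule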
import copy, math, sys, time
from numpy import random
def round_figures(x, n):
"""Returns x rounded to n significant figures."""
return round(x, int(n - math.ceil(math.log10(abs(x)))))
def time_string(seconds):
"""Returns time in seconds as a string formatted HHHH:MM:SS."""
s = int(round(seconds)) # round to nearest second
h, s = divmod(s, 3600) # get hours and remainder
m, s = divmod(s, 60) # split remainder into minutes and seconds
return '%4i:%02i:%02i' % (h, m, s)
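# illustrative values (not in the original):
#   round_figures(1234, 2) -> 1200.0
#   time_string(3661)      -> '   1:01:01'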
class Annealer:
"""Performs simulated annealing by calling functions to calculate
energy and make moves on a state. The temperature schedule for
annealing may be provided manually or estimated automatically.
"""
def __init__(self, energy, move):
self.energy = energy # function to calculate energy of a state
self.move = move # function to make a random change to a state
def anneal(self, state, Tmax, Tmin, steps, updates=0):
"""Minimizes the energy of a system by simulated annealing.
Keyword arguments:
state -- an initial arrangement of the system
Tmax -- maximum temperature (in units of energy)
Tmin -- minimum temperature (must be greater than zero)
steps -- the number of steps requested
updates -- the number of updates to print during annealing
Returns the best state and energy found."""
step = 0
start = time.time()
def update(T, E, acceptance, improvement):
"""Prints the current temperature, energy, acceptance rate,
improvement rate, elapsed time, and remaining time.
The acceptance rate indicates the percentage of moves since the last
update that were accepted by the Metropolis algorithm. It includes
moves that decreased the energy, moves that left the energy
unchanged, and moves that increased the energy yet were reached by
thermal excitation.
The improvement rate indicates the percentage of moves since the
last update that strictly decreased the energy. At high
temperatures it will include both moves that improved the overall
state and moves that simply undid previously accepted moves that
			increased the energy by thermal excitation. At low temperatures
it will tend toward zero as the moves that can decrease the energy
are exhausted and moves that would increase the energy are no longer
thermally accessible."""
elapsed = time.time() - start
if step == 0:
print ' Temperature Energy Accept Improve Elapsed Remaining'
print '%12.2f %12.2f %s ' % \
(T, E, time_string(elapsed) )
else:
remain = ( steps - step ) * ( elapsed / step )
print
print ' Temperature Energy Accept Improve Elapsed Remaining'
print '%12.2f %12.2f %7.2f%% %7.2f%% %s %s' % \
(T, E, 100.0*acceptance, 100.0*improvement,
time_string(elapsed), time_string(remain))
print
# Precompute factor for exponential cooling from Tmax to Tmin
if Tmin <= 0.0:
print 'Exponential cooling requires a minimum temperature greater than zero.'
sys.exit()
Tfactor = -math.log( float(Tmax) / Tmin )
# Note initial state
T = Tmax
E = self.energy(state)
#prevState = copy.deepcopy(state)
prevState = state[:]
prevEnergy = E
#bestState = copy.deepcopy(state)
bestState = state[:]
bestEnergy = E
trials, accepts, improves = 0, 0, 0
if updates > 0:
updateWavelength = float(steps) / updates
update(T, E, None, None)
# Attempt moves to new states
while step < steps:
step += 1
T = Tmax * math.exp( Tfactor * step / steps )
self.move(state)
E = self.energy(state)
dE = E - prevEnergy
trials += 1
if dE > 0.0 and math.exp(-dE/T) < random.random():
# Restore previous state
#state = copy.deepcopy(prevState)
state = prevState[:]
E = prevEnergy
else:
# Accept new state and compare to best state
accepts += 1
if dE < 0.0:
improves += 1
#prevState = copy.deepcopy(state)
prevState = state[:]
prevEnergy = E
if E < bestEnergy:
#bestState = copy.deepcopy(state)
bestState = state[:]
bestEnergy = E
if updates > 1:
if step // updateWavelength > (step-1) // updateWavelength:
update(T, E, float(accepts)/trials, float(improves)/trials)
trials, accepts, improves = 0, 0, 0
# Return best state and energy
return bestState, bestEnergy
def auto(self, state, minutes, steps=2000):
"""Minimizes the energy of a system by simulated annealing with
automatic selection of the temperature schedule.
Keyword arguments:
state -- an initial arrangement of the system
minutes -- time to spend annealing (after exploring temperatures)
steps -- number of steps to spend on each stage of exploration
Returns the best state and energy found."""
def run(state, T, steps):
"""Anneals a system at constant temperature and returns the state,
energy, rate of acceptance, and rate of improvement."""
E = self.energy(state)
#prevState = copy.deepcopy(state)
prevState = state[:]
prevEnergy = E
accepts, improves = 0, 0
for step in range(steps):
self.move(state)
E = self.energy(state)
dE = E - prevEnergy
if dE > 0.0 and math.exp(-dE/T) < random.random():
#state = copy.deepcopy(prevState)
state = prevState[:]
E = prevEnergy
else:
accepts += 1
if dE < 0.0:
improves += 1
#prevState = copy.deepcopy(state)
prevState = state[:]
prevEnergy = E
return state, E, float(accepts)/steps, float(improves)/steps
step = 0
start = time.time()
print 'Attempting automatic simulated anneal...'
# Find an initial guess for temperature
T = 0.0
E = self.energy(state)
while T == 0.0:
step += 1
self.move(state)
T = abs( self.energy(state) - E )
print 'Exploring temperature landscape:'
print ' Temperature Energy Accept Improve Elapsed'
def update(T, E, acceptance, improvement):
"""Prints the current temperature, energy, acceptance rate,
improvement rate, and elapsed time."""
elapsed = time.time() - start
print '%12.2f %12.2f %7.2f%% %7.2f%% %s' % \
(T, E, 100.0*acceptance, 100.0*improvement, time_string(elapsed))
# Search for Tmax - a temperature that gives 98% acceptance
state, E, acceptance, improvement = run(state, T, steps)
step += steps
while acceptance > 0.98:
T = round_figures(T/1.5, 2)
state, E, acceptance, improvement = run(state, T, steps)
step += steps
update(T, E, acceptance, improvement)
while acceptance < 0.98:
T = round_figures(T*1.5, 2)
state, E, acceptance, improvement = run(state, T, steps)
step += steps
update(T, E, acceptance, improvement)
Tmax = T
# Search for Tmin - a temperature that gives 0% improvement
while improvement > 0.0:
T = round_figures(T/1.5, 2)
state, E, acceptance, improvement = run(state, T, steps)
step += steps
update(T, E, acceptance, improvement)
Tmin = T
# Calculate anneal duration
elapsed = time.time() - start
duration = round_figures(int(60.0 * minutes * step / elapsed), 2)
# MP: Don't perform anneal, just return params
#return self.anneal(state, Tmax, Tmin, duration, 20)
return {'tmax': Tmax, 'tmin': Tmin, 'steps': duration}
if __name__ == '__main__':
"""Test annealer with a traveling salesman problem."""
# List latitude and longitude (degrees) for the twenty largest U.S. cities
cities = { 'New York City': (40.72,74.00), 'Los Angeles': (34.05,118.25),
'Chicago': (41.88,87.63), 'Houston': (29.77,95.38),
'Phoenix': (33.45,112.07), 'Philadelphia': (39.95,75.17),
'San Antonio': (29.53,98.47), 'Dallas': (32.78,96.80),
'San Diego': (32.78,117.15), 'San Jose': (37.30,121.87),
'Detroit': (42.33,83.05), 'San Francisco': (37.78,122.42),
'Jacksonville': (30.32,81.70), 'Indianapolis': (39.78,86.15),
'Austin': (30.27,97.77), 'Columbus': (39.98,82.98),
'Fort Worth': (32.75,97.33), 'Charlotte': (35.23,80.85),
'Memphis': (35.12,89.97), 'Baltimore': (39.28,76.62) }
def distance(a, b):
"""Calculates distance between two latitude-longitude coordinates."""
R = 3963 # radius of Earth (miles)
lat1, lon1 = math.radians(a[0]), math.radians(a[1])
lat2, lon2 = math.radians(b[0]), math.radians(b[1])
return math.acos( math.sin(lat1)*math.sin(lat2) +
math.cos(lat1)*math.cos(lat2)*math.cos(lon1-lon2) ) * R
def route_move(state):
"""Swaps two cities in the route."""
a = random.randint( 0, len(state)-1 )
b = random.randint( 0, len(state)-1 )
state[a], state[b] = state[b], state[a]
def route_energy(state):
"""Calculates the length of the route."""
e = 0
for i in range(len(state)):
e += distance( cities[state[i-1]], cities[state[i]] )
return e
# Start with the cities listed in random order
state = cities.keys()
random.shuffle(state)
# Minimize the distance to be traveled by simulated annealing with a
# manually chosen temperature schedule
annealer = Annealer(route_energy, route_move)
state, e = annealer.anneal(state, 10000000, 0.01, 18000*len(state), 9)
while state[0] != 'New York City':
state = state[1:] + state[:1] # rotate NYC to start
print "%i mile route:" % route_energy(state)
for city in state:
print "\t", city
# Minimize the distance to be traveled by simulated annealing with an
# automatically chosen temperature schedule
state, e = annealer.auto(state, 4)
while state[0] != 'New York City':
state = state[1:] + state[:1] # rotate NYC to start
print "%i mile route:" % route_energy(state)
for city in state:
print "\t", city
sys.exit()
|
Ecotrust/cogs-priorities
|
util/surrogate/anneal.py
|
Python
|
bsd-3-clause
| 13,776
|
[
"COLUMBUS",
"VisIt"
] |
030d7a66d6c5748695856bd37bfed9d8c950ad68f9350ed903174f17da3de0cd
|
# -*- coding: UTF-8 -*-
import numpy as np
from fgn import fgn
from numpy.polynomial.hermite_e import hermeval
class hermite( fgn ) :
"""A derived class to produce sample paths of a Hermite process of order d
with a specified fractional integration parameter (the Hurst exponent). For
the best performance N-1 should be a power of two."""
def __init__(self, N, d = 2, H = 0.5, K = 16, time = False, **kwargs ) :
        self.__t = np.empty( 0, np.float64 ) if not time else np.arange( N, dtype = np.float64 ) / ( N - 1 )
        ## It is imperative that the variance of the fGn be unity
## The hurst exponent for this process is H = 1 + d * ( Hfgn - 1 )
fgn.__init__( self, K * ( N - 1 ) + 1, H = ( H + d - 1.0 ) / d, sigma = 1.0, **kwargs )
## The downsampling parameter (K) is actually the index of the process,
## which when it tends to infinity, converges in distribution to the
## Rosenblatt process (or in general to a Hermite process).
## The non-central limit theorem:
## Z^k(t) = \frac{1}{n^\alpha}\sum_{j=1}^{\left\lfloor kt\right\rfloor} H(\xi_j)
## Converges to $Z_\frac{\alpha}{2}(t)$ -- a hermite process
        ## Increasing the downsampling factor gives a better approximation. In
        ## theory it should tend to infinity, which is a serious practical drawback.
## c.f. [Abry, Pipiras; 2005]
self.__K = K
self.__H = H
## Define the order of the Hermite polynomial
        self.__coef = np.zeros( d + 1, np.float64 )
self.__coef[ d ] = 1
def __call__( self ) :
## Generate values of a hermite polynomial of the given order at the values
## of a fractional Gaussian Noise with the specified hurst index.
increments = hermeval( fgn.__call__( self ), self.__coef )
## The renorm-group transformation, without the renormalisation by the n^{-H}
increments = np.cumsum( increments )[ self.__K-1::self.__K ] ## / ( self.__K ** self.__H )
return self.__t, np.concatenate( ( [ 0 ], increments / np.max( np.abs( increments ) ) ) )
## Use explicit initialization
def initialize( self ) :
super( hermite, self ).initialize( )
def __del__( self ):
super( hermite, self ).__del__( )
def reset( self ):
super( hermite, self ).reset( )
def set_rnd( self, numpy_random ) :
super( hermite, self ).set_rnd( numpy_random )
self.reset( )
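## Hedged usage sketch, kept commented out (assumes the fgn base class from
## this repository is importable and matches the constructor used above):
## gen = hermite( 2**10 + 1, d = 2, H = 0.7, K = 16, time = True )
## gen.initialize( )
## t, path = gen( ) ## normalized sample path of a Hermite (d=2: Rosenblatt) process
## gen.reset( ) ## draw an independent path with the same settings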
|
ivannz/study_notes
|
year_14_15/course_project/code/project/hermite.py
|
Python
|
mit
| 2,253
|
[
"Gaussian"
] |
81a4fd1480cf103c866b9deef99c3242c66ef109e70e74af1064c9b655df99d8
|
# -*- coding: utf-8 -*-
"""
MolVS - Molecule Validation and Standardization
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MolVS is a python tool built on top of RDKit that performs validation and standardization of chemical structures.
Note that the C++ reimplementation of this is available in the module rdkit.Chem.MolStandardize.rdMolStandardize
:copyright: (c) 2016 by Matt Swain.
:license: MIT, see LICENSE file for more details.
"""
import logging
from rdkit import Chem
from rdkit.Chem.MolStandardize import rdMolStandardize
from .standardize import Standardizer, standardize_smiles, enumerate_tautomers_smiles, canonicalize_tautomer_smiles
from .validate import Validator, validate_smiles
from .errors import MolVSError, StandardizeError, ValidateError
__title__ = 'MolVS'
__version__ = '0.1.1'
__author__ = 'Matt Swain'
__email__ = 'm.swain@me.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016 Matt Swain'
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
def ReorderTautomers(molecule):
"""Returns the list of the molecule's tautomers
so that the canonical one as determined by the canonical
scoring system in TautomerCanonicalizer appears first.
:param molecule: An RDKit Molecule object.
:return: A list of Molecule objects.
"""
enumerator = rdMolStandardize.TautomerEnumerator()
canon = enumerator.Canonicalize(molecule)
csmi = Chem.MolToSmiles(canon)
res = [canon]
tauts = enumerator.Enumerate(molecule)
smis = [Chem.MolToSmiles(x) for x in tauts]
stpl = sorted((x, y) for x, y in zip(smis, tauts) if x != csmi)
res += [y for x, y in stpl]
return res
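# Hedged usage sketch, kept commented out; the SMILES below is just an example
# tautomer-rich input, any RDKit molecule works:
# mol = Chem.MolFromSmiles('Oc1ccccn1')  # 2-hydroxypyridine / 2-pyridone pair
# ordered = ReorderTautomers(mol)
# print(Chem.MolToSmiles(ordered[0]))  # the canonical tautomer comes first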
|
bp-kelley/rdkit
|
rdkit/Chem/MolStandardize/__init__.py
|
Python
|
bsd-3-clause
| 1,667
|
[
"RDKit"
] |
9ba3d0b4891b55752af7f43fd422f070c736520283cd1cc1f0cb4386d354c711
|
'''
Central/Satellite galaxy model without assembly bias
'''
import numpy as np
import os.path as path
from halo_utils import load_project_halocat
from Corrfunc.theory.wp import wp
from halotools.sim_manager import CachedHaloCatalog
from halotools.empirical_models import HodModelFactory
from halotools.empirical_models import TrivialPhaseSpace, AssembiasZheng07Cens
from halotools.empirical_models import NFWPhaseSpace, AssembiasZheng07Sats
from halotools.empirical_models import PrebuiltHodModelFactory
from halotools.empirical_models.factories.mock_helpers import three_dim_pos_bundle
from halotools.mock_observables.catalog_analysis_helpers import return_xyz_formatted_array
from halotools.empirical_models import enforce_periodicity_of_box
def single_model(Mr):
model = PrebuiltHodModelFactory("zheng07" , threshold = -1.*Mr)
return model
class MCMC_model(object):
def __init__(self, Mr, box, halocat):
self.Mr = Mr
self.model = single_model(Mr)
#self.halocat = load_project_halocat(box)
self.halocat = halocat
self.boxsize = self.halocat.Lbox[0]
self.nthreads = 1
self.pimax = 40.0
self.binfile = path.join(path.dirname(path.abspath(__file__)),
"../", "bin")
self.rbins = np.loadtxt("rbins.dat")
self.autocorr = 1
def __call__(self, theta, prior_range):
return self._sum_stat(theta, prior_range=prior_range)
def _sum_stat(self, theta, prior_range=None):
self.model.param_dict['logM0'] = theta[0]
self.model.param_dict['sigma_logM'] = theta[1]
self.model.param_dict['logMmin'] = theta[2]
self.model.param_dict['alpha'] = theta[3]
self.model.param_dict['logM1'] = theta[4]
self.model.populate_mock(self.halocat)
x = self.model.mock.galaxy_table['x']
y = self.model.mock.galaxy_table['y']
z = self.model.mock.galaxy_table['z']
vz = self.model.mock.galaxy_table['vz']
# applying RSD
pos = return_xyz_formatted_array(x, y, z, velocity = vz, velocity_distortion_dimension = 'z')
# enforcing PBC
pos = enforce_periodicity_of_box(pos, self.boxsize)
pos = pos.astype(np.float32)
#print pos
x, y, z = pos[:,0], pos[:,1], pos[:,2]
wp_result = wp(self.boxsize, self.pimax,
self.nthreads, self.rbins,
x, y, z)
wp_result = np.array([wp_bin[3] for wp_bin in wp_result])
nbar = 1.*len(pos)/(self.boxsize)**3.
return nbar , wp_result
def edge(boxsize, index , nsub):
    '''returns the lower-corner coordinates (xi, yi, zi) of the
    sub-box with the given index on an nsub x nsub x nsub grid
    '''
    subbox_size = 1.*boxsize / nsub
    zi = (index // (nsub**2)) * subbox_size
    i2 = index % (nsub**2)
    yi = (i2 // nsub) * subbox_size
    i3 = i2 % nsub
    xi = i3 * subbox_size
    return xi , yi , zi
def mask_positions(pos , boxsize, subvol_index , nsub):
    '''masks the positions of galaxies in the
    model to compute the jackknife covariance'''
    subbox_size = 1.*boxsize / nsub
    xi , yi , zi = edge(boxsize, subvol_index, nsub)
    submask = np.where((xi < pos[:, 0]) &
                       (pos[:, 0] < xi + subbox_size) &
                       (yi < pos[:, 1]) &
                       (pos[:, 1] < yi + subbox_size) &
                       (zi < pos[:, 2]) &
                       (pos[:, 2] < zi + subbox_size))
#mask = np.where(np.arange(len(pos))!=submask)
return pos[submask,:][0]
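# Hedged sketch of the intended jackknife loop, kept commented out; `model_pos`
# is a placeholder (N, 3) position array such as the `pos` built in _sum_stat.
# mask_positions selects the galaxies *inside* sub-volume `isub`; the
# commented-out mask above hints at the complementary leave-one-out selection:
# nsub = 4  # 4**3 = 64 sub-volumes
# for isub in range(nsub**3):
#     sub_pos = mask_positions(model_pos, boxsize, isub, nsub)
#     # ... recompute nbar/wp on each sub-volume to build the covariance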
|
mjvakili/gambly
|
code/hod.py
|
Python
|
mit
| 3,537
|
[
"Galaxy"
] |
be0c20bfa9329632a27d27ca3aa05aebace7f5b22038059fcd4064c385c93cca
|
# $HeadURL: $
''' TransferCommand module
'''
from datetime import datetime, timedelta
from DIRAC import S_OK, S_ERROR
from DIRAC.AccountingSystem.Client.ReportsClient import ReportsClient
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
from DIRAC.ResourceStatusSystem.Command.Command import Command
from DIRAC.ResourceStatusSystem.Utilities import CSHelpers
__RCSID__ = '$Id: $'
class TransferCommand( Command ):
'''
Transfer "master" Command
'''
def __init__( self, args = None, clients = None ):
super( TransferCommand, self ).__init__( args, clients )
if 'ReportsClient' in self.apis:
self.rClient = self.apis[ 'ReportsClient' ]
else:
self.rClient = ReportsClient()
if 'ReportGenerator' in self.apis:
self.rgClient = self.apis[ 'ReportGenerator' ]
else:
self.rgClient = RPCClient( 'Accounting/ReportGenerator' )
self.rClient.rpcClient = self.rgClient
if 'ResourceManagementClient' in self.apis:
self.rmClient = self.apis[ 'ResourceManagementClient' ]
else:
self.rmClient = ResourceManagementClient()
def _storeCommand( self, results ):
'''
Stores the results of doNew method on the database.
'''
for result in results:
resQuery = self.rmClient.addOrModifyTransferCache( result[ 'SourceName' ],
result[ 'DestinationName' ],
result[ 'Metric' ],
result[ 'Value' ] )
if not resQuery[ 'OK' ]:
return resQuery
return S_OK()
def _prepareCommand( self ):
'''
    TransferCommand requires four arguments:
- hours : <int>
- direction : Source | Destination
- elementName : <str>
- metric : Quality | FailedTransfers
GGUSTickets are associated with gocDB names, so we have to transform the
diracSiteName into a gocSiteName.
'''
if 'hours' not in self.args:
return S_ERROR( 'Number of hours not specified' )
hours = self.args[ 'hours' ]
if 'direction' not in self.args:
return S_ERROR( 'direction is missing' )
direction = self.args[ 'direction' ]
if direction not in [ 'Source', 'Destination' ]:
return S_ERROR( 'direction is not Source nor Destination' )
if 'name' not in self.args:
return S_ERROR( '"name" is missing' )
name = self.args[ 'name' ]
if 'metric' not in self.args:
return S_ERROR( 'metric is missing' )
metric = self.args[ 'metric' ]
if metric not in [ 'Quality', 'FailedTransfers' ]:
return S_ERROR( 'metric is not Quality nor FailedTransfers' )
return S_OK( ( hours, name, direction, metric ) )
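  # Hedged example of a valid self.args payload for _prepareCommand above
  # ( the values are illustrative only ):
  # self.args = { 'hours' : 2, 'direction' : 'Source',
  #               'name' : 'SomeSiteOrSE', 'metric' : 'Quality' }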
def doNew( self, masterParams = None ):
'''
Gets the parameters to run, either from the master method or from its
own arguments.
    For every elementName ( bulk queries cannot be processed ) it contacts the
    accounting client. It returns dictionaries like { 'X -> Y' : { id : 100%.. } }
    The results are stored in the cache table and then a mean value is returned.
'''
if masterParams is not None:
hours, name, direction, metric = masterParams
else:
params = self._prepareCommand()
if not params[ 'OK' ]:
return params
hours, name, direction, metric = params[ 'Value' ]
toD = datetime.utcnow()
fromD = toD - timedelta( hours = hours )
# dictionary with conditions for the accounting
transferDict = {
'OperationType' : 'putAndRegister',
direction : name
}
if metric == 'FailedTransfers':
transferDict[ 'FinalStatus' ] = [ 'Failed' ]
transferResults = self.rClient.getReport( 'DataOperation', metric, fromD,
toD, transferDict, 'Channel' )
if not transferResults[ 'OK' ]:
return transferResults
transferResults = transferResults[ 'Value' ]
    if 'data' not in transferResults:
return S_ERROR( 'Missing data key' )
transferResults = transferResults[ 'data' ]
uniformResult = []
for channel, elementDict in transferResults.items():
try:
source, destination = channel.split( ' -> ' )
except ValueError:
continue
channelDict = {}
channelDict[ 'SourceName' ] = source
channelDict[ 'DestinationName' ] = destination
channelDict[ 'Metric' ] = metric
channelDict[ 'Value' ] = sum( elementDict.values() ) / len( elementDict.values() )
uniformResult.append( channelDict )
storeRes = self._storeCommand( uniformResult )
if not storeRes[ 'OK' ]:
return storeRes
# Compute mean of all transfer channels
value = 0
for channelDict in uniformResult:
value += channelDict[ 'Value' ]
if uniformResult:
value = float( value ) / len( uniformResult )
else:
value = None
return S_OK( { 'Mean' : value, 'Name' : name } )
def doCache( self ):
'''
Method that reads the cache table and tries to read from it. It will
return a list of dictionaries if there are results.
'''
params = self._prepareCommand()
if not params[ 'OK' ]:
return params
_hours, name, direction, metric = params[ 'Value' ]
sourceName, destinationName = None, None
if direction == 'Source':
sourceName = name
if direction == 'Destination':
destinationName = name
result = self.rmClient.selectTransferCache( sourceName, destinationName, metric )
if not result[ 'OK' ]:
return result
result = [ dict( zip( result[ 'Columns' ], res ) ) for res in result[ 'Value' ] ]
# Compute mean of all transfer channels
value = 0
for channelDict in result:
value += channelDict[ 'Value' ]
if result:
value = float( value ) / len( result )
else:
value = None
return S_OK( { 'Mean' : value, 'Name' : name } )
def doMaster( self ):
'''
    Master method, which looks a little bit like spaghetti code, sorry !
- It gets all Sites.
- It gets all StorageElements
As there is no bulk query, it compares with what we have on the database.
It queries a portion of them.
'''
sites = CSHelpers.getSites()
if not sites[ 'OK' ]:
return sites
sites = sites[ 'Value' ]
ses = CSHelpers.getStorageElements()
if not ses[ 'OK' ]:
return ses
ses = ses[ 'Value' ]
elementNames = sites + ses
# sourceQuery = self.rmClient.selectTransferCache( meta = { 'columns' : [ 'SourceName' ] } )
# if not sourceQuery[ 'OK' ]:
# return sourceQuery
# sourceQuery = [ element[0] for element in sourceQuery[ 'Value' ] ]
#
# sourceElementsToQuery = list( set( elementNames ).difference( set( sourceQuery ) ) )
self.log.info( 'Processing %s' % ', '.join( elementNames ) )
for metric in [ 'Quality', 'FailedTransfers' ]:
for direction in [ 'Source', 'Destination' ]:
# 2 hours of window
result = self.doNew( ( 2, elementNames, direction, metric ) )
if not result[ 'OK' ]:
self.metrics[ 'failed' ].append( result )
return S_OK( self.metrics )
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
Andrew-McNab-UK/DIRAC
|
ResourceStatusSystem/Command/TransferCommand.py
|
Python
|
gpl-3.0
| 7,715
|
[
"DIRAC"
] |
19350eb85c87ea188dfde76ff27dc40936dccf34a3e719f35429e621586f76c9
|
#!/bin/env python
"""
Create a production to replicate files from some storage elements to others
:since: May 31, 2018
:author: A. Sailer
"""
from DIRAC.Core.Base.Script import Script
@Script()
def main():
"""reads command line parameters, makes check and creates replication transformation"""
from DIRAC import gLogger, exit as dexit
from DIRAC.TransformationSystem.Utilities.ReplicationCLIParameters import Params
clip = Params()
clip.registerSwitches(Script)
Script.parseCommandLine()
from DIRAC.TransformationSystem.Utilities.ReplicationTransformation import createDataTransformation
if not clip.checkSettings(Script)["OK"]:
gLogger.error("ERROR: Missing settings")
dexit(1)
for metaValue in clip.metaValues:
resCreate = createDataTransformation(
flavour=clip.flavour,
targetSE=clip.targetSE,
sourceSE=clip.sourceSE,
metaKey=clip.metaKey,
metaValue=metaValue,
extraData=clip.extraData,
extraname=clip.extraname,
groupSize=clip.groupSize,
tGroup=clip.groupName,
plugin=clip.plugin,
enable=clip.enable,
)
if not resCreate["OK"]:
gLogger.error("Failed to create Transformation", resCreate["Message"])
dexit(1)
dexit(0)
if __name__ == "__main__":
main()
|
DIRACGrid/DIRAC
|
src/DIRAC/TransformationSystem/scripts/dirac_transformation_replication.py
|
Python
|
gpl-3.0
| 1,404
|
[
"DIRAC"
] |
3b47d4011e4f83e1eed23255f87eb58c17b3ed1a83e233e869e9eab8f0530794
|
#!/usr/bin/env python
# Copyright (C) 2007 CAMP
# Please see the accompanying LICENSE file for further information.
from distutils.core import setup, Command
from distutils.command.build_py import build_py as _build_py
from glob import glob
from os.path import join
import os
import sys
import shutil
long_description = """\
ASE is a python package providing an open source Atomic Simulation
Environment in the python scripting language."""
if sys.version_info < (2, 4, 0, 'final', 0):
raise SystemExit, 'Python 2.4 or later is required!'
packages = ['ase',
'ase.asec',
'ase.cluster',
'ase.cluster.data',
'ase.io',
'ase.md',
'ase.dft',
'ase.gui',
'ase.gui.languages',
'ase.data',
'ase.test',
'ase.test.abinit',
'ase.test.aims',
'ase.test.castep',
'ase.test.cmr',
'ase.test.elk',
'ase.test.fio',
'ase.test.fleur',
'ase.test.gaussian',
'ase.test.jacapo',
'ase.test.mopac',
'ase.test.nwchem',
'ase.test.tasks',
'ase.test.vasp',
'ase.tasks',
'ase.utils',
'ase.lattice',
'ase.lattice.spacegroup',
'ase.examples',
'ase.optimize',
'ase.optimize.test',
'ase.vibrations',
'ase.visualize',
'ase.visualize.vtk',
'ase.transport',
'ase.calculators',
'ase.calculators.jacapo']
package_dir={'ase': 'ase'}
package_data={'ase': ['lattice/spacegroup/spacegroup.dat']}
class test(Command):
description = 'build and run test suite; exit code is number of failures'
user_options = [('calculators=', 'c',
'Comma separated list of calculators to test')]
def __init__(self, dist):
Command.__init__(self, dist)
self.sub_commands = ['build']
def initialize_options(self):
self.calculators = None
def finalize_options(self):
pass
def run(self):
self.run_command('build')
buildcmd = self.get_finalized_command('build')
sys.path.insert(0, buildcmd.build_lib)
if self.calculators is not None:
calculators = self.calculators.split(',')
elif 'ASE_CALCULATORS' in os.environ:
calculators = os.environ['ASE_CALCULATORS'].split(',')
else:
calculators = []
from ase.test import test as _test
testdir = '%s/testase-tempfiles' % buildcmd.build_base
origcwd = os.getcwd()
if os.path.isdir(testdir):
shutil.rmtree(testdir) # clean before running tests!
os.mkdir(testdir)
os.chdir(testdir)
try:
results = _test(2, calculators, display=False)
if results.failures or results.errors:
print >> sys.stderr, 'Test suite failed'
raise SystemExit(len(results.failures) + len(results.errors))
finally:
os.chdir(origcwd)
class build_py(_build_py):
"""Custom distutils command to build translations."""
def __init__(self, *args, **kwargs):
_build_py.__init__(self, *args, **kwargs)
        # Keep a list of the generated .mo files so get_outputs() can report
        # them; bdist_rpm requires the complete list of installed files.
self.mofiles = []
def run(self):
"""Compile translation files (requires gettext)."""
_build_py.run(self)
msgfmt = 'msgfmt'
status = os.system(msgfmt + ' -V')
if status == 0:
for pofile in glob('ase/gui/po/*/LC_MESSAGES/ag.po'):
dirname = join(self.build_lib, os.path.dirname(pofile))
if not os.path.isdir(dirname):
os.makedirs(dirname)
mofile = join(dirname, 'ag.mo')
status = os.system('%s -cv %s --output-file=%s 2>&1' %
(msgfmt, pofile, mofile))
assert status == 0, 'msgfmt failed!'
self.mofiles.append(mofile)
def get_outputs(self, *args, **kwargs):
return _build_py.get_outputs(self, *args, **kwargs) + self.mofiles
# Get the current version number:
execfile('ase/svnversion_io.py') # write ase/svnversion.py and get svnversion
execfile('ase/version.py') # get version_base
if svnversion and os.name not in ['ce', 'nt']: # MSI accepts only version X.X.X
version = version_base + '.' + svnversion
else:
version = version_base
scripts = ['tools/ag', 'tools/ase', 'tools/ASE2ase', 'tools/testase',
'tools/asec']
# provide bat executables in the tarball and always for Win
if 'sdist' in sys.argv or os.name in ['ce', 'nt']:
for s in scripts[:]:
scripts.append(s + '.bat')
setup(name='python-ase',
version=version,
description='Atomic Simulation Environment',
url='https://wiki.fysik.dtu.dk/ase',
maintainer='CAMd',
maintainer_email='camd@fysik.dtu.dk',
license='LGPLv2.1+',
platforms=['linux'],
packages=packages,
package_dir=package_dir,
package_data=package_data,
scripts=scripts,
long_description=long_description,
cmdclass={'build_py': build_py,
'test': test})
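# Hedged usage notes, kept as comments; the `test` command and its
# --calculators switch are defined above, the calculator names are
# illustrative:
#   python setup.py build
#   python setup.py test --calculators=abinit,vasp
#   ASE_CALCULATORS=nwchem python setup.py test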
|
conwayje/ase-python
|
setup.py
|
Python
|
gpl-2.0
| 5,378
|
[
"ABINIT",
"ASE",
"CASTEP",
"Elk",
"FLEUR",
"Gaussian",
"MOPAC",
"NWChem",
"VASP",
"VTK"
] |
ed4cc6e0fa66c362f245d885c1839f9fa547cc86fe4c817cda2be3ebb5452ff6
|
from setuptools import setup,find_packages
def readme():
with open('README.rst') as f:
return f.read()
setup(name='barnaba',
description='analyze nucleic acid 3D structures and MD trajectories',
long_description=readme(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
url='https://github.com/srnas/barnaba',
author='Sandro Bottaro',
author_email='sandro.bottaro@gmail.com',
use_scm_version = True,
setup_requires = ['setuptools_scm','setuptools_scm_git_archive'],
packages=find_packages(),
python_requires='>=2.6',
install_requires=['numpy','scipy','mdtraj','future'],
test_suite='nose.collector',
scripts=['bin/barnaba'],
zip_safe=False)
|
srnas/barnaba
|
setup.py
|
Python
|
gpl-3.0
| 1,380
|
[
"MDTraj"
] |
3cfeb88dd95d72238f22403c575b2f38d0fe568bfee6de8551bd3d7eafa9fc40
|
__author__ = 'chrispaulson'
import numpy as np
import scipy
from scipy.optimize import minimize
from .matrixops import matrixops
import copy
from matplotlib import pyplot as plt
import pylab
from mpl_toolkits.mplot3d import axes3d
from pyKriging import samplingplan
import inspyred
from random import Random
from time import time
from inspyred import ec
import math as m
class kriging(matrixops):
def __init__(self, X, y, testfunction=None, name='', testPoints=None, **kwargs):
self.X = copy.deepcopy(X)
self.y = copy.deepcopy(y)
self.testfunction = testfunction
self.name = name
self.n = self.X.shape[0]
self.k = self.X.shape[1]
self.theta = np.ones(self.k)
self.pl = np.ones(self.k) * 2.
self.sigma = 0
self.normRange = []
self.ynormRange = []
self.normalizeData()
self.sp = samplingplan.samplingplan(self.k)
#self.updateData()
#self.updateModel()
self.thetamin = 1e-5
self.thetamax = 100
self.pmin = 1
self.pmax = 2
# Setup functions for tracking history
self.history = {}
self.history['points'] = []
self.history['neglnlike'] = []
self.history['theta'] = []
self.history['p'] = []
self.history['rsquared'] = [0]
self.history['adjrsquared'] = [0]
self.history['chisquared'] = [1000]
self.history['lastPredictedPoints'] = []
self.history['avgMSE'] = []
if testPoints:
self.history['pointData'] = []
self.testPoints = self.sp.rlh(testPoints)
for point in self.testPoints:
testPrimitive = {}
testPrimitive['point'] = point
if self.testfunction:
testPrimitive['actual'] = self.testfunction(point)[0]
else:
testPrimitive['actual'] = None
testPrimitive['predicted'] = []
testPrimitive['mse'] = []
testPrimitive['gradient'] = []
self.history['pointData'].append(testPrimitive)
else:
self.history['pointData'] = None
matrixops.__init__(self)
def normX(self, X):
'''
:param X: An array of points (self.k long) in physical world units
:return X: An array normed to our model range of [0,1] for each dimension
'''
X = copy.deepcopy(X)
if type(X) is np.float64:
# print self.normRange
return np.array( (X - self.normRange[0][0]) / float(self.normRange[0][1] - self.normRange[0][0]) )
else:
for i in range(self.k):
X[i] = (X[i] - self.normRange[i][0]) / float(self.normRange[i][1] - self.normRange[i][0])
return X
def inversenormX(self, X):
'''
:param X: An array of points (self.k long) in normalized model units
:return X : An array of real world units
'''
X = copy.deepcopy(X)
for i in range(self.k):
X[i] = (X[i] * float(self.normRange[i][1] - self.normRange[i][0] )) + self.normRange[i][0]
return X
def normy(self, y):
'''
:param y: An array of observed values in real-world units
:return y: A normalized array of model units in the range of [0,1]
'''
return (y - self.ynormRange[0]) / (self.ynormRange[1] - self.ynormRange[0])
def inversenormy(self, y):
'''
:param y: A normalized array of model units in the range of [0,1]
:return: An array of observed values in real-world units
'''
return (y * (self.ynormRange[1] - self.ynormRange[0])) + self.ynormRange[0]
def normalizeData(self):
'''
This function is called when the initial data in the model is set.
We find the max and min of each dimension and norm that axis to a range of [0,1]
'''
for i in range(self.k):
self.normRange.append([min(self.X[:, i]), max(self.X[:, i])])
# print self.X
for i in range(self.n):
self.X[i] = self.normX(self.X[i])
self.ynormRange.append(min(self.y))
self.ynormRange.append(max(self.y))
for i in range(self.n):
self.y[i] = self.normy(self.y[i])
def addPoint(self, newX, newy, norm=True):
'''
This add points to the model.
:param newX: A new design vector point
:param newy: The new observed value at the point of X
:param norm: A boolean value. For adding real-world values, this should be True. If doing something in model units, this should be False
'''
if norm:
newX = self.normX(newX)
newy = self.normy(newy)
self.X = np.append(self.X, [newX], axis=0)
self.y = np.append(self.y, newy)
self.n = self.X.shape[0]
self.updateData()
        while True:
            try:
                self.updateModel()
            except:
                # The new point can leave the correlation matrix ill-conditioned;
                # retrain the hyperparameters and try again.
                self.train()
            else:
                break
def update(self, values):
'''
The function sets new hyperparameters
:param values: the new theta and p values to set for the model
'''
for i in range(self.k):
self.theta[i] = values[i]
for i in range(self.k):
self.pl[i] = values[i + self.k]
self.updateModel()
def updateModel(self):
'''
        The function rebuilds the Psi matrix to reflect new data or a change in hyperparameters
'''
try:
self.updatePsi()
except Exception as err:
#pass
# print Exception, err
raise Exception("bad params")
def predict(self, X):
'''
This function returns the prediction of the model at the real world coordinates of X
:param X: Design variable to evaluate
:return: Returns the 'real world' predicted value
'''
X = copy.deepcopy(X)
X = self.normX(X)
return self.inversenormy(self.predict_normalized(X))
def predict_var(self, X):
'''
The function returns the model's predicted 'error' at this point in the model.
:param X: new design variable to evaluate, in physical world units
:return: Returns the posterior variance (model error prediction)
'''
X = copy.deepcopy(X)
X = self.normX(X)
# print X, self.predict_normalized(X), self.inversenormy(self.predict_normalized(X))
return self.predicterr_normalized(X)
def expimp(self, x):
'''
Returns the expected improvement at the design vector X in the model
:param x: A real world coordinates design vector
:return EI: The expected improvement value at the point x in the model
'''
S = self.predicterr_normalized(x)
y_min = np.min(self.y)
if S <= 0.:
EI = 0.
elif S > 0.:
EI_one = ((y_min - self.predict_normalized(x)) * (0.5 + 0.5*m.erf((
1./np.sqrt(2.))*((y_min - self.predict_normalized(x)) /
S))))
EI_two = ((S * (1. / np.sqrt(2. * np.pi))) * (np.exp(-(1./2.) *
((y_min - self.predict_normalized(x))**2. / S**2.))))
EI = EI_one + EI_two
return EI
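    # For reference, a hedged reading of expimp() above: it evaluates the
    # standard expected-improvement criterion (cf. Jones et al., 1998),
    #   EI(x) = (y_min - yhat(x)) * Phi(z) + S(x) * phi(z),
    #   z = (y_min - yhat(x)) / S(x),
    # where Phi/phi are the standard normal CDF/PDF: EI_one is the Phi term
    # (written via erf) and EI_two is the S(x) * phi(z) term.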
def weightedexpimp(self, x, w):
"""weighted expected improvement (Sobester et al. 2005)"""
S = self.predicterr_normalized(x)
y_min = np.min(self.y)
if S <= 0.:
EI = 0.
elif S > 0.:
EI_one = w*((y_min - self.predict_normalized(x)) * (0.5 +
0.5*m.erf((1./np.sqrt(2.))*((y_min -
self.predict_normalized(x)) / S))))
EI_two = ((1. - w)*(S * (1. / np.sqrt(2. * np.pi))) *
(np.exp(-(1./2.) * ((y_min -
self.predict_normalized(x))**2. / S**2.))))
EI = EI_one + EI_two
return EI
def infill_objective_mse(self,candidates, args):
'''
        This is the infill objective for a series of candidates from the infill
        global search, evaluated on the model's predicted MSE.
:param candidates: An array of candidate design vectors from the infill global optimizer
:param args: args from the optimizer
:return fitness: An array of evaluated MSE values for the candidate population
'''
fitness = []
for entry in candidates:
fitness.append(-1 * self.predicterr_normalized(entry))
return fitness
def infill_objective_ei(self,candidates, args):
'''
The infill objective for a series of candidates from infill global search
:param candidates: An array of candidate design vectors from the infill global optimizer
:param args: args from the optimizer
:return fitness: An array of evaluated Expected Improvement values for the candidate population
'''
fitness = []
for entry in candidates:
fitness.append(-1 * self.expimp(entry))
return fitness
def infill(self, points, method='error', addPoint=True):
'''
The function identifies where new points are needed in the model.
:param points: The number of points to add to the model. Multiple points are added via imputation.
:param method: Two choices: EI (for expected improvement) or Error (for general error reduction)
:return: An array of coordinates identified by the infill
'''
        # We'll be making non-permanent modifications to self.X and self.y here, so let's make a copy just in case
initX = np.copy(self.X)
inity = np.copy(self.y)
# This array will hold the new values we add
returnValues = np.zeros([points, self.k], dtype=float)
for i in range(points):
rand = Random()
rand.seed(int(time()))
ea = inspyred.swarm.PSO(Random())
ea.terminator = self.no_improvement_termination
ea.topology = inspyred.swarm.topologies.ring_topology
if method=='ei':
evaluator = self.infill_objective_ei
else:
evaluator = self.infill_objective_mse
final_pop = ea.evolve(generator=self.generate_population,
evaluator=evaluator,
pop_size=155,
maximize=False,
bounder=ec.Bounder([0] * self.k, [1] * self.k),
max_evaluations=20000,
neighborhood_size=30,
num_inputs=self.k)
final_pop.sort(reverse=True)
newpoint = final_pop[0].candidate
returnValues[i][:] = self.inversenormX(newpoint)
if addPoint:
self.addPoint(returnValues[i], self.predict(returnValues[i]), norm=True)
self.X = np.copy(initX)
self.y = np.copy(inity)
self.n = len(self.X)
self.updateData()
while True:
try:
self.updateModel()
except:
self.train()
else:
break
return returnValues
def generate_population(self, random, args):
'''
Generates an initial population for any global optimization that occurs in pyKriging
        :param random: The random number generator supplied by the optimizer
:param args: Args from the optimizer, like population size
:return chromosome: The new generation for our global optimizer to use
'''
size = args.get('num_inputs', None)
bounder = args["_ec"].bounder
chromosome = []
for lo, hi in zip(bounder.lower_bound, bounder.upper_bound):
chromosome.append(random.uniform(lo, hi))
return chromosome
def no_improvement_termination(self, population, num_generations, num_evaluations, args):
"""Return True if the best fitness does not change for a number of generations of if the max number
of evaluations is exceeded.
.. Arguments:
population -- the population of Individuals
num_generations -- the number of elapsed generations
num_evaluations -- the number of candidate solution evaluations
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *max_generations* -- the number of generations allowed for no change in fitness (default 10)
"""
max_generations = args.setdefault('max_generations', 10)
previous_best = args.setdefault('previous_best', None)
max_evaluations = args.setdefault('max_evaluations', 30000)
current_best = np.around(max(population).fitness, decimals=4)
if previous_best is None or previous_best != current_best:
args['previous_best'] = current_best
args['generation_count'] = 0
            return num_evaluations >= max_evaluations
else:
if args['generation_count'] >= max_generations:
return True
else:
args['generation_count'] += 1
                return num_evaluations >= max_evaluations
def train(self, optimizer='pso'):
'''
The function trains the hyperparameters of the Kriging model.
:param optimizer: Two optimizers are implemented, a Particle Swarm Optimizer or a GA
'''
# First make sure our data is up-to-date
self.updateData()
# Establish the bounds for optimization for theta and p values
lowerBound = [self.thetamin] * self.k + [self.pmin] * self.k
upperBound = [self.thetamax] * self.k + [self.pmax] * self.k
#Create a random seed for our optimizer to use
rand = Random()
rand.seed(int(time()))
# If the optimizer option is PSO, run the PSO algorithm
if optimizer == 'pso':
ea = inspyred.swarm.PSO(Random())
ea.terminator = self.no_improvement_termination
ea.topology = inspyred.swarm.topologies.ring_topology
# ea.observer = inspyred.ec.observers.stats_observer
final_pop = ea.evolve(generator=self.generate_population,
evaluator=self.fittingObjective,
pop_size=300,
maximize=False,
bounder=ec.Bounder(lowerBound, upperBound),
max_evaluations=30000,
neighborhood_size=20,
num_inputs=self.k)
# Sort and print the best individual, who will be at index 0.
final_pop.sort(reverse=True)
# If not using a PSO search, run the GA
elif optimizer == 'ga':
ea = inspyred.ec.GA(Random())
ea.terminator = self.no_improvement_termination
final_pop = ea.evolve(generator=self.generate_population,
evaluator=self.fittingObjective,
pop_size=300,
maximize=False,
bounder=ec.Bounder(lowerBound, upperBound),
max_evaluations=30000,
num_elites=10,
mutation_rate=.05)
# This code updates the model with the hyperparameters found in the global search
for entry in final_pop:
newValues = entry.candidate
preLOP = copy.deepcopy(newValues)
locOP_bounds = []
for i in range(self.k):
locOP_bounds.append( [self.thetamin, self.thetamax] )
for i in range(self.k):
locOP_bounds.append( [self.pmin, self.pmax] )
            # Let's quickly double check that we're at the optimal value by running a quick local optimization
lopResults = minimize(self.fittingObjective_local, newValues, method='SLSQP', bounds=locOP_bounds, options={'disp': False})
newValues = lopResults['x']
# Finally, set our new theta and pl values and update the model again
for i in range(self.k):
self.theta[i] = newValues[i]
for i in range(self.k):
self.pl[i] = newValues[i + self.k]
try:
self.updateModel()
except:
pass
else:
break
def fittingObjective(self,candidates, args):
'''
The objective for a series of candidates from the hyperparameter global search.
:param candidates: An array of candidate design vectors from the global optimizer
:param args: args from the optimizer
:return fitness: An array of evaluated NegLNLike values for the candidate population
'''
fitness = []
for entry in candidates:
f=10000
for i in range(self.k):
self.theta[i] = entry[i]
for i in range(self.k):
self.pl[i] = entry[i + self.k]
try:
self.updateModel()
self.neglikelihood()
f = self.NegLnLike
except Exception as e:
# print 'Failure in NegLNLike, failing the run'
# print Exception, e
f = 10000
fitness.append(f)
return fitness
def fittingObjective_local(self,entry):
'''
:param entry: The same objective function as the global optimizer, but formatted for the local optimizer
:return: The fitness of the surface at the hyperparameters specified in entry
'''
f=10000
for i in range(self.k):
self.theta[i] = entry[i]
for i in range(self.k):
self.pl[i] = entry[i + self.k]
try:
self.updateModel()
self.neglikelihood()
f = self.NegLnLike
except Exception as e:
# print 'Failure in NegLNLike, failing the run'
# print Exception, e
f = 10000
return f
def plot(self, labels=False, show=True):
'''
This function plots 2D and 3D models
:param labels:
:param show: If True, the plots are displayed at the end of this call. If False, plt.show() should be called outside this function
:return:
'''
if self.k == 3:
import mayavi.mlab as mlab
predictFig = mlab.figure(figure='predict')
# errorFig = mlab.figure(figure='error')
if self.testfunction:
truthFig = mlab.figure(figure='test')
dx = 1
pts = 25j
X, Y, Z = np.mgrid[0:dx:pts, 0:dx:pts, 0:dx:pts]
scalars = np.zeros(X.shape)
errscalars = np.zeros(X.shape)
for i in range(X.shape[0]):
for j in range(X.shape[1]):
for k1 in range(X.shape[2]):
# errscalars[i][j][k1] = self.predicterr_normalized([X[i][j][k1], Y[i][j][k1], Z[i][j][k1]])
scalars[i][j][k1] = self.predict_normalized([X[i][j][k1], Y[i][j][k1], Z[i][j][k1]])
if self.testfunction:
tfscalars = np.zeros(X.shape)
for i in range(X.shape[0]):
for j in range(X.shape[1]):
for k1 in range(X.shape[2]):
tfplot = tfscalars[i][j][k1] = self.testfunction([X[i][j][k1], Y[i][j][k1], Z[i][j][k1]])
plot = mlab.contour3d(tfscalars, contours=15, transparent=True, figure=truthFig)
plot.compute_normals = False
# obj = mlab.contour3d(scalars, contours=10, transparent=True)
plot = mlab.contour3d(scalars, contours=15, transparent=True, figure=predictFig)
plot.compute_normals = False
# errplt = mlab.contour3d(errscalars, contours=15, transparent=True, figure=errorFig)
# errplt.compute_normals = False
if show:
mlab.show()
if self.k==2:
fig = pylab.figure(figsize=(8,6))
samplePoints = list(zip(*self.X))
# Create a set of data to plot
plotgrid = 61
x = np.linspace(self.normRange[0][0], self.normRange[0][1], num=plotgrid)
y = np.linspace(self.normRange[1][0], self.normRange[1][1], num=plotgrid)
# x = np.linspace(0, 1, num=plotgrid)
# y = np.linspace(0, 1, num=plotgrid)
X, Y = np.meshgrid(x, y)
# Predict based on the optimized results
zs = np.array([self.predict([x,y]) for x,y in zip(np.ravel(X), np.ravel(Y))])
Z = zs.reshape(X.shape)
# Z = (Z*(self.ynormRange[1]-self.ynormRange[0]))+self.ynormRange[0]
#Calculate errors
zse = np.array([self.predict_var([x,y]) for x,y in zip(np.ravel(X), np.ravel(Y))])
Ze = zse.reshape(X.shape)
spx = (self.X[:,0] * (self.normRange[0][1] - self.normRange[0][0])) + self.normRange[0][0]
spy = (self.X[:,1] * (self.normRange[1][1] - self.normRange[1][0])) + self.normRange[1][0]
contour_levels = 25
ax = fig.add_subplot(222)
CS = pylab.contourf(X,Y,Ze, contour_levels)
pylab.colorbar()
pylab.plot(spx, spy,'ow')
ax = fig.add_subplot(221)
if self.testfunction:
# Setup the truth function
zt = self.testfunction( np.array(list(zip(np.ravel(X), np.ravel(Y)))) )
ZT = zt.reshape(X.shape)
CS = pylab.contour(X,Y,ZT,contour_levels ,colors='k',zorder=2)
# contour_levels = np.linspace(min(zt), max(zt),50)
if self.testfunction:
contour_levels = CS.levels
delta = np.abs(contour_levels[0]-contour_levels[1])
contour_levels = np.insert(contour_levels, 0, contour_levels[0]-delta)
contour_levels = np.append(contour_levels, contour_levels[-1]+delta)
CS = plt.contourf(X,Y,Z,contour_levels,zorder=1)
pylab.plot(spx, spy,'ow', zorder=3)
pylab.colorbar()
ax = fig.add_subplot(212, projection='3d')
# fig = plt.gcf()
#ax = fig.gca(projection='3d')
ax.plot_surface(X, Y, Z, rstride=3, cstride=3, alpha=0.4)
if self.testfunction:
ax.plot_wireframe(X, Y, ZT, rstride=3, cstride=3)
if show:
pylab.show()
def saveFigure(self, name=None):
'''
Similar to plot, except that figures are saved to file
:param name: the file name of the plot image
'''
if self.k == 3:
import mayavi.mlab as mlab
mlab.options.offscreen = True
predictFig = mlab.figure(figure='predict')
mlab.clf(figure='predict')
errorFig = mlab.figure(figure='error')
mlab.clf(figure='error')
if self.testfunction:
truthFig = mlab.figure(figure='test')
mlab.clf(figure='test')
dx = 1
pts = 75j
X, Y, Z = np.mgrid[0:dx:pts, 0:dx:pts, 0:dx:pts]
scalars = np.zeros(X.shape)
errscalars = np.zeros(X.shape)
for i in range(X.shape[0]):
for j in range(X.shape[1]):
for k1 in range(X.shape[2]):
errscalars[i][j][k1] = self.predicterr_normalized([X[i][j][k1], Y[i][j][k1], Z[i][j][k1]])
scalars[i][j][k1] = self.predict_normalized([X[i][j][k1], Y[i][j][k1], Z[i][j][k1]])
if self.testfunction:
tfscalars = np.zeros(X.shape)
for i in range(X.shape[0]):
for j in range(X.shape[1]):
for k1 in range(X.shape[2]):
tfscalars[i][j][k1] = self.testfunction([X[i][j][k1], Y[i][j][k1], Z[i][j][k1]])
mlab.contour3d(tfscalars, contours=15, transparent=True, figure=truthFig, compute_normals=False)
# obj = mlab.contour3d(scalars, contours=10, transparent=True)
pred = mlab.contour3d(scalars, contours=15, transparent=True, figure=predictFig)
pred.compute_normals = False
errpred = mlab.contour3d(errscalars, contours=15, transparent=True, figure=errorFig)
errpred.compute_normals = False
mlab.savefig('%s_prediction.wrl' % name, figure=predictFig)
mlab.savefig('%s_error.wrl' % name, figure=errorFig)
if self.testfunction:
mlab.savefig('%s_actual.wrl' % name, figure=truthFig)
mlab.close(all=True)
if self.k == 2:
samplePoints = list(zip(*self.X))
# Create a set of data to plot
plotgrid = 61
x = np.linspace(0, 1, num=plotgrid)
y = np.linspace(0, 1, num=plotgrid)
X, Y = np.meshgrid(x, y)
# Predict based on the optimized results
zs = np.array([self.predict_normalized([x, y]) for x, y in zip(np.ravel(X), np.ravel(Y))])
Z = zs.reshape(X.shape)
Z = (Z * (self.ynormRange[1] - self.ynormRange[0])) + self.ynormRange[0]
# Calculate errors
zse = np.array([self.predicterr_normalized([x, y]) for x, y in zip(np.ravel(X), np.ravel(Y))])
Ze = zse.reshape(X.shape)
if self.testfunction:
# Setup the truth function
zt = self.testfunction(np.array(
list(zip(np.ravel((X * (self.normRange[0][1] - self.normRange[0][0])) + self.normRange[0][0]),
np.ravel((Y * (self.normRange[1][1] - self.normRange[1][0])) + self.normRange[1][0])))))
ZT = zt.reshape(X.shape)
# Plot real world values
X = (X * (self.normRange[0][1] - self.normRange[0][0])) + self.normRange[0][0]
Y = (Y * (self.normRange[1][1] - self.normRange[1][0])) + self.normRange[1][0]
spx = (self.X[:, 0] * (self.normRange[0][1] - self.normRange[0][0])) + self.normRange[0][0]
spy = (self.X[:, 1] * (self.normRange[1][1] - self.normRange[1][0])) + self.normRange[1][0]
return spx, spy, X, Y, Z, Ze
# fig = plt.figure(figsize=(8, 6))
# # contour_levels = np.linspace(min(zt), max(zt),50)
# contour_levels = 15
# plt.plot(spx, spy, 'ow')
# cs = plt.colorbar()
#
# if self.testfunction:
# pass
# plt.plot(spx, spy, 'ow')
#
# cs = plt.colorbar()
# plt.plot(spx, spy, 'ow')
#
# ax = fig.add_subplot(212, projection='3d')
# ax.plot_surface(X, Y, Z, rstride=3, cstride=3, alpha=0.4)
#
# if self.testfunction:
# ax.plot_wireframe(X, Y, ZT, rstride=3, cstride=3)
# if name:
# plt.savefig(name)
# else:
# plt.savefig('pyKrigingResult.png')
def calcuatemeanMSE(self, p2s=200, points=None):
'''
This function calculates the mean MSE metric of the model by evaluating MSE at a number of points.
:param p2s: Points to Sample, the number of points to sample the mean squared error at. Ignored if the points argument is specified
:param points: an array of points to sample the model at
:return: the mean value of MSE and the standard deviation of the MSE points
'''
if points is None:
points = self.sp.rlh(p2s)
values = np.zeros(len(points))
for enu, point in enumerate(points):
values[enu] = self.predict_var(point)
return np.mean(values), np.std(values)
def snapshot(self):
'''
This function saves a 'snapshot' of the model when the function is called. This allows for a playback of the training process
'''
self.history['points'].append(self.n)
self.history['neglnlike'].append(self.NegLnLike)
self.history['theta'].append(copy.deepcopy(self.theta))
self.history['p'].append(copy.deepcopy(self.pl))
self.history['avgMSE'].append(self.calcuatemeanMSE(points=self.testPoints)[0])
currentPredictions = []
        if self.history['pointData'] is not None:
for pointprim in self.history['pointData']:
predictedPoint = self.predict(pointprim['point'])
currentPredictions.append(copy.deepcopy( predictedPoint) )
pointprim['predicted'].append( predictedPoint )
pointprim['mse'].append( self.predict_var(pointprim['point']) )
try:
pointprim['gradient'] = np.gradient( pointprim['predicted'] )
except:
pass
if self.history['lastPredictedPoints'] != []:
self.history['chisquared'].append( self.chisquared( self.history['lastPredictedPoints'], currentPredictions ) )
self.history['rsquared'].append( self.rsquared( self.history['lastPredictedPoints'], currentPredictions ) )
self.history['adjrsquared'].append( self.adjrsquares( self.history['rsquared'][-1], len( self.history['pointData'] ) ) )
self.history[ 'lastPredictedPoints' ] = copy.deepcopy(currentPredictions)
def rsquared(self,actual, observed):
return np.corrcoef(observed, actual)[0,1] ** 2
def adjrsquares(self, rsquared, obs):
return 1-(1-rsquared)*((obs-1)/(obs-self.k)) # adjusted R-square
def chisquared(self, actual, observed):
actual = np.array(actual)
observed = np.array(observed)
return np.sum( np.abs( np.power( (observed-actual) ,2)/actual ) )
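# Hedged end-to-end sketch, kept commented out. It mirrors pyKriging's
# published two-variable example; the module paths and the branin test
# function are assumptions if your install differs:
# from pyKriging.krige import kriging
# from pyKriging.samplingplan import samplingplan
# from pyKriging.testfunctions import testfunctions
# sp = samplingplan(2)
# X = sp.optimallhc(20)
# testfun = testfunctions().branin
# y = testfun(X)
# k = kriging(X, y, testfunction=testfun, name='simple')
# k.train()
# k.plot()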
|
capaulson/pyKriging
|
pyKriging/krige.py
|
Python
|
mit
| 30,061
|
[
"Mayavi"
] |
796d0a454cbfe8f92c266bea0fd611ab131016013e89a88f984dedce0aacd73d
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import linalg
from tensorflow.contrib.distributions.python.ops import mvn_linear_operator as mvn_linop
from tensorflow.python.framework import ops
from tensorflow.python.ops.distributions import util as distribution_util
__all__ = [
"MultivariateNormalTriL",
]
class MultivariateNormalTriL(
mvn_linop.MultivariateNormalLinearOperator):
"""The multivariate normal distribution on `R^k`.
The Multivariate Normal distribution is defined over `R^k` and parameterized
by a (batch of) length-`k` `loc` vector (aka "mu") and a (batch of) `k x k`
`scale` matrix; `covariance = scale @ scale.T` where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, scale) = exp(-0.5 ||y||**2) / Z,
y = inv(scale) @ (x - loc),
Z = (2 pi)**(0.5 k) |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a matrix in `R^{k x k}`, `covariance = scale @ scale.T`,
* `Z` denotes the normalization constant, and,
* `||y||**2` denotes the squared Euclidean norm of `y`.
A (non-batch) `scale` matrix is:
```none
scale = scale_tril
```
where `scale_tril` is lower-triangular `k x k` matrix with non-zero diagonal,
i.e., `tf.diag_part(scale_tril) != 0`.
Additional leading dimensions (if any) will index batches.
The MultivariateNormal distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ MultivariateNormal(loc=0, scale=1) # Identity scale, zero shift.
Y = scale @ X + loc
```
Trainable (batch) lower-triangular matrices can be created with
`ds.matrix_diag_transform()` and/or `ds.fill_lower_triangular()`
#### Examples
```python
ds = tf.contrib.distributions
# Initialize a single 3-variate Gaussian.
mu = [1., 2, 3]
cov = [[ 0.36, 0.12, 0.06],
[ 0.12, 0.29, -0.13],
[ 0.06, -0.13, 0.26]]
scale = tf.cholesky(cov)
# ==> [[ 0.6, 0. , 0. ],
# [ 0.2, 0.5, 0. ],
# [ 0.1, -0.3, 0.4]])
mvn = ds.MultivariateNormalTriL(
loc=mu,
scale_tril=scale)
mvn.mean().eval()
# ==> [1., 2, 3]
# Covariance agrees with cholesky(cov) parameterization.
mvn.covariance().eval()
# ==> [[ 0.36, 0.12, 0.06],
# [ 0.12, 0.29, -0.13],
# [ 0.06, -0.13, 0.26]]
# Compute the pdf of an observation in `R^3` ; return a scalar.
mvn.prob([-1., 0, 1]).eval() # shape: []
# Initialize a 2-batch of 3-variate Gaussians.
mu = [[1., 2, 3],
[11, 22, 33]] # shape: [2, 3]
tril = ... # shape: [2, 3, 3], lower triangular, non-zero diagonal.
mvn = ds.MultivariateNormalTriL(
loc=mu,
scale_tril=tril)
# Compute the pdf of two `R^3` observations; return a length-2 vector.
x = [[-0.9, 0, 0.1],
[-10, 0, 9]] # shape: [2, 3]
mvn.prob(x).eval() # shape: [2]
```
"""
def __init__(self,
loc=None,
scale_tril=None,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalTriL"):
"""Construct Multivariate Normal distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = scale @ scale.T`. A (non-batch) `scale` matrix is:
```none
scale = scale_tril
```
where `scale_tril` is lower-triangular `k x k` matrix with non-zero
diagonal, i.e., `tf.diag_part(scale_tril) != 0`.
Additional leading dimensions (if any) will index batches.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale_tril: Floating-point, lower-triangular `Tensor` with non-zero
diagonal elements. `scale_tril` has shape `[B1, ..., Bb, k, k]` where
`b >= 0` and `k` is the event size.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if neither `loc` nor `scale_tril` are specified.
"""
parameters = locals()
def _convert_to_tensor(x, name):
return None if x is None else ops.convert_to_tensor(x, name=name)
if loc is None and scale_tril is None:
raise ValueError("Must specify one or both of `loc`, `scale_tril`.")
with ops.name_scope(name):
with ops.name_scope("init", values=[loc, scale_tril]):
loc = _convert_to_tensor(loc, name="loc")
scale_tril = _convert_to_tensor(scale_tril, name="scale_tril")
if scale_tril is None:
scale = linalg.LinearOperatorIdentity(
num_rows=distribution_util.dimension_size(loc, -1),
dtype=loc.dtype,
is_self_adjoint=True,
is_positive_definite=True,
assert_proper_shapes=validate_args)
else:
# No need to validate that scale_tril is non-singular.
# LinearOperatorTriL has an assert_non_singular method that is called
# by the Bijector.
scale = linalg.LinearOperatorTriL(
scale_tril,
is_non_singular=True,
is_self_adjoint=False,
is_positive_definite=False)
super(MultivariateNormalTriL, self).__init__(
loc=loc,
scale=scale,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/tensorflow/contrib/distributions/python/ops/mvn_tril.py
|
Python
|
bsd-2-clause
| 7,102
|
[
"Gaussian"
] |
09bb83c4de480421274561ba9a41eb867926c8fbccedaa3edab8d28402c15fda
|
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/1')
from data_1 import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
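# Hedged shape sketch for pca() above, kept commented out. Rows are feature
# dimensions and columns are observations, and np.matrix input is assumed so
# the mean subtraction broadcasts across columns:
# X = np.matrix(np.random.randn(5, 140))  # 5 features x 140 trials (placeholder)
# vec, val, mu, B, C = pca(X)
# proj = (vec[:, 0:2].T) * B  # project the centered data onto the first two PCs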
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_1 = ['Rigid-Fixed']*35 + ['Rigid-Movable']*35 + ['Soft-Fixed']*35 + ['Soft-Movable']*35
PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
    # Reduced eigenvector matrix keeping the num_PC leading eigenvectors (ranked by eigenvalue)
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
return Y.T
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
num_PC=1
while num_PC <=20:
Proj = np.zeros((140,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
|
tapomayukh/projects_in_python
|
classification/Classification_with_kNN/Single_Contact_Classification/Final/best_kNN_PCA/4-categories/1/test11_cross_validate_categories_1_1200ms.py
|
Python
|
mit
| 4,730
|
[
"Mayavi"
] |
f8355b961449c205782e40f86918cca58288564305c0a748d44f90d6c0bb3da3
|
#!/usr/bin/env python
from Bio.Blast import NCBIWWW, NCBIXML
def callpBLAST(sequence):
result_handle = NCBIWWW.qblast("blastn", "nr", sequence)
blast_record = NCBIXML.read(result_handle)
return blast_record
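# Example usage (illustrative, not part of the original module): run a query
# and print the top hits. The sequence string below is a hypothetical stand-in.
if __name__ == '__main__':
    record = callpBLAST("ATGGCTAGCTAGCTACGATCG")
    for alignment in record.alignments[:5]:
        for hsp in alignment.hsps:
            print("%s\t%s" % (alignment.title, hsp.expect))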
|
gbugaisky/bimm_185_conotoxin
|
submodules/callpBLAST.py
|
Python
|
gpl-2.0
| 211
|
[
"BLAST"
] |
9bece3f63db42d70e3bfd61a32afb45d8d8f1dc511e226eddeab66988c2b4305
|
"""
Base classes used by studio tests.
"""
from bok_choy.web_app_test import WebAppTest
from bok_choy.page_object import XSS_INJECTION
from ...pages.studio.auto_auth import AutoAuthPage
from ...fixtures.course import CourseFixture
from ...fixtures.library import LibraryFixture
from ..helpers import UniqueCourseTest
from ...pages.studio.overview import CourseOutlinePage
from ...pages.studio.utils import verify_ordering
class StudioCourseTest(UniqueCourseTest):
"""
Base class for all Studio course tests.
"""
def setUp(self, is_staff=False, test_xss=True): # pylint: disable=arguments-differ
"""
Install a course with no content using a fixture.
"""
super(StudioCourseTest, self).setUp()
self.test_xss = test_xss
self.install_course_fixture(is_staff)
def install_course_fixture(self, is_staff=False):
"""
Install a course fixture
"""
self.course_fixture = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name'],
)
if self.test_xss:
xss_injected_unique_id = XSS_INJECTION + self.unique_id
test_improper_escaping = {u"value": xss_injected_unique_id}
self.course_fixture.add_advanced_settings({
"advertised_start": test_improper_escaping,
"info_sidebar_name": test_improper_escaping,
"cert_name_short": test_improper_escaping,
"cert_name_long": test_improper_escaping,
"display_organization": test_improper_escaping,
"display_coursenumber": test_improper_escaping,
})
self.course_info['display_organization'] = xss_injected_unique_id
self.course_info['display_coursenumber'] = xss_injected_unique_id
self.populate_course_fixture(self.course_fixture)
self.course_fixture.install()
self.user = self.course_fixture.user
self.log_in(self.user, is_staff)
def populate_course_fixture(self, course_fixture):
"""
Populate the children of the test course fixture.
"""
pass
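    # Illustrative override sketch (not from the original file): subclasses add
    # course content here before install() runs, e.g. with XBlockFixtureDesc:
    #
    #   def populate_course_fixture(self, course_fixture):
    #       course_fixture.add_children(
    #           XBlockFixtureDesc('chapter', 'Test Section'),
    #       )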
def log_in(self, user, is_staff=False):
"""
Log in as the user that created the course. The user will be given instructor access
to the course and enrolled in it. By default the user will not have staff access unless
is_staff is passed as True.
Args:
user(dict): dictionary containing user data: {'username': ..., 'email': ..., 'password': ...}
is_staff(bool): register this user as staff
"""
self.auth_page = AutoAuthPage(
self.browser,
staff=is_staff,
username=user.get('username'),
email=user.get('email'),
password=user.get('password')
)
self.auth_page.visit()
class ContainerBase(StudioCourseTest):
"""
Base class for tests that do operations on the container page.
"""
def setUp(self, is_staff=False):
"""
Create a unique identifier for the course used in this test.
"""
# Ensure that the superclass sets up
super(ContainerBase, self).setUp(is_staff=is_staff)
self.outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def go_to_nested_container_page(self):
"""
Go to the nested container page.
"""
unit = self.go_to_unit_page()
# The 0th entry is the unit page itself.
container = unit.xblocks[1].go_to_container()
return container
def go_to_unit_page(self, section_name='Test Section', subsection_name='Test Subsection', unit_name='Test Unit'):
"""
Go to the test unit page.
"""
self.outline.visit()
subsection = self.outline.section(section_name).subsection(subsection_name)
return subsection.expand_subsection().unit(unit_name).go_to()
def do_action_and_verify(self, action, expected_ordering):
"""
Perform the supplied action and then verify the resulting ordering.
"""
container = self.go_to_nested_container_page()
action(container)
verify_ordering(self, container, expected_ordering)
# Reload the page to see that the change was persisted.
container = self.go_to_nested_container_page()
verify_ordering(self, container, expected_ordering)
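    # Illustrative usage (not from the original file; the action and ordering
    # shown are hypothetical):
    #
    #   self.do_action_and_verify(
    #       lambda container: container.delete_child(0),
    #       expected_ordering=['Group B', 'Group A'],
    #   )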
class StudioLibraryTest(WebAppTest):
"""
Base class for all Studio library tests.
"""
as_staff = True
def setUp(self):
"""
Install a library with no content using a fixture.
"""
super(StudioLibraryTest, self).setUp()
fixture = LibraryFixture(
'test_org',
self.unique_id,
'Test Library {}'.format(self.unique_id),
)
self.populate_library_fixture(fixture)
fixture.install()
self.library_fixture = fixture
self.library_info = fixture.library_info
self.library_key = fixture.library_key
self.user = fixture.user
self.log_in(self.user, self.as_staff)
def populate_library_fixture(self, library_fixture):
"""
        Populate the children of the test library fixture.
"""
pass
def log_in(self, user, is_staff=False):
"""
Log in as the user that created the library.
By default the user will not have staff access unless is_staff is passed as True.
"""
auth_page = AutoAuthPage(
self.browser,
staff=is_staff,
username=user.get('username'),
email=user.get('email'),
password=user.get('password')
)
auth_page.visit()
|
solashirai/edx-platform
|
common/test/acceptance/tests/studio/base_studio_test.py
|
Python
|
agpl-3.0
| 6,036
|
[
"VisIt"
] |
766b9402baea07780c4e64d3dde7fb0c736a3ba0f1ea7a22d6e574d61509176b
|
#!/usr/bin/env python3
"""
Copyright 2020 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import dbShared
import cgi
import pymysql
import time
from datetime import timedelta, datetime
import ghShared
import dbShared
PAGE_SIZE = 42
def getHistorySQL(uid, timeCriteria, galaxy):
if uid == '':
return 'SELECT userID, eventTime, spawnName, eventType, planetName, tResources.resourceType, tResourceType.resourceTypeName, containerType, tResources.galaxy FROM tResourceEvents INNER JOIN tResources ON tResourceEvents.spawnID = tResources.spawnID INNER JOIN tResourceType ON tResources.resourceType = tResourceType.resourceType LEFT JOIN tPlanet ON tResourceEvents.planetID = tPlanet.planetID WHERE tResourceEvents.galaxy=' + galaxy + timeCriteria + ' ORDER BY eventTime DESC LIMIT ' + str(PAGE_SIZE) + ';'
else:
return 'SELECT galaxyName, eventTime, spawnName, eventType, planetName, tResources.resourceType, tResourceType.resourceTypeName, containerType, tResources.galaxy FROM tResourceEvents INNER JOIN tResources ON tResourceEvents.spawnID = tResources.spawnID INNER JOIN tGalaxy ON tResourceEvents.galaxy = tGalaxy.galaxyID INNER JOIN tResourceType ON tResources.resourceType = tResourceType.resourceType LEFT JOIN tPlanet ON tResourceEvents.planetID = tPlanet.planetID WHERE userID="' + uid + '"' + timeCriteria + ' ORDER BY eventTime DESC LIMIT ' + str(PAGE_SIZE) + ';'
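# Note (illustrative, not part of the original script): pymysql also supports
# parameterized queries, which sidesteps manual escaping entirely, e.g.:
#
#   cursor.execute('SELECT ... WHERE userID=%s AND eventTime < %s '
#                  'ORDER BY eventTime DESC LIMIT %s', (uid, lastTime, PAGE_SIZE))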
form = cgi.FieldStorage()
galaxy = form.getfirst('galaxy', '')
uid = form.getfirst('uid', '')
lastTime = form.getfirst('lastTime', '')
formatType = form.getfirst('formatType', '')
# escape input to prevent sql injection
galaxy = dbShared.dbInsertSafe(galaxy)
uid = dbShared.dbInsertSafe(uid)
lastTime = dbShared.dbInsertSafe(lastTime)
timeCriteria = ''
errors = ''
responseData = ''
# Main program
firstCol = 'Member'
if uid != '':
firstCol = 'Galaxy'
if formatType == 'json':
print('Content-type: text/json\n')
else:
print('Content-type: text/html\n')
if uid == '' and galaxy == '':
errors = 'Error: you must specify a user id or galaxy to get history for.'
if (len(lastTime) > 5):
timeCriteria = " AND eventTime < '" + lastTime + "'"
conn = dbShared.ghConn()
cursor = conn.cursor()
if (cursor and errors == ''):
if formatType == 'json':
responseData = '{\n'
responseData += ' "server_time" : "' + datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d %H:%M:%S") + '",\n'
responseData += ' "events" : [\n'
else:
responseData = '<table class="userData" width="620">\n'
		responseData += '<thead><tr class="tableHead"><td width="100">' + firstCol + '</td><td width="140">Time</td><td width="85">Spawn</td><td width="175">Resource Type</td><td width="70">Action</td><td width="50">Planet</td></tr></thead>\n'
cursor.execute(getHistorySQL(uid, timeCriteria, galaxy))
row = cursor.fetchone()
while (row != None):
if formatType == 'json':
responseData += ' {\n'
responseData += ' "' + firstCol + '" : "' + row[0] + '",\n'
responseData += ' "time" : "' + str(row[1]) + '",\n'
responseData += ' "spawn_name" : "' + row[2] + '",\n'
responseData += ' "event_type" : "' + row[3] + '",\n'
responseData += ' "planet_name" : "' + str(row[4]) + '",\n'
responseData += ' "resource_type" : "' + row[5] + '",\n'
responseData += ' "resource_type_name" : "' + row[6] + '",\n'
responseData += ' "container_type" : "' + row[7] + '"\n'
responseData += ' },\n'
else:
responseData += ' <tr class="statRow"><td>' + row[0] + '</td><td>' + str(row[1]) + '</td><td><a href="' + ghShared.BASE_SCRIPT_URL + 'resource.py/' + str(row[8]) + '/' + row[2] + '" class="nameLink">' + row[2] + '</a></td><td><a href="' + ghShared.BASE_SCRIPT_URL + 'resourceType.py/' + row[5] + '" class="nameLink">' + row[6] + '</a></td><td>' + ghShared.getActionName(row[3]) + '</td><td>' + str(row[4]) + '</td>'
responseData += ' </tr>'
lastTime = row[1]
row = cursor.fetchone()
	if formatType == 'json':
		# drop the trailing comma after the last event so the JSON stays valid
		responseData = responseData.rstrip(',\n') + '\n ]'
else:
responseData += ' </table>'
if (cursor.rowcount == PAGE_SIZE):
		if formatType == 'json':
			responseData += ',\n "more_events" : "yes"\n'
else:
responseData += '<div style="text-align:center;"><button id="moreButton" class="ghButton" onclick="moreHistory(\''+ str(lastTime) + '\');">More</button></div>'
cursor.close()
	if formatType == 'json':
		responseData += '\n}'
else:
errors = "Error: Database unavailable"
conn.close()
if errors == '':
print(responseData)
sys.exit(200)
else:
if formatType == 'json':
print('{ "response" : "' + errors + '"}')
else:
print(errors)
sys.exit(500)
|
pwillworth/galaxyharvester
|
html/getUserHistory.py
|
Python
|
gpl-3.0
| 5,273
|
[
"Galaxy"
] |
092fbd51496070151003abb67390cf6c52040189991d2228f2b31d045b9f7939
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.contrib.distributions.python.ops import operator_pd_cholesky
from tensorflow.contrib.distributions.python.ops import operator_pd_diag
from tensorflow.contrib.distributions.python.ops import operator_pd_full
from tensorflow.contrib.distributions.python.ops import operator_pd_vdvt_update
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
__all__ = [
"MultivariateNormalDiag",
"MultivariateNormalDiagWithSoftplusStDev",
"MultivariateNormalCholesky",
"MultivariateNormalFull",
"MultivariateNormalDiagPlusVDVT",
]
_mvn_prob_note = """
`x` is a batch vector with compatible shape if `x` is a `Tensor` whose
shape can be broadcast up to either:
```
self.batch_shape + self.event_shape
```
or
```
[M1,...,Mm] + self.batch_shape + self.event_shape
```
"""
class _MultivariateNormalOperatorPD(distribution.Distribution):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and an instance of
`OperatorPDBase`, which provides access to a symmetric positive definite
operator, which defines the covariance.
#### Mathematical details
With `C` the covariance matrix represented by the operator, the PDF of this
distribution is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian.
mu = [1, 2, 3]
chol = [[1, 0, 0.], [1, 3, 0], [1, 2, 3]]
cov = tf.contrib.distributions.OperatorPDCholesky(chol)
dist = tf.contrib.distributions._MultivariateNormalOperatorPD(mu, cov)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1.])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33.]]
chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal.
cov = tf.contrib.distributions.OperatorPDCholesky(chol)
dist = tf.contrib.distributions._MultivariateNormalOperatorPD(mu, cov)
  # Evaluate this on two observations, each in R^3, returning a length-two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11.]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(self,
mu,
cov,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalCov"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu`, and an instance of `OperatorPDBase`, `cov`,
which determines the covariance.
Args:
mu: Floating point tensor with shape `[N1,...,Nb, k]`, `b >= 0`.
cov: Instance of `OperatorPDBase` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `cov` are different dtypes.
"""
parameters = locals()
with ops.name_scope(name) as ns:
with ops.name_scope("init", values=[mu] + cov.inputs):
self._mu = array_ops.identity(mu, name="mu")
self._cov = cov
self._validate_args = validate_args # Needed by _assert_valid_mu.
self._mu = self._assert_valid_mu(self._mu)
super(_MultivariateNormalOperatorPD, self).__init__(
dtype=self._mu.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
is_continuous=True,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._mu] + cov.inputs,
name=ns)
def _assert_valid_mu(self, mu):
"""Return `mu` after validity checks and possibly with assertations."""
cov = self._cov
if mu.dtype != cov.dtype:
raise TypeError(
"mu and cov must have the same dtype. Found mu.dtype = %s, "
"cov.dtype = %s" % (mu.dtype, cov.dtype))
# Try to validate with static checks.
mu_shape = mu.get_shape()
cov_shape = cov.get_shape()
if mu_shape.is_fully_defined() and cov_shape.is_fully_defined():
if mu_shape != cov_shape[:-1]:
raise ValueError(
"mu.shape and cov.shape[:-1] should match. Found: mu.shape=%s, "
"cov.shape=%s" % (mu_shape, cov_shape))
else:
return mu
# Static checks could not be run, so possibly do dynamic checks.
if not self.validate_args:
return mu
else:
assert_same_rank = check_ops.assert_equal(
array_ops.rank(mu) + 1,
cov.rank(),
data=["mu should have rank 1 less than cov. Found: rank(mu) = ",
array_ops.rank(mu), " rank(cov) = ", cov.rank()],
)
with ops.control_dependencies([assert_same_rank]):
assert_same_shape = check_ops.assert_equal(
array_ops.shape(mu),
cov.vector_shape(),
data=["mu.shape and cov.shape[:-1] should match. "
"Found: shape(mu) = "
, array_ops.shape(mu), " shape(cov) = ", cov.shape()],
)
return control_flow_ops.with_dependencies([assert_same_shape], mu)
@property
def mu(self):
return self._mu
@property
def sigma(self):
"""Dense (batch) covariance matrix, if available."""
with ops.name_scope(self.name):
return self._cov.to_dense()
def log_sigma_det(self, name="log_sigma_det"):
"""Log of determinant of covariance matrix."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=self._cov.inputs):
return self._cov.log_det()
def sigma_det(self, name="sigma_det"):
"""Determinant of covariance matrix."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=self._cov.inputs):
return math_ops.exp(self._cov.log_det())
def _batch_shape_tensor(self):
return self._cov.batch_shape()
def _batch_shape(self):
return self._cov.get_batch_shape()
def _event_shape_tensor(self):
return array_ops.stack([self._cov.vector_space_dimension()])
def _event_shape(self):
return self._cov.get_shape()[-1:]
def _sample_n(self, n, seed=None):
# Recall _assert_valid_mu ensures mu and self._cov have same batch shape.
shape = array_ops.concat([self._cov.vector_shape(), [n]], 0)
white_samples = random_ops.random_normal(shape=shape,
mean=0.,
stddev=1.,
dtype=self.dtype,
seed=seed)
correlated_samples = self._cov.sqrt_matmul(white_samples)
# Move the last dimension to the front
perm = array_ops.concat(
(array_ops.stack([array_ops.rank(correlated_samples) - 1]),
math_ops.range(0, array_ops.rank(correlated_samples) - 1)), 0)
# TODO(ebrevdo): Once we get a proper tensor contraction op,
# perform the inner product using that instead of batch_matmul
# and this slow transpose can go away!
correlated_samples = array_ops.transpose(correlated_samples, perm)
samples = correlated_samples + self.mu
return samples
@distribution_util.AppendDocstring(_mvn_prob_note)
def _log_prob(self, x):
# Q: Why are shape requirements as stated above?
# A: The compatible shapes are precisely the ones that will broadcast to
# a shape compatible with self._cov.
# See Operator base class for notes about shapes compatible with self._cov.
x = ops.convert_to_tensor(x)
contrib_tensor_util.assert_same_float_dtype((self._mu, x))
# _assert_valid_mu asserts that self.mu has same batch shape as self.cov.
# so batch shape of self.mu = that of self._cov and self, and the
# batch shape of x_centered is a broadcast version of these. If this
# broadcast results in a shape like
# [M1,...,Mm] + self.batch_shape + self.event_shape
# OR
# self.batch_shape + self.event_shape
# then subsequent operator calls are guaranteed to work.
x_centered = x - self.mu
# Compute the term x^{-1} sigma^{-1} x which appears in the exponent of
# the pdf.
x_whitened_norm = self._cov.inv_quadratic_form_on_vectors(x_centered)
k = math_ops.cast(self._cov.vector_space_dimension(), self.dtype)
log_prob_value = -0.5 * (self.log_sigma_det() +
k * math.log(2. * math.pi) +
x_whitened_norm)
output_static_shape = x_centered.get_shape()[:-1]
log_prob_value.set_shape(output_static_shape)
return log_prob_value
@distribution_util.AppendDocstring(_mvn_prob_note)
def _prob(self, x):
return math_ops.exp(self.log_prob(x))
def _entropy(self):
log_sigma_det = self.log_sigma_det()
one_plus_log_two_pi = constant_op.constant(1 + math.log(2 * math.pi),
dtype=self.dtype)
# Use broadcasting rules to calculate the full broadcast sigma.
k = math_ops.cast(self._cov.vector_space_dimension(), dtype=self.dtype)
entropy_value = (k * one_plus_log_two_pi + log_sigma_det) / 2
entropy_value.set_shape(log_sigma_det.get_shape())
return entropy_value
def _mean(self):
return array_ops.identity(self._mu)
def _covariance(self):
return self.sigma
def _variance(self):
return array_ops.matrix_diag_part(self.sigma)
def _mode(self):
return array_ops.identity(self._mu)
class MultivariateNormalDiag(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and a 1-D diagonal
`diag_stddev`, representing the standard deviations. This distribution
assumes the random variables, `(X_1,...,X_k)` are independent, thus no
non-diagonal terms of the covariance matrix are needed.
This allows for `O(k)` pdf evaluation, sampling, and storage.
#### Mathematical details
The PDF of this distribution is defined in terms of the diagonal covariance
determined by `diag_stddev`: `C_{ii} = diag_stddev[i]**2`.
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
  of length `k`, and the standard deviations of the (independent) random variables.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal standard deviation.
mu = [1, 2, 3.]
diag_stddev = [4, 5, 6.]
dist = tf.contrib.distributions.MultivariateNormalDiag(mu, diag_stddev)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33]] # shape 2 x 3
diag_stddev = ... # shape 2 x 3, positive.
dist = tf.contrib.distributions.MultivariateNormalDiag(mu, diag_stddev)
  # Evaluate this on two observations, each in R^3, returning a length-two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(
self,
mu,
diag_stddev,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiag"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and standard deviations `diag_stddev`.
Each batch member represents a random vector `(X_1,...,X_k)` of independent
random normals.
The mean of `X_i` is `mu[i]`, and the standard deviation is
`diag_stddev[i]`.
Args:
mu: Rank `N + 1` floating point tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
diag_stddev: Rank `N + 1` `Tensor` with same `dtype` and shape as `mu`,
representing the standard deviations. Must be positive.
validate_args: `Boolean`, default `False`. Whether to validate
input with asserts. If `validate_args` is `False`,
and the inputs are invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `diag_stddev` are different dtypes.
"""
parameters = locals()
with ops.name_scope(name, values=[diag_stddev]) as ns:
cov = operator_pd_diag.OperatorPDSqrtDiag(diag_stddev,
verify_pd=validate_args)
super(MultivariateNormalDiag, self).__init__(
mu, cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=ns)
self._parameters = parameters
class MultivariateNormalDiagWithSoftplusStDev(MultivariateNormalDiag):
"""MultivariateNormalDiag with `diag_stddev = softplus(diag_stddev)`."""
def __init__(self,
mu,
diag_stddev,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiagWithSoftplusStdDev"):
parameters = locals()
with ops.name_scope(name, values=[diag_stddev]) as ns:
super(MultivariateNormalDiagWithSoftplusStDev, self).__init__(
mu=mu,
diag_stddev=nn.softplus(diag_stddev),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=ns)
self._parameters = parameters
class MultivariateNormalDiagPlusVDVT(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
Every batch member of this distribution is defined by a mean and a lightweight
covariance matrix `C`.
#### Mathematical details
The PDF of this distribution in terms of the mean `mu` and covariance `C` is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
For every batch member, this distribution represents `k` random variables
`(X_1,...,X_k)`, with mean `E[X_i] = mu[i]`, and covariance matrix
`C_{ij} := E[(X_i - mu[i])(X_j - mu[j])]`
The user initializes this class by providing the mean `mu`, and a lightweight
definition of `C`:
```
C = SS^T = SS = (M + V D V^T) (M + V D V^T)
M is diagonal (k x k)
  V is shape (k x r), typically r << k
  D is diagonal (r x r), optional (defaults to identity).
```
This allows for `O(kr + r^3)` pdf evaluation and determinant, and `O(kr)`
sampling and storage (per batch member).
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and square root of the covariance `S = M + V D V^T`. Extra
leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with covariance square root
# S = M + V D V^T, where V D V^T is a matrix-rank 2 update.
mu = [1, 2, 3.]
diag_large = [1.1, 2.2, 3.3]
v = ... # shape 3 x 2
diag_small = [4., 5.]
dist = tf.contrib.distributions.MultivariateNormalDiagPlusVDVT(
mu, diag_large, v, diag_small=diag_small)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians. This time, don't provide
# diag_small. This means S = M + V V^T.
mu = [[1, 2, 3], [11, 22, 33]] # shape 2 x 3
diag_large = ... # shape 2 x 3
v = ... # shape 2 x 3 x 1, a matrix-rank 1 update.
dist = tf.contrib.distributions.MultivariateNormalDiagPlusVDVT(
mu, diag_large, v)
  # Evaluate this on two observations, each in R^3, returning a length-two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(
self,
mu,
diag_large,
v,
diag_small=None,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiagPlusVDVT"):
"""Multivariate Normal distributions on `R^k`.
For every batch member, this distribution represents `k` random variables
`(X_1,...,X_k)`, with mean `E[X_i] = mu[i]`, and covariance matrix
`C_{ij} := E[(X_i - mu[i])(X_j - mu[j])]`
The user initializes this class by providing the mean `mu`, and a
lightweight definition of `C`:
```
C = SS^T = SS = (M + V D V^T) (M + V D V^T)
M is diagonal (k x k)
    V is shape (k x r), typically r << k
    D is diagonal (r x r), optional (defaults to identity).
```
Args:
mu: Rank `n + 1` floating point tensor with shape `[N1,...,Nn, k]`,
`n >= 0`. The means.
diag_large: Optional rank `n + 1` floating point tensor, shape
`[N1,...,Nn, k]` `n >= 0`. Defines the diagonal matrix `M`.
v: Rank `n + 1` floating point tensor, shape `[N1,...,Nn, k, r]`
`n >= 0`. Defines the matrix `V`.
diag_small: Rank `n + 1` floating point tensor, shape
`[N1,...,Nn, k]` `n >= 0`. Defines the diagonal matrix `D`. Default
is `None`, which means `D` will be the identity matrix.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`,
and the inputs are invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
"""
parameters = locals()
with ops.name_scope(name, values=[diag_large, v, diag_small]) as ns:
cov = operator_pd_vdvt_update.OperatorPDSqrtVDVTUpdate(
operator_pd_diag.OperatorPDDiag(
diag_large, verify_pd=validate_args),
v,
diag=diag_small,
verify_pd=validate_args,
verify_shapes=validate_args)
super(MultivariateNormalDiagPlusVDVT, self).__init__(
mu, cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=ns)
self._parameters = parameters
class MultivariateNormalCholesky(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and a Cholesky factor `chol`.
Providing the Cholesky factor allows for `O(k^2)` pdf evaluation and sampling,
and requires `O(k^2)` storage.
#### Mathematical details
The Cholesky factor `chol` defines the covariance matrix: `C = chol chol^T`.
The PDF of this distribution is then:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal covariance.
# Note, this would be more efficient with MultivariateNormalDiag.
mu = [1, 2, 3.]
chol = [[1, 0, 0], [0, 3, 0], [0, 0, 2]]
dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33]]
chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal.
dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol)
  # Evaluate this on two observations, each in R^3, returning a length-two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
Trainable (batch) Cholesky matrices can be created with
  `tf.contrib.distributions.matrix_diag_transform()`.
"""
def __init__(self,
mu,
chol,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalCholesky"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and `chol` which holds the (batch) Cholesky
factors, such that the covariance of each batch member is `chol chol^T`.
Args:
mu: `(N+1)-D` floating point tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
chol: `(N+2)-D` `Tensor` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`. The upper triangular part is ignored (treated as
though it is zero), and the diagonal must be positive.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `chol` are different dtypes.
"""
parameters = locals()
with ops.name_scope(name, values=[chol]) as ns:
cov = operator_pd_cholesky.OperatorPDCholesky(chol,
verify_pd=validate_args)
super(MultivariateNormalCholesky, self).__init__(
mu, cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=ns)
self._parameters = parameters
class MultivariateNormalFull(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and covariance matrix `sigma`.
Evaluation of the pdf, determinant, and sampling are all `O(k^3)` operations.
#### Mathematical details
With `C = sigma`, the PDF of this distribution is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal covariance.
mu = [1, 2, 3.]
sigma = [[1, 0, 0], [0, 3, 0], [0, 0, 2.]]
  dist = tf.contrib.distributions.MultivariateNormalFull(mu, sigma)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33.]]
sigma = ... # shape 2 x 3 x 3, positive definite.
dist = tf.contrib.distributions.MultivariateNormalFull(mu, sigma)
  # Evaluate this on two observations, each in R^3, returning a length-two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11.]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(self,
mu,
sigma,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalFull"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and `sigma`, the mean and covariance.
Args:
mu: `(N+1)-D` floating point tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
sigma: `(N+2)-D` `Tensor` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`. Each batch member must be positive definite.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `sigma` are different dtypes.
"""
parameters = locals()
with ops.name_scope(name, values=[sigma]) as ns:
cov = operator_pd_full.OperatorPDFull(sigma, verify_pd=validate_args)
super(MultivariateNormalFull, self).__init__(
mu, cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=ns)
self._parameters = parameters
@kullback_leibler.RegisterKL(
_MultivariateNormalOperatorPD, _MultivariateNormalOperatorPD)
def _kl_mvn_mvn_brute_force(mvn_a, mvn_b, name=None):
"""Batched KL divergence `KL(mvn_a || mvn_b)` for multivariate normals.
With `X`, `Y` both multivariate normals in `R^k` with means `mu_x`, `mu_y` and
covariance `C_x`, `C_y` respectively,
```
  KL(X || Y) = 0.5 * ( T + Q - k + L ),
T := trace(C_b^{-1} C_a),
Q := (mu_b - mu_a)^T C_b^{-1} (mu_b - mu_a),
L := Log[Det(C_b)] - Log[Det(C_a)]
```
This `Op` computes the trace by solving `C_b^{-1} C_a`. Although efficient
methods for solving systems with `C_b` may be available, a dense version of
(the square root of) `C_a` is used, so performance is `O(B s k^2)` where `B`
is the batch size, and `s` is the cost of solving `C_b x = y` for vectors `x`
and `y`.
Args:
mvn_a: Instance of subclass of `_MultivariateNormalOperatorPD`.
mvn_b: Instance of subclass of `_MultivariateNormalOperatorPD`.
name: (optional) name to use for created ops. Default "kl_mvn_mvn".
Returns:
Batchwise `KL(mvn_a || mvn_b)`.
"""
# Access the "private" OperatorPD that each mvn is built from.
cov_a = mvn_a._cov # pylint: disable=protected-access
cov_b = mvn_b._cov # pylint: disable=protected-access
mu_a = mvn_a.mu
mu_b = mvn_b.mu
inputs = [mu_a, mu_b] + cov_a.inputs + cov_b.inputs
with ops.name_scope(name, "kl_mvn_mvn", inputs):
# If Ca = AA', Cb = BB', then
# tr[inv(Cb) Ca] = tr[inv(B)' inv(B) A A']
# = tr[inv(B) A A' inv(B)']
# = tr[(inv(B) A) (inv(B) A)']
# = sum_{ik} (inv(B) A)_{ik}^2
# The second equality follows from the cyclic permutation property.
b_inv_a = cov_b.sqrt_solve(cov_a.sqrt_to_dense())
t = math_ops.reduce_sum(
math_ops.square(b_inv_a),
reduction_indices=[-1, -2])
q = cov_b.inv_quadratic_form_on_vectors(mu_b - mu_a)
k = math_ops.cast(cov_a.vector_space_dimension(), mvn_a.dtype)
one_half_l = cov_b.sqrt_log_det() - cov_a.sqrt_log_det()
return 0.5 * (t + q - k) + one_half_l
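# Illustrative cross-check (not part of the original module): the same KL
# divergence for a single pair of dense covariance matrices, in plain NumPy.
# All names below are hypothetical.
#
#   import numpy as np
#   def kl_mvn(mu_a, cov_a, mu_b, cov_b):
#       k = mu_a.shape[0]
#       cov_b_inv = np.linalg.inv(cov_b)
#       t = np.trace(cov_b_inv.dot(cov_a))               # trace term
#       d = mu_b - mu_a
#       q = d.dot(cov_b_inv).dot(d)                      # quadratic term
#       _, logdet_a = np.linalg.slogdet(cov_a)
#       _, logdet_b = np.linalg.slogdet(cov_b)
#       return 0.5 * (t + q - k + logdet_b - logdet_a)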
|
nikste/tensorflow
|
tensorflow/contrib/distributions/python/ops/mvn.py
|
Python
|
apache-2.0
| 28,657
|
[
"Gaussian"
] |
f4dea9150ebb6928a3069aee48ecab01a624b652cd71bba0fbb94b88221ae079
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 17 10:59:20 2016
@author: huliqun
"""
import random, string
import mimetypes
import os
import uuid
import json
import traceback
from selenium import webdriver
import datetime
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import DeclarativeMeta
from sqlalchemy.pool import QueuePool
from PIL import Image
from jinja2 import Environment, PackageLoader
import pdfkit
import falcon
import workserver.settings as settings
from workserver.module.models import UserLog
#http://stackoverflow.com/questions/5022066/how-to-serialize-sqlalchemy-result-to-json
def json_alchemy_encoder(revisit_self = False, fields_escape = []):
_visited_objs = []
class AlchemyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj.__class__, DeclarativeMeta):
# don't re-visit self
if revisit_self:
if obj in _visited_objs:
return None
_visited_objs.append(obj)
# go through each field in this SQLalchemy class
fields = {}
for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
val = obj.__getattribute__(field)
# is this field another SQLalchemy object, or a list of SQLalchemy objects?
if isinstance(val.__class__, DeclarativeMeta) or (isinstance(val, list) and len(val) > 0 and isinstance(val[0].__class__, DeclarativeMeta)):
# unless we're expanding this field, stop here
                        if field in fields_escape:
# not expanding this field: set it to None and continue
continue
try:
json.dumps(val) # this will fail on non-encodable values, like other classes
fields[field] = val
                    except TypeError: # added handling for datetime values
if isinstance(val, datetime.datetime):
if field == 'modifytime':
fields[field] = val.strftime('%Y-%m-%d %H:%M')
else:
fields[field] = val.strftime('%Y-%m-%d')
elif isinstance(val, datetime.date):
fields[field] = val.isoformat()
elif isinstance(val, datetime.timedelta):
fields[field] = (datetime.datetime.min + val).time().isoformat()
else:
fields[field] = None
if fields[field] is None:
fields[field] = ''
# a json-encodable dict
return fields
return json.JSONEncoder.default(self, obj)
return AlchemyEncoder
def schema2Json(schemaIns):
return json.loads(json.dumps(schemaIns, cls=json_alchemy_encoder(False, []), check_circular=False))
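# Illustrative usage (not from the original file): serialize a query result,
# skipping a relationship field. 'User' and 'orders' are hypothetical names.
#
#   rows = session.query(User).all()
#   payload = json.dumps(rows, cls=json_alchemy_encoder(False, ['orders']),
#                        check_circular=False)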
class GlobalVar:
engine_handle = None
enginer_handle = None
db_handle = None
glb_browser = None
glb_browser_in_use = 0
jinja2env = Environment(loader=PackageLoader('workserver', 'templates'))
def global_init():
GlobalVar.engine_handle = create_engine(settings.dbUrl, encoding="utf-8",poolclass=QueuePool, pool_size=50, pool_recycle=3600, echo=settings.dbEchoFlag)
GlobalVar.db_handle = sessionmaker(bind=GlobalVar.engine_handle)
def get_engine_handle():
return GlobalVar.engine_handle
def get_db_handle():
return GlobalVar.db_handle
def get_glb_browser():
if GlobalVar.glb_browser is None:
GlobalVar.glb_browser = webdriver.PhantomJS()
if GlobalVar.glb_browser_in_use > 0:
GlobalVar.glb_browser_in_use += 1
if GlobalVar.glb_browser_in_use > 20:
free_glb_browser()
return 'delay'
else:
GlobalVar.glb_browser_in_use = 1
return GlobalVar.glb_browser
def get_jinja2_env():
return GlobalVar.jinja2env
def release_glb_browser():
GlobalVar.glb_browser_in_use = 0
def free_glb_browser():
if GlobalVar.glb_browser is not None:
GlobalVar.glb_browser.quit()
GlobalVar.glb_browser = None
def random_word(length):
return ''.join(random.choice(string.ascii_lowercase) for i in range(length))
def genUserID():
return random_word(5) + str(uuid.uuid1()).replace('-','')
def fileSave(req,logger):
filename = ''
if 'multipart/form-data' in (req.content_type or ''):
uploadfile = req.params['files'].getlist('file')
if uploadfile:
ext = mimetypes.guess_extension(uploadfile[0].mimetype)
filename = '{uuid}{ext}'.format(uuid=str(uuid.uuid4()).replace('-',''), ext=ext)
file_path = os.path.join(settings.temp_path, filename)
logger.info(file_path)
with open(file_path, 'wb') as upload_file:
while True:
chunk = uploadfile[0].stream.read(4096)
if not chunk:
break
upload_file.write(chunk)
avatar_file = req.params['files'].getlist('avatar_file')
if avatar_file:
img = Image.open(avatar_file[0].stream)
avatar_data = req.params['form'].getlist('avatar_data')
if avatar_data:
img_trans = json.loads(avatar_data[0])
rotate_img = img.rotate(-1 *img_trans['rotate'])
crop_img = rotate_img.crop((img_trans['x'], img_trans['y'], img_trans['x'] + img_trans['width'], img_trans['y'] + img_trans['height']))
filename = '{uuid}.jpg'.format(uuid=str(uuid.uuid4()).replace('-',''))
image_path = os.path.join(settings.temp_path, filename)
crop_img.save(image_path)
# ext = mimetypes.guess_extension(req.content_type)
else:
ext = mimetypes.guess_extension(req.content_type)
filename = '{uuid}{ext}'.format(uuid=str(uuid.uuid4()).replace('-',''), ext=ext)
image_path = os.path.join(settings.temp_path, filename)
logger.info(image_path)
with open(image_path, 'wb') as image_file:
while True:
chunk = req.stream.read(4096)
if not chunk:
break
image_file.write(chunk)
return settings.tmp_url_base + filename
def getRandomPdf(template, pagePara, pdfkit_options, logger):
try:
env = get_jinja2_env()
template = env.get_template(template)
htmlString = template.render(pagePara=pagePara)
# output = open('/home/putbox/11.html', 'w')
# output = open('E:/11.html', 'w')
# output.write(htmlString)
# output.close()
filename = '{uuid}.pdf'.format(uuid=str(uuid.uuid4()).replace('-',''))
file_path = os.path.join(settings.temp_path, filename)
file_url = settings.tmp_url_base + filename
pdfkit.from_string(htmlString, file_path, pdfkit_options)
return file_path, file_url
except Exception as ex:
exceptionPrint(logger, ex)
return None, None
def fileMmove(url, mode='date', reladir=''):
filename = url.split('/')[-1]
if mode == 'date':
nowDate = datetime.datetime.now()
relpath = nowDate.strftime('%Y') + '/' + nowDate.strftime('%m') + '/' + nowDate.strftime('%d') + '/'
if mode == 'file':
nowDate = datetime.datetime.now()
relpath = 'files/' + nowDate.strftime('%Y') + '/' + nowDate.strftime('%m') + '/' + nowDate.strftime('%d') + '/'
elif mode == 'dir':
relpath = reladir + '/'
svpath = os.path.join(settings.files_storage_path, relpath)
if not os.path.exists(svpath):
os.makedirs(svpath)
os.rename(os.path.join(settings.temp_path, filename), os.path.join(svpath, filename))
return settings.images_url_base + relpath + filename
def exceptionPrint(logger, ex):
logger.error(traceback.print_exc())
logger.error(ex)
def genLogID():
db = get_db_handle()
session = db()
currentIndex = int(session.execute('select nextval(\'LogSeq\')').first()[0])
index = '%011d' % (currentIndex)
today = datetime.datetime.now().strftime('LG%Y%m%d')
session.commit()
return today + index
def createOperateLog(req):
methodApi = req.path.split('/')[-1].upper()
if methodApi == 'AUTH':
return
req_para = falcon.util.uri.parse_query_string(req.query_string)
if req_para['method'] == 'init':
return
elif req_para['method'] == 'search':
return
db = get_db_handle()
session = db()
userLog = UserLog(uid = genLogID(),
userID = req.context['user'].userID,
API = req.relative_uri,
paras = str(req.context['doc']))
session.add(userLog)
session.commit()
def numMoneyFormat(num):
if num:
return '%.2f' % (num/100.00)
else:
return '0.00'
def moneyNumFormat(money):
return round(float(money) * 100)
def readAppVersion():
result = {}
file_object = open('appVersion.txt')
try:
code = file_object.readline().strip('\n')
name = file_object.readline().strip('\n')
path = file_object.readline().strip('\n')
result['code'] = code
result['name'] = name
result['path'] = path
finally:
file_object.close()
return result
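# Illustrative appVersion.txt layout assumed by readAppVersion (three lines:
# version code, version name, download path; the values are hypothetical):
#
#   102
#   1.0.2
#   /static/app/mvpn-1.0.2.apk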
|
LiqunHu/MVPN
|
workserver/util/SysUtil.py
|
Python
|
gpl-3.0
| 9,796
|
[
"VisIt"
] |
979a3f86d3db5c3b37e8581b2692ff8b47f30511095b956ef3216e19f1d2538c
|
# $HeadURL$
"""
Extremely simple utility class to send mails
"""
__RCSID__ = "$Id$"
import socket
from smtplib import SMTP
from email.mime.text import MIMEText
from getpass import getuser
from DIRAC import gLogger, S_OK, S_ERROR
class Mail:
def __init__( self ):
self._subject = ''
self._message = ''
self._mailAddress = ''
self._fromAddress = getuser() + '@' + socket.getfqdn()
self.esmtp_features = {}
def _send( self ):
if not self._mailAddress:
gLogger.warn( "No mail address was provided. Mail not sent." )
return S_ERROR( "No mail address was provided. Mail not sent." )
if not self._message:
gLogger.warn( "Message body is empty" )
if not self._subject:
gLogger.warn( "Subject and body empty. Mail not sent" )
return S_ERROR ( "Subject and body empty. Mail not sent" )
mail = MIMEText( self._message , "plain" )
addresses = self._mailAddress
    if not isinstance( self._mailAddress, list ):
      addresses = [ self._mailAddress ]
mail[ "Subject" ] = self._subject
mail[ "From" ] = self._fromAddress
mail[ "To" ] = ', '.join( addresses )
smtp = SMTP()
smtp.set_debuglevel( 0 )
try:
smtp.connect()
smtp.sendmail( self._fromAddress, addresses, mail.as_string() )
    except Exception as x:
return S_ERROR( "Sending mail failed %s" % str( x ) )
smtp.quit()
    return S_OK( "The mail was successfully sent" )
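# Illustrative usage (not part of the original module; the address is a
# hypothetical placeholder):
#
#   m = Mail()
#   m._subject = 'Job finished'
#   m._message = 'All done.'
#   m._mailAddress = 'ops@example.org'
#   result = m._send()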
|
Sbalbp/DIRAC
|
Core/Utilities/Mail.py
|
Python
|
gpl-3.0
| 1,450
|
[
"DIRAC"
] |
40bcbf3b1771993d672d09df93c5d84bd125589d6906fc8ec7ec6b43d889946f
|
#!/usr/bin/env python
import sys
import vtk
def main(argv):
    if len(argv) < 3:
        print "usage:",argv[0]," data.nrrd data.cmap"
exit(1)
data_fn = argv[1]
cmap_fn = argv[2]
reader = vtk.vtkPNrrdReader()
reader.SetFileName(data_fn)
reader.Update()
data = reader.GetOutput()
# opacity function
opacityFunction = vtk.vtkPiecewiseFunction()
# color function
colorFunction = vtk.vtkColorTransferFunction()
cmap = open(cmap_fn, 'r')
for line in cmap.readlines():
parts = line.split()
value = float(parts[0])
r = float(parts[1])
g = float(parts[2])
b = float(parts[3])
a = float(parts[4])
opacityFunction.AddPoint(value, a)
colorFunction.AddRGBPoint(value, r, g, b)
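    # Illustrative .cmap layout assumed by the loop above (one
    # "value r g b a" row per transfer-function point; numbers hypothetical):
    #
    #   0.0   0.0 0.0 0.0   0.0
    #   0.5   1.0 0.5 0.0   0.4
    #   1.0   1.0 1.0 1.0   1.0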
# volume setup:
volumeProperty = vtk.vtkVolumeProperty()
volumeProperty.SetColor(colorFunction)
volumeProperty.SetScalarOpacity(opacityFunction)
    # ray-cast function: maximum intensity projection (MIP)
    compositeFunction = vtk.vtkVolumeRayCastMIPFunction()
volumeMapper = vtk.vtkVolumeRayCastMapper()
volumeMapper.SetVolumeRayCastFunction(compositeFunction)
volumeMapper.SetInput(data)
# make the volume
volume = vtk.vtkVolume()
volume.SetMapper(volumeMapper)
volume.SetProperty(volumeProperty)
# renderer
renderer = vtk.vtkRenderer()
renderWin = vtk.vtkRenderWindow()
renderWin.AddRenderer(renderer)
renderInteractor = vtk.vtkRenderWindowInteractor()
renderInteractor.SetRenderWindow(renderWin)
renderer.AddVolume(volume)
renderer.SetBackground(0,0,0)
renderWin.SetSize(400, 400)
renderInteractor.Initialize()
renderWin.Render()
renderInteractor.Start()
if __name__ == '__main__':
main(sys.argv)
|
daniel-perry/rt
|
data/volume_render_mip.py
|
Python
|
mit
| 1,654
|
[
"VTK"
] |
4d2c040acf7e15e9d765a23fbcebf8a55e34f376f2a6d7feb3fe2db1935b0546
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import dataclasses
import json
from typing import Sequence
from google.api import field_behavior_pb2
from google.api import http_pb2
from google.api import routing_pb2
from google.cloud import extended_operations_pb2 as ex_ops_pb2
from google.protobuf import descriptor_pb2
from gapic.schema import metadata
from gapic.schema import wrappers
from test_utils.test_utils import (
make_enum,
make_field,
make_message,
make_method,
)
def test_method_types():
input_msg = make_message(name='Input', module='baz')
output_msg = make_message(name='Output', module='baz')
method = make_method('DoSomething', input_msg, output_msg,
package='foo.bar', module='bacon')
assert method.name == 'DoSomething'
assert method.input.name == 'Input'
assert method.output.name == 'Output'
def test_method_void():
empty = make_message(name='Empty', package='google.protobuf')
method = make_method('Meh', output_message=empty)
assert method.void
def test_method_not_void():
not_empty = make_message(name='OutputMessage', package='foo.bar.v1')
method = make_method('Meh', output_message=not_empty)
assert not method.void
def test_method_deprecated():
method = make_method('DeprecatedMethod', is_deprecated=True)
assert method.is_deprecated
def test_method_client_output():
output = make_message(name='Input', module='baz')
method = make_method('DoStuff', output_message=output)
assert method.client_output is method.output
def test_method_client_output_empty():
empty = make_message(name='Empty', package='google.protobuf')
method = make_method('Meh', output_message=empty)
assert method.client_output == wrappers.PrimitiveType.build(None)
def test_method_client_output_paged():
paged = make_field(name='foos', message=make_message('Foo'), repeated=True)
parent = make_field(name='parent', type=9) # str
page_size = make_field(name='page_size', type=5) # int
page_token = make_field(name='page_token', type=9) # str
input_msg = make_message(name='ListFoosRequest', fields=(
parent,
page_size,
page_token,
))
output_msg = make_message(name='ListFoosResponse', fields=(
paged,
make_field(name='next_page_token', type=9), # str
))
method = make_method(
'ListFoos',
input_message=input_msg,
output_message=output_msg,
)
assert method.paged_result_field == paged
assert method.client_output.ident.name == 'ListFoosPager'
max_results = make_field(name='max_results', type=5) # int
input_msg = make_message(name='ListFoosRequest', fields=(
parent,
max_results,
page_token,
))
method = make_method(
'ListFoos',
input_message=input_msg,
output_message=output_msg,
)
assert method.paged_result_field == paged
assert method.client_output.ident.name == 'ListFoosPager'
def test_method_client_output_async_empty():
empty = make_message(name='Empty', package='google.protobuf')
method = make_method('Meh', output_message=empty)
assert method.client_output_async == wrappers.PrimitiveType.build(None)
def test_method_paged_result_field_not_first():
paged = make_field(name='foos', message=make_message('Foo'), repeated=True)
input_msg = make_message(name='ListFoosRequest', fields=(
make_field(name='parent', type=9), # str
make_field(name='page_size', type=5), # int
make_field(name='page_token', type=9), # str
))
output_msg = make_message(name='ListFoosResponse', fields=(
make_field(name='next_page_token', type=9), # str
paged,
))
method = make_method('ListFoos',
input_message=input_msg,
output_message=output_msg,
)
assert method.paged_result_field == paged
def test_method_paged_result_field_no_page_field():
input_msg = make_message(name='ListFoosRequest', fields=(
make_field(name='parent', type=9), # str
make_field(name='page_size', type=5), # int
make_field(name='page_token', type=9), # str
))
output_msg = make_message(name='ListFoosResponse', fields=(
make_field(name='foos', message=make_message('Foo'), repeated=False),
make_field(name='next_page_token', type=9), # str
))
method = make_method('ListFoos',
input_message=input_msg,
output_message=output_msg,
)
assert method.paged_result_field is None
method = make_method(
name='Foo',
input_message=make_message(
name='FooRequest',
fields=(make_field(name='page_token', type=9),) # str
),
output_message=make_message(
name='FooResponse',
fields=(make_field(name='next_page_token', type=9),) # str
)
)
assert method.paged_result_field is None
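# Note (inferred from the tests above, not from the generator source): a method
# is treated as paged only when its request carries page_token plus a page size
# field (page_size or max_results) and its response pairs a repeated field with
# next_page_token; the repeated field's position in the response does not matter.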
def test_method_paged_result_ref_types():
input_msg = make_message(
name='ListSquidsRequest',
fields=(
make_field(name='parent', type=9), # str
make_field(name='page_size', type=5), # int
make_field(name='page_token', type=9), # str
),
module='squid',
)
mollusc_msg = make_message('Mollusc', module='mollusc')
output_msg = make_message(
name='ListMolluscsResponse',
fields=(
make_field(name='molluscs', message=mollusc_msg, repeated=True),
make_field(name='next_page_token', type=9) # str
),
module='mollusc'
)
method = make_method(
'ListSquids',
input_message=input_msg,
output_message=output_msg,
module='squid'
)
ref_type_names = {t.name for t in method.ref_types}
assert ref_type_names == {
'ListSquidsRequest',
'ListSquidsPager',
'ListSquidsAsyncPager',
'Mollusc',
}
def test_flattened_ref_types():
method = make_method(
'IdentifyMollusc',
input_message=make_message(
'IdentifyMolluscRequest',
fields=(
make_field(
'cephalopod',
message=make_message(
'Cephalopod',
fields=(
make_field('mass_kg', type='TYPE_INT32'),
make_field(
'squid',
number=2,
message=make_message('Squid'),
),
make_field(
'clam',
number=3,
message=make_message('Clam'),
),
),
),
),
make_field(
'stratum',
enum=make_enum(
'Stratum',
)
),
),
),
signatures=('cephalopod.squid,stratum',),
output_message=make_message('Mollusc'),
)
expected_flat_ref_type_names = {
'IdentifyMolluscRequest',
'Squid',
'Stratum',
'Mollusc',
}
actual_flat_ref_type_names = {t.name for t in method.flat_ref_types}
assert expected_flat_ref_type_names == actual_flat_ref_type_names
def test_method_paged_result_primitive():
paged = make_field(name='squids', type=9, repeated=True) # str
input_msg = make_message(
name='ListSquidsRequest',
fields=(
make_field(name='parent', type=9), # str
make_field(name='page_size', type=5), # int
make_field(name='page_token', type=9), # str
),
)
output_msg = make_message(name='ListFoosResponse', fields=(
paged,
make_field(name='next_page_token', type=9), # str
))
method = make_method(
'ListSquids',
input_message=input_msg,
output_message=output_msg,
)
assert method.paged_result_field == paged
assert method.client_output.ident.name == 'ListSquidsPager'
def test_method_field_headers_none():
method = make_method('DoSomething')
assert isinstance(method.field_headers, collections.abc.Sequence)
def test_method_field_headers_present():
verbs = [
'get',
'put',
'post',
'delete',
'patch',
]
for v in verbs:
rule = http_pb2.HttpRule(**{v: '/v1/{parent=projects/*}/topics'})
method = make_method('DoSomething', http_rule=rule)
assert method.field_headers == (wrappers.FieldHeader('parent'),)
assert method.field_headers[0].raw == 'parent'
assert method.field_headers[0].disambiguated == 'parent'
# test that reserved keyword in field header is disambiguated
rule = http_pb2.HttpRule(**{v: '/v1/{object=objects/*}/topics'})
method = make_method('DoSomething', http_rule=rule)
assert method.field_headers == (wrappers.FieldHeader('object'),)
assert method.field_headers[0].raw == 'object'
assert method.field_headers[0].disambiguated == 'object_'
def test_method_routing_rule():
routing_rule = routing_pb2.RoutingRule()
param = routing_rule.routing_parameters.add()
param.field = 'table_name'
param.path_template = 'projects/*/{table_location=instances/*}/tables/*'
method = make_method('DoSomething', routing_rule=routing_rule)
assert method.explicit_routing
assert method.routing_rule.routing_parameters == [wrappers.RoutingParameter(
x.field, x.path_template) for x in routing_rule.routing_parameters]
assert method.routing_rule.routing_parameters[0].sample_request is not None
def test_method_routing_rule_empty_routing_parameters():
routing_rule = routing_pb2.RoutingRule()
method = make_method('DoSomething', routing_rule=routing_rule)
assert method.routing_rule is None
def test_method_routing_rule_not_set():
method = make_method('DoSomething')
assert method.routing_rule is None
def test_method_http_opt():
http_rule = http_pb2.HttpRule(
post='/v1/{parent=projects/*}/topics',
body='*'
)
method = make_method('DoSomething', http_rule=http_rule)
assert method.http_opt == {
'verb': 'post',
'url': '/v1/{parent=projects/*}/topics',
'body': '*'
}
# TODO(yon-mg) to test: grpc transcoding,
# correct handling of path/query params
# correct handling of body & additional binding
def test_method_http_opt_no_body():
http_rule = http_pb2.HttpRule(post='/v1/{parent=projects/*}/topics')
method = make_method('DoSomething', http_rule=http_rule)
assert method.http_opt == {
'verb': 'post',
'url': '/v1/{parent=projects/*}/topics'
}
def test_method_http_opt_no_http_rule():
method = make_method('DoSomething')
    assert method.http_opt is None
def test_method_path_params():
# tests only the basic case of grpc transcoding
http_rule = http_pb2.HttpRule(post='/v1/{project}/topics')
method = make_method('DoSomething', http_rule=http_rule)
assert method.path_params == ['project']
http_rule2 = http_pb2.HttpRule(post='/v1beta1/{name=rooms/*/blurbs/*}')
method2 = make_method("DoSomething", http_rule=http_rule2)
assert method2.path_params == ["name"]
def test_method_path_params_no_http_rule():
method = make_method('DoSomething')
assert method.path_params == []
def test_body_fields():
http_rule = http_pb2.HttpRule(
post='/v1/{arms_shape=arms/*}/squids',
body='mantle'
)
mantle_stuff = make_field(name='mantle_stuff', type=9)
message = make_message('Mantle', fields=(mantle_stuff,))
mantle = make_field('mantle', type=11, type_name='Mantle', message=message)
arms_shape = make_field('arms_shape', type=9)
input_message = make_message('Squid', fields=(mantle, arms_shape))
method = make_method(
'PutSquid', input_message=input_message, http_rule=http_rule)
assert set(method.body_fields) == {'mantle'}
mock_value = method.body_fields['mantle'].mock_value
assert mock_value == "baz.Mantle(mantle_stuff='mantle_stuff_value')"
def test_body_fields_no_body():
http_rule = http_pb2.HttpRule(
post='/v1/{arms_shape=arms/*}/squids',
)
method = make_method(
'PutSquid', http_rule=http_rule)
assert not method.body_fields
def test_method_http_options():
verbs = [
'get',
'put',
'post',
'delete',
'patch'
]
for v in verbs:
http_rule = http_pb2.HttpRule(**{v: '/v1/{parent=projects/*}/topics'})
method = make_method('DoSomething', http_rule=http_rule)
assert [dataclasses.asdict(http) for http in method.http_options] == [{
'method': v,
'uri': '/v1/{parent=projects/*}/topics',
'body': None
}]
def test_method_http_options_empty_http_rule():
http_rule = http_pb2.HttpRule()
method = make_method('DoSomething', http_rule=http_rule)
assert method.http_options == []
http_rule = http_pb2.HttpRule(get='')
method = make_method('DoSomething', http_rule=http_rule)
assert method.http_options == []
def test_method_http_options_no_http_rule():
method = make_method('DoSomething')
assert method.path_params == []
def test_method_http_options_body_star():
http_rule = http_pb2.HttpRule(
post='/v1/{parent=projects/*}/topics',
body='*'
)
method = make_method('DoSomething', http_rule=http_rule)
assert [dataclasses.asdict(http) for http in method.http_options] == [{
'method': 'post',
'uri': '/v1/{parent=projects/*}/topics',
'body': '*'
}]
def test_method_http_options_body_field():
http_rule = http_pb2.HttpRule(
post='/v1/{parent=projects/*}/topics',
body='body_field'
)
method = make_method('DoSomething', http_rule=http_rule)
assert [dataclasses.asdict(http) for http in method.http_options] == [{
'method': 'post',
'uri': '/v1/{parent=projects/*}/topics',
'body': 'body_field'
}]
def test_method_http_options_additional_bindings():
http_rule = http_pb2.HttpRule(
post='/v1/{parent=projects/*}/topics',
body='*',
additional_bindings=[
http_pb2.HttpRule(
post='/v1/{parent=projects/*/regions/*}/topics',
body='*',
),
http_pb2.HttpRule(
post='/v1/projects/p1/topics',
body='body_field',
),
]
)
method = make_method('DoSomething', http_rule=http_rule)
assert [dataclasses.asdict(http) for http in method.http_options] == [
{
'method': 'post',
'uri': '/v1/{parent=projects/*}/topics',
'body': '*'
},
{
'method': 'post',
'uri': '/v1/{parent=projects/*/regions/*}/topics',
'body': '*'
},
{
'method': 'post',
'uri': '/v1/projects/p1/topics',
'body': 'body_field'
}]
def test_method_http_options_reserved_name_in_url():
http_rule = http_pb2.HttpRule(
post='/v1/license/{license=lic/*}',
body='*'
)
method = make_method('DoSomething', http_rule=http_rule)
assert [dataclasses.asdict(http) for http in method.http_options] == [{
'method': 'post',
'uri': '/v1/license/{license_=lic/*}',
'body': '*'
}]
def test_method_http_options_generate_sample():
http_rule = http_pb2.HttpRule(
get='/v1/{resource.id=projects/*/regions/*/id/**}/stuff',
)
method = make_method(
'DoSomething',
make_message(
name="Input",
fields=[
make_field(
name="resource",
number=1,
type=11,
message=make_message(
"Resource",
fields=[
make_field(name="id", type=9),
],
),
),
],
),
http_rule=http_rule,
)
sample = method.http_options[0].sample_request(method)
assert sample == {'resource': {
'id': 'projects/sample1/regions/sample2/id/sample3'}}
def test_method_http_options_generate_sample_implicit_template():
http_rule = http_pb2.HttpRule(
get='/v1/{resource.id}/stuff',
)
method = make_method(
'DoSomething',
make_message(
name="Input",
fields=[
make_field(
name="resource",
number=1,
message=make_message(
"Resource",
fields=[
make_field(name="id", type=9),
],
),
),
],
),
http_rule=http_rule,
)
sample = method.http_options[0].sample_request(method)
assert sample == {'resource': {
'id': 'sample1'}}
def test_method_query_params():
# tests only the basic case of grpc transcoding
http_rule = http_pb2.HttpRule(
post='/v1/{project}/topics',
body='address'
)
input_message = make_message(
'MethodInput',
fields=(
make_field('region'),
make_field('project'),
make_field('address')
)
)
method = make_method('DoSomething', http_rule=http_rule,
input_message=input_message)
assert method.query_params == {'region'}
def test_method_query_params_no_body():
# tests only the basic case of grpc transcoding
http_rule = http_pb2.HttpRule(post='/v1/{project}/topics')
input_message = make_message(
'MethodInput',
fields=(
make_field('region'),
make_field('project'),
)
)
method = make_method('DoSomething', http_rule=http_rule,
input_message=input_message)
assert method.query_params == {'region'}
def test_method_query_params_star_body():
# tests only the basic case of grpc transcoding
http_rule = http_pb2.HttpRule(
post='/v1/{project}/topics',
body='*'
)
input_message = make_message(
'MethodInput',
fields=(
make_field('region'),
make_field('project'),
make_field('address')
)
)
method = make_method('DoSomething', http_rule=http_rule,
input_message=input_message)
assert method.query_params == set()
def test_method_query_params_no_http_rule():
method = make_method('DoSomething')
assert method.query_params == set()
def test_method_idempotent_yes():
http_rule = http_pb2.HttpRule(get='/v1/{parent=projects/*}/topics')
method = make_method('DoSomething', http_rule=http_rule)
assert method.idempotent is True
def test_method_idempotent_no():
http_rule = http_pb2.HttpRule(post='/v1/{parent=projects/*}/topics')
method = make_method('DoSomething', http_rule=http_rule)
assert method.idempotent is False
def test_method_idempotent_no_http_rule():
method = make_method('DoSomething')
assert method.idempotent is False
def test_method_unary_unary():
method = make_method('F', client_streaming=False, server_streaming=False)
assert method.grpc_stub_type == 'unary_unary'
def test_method_unary_stream():
method = make_method('F', client_streaming=False, server_streaming=True)
assert method.grpc_stub_type == 'unary_stream'
def test_method_stream_unary():
method = make_method('F', client_streaming=True, server_streaming=False)
assert method.grpc_stub_type == 'stream_unary'
def test_method_stream_stream():
method = make_method('F', client_streaming=True, server_streaming=True)
assert method.grpc_stub_type == 'stream_stream'
def test_method_flattened_fields():
a = make_field('a', type=5) # int
b = make_field('b', type=5)
input_msg = make_message('Z', fields=(a, b))
method = make_method('F', input_message=input_msg, signatures=('a,b',))
assert len(method.flattened_fields) == 2
assert 'a' in method.flattened_fields
assert 'b' in method.flattened_fields
def test_method_flattened_fields_empty_sig():
a = make_field('a', type=5) # int
b = make_field('b', type=5)
input_msg = make_message('Z', fields=(a, b))
method = make_method('F', input_message=input_msg, signatures=('',))
assert len(method.flattened_fields) == 0
def test_method_flattened_fields_different_package_non_primitive():
# This test verifies that method flattening handles a special case where:
# * the method's request message type lives in a different package and
# * a field in the method_signature is a non-primitive.
#
# If the message is defined in a different package it is not guaranteed to
# be a proto-plus wrapped type, which puts restrictions on assigning
# directly to its fields, which complicates request construction.
# The easiest solution in this case is to just prohibit these fields
# in the method flattening.
message = make_message('Mantle',
package="mollusc.cephalopod.v1", module="squid")
mantle = make_field('mantle', type=11, type_name='Mantle',
message=message, meta=message.meta)
arms_count = make_field('arms_count', type=5, meta=message.meta)
input_message = make_message(
'Squid', fields=(mantle, arms_count),
package=".".join(message.meta.address.package),
module=message.meta.address.module
)
method = make_method('PutSquid', input_message=input_message,
package="remote.package.v1", module="module", signatures=("mantle,arms_count",))
assert set(method.flattened_fields) == {'arms_count'}
def test_method_include_flattened_message_fields():
a = make_field('a', type=5)
b = make_field('b', type=11, type_name='Eggs',
message=make_message('Eggs'))
input_msg = make_message('Z', fields=(a, b))
method = make_method('F', input_message=input_msg, signatures=('a,b',))
assert len(method.flattened_fields) == 2
def test_method_legacy_flattened_fields():
required_options = descriptor_pb2.FieldOptions()
required_options.Extensions[field_behavior_pb2.field_behavior].append(
field_behavior_pb2.FieldBehavior.Value("REQUIRED"))
# Cephalopods are required.
squid = make_field(name="squid", options=required_options)
octopus = make_field(
name="octopus",
message=make_message(
name="Octopus",
fields=[make_field(name="mass", options=required_options)]
),
options=required_options)
# Bivalves are optional.
clam = make_field(name="clam")
oyster = make_field(
name="oyster",
message=make_message(
name="Oyster",
fields=[make_field(name="has_pearl")]
)
)
# Interleave required and optional fields to make sure
# that, in the legacy flattening, required fields are always first.
request = make_message("request", fields=[squid, clam, octopus, oyster])
method = make_method(
name="CreateMolluscs",
input_message=request,
# Signatures should be ignored.
signatures=[
"squid,octopus.mass",
"squid,octopus,oyster.has_pearl"
]
)
# Use an ordered dict because ordering is important:
# required fields should come first.
expected = collections.OrderedDict([
("squid", squid),
("octopus", octopus),
("clam", clam),
("oyster", oyster)
])
assert method.legacy_flattened_fields == expected
def test_flattened_oneof_fields():
mass_kg = make_field(name="mass_kg", oneof="mass", type=5)
mass_lbs = make_field(name="mass_lbs", oneof="mass", type=5)
length_m = make_field(name="length_m", oneof="length", type=5)
length_f = make_field(name="length_f", oneof="length", type=5)
color = make_field(name="color", type=5)
mantle = make_field(
name="mantle",
message=make_message(
name="Mantle",
fields=(
make_field(name="color", type=5),
mass_kg,
mass_lbs,
),
),
)
request = make_message(
name="CreateMolluscReuqest",
fields=(
length_m,
length_f,
color,
mantle,
),
)
method = make_method(
name="CreateMollusc",
input_message=request,
signatures=[
"length_m,",
"length_f,",
"mantle.mass_kg,",
"mantle.mass_lbs,",
"color",
]
)
expected = {"mass": [mass_kg, mass_lbs], "length": [length_m, length_f]}
actual = method.flattened_oneof_fields()
assert expected == actual
# Check this method too because the setup is a lot of work.
expected = {
"color": "color",
"length_m": "length_m",
"length_f": "length_f",
"mass_kg": "mantle.mass_kg",
"mass_lbs": "mantle.mass_lbs",
}
actual = method.flattened_field_to_key
assert expected == actual
def test_is_operation_polling_method():
T = descriptor_pb2.FieldDescriptorProto.Type
operation = make_message(
name="Operation",
fields=(
make_field(name=name, type=T.Value("TYPE_STRING"), number=i)
for i, name in enumerate(("name", "status", "error_code", "error_message"), start=1)
),
)
for f in operation.field:
options = descriptor_pb2.FieldOptions()
# Note: The field numbers were carefully chosen to be the corresponding enum values.
options.Extensions[ex_ops_pb2.operation_field] = f.number
f.options.MergeFrom(options)
request = make_message(
name="GetOperation",
fields=[
make_field(name="name", type=T.Value("TYPE_STRING"), number=1)
],
)
# Correct positive
options = descriptor_pb2.MethodOptions()
options.Extensions[ex_ops_pb2.operation_polling_method] = True
polling_method = make_method(
name="Get",
input_message=request,
output_message=operation,
options=options,
)
assert polling_method.is_operation_polling_method
# Normal method that returns operation
normal_method = make_method(
name="Get",
input_message=request,
output_message=operation,
)
assert not normal_method.is_operation_polling_method
# Method with invalid options combination
response = make_message(name="Response", fields=[make_field(name="name")])
invalid_method = make_method(
name="Get",
input_message=request,
output_message=response,
options=options, # Reuse options from the actual polling method
)
assert not invalid_method.is_operation_polling_method
def test_transport_safe_name():
unsafe_methods = {
name: make_method(name=name)
for name in ["CreateChannel", "GrpcChannel", "OperationsClient"]
}
safe_methods = {
name: make_method(name=name)
for name in ["Call", "Put", "Hold", "Raise"]
}
for name, method in safe_methods.items():
assert method.transport_safe_name == name
for name, method in unsafe_methods.items():
assert method.transport_safe_name == f"{name}_"
|
googleapis/gapic-generator-python
|
tests/unit/schema/wrappers/test_method.py
|
Python
|
apache-2.0
| 28,578
|
[
"Octopus"
] |
326359fabc58b56a290505076037f259ebc7a2206eea3d05333cbb105993e07b
|
'''
This module is pretty experimental and uses pythonnet to load a C# DLL.
The OpenHardwareMonitorLib is a pretty sophisticated library to get system metrics.
For more information about the project/code visit the GitHub repository:
https://github.com/openhardwaremonitor/openhardwaremonitor
It maps the Hardware and Sensors concept from the OHMLib onto the components/metrics system.
'''
import atexit
import logging
import os
import sys
from gathering.measuring.MeasuringSource import MeasuringSource
from misc.constants import Operating_System
from misc.standalone_helper import import_if_exists, get_path_to_app
log = logging.getLogger("opserv.gathering.ohm")
# These type definitions come directly from the OHM source
SENSORTYPES = [
"Voltage", # V
"Clock", # MHz
"Temperature", # °C
"Load", # %
"Fan", # RPM
"Flow", # L/h
"Control", # %
"Level", # %
"Factor", # 1
"Power", # W
"Data", # GB = 2^30 Bytes
]
# Map sensor types to dict keys
# These two dicts are necessary to bind the opserv component system to the OHM hardware/sensors
TYPE_MAP = {
"Load": ("usage", "usage_sensor"),
"Temperature": ("temperature", "temperature_sensor"),
"Clock": ("frequency", "frequency_sensor")
}
TYPE_MAP_REVERSE = {
"usage": ("Load", "usage_sensor"),
"frequency": ("Clock", "frequency_sensor"),
"temperature": ("Temperature", "temperature_sensor")
}
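# A quick illustration of the two maps above (illustrative lookups only):
# TYPE_MAP["Load"] yields ("usage", "usage_sensor"): the opserv metric name
# and the dict key under which the (hardware, sensor) pair gets stored.
# TYPE_MAP_REVERSE["usage"] yields ("Load", "usage_sensor"), resolving an
# opserv metric back to the OHM sensor type.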
HARDWARETYPES = [
"Mainboard",
"SuperIO",
"CPU",
"RAM",
"GpuNvidia",
"GpuAti",
"TBalancer",
"Heatmaster",
"HDD"
]
# Append DLL path to the sys path array
sys.path.append(os.path.join(get_path_to_app(), "extern_dependency"))
class OHMSource(MeasuringSource):
'''
Measuring source backed by the OpenHardwareMonitorLib (Windows only)
'''
_supported_os = [Operating_System.windows]
_supported_comps = {
}
cpu_list = []
core_list = []
gpu_list = []
memory_data = None
disk_list = []
def __init__(self):
self.clr = None
self.hardware = []
self._init_complete = False
atexit.register(self.deinit)
self.init()
def init(self):
'''
Initializes the measuring source (opening hardware connections etc.)
On success this sets self._init_complete to True; if errors occurred,
it logs them and returns without completing initialization.
'''
self.clr = import_if_exists("clr")
if not self.clr:
return
try:
self.clr.AddReference("OpenHardwareMonitorLib")
except Exception as err:
log.error(err)
log.error("Error during addReference to the OHM Lib")
return
try:
# Ignore PyLint error since this module is being loaded at runtime
from OpenHardwareMonitor import Hardware # pylint: disable=E0401
except Exception as err:
log.error(err)
log.error("Error during importing of hardware class from OHM Lib")
return
# Open PC Connection
self.hardware_class = Hardware
self.computer = Hardware.Computer()
self.computer.MotherboardEnabled = True
self.computer.RAMEnabled = True
self.computer.GPUEnabled = True
self.computer.CPUEnabled = True
self.computer.HDDEnabled = True
# Set Handlers
self.computer.HardwareAdded += self.ohm_hardware_added_handler
self.computer.HardwareRemoved += self.ohm_hardware_removed_handler
self.computer.Open()
self._init_complete = True
def deinit(self):
'''
De-initializes the measuring source, removing connections etc.
Closes the OHM Computer connection if initialization had completed.
'''
if self._init_complete:
self.computer.Close()
self._init_complete = False
def get_measurement(self, component, metric, args):
'''
Retrieves a measurement from the measuring source
given the component, metric and optionally arguments
'''
if not self._init_complete:
raise ValueError("MeasuringSource is not initialized")
result = None
if component == "cpu":
result = self.get_cpu_measurement(metric, args)
if component == "cpucore":
result = self.get_core_measurement(metric, args)
elif component == "gpu":
result = self.get_gpu_measurement(metric, args)
elif component == "memory":
result = self.get_memory_measurement(metric)
elif component == "disk":
result = self.get_disk_measurement(metric, args)
elif component == "system":
result = self.get_system_measurement(metric)
return result
def ohm_hardware_added_handler(self, hardware):
"""
Hardware Added Handler
This is called with the given hardware when a new hardware on the computer is found
"""
log.info("Adding Hardware")
log.info(type(hardware))
log.info(hardware.name)
log.info(HARDWARETYPES[hardware.get_HardwareType()])
log.info(hardware.active)
log.info(hardware.settings)
current_hw = HARDWARETYPES[hardware.get_HardwareType()]
if current_hw == "CPU":
log.info("Found a CPU %s", hardware.name)
self.add_cpu(hardware)
elif current_hw == "GpuNvidia" or current_hw == "GpuAti":
log.info("Found a GPU %s", hardware.name)
self.add_gpu(hardware)
elif current_hw == "RAM":
log.info("Found a RAM %s", hardware.name)
self.add_memory(hardware)
elif current_hw == "HDD":
log.info("Found an HDD %s", hardware.name)
self.add_disk(hardware)
for sub_hw in hardware.SubHardware:
self.ohm_hardware_added_handler(sub_hw)
def ohm_hardware_removed_handler(self, hardware):
"""
Hardware Removal handler for the OHM library
This is called when a given hardware is disconnected or the
Computer object is closed
"""
log.info("Removing Hardware")
log.info(type(hardware))
log.info(hardware.name)
log.info(HARDWARETYPES[hardware.get_HardwareType()])
log.info(hardware.active)
log.info(hardware.settings)
def add_cpu(self, hardware):
'''
Adds the given CPU (as OHM Hardware Object) to the cpu list
Also adds all its cores and updates the supported metrics
'''
# sensor.Index does not represent the core number
# It has to be parsed from the sensor name
# First check whether the processor is already added
for cpu in self.cpu_list:
if cpu["id"] == hardware.processorIndex:
return
new_cpu = {}
# Get CPU Index
log.info("Processor Index: %d", hardware.processorIndex)
new_cpu["id"] = int(hardware.processorIndex)
# Get CPU Cores
log.info("Core Count: %d", hardware.coreCount)
new_cpu["cpucores"] = int(hardware.coreCount)
# Get CPU Name
new_cpu["info"] = hardware.name
new_cores = []
for i in range(new_cpu["cpucores"]):
new_cores.append({
# Assumes all CPUs have the same corecount
"id": i + (new_cpu["id"] * new_cpu["cpucores"]),
"info": "CPU #{0} Core #{1}".format(new_cpu["id"], i)
})
# Update the supported_comps dict
self.add_supported_metric("cpu", "info")
self.add_supported_metric("cpucore", "info")
self.add_supported_metric("system", "cpus")
self.add_supported_metric("system", "cpucores")
for sensor in hardware.Sensors:
sens_type = SENSORTYPES[sensor.SensorType]
# Get Core Number, value is -1 if its for the cpu package
sensor_id = parse_cpu_sensor_name(sensor.Name)
log.info("Got new sensor %s ID: %d, Type: %s", sensor.Name, sensor_id, sens_type)
# Ignore Bus Speed
if sensor.Name.find("Bus Speed") != -1:
continue
if sens_type in TYPE_MAP:
if sensor_id == -1:
self.add_supported_metric("cpu", TYPE_MAP[sens_type][0])
new_cpu[TYPE_MAP[sens_type][1]] = (hardware, sensor)
else:
self.add_supported_metric("cpucore", TYPE_MAP[sens_type][0])
new_cores[sensor_id][TYPE_MAP[sens_type][1]] = (hardware, sensor)
# Add the newly found CPU and its cores into the lists
self.cpu_list.append(new_cpu)
self.core_list.extend(new_cores)
def add_gpu(self, hardware):
"""
Sub-Handler to process GPUs in the system
"""
pass
def add_memory(self, hardware):
"""
Sub-Handler to process memory in the system
"""
log.info("Got a new RAM hardware")
self.add_supported_metric("memory", "free")
self.add_supported_metric("memory", "used")
self.add_supported_metric("memory", "total")
self.memory_data = hardware
def add_disk(self, hardware):
"""
Sub-Handler to process newly found disks in the system
"""
# Currently Deactivated since there is no easy way to uniquely identify disks
return
self.add_supported_metric("disk", "info")
self.add_supported_metric("system", "disks")
log.info(hardware)
log.info(hardware.name)
log.info(hardware.GetReport())
for smart in hardware.SmartAttributes:
log.info(smart.Name)
for sensor in hardware.Sensors:
if str(sensor.Name) == "Used Space":
self.add_supported_metric("disk", "usage")
log.info(sensor)
log.info(sensor.Name)
log.info(sensor.SensorType)
def get_cpu_measurement(self, metric, args):
"""
Updates the hardware class for the specified cpu (not cores, whole cpu socket)
and returns the value for the specified metric
"""
for cpu in self.cpu_list:
log.info(cpu)
if int(cpu["id"]) == int(args):
if metric in TYPE_MAP_REVERSE:
sens_type = TYPE_MAP_REVERSE[metric][1]
cpu[sens_type][0].Update()
log.info("Taking measurement, Type: %s, Value: %f",
metric, cpu[sens_type][1].Value)
return cpu[sens_type][1].Value
elif metric == "info":
return cpu["info"]
raise ValueError("CPU given in args not found! {}".format(args))
def get_core_measurement(self, metric, args):
"""
Updates the hardware of the given cpu core to get a measurement
for the specified metric
"""
args = int(args)
for core in self.core_list:
if core["id"] == args:
if metric in TYPE_MAP_REVERSE:
sens_type = TYPE_MAP_REVERSE[metric][1]
core[sens_type][0].Update()
log.info("Taking measurement, Type: %s, Value: %s",
metric, str(core[sens_type][1].Value))
return core[sens_type][1].Value
raise ValueError("Core given in args not found! {}".format(args))
def get_gpu_measurement(self, metric, args):
"""
Updates the hardware object for the specified GPU and returns the value for metric
"""
pass
def get_disk_measurement(self, metric, args):
"""
Gets a measurement from the specified disk (not as in partition, but physical disks)
And returns the value of the specified metric
"""
pass
def get_memory_measurement(self, metric):
"""
Updates the memory hardware object and returns the value of the specified metric
"""
self.memory_data.Update()
byte_multiplier = 1024 * 1024 * 1024
if metric == "free":
return self.memory_data.availableMemory.Value * byte_multiplier
elif metric == "used":
return self.memory_data.usedMemory.Value * byte_multiplier
elif metric == "total":
return (self.memory_data.usedMemory.Value
+ self.memory_data.availableMemory.Value) * byte_multiplier
def get_system_measurement(self, metric):
"""
Gets the measurement from specified metric for the system component
"""
if metric == "cpus":
return list(range(len(self.cpu_list)))
elif metric == "cpucores":
return list(range(len(self.core_list)))
def parse_cpu_sensor_name(name):
"""
Tries to resolve the Name attribute from the OHM CPU class into a
core number or into -1 (which is the whole cpu package)
"""
num_pos = name.find("#") + 1
try:
core_number = int(name[num_pos:])
return core_number - 1 # Minus one to make it zero based
except ValueError:
# Core number cannot be parsed (so it is either total load or not in the name)
return -1
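# Illustrative examples of the parsing above (hypothetical sensor names):
# parse_cpu_sensor_name("CPU Core #3") -> 2 (zero-based core index)
# parse_cpu_sensor_name("CPU Total") -> -1 (whole cpu package)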
|
OpServ-Monitoring/opserv-backend
|
app/gathering/measuring/ohm_source.py
|
Python
|
gpl-3.0
| 13,382
|
[
"VisIt"
] |
06dad6220b86730df73ff979f82d4386a84905370e491e6f6a6b49e8ba862308
|
# coding: utf-8
# # Convert all daily JSON log files for a deployment to a single NetCDF file
# using CF-1.6, Discrete Sampling Geometry (DSG) conventions, **`featureType=timeSeries`**
# In[1]:
get_ipython().magic('matplotlib inline')
import json
import pandas as pd
import numpy as np
import glob
from pyaxiom.netcdf.sensors import TimeSeries
# In[2]:
path = '/sand/usgs/users/rsignell/data/ooi/endurance/cg_proc/ce02shsm/D00004/buoy/pwrsys/*.pwrsys.json'
odir = '/usgs/data2/notebook/data/nc'
ofile = 'ce02shsm_pwrsys_D00004.nc'
# In[3]:
def json2df(infile):
with open(infile) as jf:
df = pd.DataFrame(json.load(jf))
return df
# In[4]:
# single dataframe from all JSONs
df = pd.concat([json2df(file) for file in glob.glob(path)])
# In[5]:
df['time'] = pd.to_datetime(df.time, unit='s')
df.index = df['time']
df['depth'] = 0.0
# In[6]:
df['solar_panel4_voltage'].plot();
# ### Define the NetCDF global attributes
# In[7]:
global_attributes = {
'institution':'Oregon State University',
'title':'OOI CE02SHSM Pwrsys Data',
'summary':'OOI Pwrsys data from Coastal Endurance Oregon Shelf Surface Mooring',
'creator_name':'Chris Wingard',
'creator_email':'cwingard@coas.oregonstate.edu',
'creator_url':'http://ceoas.oregonstate.edu/ooi'
}
# ### Create initial file
# In[8]:
ts = TimeSeries(
output_directory=odir,
latitude=44.64,
longitude=-124.31,
station_name='ce02shsm',
global_attributes=global_attributes,
times=df.time.values.astype(np.int64) // 10**9,
verticals=df.depth.values,
output_filename=ofile,
vertical_positive='down'
)
# ### Add data variables
# In[9]:
df.columns.tolist()
# In[10]:
for c in df.columns:
if c in ts._nc.variables:
print("Skipping '{}' (already in file)".format(c))
continue
if c in ['time', 'lat', 'lon', 'depth', 'cpm_date_time_string']:
print("Skipping axis '{}' (already in file)".format(c))
continue
print("Adding {}".format(c))
try:
ts.add_variable(c, df[c].values)
except Exception:
print("Skipping '{}' (non-numeric/object column)".format(c))
# In[13]:
df['error_flag1'].dtype.name
# In[ ]:
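# A minimal sanity check of the file written above (a sketch, assuming the
# netCDF4 package is installed and the paths defined earlier exist):
import os
import netCDF4
nc = netCDF4.Dataset(os.path.join(odir, ofile))
# CF DSG files carry a global featureType attribute, here 'timeSeries'
print(getattr(nc, 'featureType', 'featureType attribute missing'))
print(len(nc.variables))  # number of variables written
nc.close()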
|
rsignell-usgs/notebook
|
OOI/from_ooi_json-pwrsys-deployment.py
|
Python
|
mit
| 2,187
|
[
"NetCDF"
] |
4e76521eac4273b152fb646e4597f9dfd0b738cceeee2ec7eefa345f8e316618
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
# Configuration, please edit
# Data about this site
BLOG_AUTHOR = "wcmckee"
BLOG_TITLE = "artctrl-tech"
# This is the main URL for your site. It will be used
# in a prominent link
SITE_URL = "http://artctrl.me/tech"
# This is the URL where nikola's output will be deployed.
# If not set, defaults to SITE_URL
# BASE_URL = "http://getnikola.com/"
BLOG_EMAIL = "will@artcontrol.me"
BLOG_DESCRIPTION = "Tech Blog for artctrl"
# Nikola is multilingual!
#
# Currently supported languages are:
# bg Bulgarian
# ca Catalan
# de German
# el Greek [NOT gr!]
# en English
# eo Esperanto
# es Spanish
# fa Persian
# fi Finnish
# fr French
# hr Croatian
# it Italian
# jp Japanese
# nl Dutch
# pt_br Portuguese (Brasil)
# pl Polish
# ru Russian
# sl Slovenian [NOT sl_si!]
# tr_tr Turkish (Turkey)
# zh_cn Chinese (Simplified)
#
# If you want to use Nikola with a non-supported language you have to provide
# a module containing the necessary translations
# (p.e. look at the modules at: ./nikola/data/themes/default/messages/fr.py).
# If a specific post is not translated to a language, then the version
# in the default language will be shown instead.
# What is the default language?
DEFAULT_LANG = "en"
# What other languages do you have?
# The format is {"translationcode" : "path/to/translation" }
# the path will be used as a prefix for the generated pages location
TRANSLATIONS = {
DEFAULT_LANG: "",
# Example for another language:
# "es": "./es",
}
# Links for the sidebar / navigation bar.
# You should provide a key-value pair for each used language.
NAVIGATION_LINKS = {
DEFAULT_LANG: (
('/archive.html', 'Archives'),
('/categories/index.html', 'Tags'),
('/rss.xml', 'RSS'),
('http://artctrl.me', 'Art'),
('http://gamejolt.com/profile/brobeur/39197', 'Games'),
('https://www.flickr.com/photos/133257056@N06', 'Photography'),
('https://github.com/wcmckee', 'Code'),
('https://twitter.com/wcmckeedotcom', 'Twitter'),
),
}
# Below this point, everything is optional
# While nikola can select a sensible locale for each language,
# sometimes explicit control can come handy.
# In this file we express locales in the string form that
# python's locales will accept in your OS, by example
# "en_US.utf8" in unix-like OS, "English_United States" in Windows.
# LOCALES = dict mapping language --> explicit locale for the languages
# in TRANSLATIONS. You can omit one or more keys.
# LOCALE_FALLBACK = locale to use when an explicit locale is unavailable
# LOCALE_DEFAULT = locale to use for languages not mentioned in LOCALES; if
# not set the default Nikola mapping is used.
# POSTS and PAGES contains (wildcard, destination, template) tuples.
#
# The wildcard is used to generate a list of reSt source files
# (whatever/thing.txt).
#
# That fragment could have an associated metadata file (whatever/thing.meta),
# and optionally translated files (example for spanish, with code "es"):
# whatever/thing.txt.es and whatever/thing.meta.es
#
# From those files, a set of HTML fragment files will be generated:
# cache/whatever/thing.html (and maybe cache/whatever/thing.html.es)
#
# These files are combined with the template to produce rendered
# pages, which will be placed at
# output / TRANSLATIONS[lang] / destination / pagename.html
#
# where "pagename" is the "slug" specified in the metadata file.
#
# The difference between POSTS and PAGES is that POSTS are added
# to feeds and are considered part of a blog, while PAGES are
# just independent HTML pages.
#
POSTS = (
("posts/*.ipynb", "posts", "post.tmpl"),
("posts/*.ipynb", "posts", "post.tmpl"),
)
PAGES = (
("stories/*.rst", "stories", "story.tmpl"),
("stories/*.txt", "stories", "story.tmpl"),
)
# One or more folders containing files to be copied as-is into the output.
# The format is a dictionary of "source" "relative destination".
# Default is:
# FILES_FOLDERS = {'files': '' }
# Which means copy 'files' into 'output'
# A mapping of languages to file-extensions that represent that language.
# Feel free to add or delete extensions to any list, but don't add any new
# compilers unless you write the interface for it yourself.
#
# 'rest' is reStructuredText
# 'markdown' is MarkDown
# 'html' assumes the file is html and just copies it
COMPILERS = {
"rest": ('.rst', '.txt'),
"markdown": ('.md', '.mdown', '.markdown'),
"textile": ('.textile',),
"txt2tags": ('.t2t',),
"bbcode": ('.bb',),
"wiki": ('.wiki',),
"ipynb": ('.ipynb',),
"html": ('.html', '.htm'),
# Pandoc detects the input from the source filename
# but is disabled by default as it would conflict
# with many of the others.
# "pandoc": ('.rst', '.md', '.txt'),
}
# Create by default posts in one file format?
# Set to False for two-file posts, with separate metadata.
ONE_FILE_POSTS = False
# If this is set to True, then posts that are not translated to a language
# LANG will not be visible at all in the pages in that language.
# If set to False, the DEFAULT_LANG version will be displayed for
# untranslated posts.
# HIDE_UNTRANSLATED_POSTS = False
# Paths for different autogenerated bits. These are combined with the
# translation paths.
# Final locations are:
# output / TRANSLATION[lang] / TAG_PATH / index.html (list of tags)
# output / TRANSLATION[lang] / TAG_PATH / tag.html (list of posts for a tag)
# output / TRANSLATION[lang] / TAG_PATH / tag.xml (RSS feed for a tag)
# TAG_PATH = "categories"
# If TAG_PAGES_ARE_INDEXES is set to True, each tag's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# TAG_PAGES_ARE_INDEXES = True
# Final location is output / TRANSLATION[lang] / INDEX_PATH / index-*.html
# INDEX_PATH = ""
# Create per-month archives instead of per-year
# CREATE_MONTHLY_ARCHIVE = False
# Create one large archive instead of per-year
# CREATE_SINGLE_ARCHIVE = False
# Final locations for the archives are:
# output / TRANSLATION[lang] / ARCHIVE_PATH / ARCHIVE_FILENAME
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / index.html
# ARCHIVE_PATH = ""
# ARCHIVE_FILENAME = "archive.html"
# Final locations are:
# output / TRANSLATION[lang] / RSS_PATH / rss.xml
# RSS_PATH = ""
# Number of posts in RSS feeds
FEED_LENGTH = 10
# Slug the Tag URL. Easier for users to type; special characters are
# often removed or replaced as well.
# SLUG_TAG_PATH = True
# A list of redirection tuples, [("foo/from.html", "/bar/to.html")].
#
# A HTML file will be created in output/foo/from.html that redirects
# to the "/bar/to.html" URL. notice that the "from" side MUST be a
# relative URL.
#
# If you don't need any of these, just set to []
# REDIRECTIONS = []
# Commands to execute to deploy. Can be anything, for example,
# you may use rsync:
# "rsync -rav output/* joe@my.site:/srv/www/site"
# And then do a backup, or run `nikola ping` from the `ping`
# plugin (`nikola install_plugin ping`).
# To do manual deployment, set it to []
# DEPLOY_COMMANDS = []
# Where the output site should be located
# If you don't use an absolute path, it will be considered as relative
# to the location of conf.py
# OUTPUT_FOLDER = 'output'
# where the "cache" of partial generated content should be located
# default: 'cache'
# CACHE_FOLDER = 'cache'
# Filters to apply to the output.
# A directory where the keys are either: a file extensions, or
# a tuple of file extensions.
#
# And the value is a list of commands to be applied in order.
#
# Each command must be either:
#
# A string containing a '%s' which will
# be replaced with a filename. The command *must* produce output
# in place.
#
# Or:
#
# A python callable, which will be called with the filename as
# argument.
#
# By default, there are no filters.
#
# Many filters are shipped with Nikola. A list is available in the manual:
# <http://getnikola.com/handbook.html#post-processing-filters>
# FILTERS = {
# ".jpg": ["jpegoptim --strip-all -m75 -v %s"],
# }
# Expert setting! Create a gzipped copy of each generated file. Cheap server-
# side optimization for very high traffic sites or low memory servers.
# GZIP_FILES = False
# File extensions that will be compressed
# GZIP_EXTENSIONS = ('.txt', '.htm', '.html', '.css', '.js', '.json', '.xml')
# Use an external gzip command? None means no.
# Example: GZIP_COMMAND = "pigz -k {filename}"
# GZIP_COMMAND = None
# Make sure the server does not return an "Accept-Ranges: bytes" header for
# files compressed by this option! OR make sure that a ranged request does not
# return partial content of another representation for these resources. Do not
# use this feature if you do not understand what this means.
# #############################################################################
# Image Gallery Options
# #############################################################################
# Galleries are folders in galleries/
# Final location of galleries will be output / GALLERY_PATH / gallery_name
# GALLERY_PATH = "galleries"
# THUMBNAIL_SIZE = 180
# MAX_IMAGE_SIZE = 1280
# USE_FILENAME_AS_TITLE = True
# EXTRA_IMAGE_EXTENSIONS = []
#
# If set to False, it will sort by filename instead. Defaults to True
# GALLERY_SORT_BY_DATE = True
# #############################################################################
# HTML fragments and diverse things that are used by the templates
# #############################################################################
# Data about post-per-page indexes
# INDEXES_TITLE = "" # If this is empty, the default is BLOG_TITLE
# INDEXES_PAGES = "" # If this is empty, the default is 'old posts page %d'
# translated
# Name of the theme to use.
THEME = "custom"
# Color scheme to be used for code blocks. If your theme provides
# "assets/css/code.css" this is ignored.
# Can be any of autumn borland bw colorful default emacs friendly fruity manni
# monokai murphy native pastie perldoc rrt tango trac vim vs
# CODE_COLOR_SCHEME = 'default'
# If you use 'site-reveal' theme you can select several subthemes
# THEME_REVEAL_CONFIG_SUBTHEME = 'sky'
# You can also use: beige/serif/simple/night/default
# Again, if you use 'site-reveal' theme you can select several transitions
# between the slides
# THEME_REVEAL_CONFIG_TRANSITION = 'cube'
# You can also use: page/concave/linear/none/default
# date format used to display post dates.
# (str used by datetime.datetime.strftime)
# DATE_FORMAT = '%Y-%m-%d %H:%M'
# FAVICONS contains (name, file, size) tuples.
# Used for create favicon link like this:
# <link rel="name" href="file" sizes="size"/>
# For creating favicons, take a look at:
# http://www.netmagazine.com/features/create-perfect-favicon
# FAVICONS = {
# ("icon", "/favicon.ico", "16x16"),
# ("icon", "/icon_128x128.png", "128x128"),
# }
# Show only teasers in the index pages? Defaults to False.
# INDEX_TEASERS = False
# A HTML fragment with the Read more... link.
# The following tags exist and are replaced for you:
# {link} A link to the full post page.
# {read_more} The string “Read more” in the current language.
# {{ A literal { (U+007B LEFT CURLY BRACKET)
# }} A literal } (U+007D RIGHT CURLY BRACKET)
# READ_MORE_LINK = '<p class="more"><a href="{link}">{read_more}…</a></p>'
# A HTML fragment describing the license, for the sidebar.
# I recommend using the Creative Commons' wizard:
# http://creativecommons.org/choose/
LICENSE = """
<a rel="license" href="https://creativecommons.org/licenses/by/4.0/">
<img alt="Creative Commons License BY"
style="border-width:0; margin-bottom:12px;"
src="/https://upload.wikimedia.org/wikipedia/commons/1/16/CC-BY_icon.svg"></a>
<p>Except where otherwise noted, copyright content on this site is licensed under a Creative Commons Attribution 4.0 International Licence.</p>
"""
# A small copyright notice for the page footer (in HTML).
# Default is ''
CONTENT_FOOTER = 'Contents © {date} <a href="mailto:{email}">{author}</a>{license}'
CONTENT_FOOTER = CONTENT_FOOTER.format(email=BLOG_EMAIL,
author=BLOG_AUTHOR,
date=time.gmtime().tm_year,
license=LICENSE)
# To use comments, you can choose between different third party comment
# systems, one of "disqus", "livefyre", "intensedebate", "moot",
# "googleplus" or "facebook"
# COMMENT_SYSTEM = "disqus"
# And you also need to add your COMMENT_SYSTEM_ID which
# depends on what comment system you use. The default is
# "nikolademo" which is a test account for Disqus. More information
# is in the manual.
# COMMENT_SYSTEM_ID = "nikolademo"
# Enable annotations using annotateit.org?
# If set to False, you can still enable them for individual posts and pages
# setting the "annotations" metadata.
# If set to True, you can disable them for individual posts and pages using
# the "noannotations" metadata.
# ANNOTATIONS = False
# Create index.html for story folders?
# STORY_INDEX = False
# Enable comments on story pages?
# COMMENTS_IN_STORIES = False
# Enable comments on picture gallery pages?
# COMMENTS_IN_GALLERIES = False
# What file should be used for directory indexes?
# Defaults to index.html
# Common other alternatives: default.html for IIS, index.php
# INDEX_FILE = "index.html"
# If a link ends in /index.html, drop the index.html part.
# http://mysite/foo/bar/index.html => http://mysite/foo/bar/
# (Uses the INDEX_FILE setting, so if that is, say, default.html,
# it will instead /foo/default.html => /foo)
# (Note: This was briefly STRIP_INDEX_HTML in v 5.4.3 and 5.4.4)
# Default = False
# STRIP_INDEXES = False
# Should the sitemap list directories which only include other directories
# and no files.
# Default to True
# If this is False
# e.g. /2012 includes only /01, /02, /03, /04, ...: don't add it to the sitemap
# if /2012 includes any files (including index.html)... add it to the sitemap
# SITEMAP_INCLUDE_FILELESS_DIRS = True
# Instead of putting files in <slug>.html, put them in
# <slug>/index.html. Also enables STRIP_INDEXES
# This can be disabled on a per-page/post basis by adding
# .. pretty_url: False
# to the metadata
# PRETTY_URLS = False
# If True, publish future dated posts right away instead of scheduling them.
# Defaults to False.
# FUTURE_IS_NOW = False
# If True, future dated posts are allowed in deployed output
# Only the individual posts are published/deployed; not in indexes/sitemap
# Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value.
# DEPLOY_FUTURE = False
# If False, draft posts will not be deployed
# DEPLOY_DRAFTS = True
# Allows scheduling of posts using the rule specified here (new_post -s)
# Specify an iCal Recurrence Rule: http://www.kanzaki.com/docs/ical/rrule.html
# SCHEDULE_RULE = ''
# If True, use the scheduling rule to all posts by default
# SCHEDULE_ALL = False
# If True, schedules post to today if possible, even if scheduled hour is over
# SCHEDULE_FORCE_TODAY = False
# Do you want to add a MathJax config file?
# MATHJAX_CONFIG = ""
# If you are using the compile-ipynb plugin, just add this one:
#MATHJAX_CONFIG = """
#<script type="text/x-mathjax-config">
#MathJax.Hub.Config({
# tex2jax: {
# inlineMath: [ ['$','$'], ["\\\(","\\\)"] ],
# displayMath: [ ['$$','$$'], ["\\\[","\\\]"] ]
# },
# displayAlign: 'left', // Change this to 'center' to center equations.
# "HTML-CSS": {
# styles: {'.MathJax_Display': {"margin": 0}}
# }
#});
#</script>
#"""
# Do you want to customize the nbconversion of your IPython notebook?
# IPYNB_CONFIG = {}
# With the following example configuration you can use a custom jinja template
# called `toggle.tpl` which has to be located in your site/blog main folder:
# IPYNB_CONFIG = {'Exporter':{'template_file': 'toggle'}}
# What MarkDown extensions to enable?
# You will also get gist, nikola and podcast because those are
# done in the code, hope you don't mind ;-)
# MARKDOWN_EXTENSIONS = ['fenced_code', 'codehilite']
# Social buttons. This is sample code for AddThis (which was the default for a
# long time). Insert anything you want here, or even make it empty.
# SOCIAL_BUTTONS_CODE = """
# <!-- Social buttons -->
# <div id="addthisbox" class="addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style">
# <a class="addthis_button_more">Share</a>
# <ul><li><a class="addthis_button_facebook"></a>
# <li><a class="addthis_button_google_plusone_share"></a>
# <li><a class="addthis_button_linkedin"></a>
# <li><a class="addthis_button_twitter"></a>
# </ul>
# </div>
# <script type="text/javascript" src="//s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798"></script>
# <!-- End of social buttons -->
# """
# Hide link to source for the posts?
# HIDE_SOURCELINK = False
# Copy the source files for your pages?
# Setting it to False implies HIDE_SOURCELINK = True
# COPY_SOURCES = True
# Modify the number of Post per Index Page
# Defaults to 10
# INDEX_DISPLAY_POST_COUNT = 10
# RSS_LINK is a HTML fragment to link the RSS or Atom feeds. If set to None,
# the base.tmpl will use the feed Nikola generates. However, you may want to
# change it for a feedburner feed or something else.
# RSS_LINK = None
# Show only teasers in the RSS feed? Default to True
# RSS_TEASERS = True
# A search form to search this site, for the sidebar. You can use a google
# custom search (http://www.google.com/cse/)
# Or a duckduckgo search: https://duckduckgo.com/search_box.html
# Default is no search form.
# SEARCH_FORM = ""
#
# This search form works for any site and looks good in the "site" theme where
# it appears on the navigation bar:
#
#SEARCH_FORM = """
#<!-- Custom search -->
#<form method="get" id="search" action="http://duckduckgo.com/"
# class="navbar-form pull-left">
#<input type="hidden" name="sites" value="%s"/>
#<input type="hidden" name="k8" value="#444444"/>
#<input type="hidden" name="k9" value="#D51920"/>
#<input type="hidden" name="kt" value="h"/>
#<input type="text" name="q" maxlength="255"
# placeholder="Search…" class="span2" style="margin-top: 4px;"/>
#<input type="submit" value="DuckDuckGo Search" style="visibility: hidden;" />
#</form>
#<!-- End of custom search -->
#""" % SITE_URL
#
# If you prefer a google search form, here's an example that should just work:
#SEARCH_FORM = """
#<!-- Custom search with google-->
#<form id="search" action="http://google.com/search" method="get" class="navbar-form pull-left">
#<input type="hidden" name="q" value="site:%s" />
#<input type="text" name="q" maxlength="255" results="0" placeholder="Search"/>
#</form>
#<!-- End of custom search -->
#""" % SITE_URL
# Also, there is a local search plugin you can use, based on Tipue, but it requires setting several
# options:
# SEARCH_FORM = """
# <span class="navbar-form pull-left">
# <input type="text" id="tipue_search_input">
# </span>"""
#
# BODY_END = """
# <script type="text/javascript" src="/assets/js/tipuesearch_set.js"></script>
# <script type="text/javascript" src="/assets/js/tipuesearch.js"></script>
# <script type="text/javascript">
# $(document).ready(function() {
# $('#tipue_search_input').tipuesearch({
# 'mode': 'json',
# 'contentLocation': '/assets/js/tipuesearch_content.json',
# 'showUrl': false
# });
# });
# </script>
# """
# EXTRA_HEAD_DATA = """
# <link rel="stylesheet" type="text/css" href="/assets/css/tipuesearch.css">
# <div id="tipue_search_content" style="margin-left: auto; margin-right: auto; padding: 20px;"></div>
# """
# ENABLED_EXTRAS = ['local_search']
#
# Use content distribution networks for jquery and twitter-bootstrap css and js
# If this is True, jquery is served from the Google CDN and twitter-bootstrap
# is served from the NetDNA CDN
# Set this to False if you want to host your site without requiring access to
# external resources.
# USE_CDN = False
# Extra things you want in the pages HEAD tag. This will be added right
# before </HEAD>
# EXTRA_HEAD_DATA = ""
# Google analytics or whatever else you use. Added to the bottom of <body>
# in the default template (base.tmpl).
# BODY_END = ""
# The possibility to extract metadata from the filename by using a
# regular expression.
# To make it work you need to name parts of your regular expression.
# The following names will be used to extract metadata:
# - title
# - slug
# - date
# - tags
# - link
# - description
#
# An example re is the following:
# '(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.md'
# FILE_METADATA_REGEXP = None
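# For illustration (plain Python, not a Nikola setting): with the example
# re above, the named groups would be extracted from a filename like so:
#
# import re
# m = re.match(r'(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.md',
#              '2014-01-15-my-post-My Title.md')
# m.groupdict() # -> {'date': '2014-01-15', 'slug': 'my-post', 'title': 'My Title'}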
# Additional metadata that is added to a post when creating a new_post
# ADDITIONAL_METADATA = {}
# Nikola supports Twitter Card summaries / Open Graph.
# Twitter cards make it possible for you to attach media to Tweets
# that link to your content.
#
# IMPORTANT:
# Please note, that you need to opt-in for using Twitter Cards!
# To do this please visit
# https://dev.twitter.com/form/participate-twitter-cards
#
# Uncomment and modify to following lines to match your accounts.
# Specifying the id for either 'site' or 'creator' will be preferred
# over the cleartext username. Specifying an ID is not necessary.
# Displaying images is currently not supported.
# TWITTER_CARD = {
# # 'use_twitter_cards': True, # enable Twitter Cards / Open Graph
# # 'site': '@website', # twitter nick for the website
# # 'site:id': 123456, # Same as site, but the website's Twitter user ID
# # instead.
# # 'creator': '@username', # Username for the content creator / author.
# # 'creator:id': 654321, # Same as creator, but the Twitter user's ID.
# }
# Post's dates are considered in GMT by default, if you want to use
# another timezone, please set TIMEZONE to match. Check the available
# list from Wikipedia:
# http://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# Also, if you want to use a different timezone in some of your posts,
# you can use W3C-DTF Format (ex. 2012-03-30T23:00:00+02:00)
#
# TIMEZONE = 'Europe/Zurich'
# If webassets is installed, bundle JS and CSS to make site loading faster
# USE_BUNDLES = True
# Plugins you don't want to use. Be careful :-)
# DISABLED_PLUGINS = ["render_galleries"]
# Experimental plugins - use at your own risk.
# They probably need some manual adjustments - please see their respective
# readme.
# ENABLED_EXTRAS = [
# 'planetoid',
# 'ipynb',
# 'local_search',
# 'render_mustache',
# ]
# List of regular expressions, links matching them will always be considered
# valid by "nikola check -l"
# LINK_CHECK_WHITELIST = []
# If set to True, enable optional hyphenation in your posts (requires pyphen)
# HYPHENATE = False
# You can configure the logging handlers installed as plugins or change the
# log level of the default stdout handler.
LOGGING_HANDLERS = {
'stderr': {'loglevel': 'WARNING', 'bubble': True},
#'smtp': {
# 'from_addr': 'test-errors@example.com',
# 'recipients': ('test@example.com'),
# 'credentials':('testusername', 'password'),
# 'server_addr': ('127.0.0.1', 25),
# 'secure': (),
# 'level': 'DEBUG',
# 'bubble': True
#}
}
# Put in global_context things you want available on all your templates.
# It can be anything, data, functions, modules, etc.
GLOBAL_CONTEXT = {}
|
wcmckee/wcmckee.com
|
conf.py
|
Python
|
mit
| 23,584
|
[
"VisIt"
] |
dbf4f4b2b43796b7b5b9d98231165daf9d2f6d01474b63e24cd1f5d54c73322b
|
########################################################################
# This example demonstrates a multiscale model with synaptic input, Ca
# entry to the spine, receptor modulation following phosphorylation and
# Ca diffusion from spine to the dendrite. Lots going on.
# System switches to a potentiated state after a 1s strong synaptic input.
#
# Ca+CaM <===> Ca_CaM; Ca_CaM + CaMKII <===> Ca_CaM_CaMKII (all in
# spine head, except that the Ca_CaM_CaMKII translocates to the PSD)
# chan ------Ca_CaM_CaMKII-----> chan_p; chan_p ------> chan (all in PSD)
#
# Copyright (C) Upinder S. Bhalla NCBS 2018
# Released under the terms of the GNU Public License V3.
########################################################################
import moose
import rdesigneur as rd
rdes = rd.rdesigneur(
elecDt = 50e-6,
chemDt = 0.002,
diffDt = 0.002,
chemPlotDt = 0.02,
useGssa = False,
# cellProto syntax: ['ballAndStick', 'name', somaDia, somaLength, dendDia, dendLength, numDendSegments ]
cellProto = [['ballAndStick', 'soma', 12e-6, 12e-6, 4e-6, 100e-6, 2 ]],
chemProto = [['./chem/chanPhosph3compt.g', 'chem']],
spineProto = [['makeActiveSpine()', 'spine']],
chanProto = [
['make_Na()', 'Na'],
['make_K_DR()', 'K_DR'],
['make_K_A()', 'K_A' ],
['make_Ca()', 'Ca' ],
['make_Ca_conc()', 'Ca_conc' ]
],
passiveDistrib = [['soma', 'CM', '0.01', 'Em', '-0.06']],
spineDistrib = [['spine', '#dend#', '50e-6', '1e-6']],
chemDistrib = [['chem', '#', 'install', '1' ]],
chanDistrib = [
['Na', 'soma', 'Gbar', '300' ],
['K_DR', 'soma', 'Gbar', '250' ],
['K_A', 'soma', 'Gbar', '200' ],
['Ca_conc', 'soma', 'tau', '0.0333' ],
['Ca', 'soma', 'Gbar', '40' ]
],
adaptorList = [
[ 'psd/chan_p', 'n', 'glu', 'modulation', 0.1, 1.0 ],
[ 'Ca_conc', 'Ca', 'spine/Ca', 'conc', 0.00008, 0.8 ]
],
# Syn input baseline 1 Hz, with a 40 Hz burst for 1 sec at t=10 (see the
# expression below). Syn weight is 0.5, specified in the 2nd argument as a
# special case of stimList.
stimList = [['head#', '0.5','glu', 'periodicsyn', '1 + 40*(t>10 && t<11)']],
plotList = [
['soma', '1', '.', 'Vm', 'Membrane potential'],
['#', '1', 'spine/Ca', 'conc', 'Ca in Spine'],
['#', '1', 'dend/DEND/Ca', 'conc', 'Ca in Dend'],
['#', '1', 'spine/Ca_CaM', 'conc', 'Ca_CaM'],
['head#', '1', 'psd/chan_p', 'conc', 'Phosph gluR'],
['head#', '1', 'psd/Ca_CaM_CaMKII', 'conc', 'Active CaMKII'],
]
)
moose.seed(123)
rdes.buildModel()
moose.reinit()
moose.start( 25 )
rdes.display()
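########################################################################
# Note on the stimulus (a reading of the stimList expression above): the
# rate '1 + 40*(t>10 && t<11)' gives 1 Hz background synaptic input plus
# an extra 40 Hz while 10 < t < 11 s, i.e. the 1-second burst that drives
# the switch to the potentiated state.
########################################################################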
|
BhallaLab/moose-examples
|
tutorials/Rdesigneur/ex8.2_multiscale_gluR_phosph_3compt.py
|
Python
|
gpl-2.0
| 2,648
|
[
"MOOSE"
] |
d8ee28055b98cb305e19d6a7dce2af9b0b2034859d5076c536a06100fff610c8
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2020 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
"""The HRIT msg reader tests package."""
from datetime import datetime
from unittest import mock
import numpy as np
import pytest
import xarray as xr
from satpy.readers.seviri_l1b_nc import NCSEVIRIFileHandler
from satpy.tests.reader_tests.test_seviri_base import ORBIT_POLYNOMIALS
from satpy.tests.reader_tests.test_seviri_l1b_calibration import TestFileHandlerCalibrationBase
from satpy.tests.utils import assert_attrs_equal, make_dataid
def to_cds_time(time):
"""Convert datetime to (days, msecs) since 1958-01-01."""
if isinstance(time, datetime):
time = np.datetime64(time)
t0 = np.datetime64('1958-01-01 00:00')
delta = time - t0
days = (delta / np.timedelta64(1, 'D')).astype(int)
msecs = delta / np.timedelta64(1, 'ms') - days * 24 * 3600 * 1E3
return days, msecs
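# A quick worked example of the conversion above (illustrative): one full
# day after the epoch maps to day 1 and 0 msec, i.e.
# to_cds_time(datetime(1958, 1, 2)) -> (1, 0.0)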
class TestNCSEVIRIFileHandler(TestFileHandlerCalibrationBase):
"""Unit tests for SEVIRI netCDF reader."""
def _get_fake_dataset(self, counts):
"""Create a fake dataset."""
acq_time_day = np.repeat([1, 1], 11).reshape(2, 11)
acq_time_msec = np.repeat([1000, 2000], 11).reshape(2, 11)
orbit_poly_start_day, orbit_poly_start_msec = to_cds_time(
np.array([datetime(2019, 12, 31, 18),
datetime(2019, 12, 31, 22)],
dtype='datetime64')
)
orbit_poly_end_day, orbit_poly_end_msec = to_cds_time(
np.array([datetime(2019, 12, 31, 22),
datetime(2020, 1, 1, 2)],
dtype='datetime64')
)
counts = counts.rename({
'y': 'num_rows_vis_ir',
'x': 'num_columns_vis_ir'
})
scan_time_days, scan_time_msecs = to_cds_time(self.scan_time)
ds = xr.Dataset(
{
'VIS006': counts.copy(),
'IR_108': counts.copy(),
'HRV': (('num_rows_hrv', 'num_columns_hrv'), [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]),
'planned_chan_processing': self.radiance_types,
'channel_data_visir_data_l10_line_mean_acquisition_time_day': (
('num_rows_vis_ir', 'channels_vis_ir_dim'),
acq_time_day
),
'channel_data_visir_data_l10_line_mean_acquisition_msec': (
('num_rows_vis_ir', 'channels_vis_ir_dim'),
acq_time_msec
),
'orbit_polynomial_x': (
('orbit_polynomial_dim_row',
'orbit_polynomial_dim_col'),
ORBIT_POLYNOMIALS['X'][0:2]
),
'orbit_polynomial_y': (
('orbit_polynomial_dim_row',
'orbit_polynomial_dim_col'),
ORBIT_POLYNOMIALS['Y'][0:2]
),
'orbit_polynomial_z': (
('orbit_polynomial_dim_row',
'orbit_polynomial_dim_col'),
ORBIT_POLYNOMIALS['Z'][0:2]
),
'orbit_polynomial_start_time_day': (
'orbit_polynomial_dim_row',
orbit_poly_start_day
),
'orbit_polynomial_start_time_msec': (
'orbit_polynomial_dim_row',
orbit_poly_start_msec
),
'orbit_polynomial_end_time_day': (
'orbit_polynomial_dim_row',
orbit_poly_end_day
),
'orbit_polynomial_end_time_msec': (
'orbit_polynomial_dim_row',
orbit_poly_end_msec
),
},
attrs={
'equatorial_radius': 6378.169,
'north_polar_radius': 6356.5838,
'south_polar_radius': 6356.5838,
'longitude_of_SSP': 0.0,
'nominal_longitude': -3.5,
'satellite_id': self.platform_id,
'true_repeat_cycle_start_day': scan_time_days,
'true_repeat_cycle_start_mi_sec': scan_time_msecs,
'planned_repeat_cycle_end_day': scan_time_days,
'planned_repeat_cycle_end_mi_sec': scan_time_msecs,
'north_most_line': 3712,
'east_most_pixel': 1,
'west_most_pixel': 3712,
'south_most_line': 1,
'vis_ir_column_dir_grid_step': 3.0004032,
'vis_ir_line_dir_grid_step': 3.0004032,
'type_of_earth_model': '0x02',
}
)
ds['VIS006'].attrs.update({
'scale_factor': self.gains_nominal[0],
'add_offset': self.offsets_nominal[0]
})
ds['IR_108'].attrs.update({
'scale_factor': self.gains_nominal[8],
'add_offset': self.offsets_nominal[8],
})
# Add some attributes so that the reader can strip them
strip_attrs = {
'comment': None,
'long_name': None,
'valid_min': None,
'valid_max': None
}
for name in ['VIS006', 'IR_108']:
ds[name].attrs.update(strip_attrs)
return ds
@pytest.fixture(name='file_handler')
def file_handler(self, counts):
"""Create a mocked file handler."""
with mock.patch(
'satpy.readers.seviri_l1b_nc.xr.open_dataset',
return_value=self._get_fake_dataset(counts)
):
return NCSEVIRIFileHandler(
'filename',
{'platform_shortname': 'MSG3',
'start_time': self.scan_time,
'service': 'MSG'},
{'filetype': 'info'}
)
@pytest.mark.parametrize(
('channel', 'calibration', 'use_ext_coefs'),
[
# VIS channel, internal coefficients
('VIS006', 'counts', False),
('VIS006', 'radiance', False),
('VIS006', 'reflectance', False),
# VIS channel, external coefficients
('VIS006', 'radiance', True),
('VIS006', 'reflectance', True),
# IR channel, internal coefficients
('IR_108', 'counts', False),
('IR_108', 'radiance', False),
('IR_108', 'brightness_temperature', False),
# IR channel, external coefficients
('IR_108', 'radiance', True),
('IR_108', 'brightness_temperature', True),
# FUTURE: Enable once HRV reading has been fixed.
            # # HRV channel, internal coefficients
# ('HRV', 'counts', False),
# ('HRV', 'radiance', False),
# ('HRV', 'reflectance', False),
# # HRV channel, external coefficients (mode should have no effect)
# ('HRV', 'radiance', True),
# ('HRV', 'reflectance', True),
]
)
def test_calibrate(
self, file_handler, channel, calibration, use_ext_coefs
):
"""Test the calibration."""
file_handler.nc = file_handler.nc.rename({
'num_rows_vis_ir': 'y',
'num_columns_vis_ir': 'x'
})
external_coefs = self.external_coefs if use_ext_coefs else {}
expected = self._get_expected(
channel=channel,
calibration=calibration,
calib_mode='NOMINAL',
use_ext_coefs=use_ext_coefs
)
fh = file_handler
fh.ext_calib_coefs = external_coefs
dataset_id = make_dataid(name=channel, calibration=calibration)
res = fh.calibrate(fh.nc[channel], dataset_id)
xr.testing.assert_allclose(res, expected)
@pytest.mark.parametrize(
('channel', 'calibration'),
[
('VIS006', 'reflectance'),
('IR_108', 'brightness_temperature')
]
)
def test_get_dataset(self, file_handler, channel, calibration):
"""Test getting the dataset."""
dataset_id = make_dataid(name=channel, calibration=calibration)
dataset_info = {
'nc_key': channel,
'units': 'units',
'wavelength': 'wavelength',
'standard_name': 'standard_name'
}
res = file_handler.get_dataset(dataset_id, dataset_info)
# Test scanline acquisition times
expected = self._get_expected(
channel=channel,
calibration=calibration,
calib_mode='NOMINAL',
use_ext_coefs=False
)
expected.attrs = {
'orbital_parameters': {
'satellite_actual_longitude': -3.541742131915741,
'satellite_actual_latitude': -0.5203765167594427,
'satellite_actual_altitude': 35783419.16135868,
'satellite_nominal_longitude': -3.5,
'satellite_nominal_latitude': 0.0,
'projection_longitude': 0.0,
'projection_latitude': 0.0,
'projection_altitude': 35785831.0
},
'georef_offset_corrected': True,
'platform_name': 'Meteosat-11',
'sensor': 'seviri',
'units': 'units',
'wavelength': 'wavelength',
'standard_name': 'standard_name'
}
expected['acq_time'] = ('y', [np.datetime64('1958-01-02 00:00:01'),
np.datetime64('1958-01-02 00:00:02')])
expected = expected[::-1] # reader flips data upside down
xr.testing.assert_allclose(res, expected)
for key in ['sun_earth_distance_correction_applied',
'sun_earth_distance_correction_factor']:
res.attrs.pop(key, None)
assert_attrs_equal(res.attrs, expected.attrs, tolerance=1e-4)
def test_satpos_no_valid_orbit_polynomial(self, file_handler):
"""Test satellite position if there is no valid orbit polynomial."""
dataset_id = make_dataid(name='VIS006', calibration='counts')
dataset_info = {
'nc_key': 'VIS006',
'units': 'units',
'wavelength': 'wavelength',
'standard_name': 'standard_name'
}
file_handler.nc['orbit_polynomial_start_time_day'] = 0
file_handler.nc['orbit_polynomial_end_time_day'] = 0
res = file_handler.get_dataset(dataset_id, dataset_info)
assert 'satellite_actual_longitude' not in res.attrs[
'orbital_parameters']
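# To exercise these tests in isolation one could run, for example (sketch,
# assuming a satpy development checkout):
#
#     pytest satpy/tests/reader_tests/test_seviri_l1b_nc.py -k test_calibrate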
|
pytroll/satpy
|
satpy/tests/reader_tests/test_seviri_l1b_nc.py
|
Python
|
gpl-3.0
| 11,322
|
[
"NetCDF"
] |
9e3195bc1aece4b9b5db69bec18113f1b63d5427787a82ccde0614a526409012
|
#
# Copyright 2007 Diane Trout
# This software is covered by the GNU Lesser Public License 2.1
# (exciting copyrighting nothing, isn't it)
|
detrout/benderjab
|
benderjab/__init__.py
|
Python
|
lgpl-2.1
| 139
|
[
"exciting"
] |
5637fd5cc8ceaff7f06bd6896193232e4962c5489cc4b2181be4ab3364846fdc
|
#!/usr/bin/env python
#JSON {"lot": "RKS/6-31G(d)",
#JSON "scf": "CDIISSCFSolver",
#JSON "er": "numeric",
#JSON "difficulty": 8,
#JSON "description": "RKS DFT example with LDA and numerical Hartree"}
import numpy as np
from horton import * # pylint: disable=wildcard-import,unused-wildcard-import
# Load the coordinates from file.
# Use the XYZ file from HORTON's test data directory.
fn_xyz = context.get_fn('test/water.xyz')
mol = IOData.from_file(fn_xyz)
# Create a Gaussian basis set
obasis = get_gobasis(mol.coordinates, mol.numbers, '6-31g(d)')
# Compute Gaussian integrals (not the ERI!)
olp = obasis.compute_overlap()
kin = obasis.compute_kinetic()
na = obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers)
# Define a numerical integration grid, needed for the XC functionals. The
# mode='keep' option is needed for the numerical Becke-Poisson solver.
grid = BeckeMolGrid(mol.coordinates, mol.numbers, mol.pseudo_numbers, mode='keep')
# Create alpha orbitals
orb_alpha = Orbitals(obasis.nbasis)
# Initial guess
guess_core_hamiltonian(olp, kin + na, orb_alpha)
# Construct the restricted HF effective Hamiltonian
external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
terms = [
RTwoIndexTerm(kin, 'kin'),
RGridGroup(obasis, grid, [
RBeckeHartree(lmax=8),
RLibXCLDA('x'),
RLibXCLDA('c_vwn'),
]),
RTwoIndexTerm(na, 'ne'),
]
ham = REffHam(terms, external)
# Decide how to occupy the orbitals (5 alpha electrons)
occ_model = AufbauOccModel(5)
# Converge WFN with CDIIS SCF
# - Construct the initial density matrix (needed for CDIIS).
occ_model.assign(orb_alpha)
dm_alpha = orb_alpha.to_dm()
# - SCF solver
scf_solver = CDIISSCFSolver(1e-6)
scf_solver(ham, olp, occ_model, dm_alpha)
# Derive orbitals (coeffs, energies and occupations) from the Fock and density
# matrices. The energy is also computed to store it in the output file below.
fock_alpha = np.zeros(olp.shape)
ham.reset(dm_alpha)
ham.compute_energy()
ham.compute_fock(fock_alpha)
orb_alpha.from_fock_and_dm(fock_alpha, dm_alpha, olp)
# Assign results to the molecule object and write it to a file, e.g. for
# later analysis. Note that the CDIIS algorithm really only constructs an
# optimized density matrix, not orbitals.
mol.title = 'RKS computation on water'
mol.energy = ham.cache['energy']
mol.obasis = obasis
mol.orb_alpha = orb_alpha
mol.dm_alpha = dm_alpha
# useful for post-processing (results stored in double precision):
mol.to_file('water.h5')
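# A hypothetical follow-up sketch: the stored results can be loaded back
# through the same IOData interface used above, e.g.
#
#     mol_check = IOData.from_file('water.h5')
#     print(mol_check.energy)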
# CODE BELOW IS FOR horton-regression-test.py ONLY. IT IS NOT PART OF THE EXAMPLE.
rt_results = {
'energy': ham.cache['energy'],
'orb_alpha': orb_alpha.energies,
'nn': ham.cache["energy_nn"],
'kin': ham.cache["energy_kin"],
'ne': ham.cache["energy_ne"],
'grid': ham.cache["energy_grid_group"],
}
# BEGIN AUTOGENERATED CODE. DO NOT CHANGE MANUALLY.
rt_previous = {
'energy': -75.839923511126926,
'orb_alpha': np.array([
-18.581721161008389, -0.89646232207227017, -0.47397908099630903,
-0.29933683057491722, -0.22833957809749877, 0.045552499132287858,
0.12826483571630892, 0.76341172647373834, 0.80270997786629805,
0.83063822758516659, 0.86463725805110636, 1.0044402967802988, 1.3161096217154449,
1.6706535715743314, 1.676638581691158, 1.7190068769777869, 2.2330209969429315,
2.5198654726762091
]),
'grid': 38.041932619668316,
'kin': 75.8660255041384,
'ne': -198.90505667136364,
'nn': 9.1571750364299866,
}
|
QuantumElephant/horton
|
data/examples/hf_dft/rks_water_numlda.py
|
Python
|
gpl-3.0
| 3,519
|
[
"Gaussian"
] |
e14c6d8035f29f01dd73ed76fd03d84b1d87354bab725b4f9cdb59ac6c37e1d4
|
#
# Parse tree nodes for expressions
#
from __future__ import absolute_import
import cython
cython.declare(error=object, warning=object, warn_once=object, InternalError=object,
CompileError=object, UtilityCode=object, TempitaUtilityCode=object,
StringEncoding=object, operator=object,
Naming=object, Nodes=object, PyrexTypes=object, py_object_type=object,
list_type=object, tuple_type=object, set_type=object, dict_type=object,
unicode_type=object, str_type=object, bytes_type=object, type_type=object,
Builtin=object, Symtab=object, Utils=object, find_coercion_error=object,
debug_disposal_code=object, debug_temp_alloc=object, debug_coercion=object,
bytearray_type=object, slice_type=object, _py_int_types=object,
IS_PYTHON3=cython.bint)
import sys
import copy
import os.path
import operator
from .Errors import error, warning, warn_once, InternalError, CompileError
from .Errors import hold_errors, release_errors, held_errors, report_error
from .Code import UtilityCode, TempitaUtilityCode
from . import StringEncoding
from . import Naming
from . import Nodes
from .Nodes import Node, utility_code_for_imports
from . import PyrexTypes
from .PyrexTypes import py_object_type, c_long_type, typecast, error_type, \
unspecified_type
from . import TypeSlots
from .Builtin import list_type, tuple_type, set_type, dict_type, type_type, \
unicode_type, str_type, bytes_type, bytearray_type, basestring_type, slice_type
from . import Builtin
from . import Symtab
from .. import Utils
from .Annotate import AnnotationItem
from . import Future
from ..Debugging import print_call_chain
from .DebugFlags import debug_disposal_code, debug_temp_alloc, \
debug_coercion
try:
from __builtin__ import basestring
except ImportError:
# Python 3
basestring = str
any_string_type = (bytes, str)
else:
# Python 2
any_string_type = (bytes, unicode)
if sys.version_info[0] >= 3:
IS_PYTHON3 = True
_py_int_types = int
else:
IS_PYTHON3 = False
_py_int_types = (int, long)
class NotConstant(object):
_obj = None
def __new__(cls):
if NotConstant._obj is None:
NotConstant._obj = super(NotConstant, cls).__new__(cls)
return NotConstant._obj
def __repr__(self):
return "<NOT CONSTANT>"
not_a_constant = NotConstant()
constant_value_not_set = object()
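# Sketch of the sentinel semantics: NotConstant is a singleton, so identity
# tests are reliable (NotConstant() is not_a_constant evaluates to True),
# while constant_value_not_set means "not computed yet" as opposed to
# "computed and found non-constant"; has_constant_result() on ExprNode below
# excludes both.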
# error messages when coercing from key[0] to key[1]
coercion_error_dict = {
# string related errors
(unicode_type, str_type): ("Cannot convert Unicode string to 'str' implicitly."
" This is not portable and requires explicit encoding."),
(unicode_type, bytes_type): "Cannot convert Unicode string to 'bytes' implicitly, encoding required.",
(unicode_type, PyrexTypes.c_char_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
(unicode_type, PyrexTypes.c_const_char_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
(unicode_type, PyrexTypes.c_uchar_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
(unicode_type, PyrexTypes.c_const_uchar_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
(bytes_type, unicode_type): "Cannot convert 'bytes' object to unicode implicitly, decoding required",
(bytes_type, str_type): "Cannot convert 'bytes' object to str implicitly. This is not portable to Py3.",
(bytes_type, basestring_type): ("Cannot convert 'bytes' object to basestring implicitly."
" This is not portable to Py3."),
(bytes_type, PyrexTypes.c_py_unicode_ptr_type): "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'.",
(bytes_type, PyrexTypes.c_const_py_unicode_ptr_type): (
"Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'."),
(basestring_type, bytes_type): "Cannot convert 'basestring' object to bytes implicitly. This is not portable.",
(str_type, unicode_type): ("str objects do not support coercion to unicode,"
" use a unicode string literal instead (u'')"),
(str_type, bytes_type): "Cannot convert 'str' to 'bytes' implicitly. This is not portable.",
(str_type, PyrexTypes.c_char_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
(str_type, PyrexTypes.c_const_char_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
(str_type, PyrexTypes.c_uchar_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
(str_type, PyrexTypes.c_const_uchar_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
(str_type, PyrexTypes.c_py_unicode_ptr_type): "'str' objects do not support coercion to C types (use 'unicode'?).",
(str_type, PyrexTypes.c_const_py_unicode_ptr_type): (
"'str' objects do not support coercion to C types (use 'unicode'?)."),
(PyrexTypes.c_char_ptr_type, unicode_type): "Cannot convert 'char*' to unicode implicitly, decoding required",
(PyrexTypes.c_const_char_ptr_type, unicode_type): (
"Cannot convert 'char*' to unicode implicitly, decoding required"),
(PyrexTypes.c_uchar_ptr_type, unicode_type): "Cannot convert 'char*' to unicode implicitly, decoding required",
(PyrexTypes.c_const_uchar_ptr_type, unicode_type): (
"Cannot convert 'char*' to unicode implicitly, decoding required"),
}
def find_coercion_error(type_tuple, default, env):
err = coercion_error_dict.get(type_tuple)
if err is None:
return default
elif (env.directives['c_string_encoding'] and
any(t in type_tuple for t in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_uchar_ptr_type,
PyrexTypes.c_const_char_ptr_type, PyrexTypes.c_const_uchar_ptr_type))):
if type_tuple[1].is_pyobject:
return default
elif env.directives['c_string_encoding'] in ('ascii', 'default'):
return default
else:
return "'%s' objects do not support coercion to C types with non-ascii or non-default c_string_encoding" % type_tuple[0].name
else:
return err
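# Illustrative lookup (sketch): a (unicode_type, bytes_type) pair returns the
# "encoding required" message from the table above, whereas a pair that is
# not listed falls through to the caller-supplied default:
#
#     find_coercion_error((unicode_type, bytes_type), "default msg", env)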
def default_str_type(env):
return {
'bytes': bytes_type,
'bytearray': bytearray_type,
'str': str_type,
'unicode': unicode_type
}.get(env.directives['c_string_type'])
def check_negative_indices(*nodes):
"""
Raise a warning on nodes that are known to have negative numeric values.
Used to find (potential) bugs inside of "wraparound=False" sections.
"""
for node in nodes:
if node is None or (
not isinstance(node.constant_result, _py_int_types) and
not isinstance(node.constant_result, float)):
continue
if node.constant_result < 0:
warning(node.pos,
"the result of using negative indices inside of "
"code sections marked as 'wraparound=False' is "
"undefined", level=1)
def infer_sequence_item_type(env, seq_node, index_node=None, seq_type=None):
if not seq_node.is_sequence_constructor:
if seq_type is None:
seq_type = seq_node.infer_type(env)
if seq_type is tuple_type:
# tuples are immutable => we can safely follow assignments
if seq_node.cf_state and len(seq_node.cf_state) == 1:
try:
seq_node = seq_node.cf_state[0].rhs
except AttributeError:
pass
if seq_node is not None and seq_node.is_sequence_constructor:
if index_node is not None and index_node.has_constant_result():
try:
item = seq_node.args[index_node.constant_result]
except (ValueError, TypeError, IndexError):
pass
else:
return item.infer_type(env)
# if we're lucky, all items have the same type
item_types = set([item.infer_type(env) for item in seq_node.args])
if len(item_types) == 1:
return item_types.pop()
return None
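# e.g. (sketch): when unpacking the tuple literal (1, 2.0), a constant index
# of 0 infers the type of the int item and an index of 1 that of the float
# item; without a constant index the two item types differ, len(item_types)
# is greater than 1, and None is returned.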
def get_exception_handler(exception_value):
if exception_value is None:
return "__Pyx_CppExn2PyErr();"
elif exception_value.type.is_pyobject:
return 'try { throw; } catch(const std::exception& exn) { PyErr_SetString(%s, exn.what()); } catch(...) { PyErr_SetNone(%s); }' % (
exception_value.entry.cname,
exception_value.entry.cname)
else:
return '%s(); if (!PyErr_Occurred()) PyErr_SetString(PyExc_RuntimeError , "Error converting c++ exception.");' % exception_value.entry.cname
def translate_cpp_exception(code, pos, inside, exception_value, nogil):
raise_py_exception = get_exception_handler(exception_value)
code.putln("try {")
code.putln("%s" % inside)
code.putln("} catch(...) {")
if nogil:
code.put_ensure_gil(declare_gilstate=True)
code.putln(raise_py_exception)
if nogil:
code.put_release_ensured_gil()
code.putln(code.error_goto(pos))
code.putln("}")
# Used to handle the case where an lvalue expression and an overloaded assignment
# both have an exception declaration.
def translate_double_cpp_exception(code, pos, lhs_type, lhs_code, rhs_code,
lhs_exc_val, assign_exc_val, nogil):
handle_lhs_exc = get_exception_handler(lhs_exc_val)
handle_assignment_exc = get_exception_handler(assign_exc_val)
code.putln("try {")
code.putln(lhs_type.declaration_code("__pyx_local_lvalue = %s;" % lhs_code))
code.putln("try {")
code.putln("__pyx_local_lvalue = %s;" % rhs_code)
# Catch any exception from the overloaded assignment.
code.putln("} catch(...) {")
if nogil:
code.put_ensure_gil(declare_gilstate=True)
code.putln(handle_assignment_exc)
if nogil:
code.put_release_ensured_gil()
code.putln(code.error_goto(pos))
code.putln("}")
# Catch any exception from evaluating lhs.
code.putln("} catch(...) {")
if nogil:
code.put_ensure_gil(declare_gilstate=True)
code.putln(handle_lhs_exc)
if nogil:
code.put_release_ensured_gil()
code.putln(code.error_goto(pos))
code.putln('}')
class ExprNode(Node):
# subexprs [string] Class var holding names of subexpr node attrs
# type PyrexType Type of the result
# result_code string Code fragment
# result_ctype string C type of result_code if different from type
# is_temp boolean Result is in a temporary variable
# is_sequence_constructor
# boolean Is a list or tuple constructor expression
# is_starred boolean Is a starred expression (e.g. '*a')
# saved_subexpr_nodes
# [ExprNode or [ExprNode or None] or None]
# Cached result of subexpr_nodes()
# use_managed_ref boolean use ref-counted temps/assignments/etc.
# result_is_used boolean indicates that the result will be dropped and the
# result_code/temp_result can safely be set to None
result_ctype = None
type = None
temp_code = None
old_temp = None # error checker for multiple frees etc.
use_managed_ref = True # can be set by optimisation transforms
result_is_used = True
# The Analyse Expressions phase for expressions is split
# into two sub-phases:
#
# Analyse Types
# Determines the result type of the expression based
# on the types of its sub-expressions, and inserts
# coercion nodes into the expression tree where needed.
# Marks nodes which will need to have temporary variables
# allocated.
#
# Allocate Temps
# Allocates temporary variables where needed, and fills
# in the result_code field of each node.
#
# ExprNode provides some convenience routines which
# perform both of the above phases. These should only
# be called from statement nodes, and only when no
# coercion nodes need to be added around the expression
# being analysed. In that case, the above two phases
# should be invoked separately.
#
# Framework code in ExprNode provides much of the common
# processing for the various phases. It makes use of the
# 'subexprs' class attribute of ExprNodes, which should
# contain a list of the names of attributes which can
# hold sub-nodes or sequences of sub-nodes.
#
# The framework makes use of a number of abstract methods.
# Their responsibilities are as follows.
#
# Declaration Analysis phase
#
# analyse_target_declaration
# Called during the Analyse Declarations phase to analyse
# the LHS of an assignment or argument of a del statement.
# Nodes which cannot be the LHS of an assignment need not
# implement it.
#
# Expression Analysis phase
#
# analyse_types
# - Call analyse_types on all sub-expressions.
# - Check operand types, and wrap coercion nodes around
# sub-expressions where needed.
# - Set the type of this node.
# - If a temporary variable will be required for the
# result, set the is_temp flag of this node.
#
# analyse_target_types
# Called during the Analyse Types phase to analyse
# the LHS of an assignment or argument of a del
# statement. Similar responsibilities to analyse_types.
#
# target_code
# Called by the default implementation of allocate_target_temps.
# Should return a C lvalue for assigning to the node. The default
# implementation calls calculate_result_code.
#
# check_const
# - Check that this node and its subnodes form a
# legal constant expression. If so, do nothing,
# otherwise call not_const.
#
# The default implementation of check_const
# assumes that the expression is not constant.
#
# check_const_addr
# - Same as check_const, except check that the
# expression is a C lvalue whose address is
# constant. Otherwise, call addr_not_const.
#
    #        The default implementation of check_const_addr
# assumes that the expression is not a constant
# lvalue.
#
# Code Generation phase
#
# generate_evaluation_code
# - Call generate_evaluation_code for sub-expressions.
# - Perform the functions of generate_result_code
# (see below).
# - If result is temporary, call generate_disposal_code
# on all sub-expressions.
#
# A default implementation of generate_evaluation_code
# is provided which uses the following abstract methods:
#
# generate_result_code
# - Generate any C statements necessary to calculate
# the result of this node from the results of its
# sub-expressions.
#
# calculate_result_code
# - Should return a C code fragment evaluating to the
# result. This is only called when the result is not
# a temporary.
#
# generate_assignment_code
# Called on the LHS of an assignment.
# - Call generate_evaluation_code for sub-expressions.
# - Generate code to perform the assignment.
# - If the assignment absorbed a reference, call
# generate_post_assignment_code on the RHS,
# otherwise call generate_disposal_code on it.
#
# generate_deletion_code
# Called on an argument of a del statement.
# - Call generate_evaluation_code for sub-expressions.
# - Generate code to perform the deletion.
# - Call generate_disposal_code on all sub-expressions.
#
#
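    # For example (sketch), a hypothetical binary operator node would declare
    #
    #     subexprs = ['operand1', 'operand2']
    #
    # and the framework methods below (subexpr_nodes, the generate_subexpr_*
    # helpers) walk those attributes automatically.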
is_sequence_constructor = False
is_dict_literal = False
is_set_literal = False
is_string_literal = False
is_attribute = False
is_subscript = False
is_slice = False
is_buffer_access = False
is_memview_index = False
is_memview_slice = False
is_memview_broadcast = False
is_memview_copy_assignment = False
saved_subexpr_nodes = None
is_temp = False
is_target = False
is_starred = False
constant_result = constant_value_not_set
child_attrs = property(fget=operator.attrgetter('subexprs'))
def not_implemented(self, method_name):
print_call_chain(method_name, "not implemented") ###
raise InternalError(
"%s.%s not implemented" %
(self.__class__.__name__, method_name))
def is_lvalue(self):
return 0
def is_addressable(self):
return self.is_lvalue() and not self.type.is_memoryviewslice
def is_ephemeral(self):
# An ephemeral node is one whose result is in
# a Python temporary and we suspect there are no
# other references to it. Certain operations are
# disallowed on such values, since they are
# likely to result in a dangling pointer.
return self.type.is_pyobject and self.is_temp
def subexpr_nodes(self):
# Extract a list of subexpression nodes based
# on the contents of the subexprs class attribute.
nodes = []
for name in self.subexprs:
item = getattr(self, name)
if item is not None:
if type(item) is list:
nodes.extend(item)
else:
nodes.append(item)
return nodes
def result(self):
if self.is_temp:
#if not self.temp_code:
# pos = (os.path.basename(self.pos[0].get_description()),) + self.pos[1:] if self.pos else '(?)'
# raise RuntimeError("temp result name not set in %s at %r" % (
# self.__class__.__name__, pos))
return self.temp_code
else:
return self.calculate_result_code()
def is_c_result_required(self):
"""
Subtypes may return False here if result temp allocation can be skipped.
"""
return True
def result_as(self, type = None):
# Return the result code cast to the specified C type.
if (self.is_temp and self.type.is_pyobject and
type != py_object_type):
# Allocated temporaries are always PyObject *, which may not
# reflect the actual type (e.g. an extension type)
return typecast(type, py_object_type, self.result())
return typecast(type, self.ctype(), self.result())
def py_result(self):
# Return the result code cast to PyObject *.
return self.result_as(py_object_type)
def ctype(self):
# Return the native C type of the result (i.e. the
# C type of the result_code expression).
return self.result_ctype or self.type
def get_constant_c_result_code(self):
# Return the constant value of this node as a result code
# string, or None if the node is not constant. This method
# can be called when the constant result code is required
# before the code generation phase.
#
# The return value is a string that can represent a simple C
# value, a constant C name or a constant C expression. If the
# node type depends on Python code, this must return None.
return None
def calculate_constant_result(self):
# Calculate the constant compile time result value of this
# expression and store it in ``self.constant_result``. Does
# nothing by default, thus leaving ``self.constant_result``
# unknown. If valid, the result can be an arbitrary Python
# value.
#
# This must only be called when it is assured that all
# sub-expressions have a valid constant_result value. The
# ConstantFolding transform will do this.
pass
def has_constant_result(self):
return self.constant_result is not constant_value_not_set and \
self.constant_result is not not_a_constant
def compile_time_value(self, denv):
# Return value of compile-time expression, or report error.
error(self.pos, "Invalid compile-time expression")
def compile_time_value_error(self, e):
error(self.pos, "Error in compile-time expression: %s: %s" % (
e.__class__.__name__, e))
# ------------- Declaration Analysis ----------------
def analyse_target_declaration(self, env):
error(self.pos, "Cannot assign to or delete this")
# ------------- Expression Analysis ----------------
def analyse_const_expression(self, env):
# Called during the analyse_declarations phase of a
# constant expression. Analyses the expression's type,
# checks whether it is a legal const expression,
# and determines its value.
node = self.analyse_types(env)
node.check_const()
return node
def analyse_expressions(self, env):
# Convenience routine performing both the Type
# Analysis and Temp Allocation phases for a whole
# expression.
return self.analyse_types(env)
def analyse_target_expression(self, env, rhs):
# Convenience routine performing both the Type
# Analysis and Temp Allocation phases for the LHS of
# an assignment.
return self.analyse_target_types(env)
def analyse_boolean_expression(self, env):
# Analyse expression and coerce to a boolean.
node = self.analyse_types(env)
bool = node.coerce_to_boolean(env)
return bool
def analyse_temp_boolean_expression(self, env):
# Analyse boolean expression and coerce result into
# a temporary. This is used when a branch is to be
# performed on the result and we won't have an
# opportunity to ensure disposal code is executed
# afterwards. By forcing the result into a temporary,
# we ensure that all disposal has been done by the
# time we get the result.
node = self.analyse_types(env)
return node.coerce_to_boolean(env).coerce_to_simple(env)
# --------------- Type Inference -----------------
def type_dependencies(self, env):
# Returns the list of entries whose types must be determined
# before the type of self can be inferred.
if hasattr(self, 'type') and self.type is not None:
return ()
return sum([node.type_dependencies(env) for node in self.subexpr_nodes()], ())
def infer_type(self, env):
# Attempt to deduce the type of self.
# Differs from analyse_types as it avoids unnecessary
# analysis of subexpressions, but can assume everything
# in self.type_dependencies() has been resolved.
if hasattr(self, 'type') and self.type is not None:
return self.type
elif hasattr(self, 'entry') and self.entry is not None:
return self.entry.type
else:
self.not_implemented("infer_type")
def nonlocally_immutable(self):
# Returns whether this variable is a safe reference, i.e.
# can't be modified as part of globals or closures.
return self.is_literal or self.is_temp or self.type.is_array or self.type.is_cfunction
def inferable_item_node(self, index=0):
"""
Return a node that represents the (type) result of an indexing operation,
e.g. for tuple unpacking or iteration.
"""
return IndexNode(self.pos, base=self, index=IntNode(
self.pos, value=str(index), constant_result=index, type=PyrexTypes.c_py_ssize_t_type))
# --------------- Type Analysis ------------------
def analyse_as_module(self, env):
# If this node can be interpreted as a reference to a
# cimported module, return its scope, else None.
return None
def analyse_as_type(self, env):
# If this node can be interpreted as a reference to a
# type, return that type, else None.
return None
def analyse_as_extension_type(self, env):
# If this node can be interpreted as a reference to an
# extension type or builtin type, return its type, else None.
return None
def analyse_types(self, env):
self.not_implemented("analyse_types")
def analyse_target_types(self, env):
return self.analyse_types(env)
def nogil_check(self, env):
# By default, any expression based on Python objects is
# prevented in nogil environments. Subtypes must override
# this if they can work without the GIL.
if self.type and self.type.is_pyobject:
self.gil_error()
def gil_assignment_check(self, env):
if env.nogil and self.type.is_pyobject:
error(self.pos, "Assignment of Python object not allowed without gil")
def check_const(self):
self.not_const()
return False
def not_const(self):
error(self.pos, "Not allowed in a constant expression")
def check_const_addr(self):
self.addr_not_const()
return False
def addr_not_const(self):
error(self.pos, "Address is not constant")
# ----------------- Result Allocation -----------------
def result_in_temp(self):
# Return true if result is in a temporary owned by
# this node or one of its subexpressions. Overridden
# by certain nodes which can share the result of
# a subnode.
return self.is_temp
def target_code(self):
# Return code fragment for use as LHS of a C assignment.
return self.calculate_result_code()
def calculate_result_code(self):
self.not_implemented("calculate_result_code")
# def release_target_temp(self, env):
# # Release temporaries used by LHS of an assignment.
# self.release_subexpr_temps(env)
def allocate_temp_result(self, code):
if self.temp_code:
raise RuntimeError("Temp allocated multiple times in %r: %r" % (self.__class__.__name__, self.pos))
type = self.type
if not type.is_void:
if type.is_pyobject:
type = PyrexTypes.py_object_type
elif not (self.result_is_used or type.is_memoryviewslice or self.is_c_result_required()):
self.temp_code = None
return
self.temp_code = code.funcstate.allocate_temp(
type, manage_ref=self.use_managed_ref)
else:
self.temp_code = None
def release_temp_result(self, code):
if not self.temp_code:
if not self.result_is_used:
# not used anyway, so ignore if not set up
return
pos = (os.path.basename(self.pos[0].get_description()),) + self.pos[1:] if self.pos else '(?)'
if self.old_temp:
raise RuntimeError("temp %s released multiple times in %s at %r" % (
self.old_temp, self.__class__.__name__, pos))
else:
raise RuntimeError("no temp, but release requested in %s at %r" % (
self.__class__.__name__, pos))
code.funcstate.release_temp(self.temp_code)
self.old_temp = self.temp_code
self.temp_code = None
# ---------------- Code Generation -----------------
def make_owned_reference(self, code):
"""
If result is a pyobject, make sure we own a reference to it.
If the result is in a temp, it is already a new reference.
"""
if self.type.is_pyobject and not self.result_in_temp():
code.put_incref(self.result(), self.ctype())
def make_owned_memoryviewslice(self, code):
"""
Make sure we own the reference to this memoryview slice.
"""
if not self.result_in_temp():
code.put_incref_memoryviewslice(self.result(),
have_gil=self.in_nogil_context)
def generate_evaluation_code(self, code):
# Generate code to evaluate this node and
# its sub-expressions, and dispose of any
# temporary results of its sub-expressions.
self.generate_subexpr_evaluation_code(code)
code.mark_pos(self.pos)
if self.is_temp:
self.allocate_temp_result(code)
self.generate_result_code(code)
if self.is_temp and not (self.type.is_string or self.type.is_pyunicode_ptr):
# If we are temp we do not need to wait until this node is disposed
# before disposing children.
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
def generate_subexpr_evaluation_code(self, code):
for node in self.subexpr_nodes():
node.generate_evaluation_code(code)
def generate_result_code(self, code):
self.not_implemented("generate_result_code")
def generate_disposal_code(self, code):
if self.is_temp:
if self.type.is_string or self.type.is_pyunicode_ptr:
# postponed from self.generate_evaluation_code()
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
if self.result():
if self.type.is_pyobject:
code.put_decref_clear(self.result(), self.ctype())
elif self.type.is_memoryviewslice:
code.put_xdecref_memoryviewslice(
self.result(), have_gil=not self.in_nogil_context)
else:
# Already done if self.is_temp
self.generate_subexpr_disposal_code(code)
def generate_subexpr_disposal_code(self, code):
# Generate code to dispose of temporary results
# of all sub-expressions.
for node in self.subexpr_nodes():
node.generate_disposal_code(code)
def generate_post_assignment_code(self, code):
if self.is_temp:
if self.type.is_string or self.type.is_pyunicode_ptr:
# postponed from self.generate_evaluation_code()
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
elif self.type.is_pyobject:
code.putln("%s = 0;" % self.result())
elif self.type.is_memoryviewslice:
code.putln("%s.memview = NULL;" % self.result())
code.putln("%s.data = NULL;" % self.result())
else:
self.generate_subexpr_disposal_code(code)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
exception_check=None, exception_value=None):
# Stub method for nodes which are not legal as
# the LHS of an assignment. An error will have
# been reported earlier.
pass
def generate_deletion_code(self, code, ignore_nonexisting=False):
# Stub method for nodes that are not legal as
# the argument of a del statement. An error
# will have been reported earlier.
pass
def free_temps(self, code):
if self.is_temp:
if not self.type.is_void:
self.release_temp_result(code)
else:
self.free_subexpr_temps(code)
def free_subexpr_temps(self, code):
for sub in self.subexpr_nodes():
sub.free_temps(code)
def generate_function_definitions(self, env, code):
pass
# ---------------- Annotation ---------------------
def annotate(self, code):
for node in self.subexpr_nodes():
node.annotate(code)
# ----------------- Coercion ----------------------
def coerce_to(self, dst_type, env):
# Coerce the result so that it can be assigned to
# something of type dst_type. If processing is necessary,
# wraps this node in a coercion node and returns that.
# Otherwise, returns this node unchanged.
#
# This method is called during the analyse_expressions
# phase of the src_node's processing.
#
# Note that subclasses that override this (especially
# ConstNodes) must not (re-)set their own .type attribute
# here. Since expression nodes may turn up in different
# places in the tree (e.g. inside of CloneNodes in cascaded
# assignments), this method must return a new node instance
# if it changes the type.
#
src = self
src_type = self.type
if self.check_for_coercion_error(dst_type, env):
return self
used_as_reference = dst_type.is_reference
if used_as_reference and not src_type.is_reference:
dst_type = dst_type.ref_base_type
if src_type.is_const:
src_type = src_type.const_base_type
if src_type.is_fused or dst_type.is_fused:
# See if we are coercing a fused function to a pointer to a
# specialized function
if (src_type.is_cfunction and not dst_type.is_fused and
dst_type.is_ptr and dst_type.base_type.is_cfunction):
dst_type = dst_type.base_type
for signature in src_type.get_all_specialized_function_types():
if signature.same_as(dst_type):
src.type = signature
src.entry = src.type.entry
src.entry.used = True
return self
if src_type.is_fused:
error(self.pos, "Type is not specialized")
else:
error(self.pos, "Cannot coerce to a type that is not specialized")
self.type = error_type
return self
if self.coercion_type is not None:
# This is purely for error checking purposes!
node = NameNode(self.pos, name='', type=self.coercion_type)
node.coerce_to(dst_type, env)
if dst_type.is_memoryviewslice:
from . import MemoryView
if not src.type.is_memoryviewslice:
if src.type.is_pyobject:
src = CoerceToMemViewSliceNode(src, dst_type, env)
elif src.type.is_array:
src = CythonArrayNode.from_carray(src, env).coerce_to(dst_type, env)
elif not src_type.is_error:
error(self.pos,
"Cannot convert '%s' to memoryviewslice" % (src_type,))
elif not src.type.conforms_to(dst_type, broadcast=self.is_memview_broadcast,
copying=self.is_memview_copy_assignment):
if src.type.dtype.same_as(dst_type.dtype):
msg = "Memoryview '%s' not conformable to memoryview '%s'."
tup = src.type, dst_type
else:
msg = "Different base types for memoryviews (%s, %s)"
tup = src.type.dtype, dst_type.dtype
error(self.pos, msg % tup)
elif dst_type.is_pyobject:
if not src.type.is_pyobject:
if dst_type is bytes_type and src.type.is_int:
src = CoerceIntToBytesNode(src, env)
else:
src = CoerceToPyTypeNode(src, env, type=dst_type)
if not src.type.subtype_of(dst_type):
if src.constant_result is not None:
src = PyTypeTestNode(src, dst_type, env)
elif src.type.is_pyobject:
if used_as_reference and dst_type.is_cpp_class:
warning(
self.pos,
"Cannot pass Python object as C++ data structure reference (%s &), will pass by copy." % dst_type)
src = CoerceFromPyTypeNode(dst_type, src, env)
elif (dst_type.is_complex
and src_type != dst_type
and dst_type.assignable_from(src_type)):
src = CoerceToComplexNode(src, dst_type, env)
else: # neither src nor dst are py types
            # The string comparison is used because, for C types, it
            # is sufficient, but Cython gets confused when the types are
            # declared in different pxi files.
if not (str(src.type) == str(dst_type) or dst_type.assignable_from(src_type)):
self.fail_assignment(dst_type)
return src
def fail_assignment(self, dst_type):
error(self.pos, "Cannot assign type '%s' to '%s'" % (self.type, dst_type))
def check_for_coercion_error(self, dst_type, env, fail=False, default=None):
if fail and not default:
default = "Cannot assign type '%(FROM)s' to '%(TO)s'"
message = find_coercion_error((self.type, dst_type), default, env)
if message is not None:
error(self.pos, message % {'FROM': self.type, 'TO': dst_type})
return True
if fail:
self.fail_assignment(dst_type)
return True
return False
def coerce_to_pyobject(self, env):
return self.coerce_to(PyrexTypes.py_object_type, env)
def coerce_to_boolean(self, env):
# Coerce result to something acceptable as
# a boolean value.
# if it's constant, calculate the result now
if self.has_constant_result():
bool_value = bool(self.constant_result)
return BoolNode(self.pos, value=bool_value,
constant_result=bool_value)
type = self.type
if type.is_enum or type.is_error:
return self
elif type.is_pyobject or type.is_int or type.is_ptr or type.is_float:
return CoerceToBooleanNode(self, env)
elif type.is_cpp_class:
return SimpleCallNode(
self.pos,
function=AttributeNode(
self.pos, obj=self, attribute='operator bool'),
args=[]).analyse_types(env)
elif type.is_ctuple:
bool_value = len(type.components) == 0
return BoolNode(self.pos, value=bool_value,
constant_result=bool_value)
else:
error(self.pos, "Type '%s' not acceptable as a boolean" % type)
return self
def coerce_to_integer(self, env):
# If not already some C integer type, coerce to longint.
if self.type.is_int:
return self
else:
return self.coerce_to(PyrexTypes.c_long_type, env)
def coerce_to_temp(self, env):
# Ensure that the result is in a temporary.
if self.result_in_temp():
return self
else:
return CoerceToTempNode(self, env)
def coerce_to_simple(self, env):
# Ensure that the result is simple (see is_simple).
if self.is_simple():
return self
else:
return self.coerce_to_temp(env)
def is_simple(self):
# A node is simple if its result is something that can
# be referred to without performing any operations, e.g.
# a constant, local var, C global var, struct member
# reference, or temporary.
return self.result_in_temp()
def may_be_none(self):
if self.type and not (self.type.is_pyobject or
self.type.is_memoryviewslice):
return False
if self.has_constant_result():
return self.constant_result is not None
return True
def as_cython_attribute(self):
return None
def as_none_safe_node(self, message, error="PyExc_TypeError", format_args=()):
# Wraps the node in a NoneCheckNode if it is not known to be
# not-None (e.g. because it is a Python literal).
if self.may_be_none():
return NoneCheckNode(self, error, message, format_args)
else:
return self
@classmethod
def from_node(cls, node, **kwargs):
"""Instantiate this node class from another node, properly
copying over all attributes that one would forget otherwise.
"""
attributes = "cf_state cf_maybe_null cf_is_null constant_result".split()
for attr_name in attributes:
if attr_name in kwargs:
continue
try:
value = getattr(node, attr_name)
except AttributeError:
pass
else:
kwargs[attr_name] = value
return cls(node.pos, **kwargs)
class AtomicExprNode(ExprNode):
# Abstract base class for expression nodes which have
# no sub-expressions.
subexprs = []
# Override to optimize -- we know we have no children
def generate_subexpr_evaluation_code(self, code):
pass
def generate_subexpr_disposal_code(self, code):
pass
class PyConstNode(AtomicExprNode):
# Abstract base class for constant Python values.
is_literal = 1
type = py_object_type
def is_simple(self):
return 1
def may_be_none(self):
return False
def analyse_types(self, env):
return self
def calculate_result_code(self):
return self.value
def generate_result_code(self, code):
pass
class NoneNode(PyConstNode):
# The constant value None
is_none = 1
value = "Py_None"
constant_result = None
nogil_check = None
def compile_time_value(self, denv):
return None
def may_be_none(self):
return True
class EllipsisNode(PyConstNode):
# '...' in a subscript list.
value = "Py_Ellipsis"
constant_result = Ellipsis
def compile_time_value(self, denv):
return Ellipsis
class ConstNode(AtomicExprNode):
# Abstract base type for literal constant nodes.
#
# value string C code fragment
is_literal = 1
nogil_check = None
def is_simple(self):
return 1
def nonlocally_immutable(self):
return 1
def may_be_none(self):
return False
def analyse_types(self, env):
return self # Types are held in class variables
def check_const(self):
return True
def get_constant_c_result_code(self):
return self.calculate_result_code()
def calculate_result_code(self):
return str(self.value)
def generate_result_code(self, code):
pass
class BoolNode(ConstNode):
type = PyrexTypes.c_bint_type
# The constant value True or False
def calculate_constant_result(self):
self.constant_result = self.value
def compile_time_value(self, denv):
return self.value
def calculate_result_code(self):
if self.type.is_pyobject:
return self.value and 'Py_True' or 'Py_False'
else:
return str(int(self.value))
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject and self.type.is_int:
return BoolNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=Builtin.bool_type)
if dst_type.is_int and self.type.is_pyobject:
return BoolNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=PyrexTypes.c_bint_type)
return ConstNode.coerce_to(self, dst_type, env)
class NullNode(ConstNode):
type = PyrexTypes.c_null_ptr_type
value = "NULL"
constant_result = 0
def get_constant_c_result_code(self):
return self.value
class CharNode(ConstNode):
type = PyrexTypes.c_char_type
def calculate_constant_result(self):
self.constant_result = ord(self.value)
def compile_time_value(self, denv):
return ord(self.value)
def calculate_result_code(self):
return "'%s'" % StringEncoding.escape_char(self.value)
class IntNode(ConstNode):
# unsigned "" or "U"
# longness "" or "L" or "LL"
# is_c_literal True/False/None creator considers this a C integer literal
unsigned = ""
longness = ""
is_c_literal = None # unknown
def __init__(self, pos, **kwds):
ExprNode.__init__(self, pos, **kwds)
if 'type' not in kwds:
self.type = self.find_suitable_type_for_value()
def find_suitable_type_for_value(self):
if self.constant_result is constant_value_not_set:
try:
self.calculate_constant_result()
except ValueError:
pass
# we ignore 'is_c_literal = True' and instead map signed 32bit
# integers as C long values
if self.is_c_literal or \
self.constant_result in (constant_value_not_set, not_a_constant) or \
self.unsigned or self.longness == 'LL':
# clearly a C literal
rank = (self.longness == 'LL') and 2 or 1
suitable_type = PyrexTypes.modifiers_and_name_to_type[not self.unsigned, rank, "int"]
if self.type:
suitable_type = PyrexTypes.widest_numeric_type(suitable_type, self.type)
else:
# C literal or Python literal - split at 32bit boundary
if -2**31 <= self.constant_result < 2**31:
if self.type and self.type.is_int:
suitable_type = self.type
else:
suitable_type = PyrexTypes.c_long_type
else:
suitable_type = PyrexTypes.py_object_type
return suitable_type
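    # e.g. (sketch): a plain literal 5 fits the 32-bit window and maps to
    # c_long_type; 2**40 does not and stays a Python object; a value carrying
    # longness 'LL' is clearly a C literal and gets the rank-2 (long long)
    # integer type.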
def coerce_to(self, dst_type, env):
if self.type is dst_type:
return self
elif dst_type.is_float:
if self.has_constant_result():
return FloatNode(self.pos, value='%d.0' % int(self.constant_result), type=dst_type,
constant_result=float(self.constant_result))
else:
return FloatNode(self.pos, value=self.value, type=dst_type,
constant_result=not_a_constant)
if dst_type.is_numeric and not dst_type.is_complex:
node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
type=dst_type, is_c_literal=True,
unsigned=self.unsigned, longness=self.longness)
return node
elif dst_type.is_pyobject:
node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
type=PyrexTypes.py_object_type, is_c_literal=False,
unsigned=self.unsigned, longness=self.longness)
else:
# FIXME: not setting the type here to keep it working with
# complex numbers. Should they be special cased?
node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
unsigned=self.unsigned, longness=self.longness)
# We still need to perform normal coerce_to processing on the
# result, because we might be coercing to an extension type,
# in which case a type test node will be needed.
return ConstNode.coerce_to(node, dst_type, env)
def coerce_to_boolean(self, env):
return IntNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=PyrexTypes.c_bint_type,
unsigned=self.unsigned, longness=self.longness)
def generate_evaluation_code(self, code):
if self.type.is_pyobject:
# pre-allocate a Python version of the number
plain_integer_string = str(Utils.str_to_number(self.value))
self.result_code = code.get_py_int(plain_integer_string, self.longness)
else:
self.result_code = self.get_constant_c_result_code()
def get_constant_c_result_code(self):
return self.value_as_c_integer_string() + self.unsigned + self.longness
def value_as_c_integer_string(self):
value = self.value
if len(value) > 2:
if value[0] == '0':
                literal_type = value[1]  # second char: 'o'/'O', 'b'/'B' or 'x'/'X'
# 0x123 hex literals and 0123 octal literals work nicely in C
# but convert C-incompatible Py3 oct/bin notations
if literal_type in 'oO':
value = '0' + value[2:] # '0o123' => '0123'
elif literal_type in 'bB':
value = int(value[2:], 2)
elif value.isdigit() and not self.unsigned and not self.longness:
# C compilers do not consider unsigned types for decimal literals, but they do for hex
value = '0x%X' % int(value)
return str(value)
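    # e.g. (sketch): '0o777' -> '0777', '0b101' -> '5', and a bare decimal
    # such as '4294967295' -> '0xFFFFFFFF' so that the C compiler may pick an
    # unsigned-capable type.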
def calculate_result_code(self):
return self.result_code
def calculate_constant_result(self):
self.constant_result = Utils.str_to_number(self.value)
def compile_time_value(self, denv):
return Utils.str_to_number(self.value)
class FloatNode(ConstNode):
type = PyrexTypes.c_double_type
def calculate_constant_result(self):
self.constant_result = float(self.value)
def compile_time_value(self, denv):
return float(self.value)
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject and self.type.is_float:
return FloatNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=Builtin.float_type)
if dst_type.is_float and self.type.is_pyobject:
return FloatNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=dst_type)
return ConstNode.coerce_to(self, dst_type, env)
def calculate_result_code(self):
return self.result_code
def get_constant_c_result_code(self):
strval = self.value
assert isinstance(strval, basestring)
cmpval = repr(float(strval))
if cmpval == 'nan':
return "(Py_HUGE_VAL * 0)"
elif cmpval == 'inf':
return "Py_HUGE_VAL"
elif cmpval == '-inf':
return "(-Py_HUGE_VAL)"
else:
return strval
def generate_evaluation_code(self, code):
c_value = self.get_constant_c_result_code()
if self.type.is_pyobject:
self.result_code = code.get_py_float(self.value, c_value)
else:
self.result_code = c_value
def _analyse_name_as_type(name, pos, env):
type = PyrexTypes.parse_basic_type(name)
if type is not None:
return type
hold_errors()
from .TreeFragment import TreeFragment
pos = (pos[0], pos[1], pos[2]-7)
try:
declaration = TreeFragment(u"sizeof(%s)" % name, name=pos[0].filename, initial_pos=pos)
except CompileError:
sizeof_node = None
else:
sizeof_node = declaration.root.stats[0].expr
sizeof_node = sizeof_node.analyse_types(env)
release_errors(ignore=True)
if isinstance(sizeof_node, SizeofTypeNode):
return sizeof_node.arg_type
return None
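# e.g. (sketch): 'int' is recognised directly by parse_basic_type, while a
# user-defined name like 'MyStruct' (hypothetical) is resolved by compiling
# a throwaway "sizeof(MyStruct)" fragment and reading the analysed argument
# type off the resulting SizeofTypeNode.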
class BytesNode(ConstNode):
# A char* or bytes literal
#
# value BytesLiteral
is_string_literal = True
# start off as Python 'bytes' to support len() in O(1)
type = bytes_type
def calculate_constant_result(self):
self.constant_result = self.value
def as_sliced_node(self, start, stop, step=None):
value = StringEncoding.bytes_literal(self.value[start:stop:step], self.value.encoding)
return BytesNode(self.pos, value=value, constant_result=value)
def compile_time_value(self, denv):
return self.value.byteencode()
def analyse_as_type(self, env):
return _analyse_name_as_type(self.value.decode('ISO8859-1'), self.pos, env)
def can_coerce_to_char_literal(self):
return len(self.value) == 1
def coerce_to_boolean(self, env):
# This is special because testing a C char* for truth directly
# would yield the wrong result.
bool_value = bool(self.value)
return BoolNode(self.pos, value=bool_value, constant_result=bool_value)
def coerce_to(self, dst_type, env):
if self.type == dst_type:
return self
if dst_type.is_int:
if not self.can_coerce_to_char_literal():
error(self.pos, "Only single-character string literals can be coerced into ints.")
return self
if dst_type.is_unicode_char:
error(self.pos, "Bytes literals cannot coerce to Py_UNICODE/Py_UCS4, use a unicode literal instead.")
return self
return CharNode(self.pos, value=self.value,
constant_result=ord(self.value))
node = BytesNode(self.pos, value=self.value, constant_result=self.constant_result)
if dst_type.is_pyobject:
if dst_type in (py_object_type, Builtin.bytes_type):
node.type = Builtin.bytes_type
else:
self.check_for_coercion_error(dst_type, env, fail=True)
return node
elif dst_type in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type):
node.type = dst_type
return node
elif dst_type in (PyrexTypes.c_uchar_ptr_type, PyrexTypes.c_const_uchar_ptr_type, PyrexTypes.c_void_ptr_type):
node.type = (PyrexTypes.c_const_char_ptr_type if dst_type == PyrexTypes.c_const_uchar_ptr_type
else PyrexTypes.c_char_ptr_type)
return CastNode(node, dst_type)
elif dst_type.assignable_from(PyrexTypes.c_char_ptr_type):
node.type = dst_type
return node
# We still need to perform normal coerce_to processing on the
# result, because we might be coercing to an extension type,
# in which case a type test node will be needed.
return ConstNode.coerce_to(node, dst_type, env)
def generate_evaluation_code(self, code):
if self.type.is_pyobject:
result = code.get_py_string_const(self.value)
elif self.type.is_const:
result = code.get_string_const(self.value)
else:
# not const => use plain C string literal and cast to mutable type
literal = self.value.as_c_string_literal()
# C++ may require a cast
result = typecast(self.type, PyrexTypes.c_void_ptr_type, literal)
self.result_code = result
def get_constant_c_result_code(self):
return None # FIXME
def calculate_result_code(self):
return self.result_code
class UnicodeNode(ConstNode):
# A Py_UNICODE* or unicode literal
#
# value EncodedString
# bytes_value BytesLiteral the literal parsed as bytes string
# ('-3' unicode literals only)
is_string_literal = True
bytes_value = None
type = unicode_type
def calculate_constant_result(self):
self.constant_result = self.value
def analyse_as_type(self, env):
return _analyse_name_as_type(self.value, self.pos, env)
def as_sliced_node(self, start, stop, step=None):
if StringEncoding.string_contains_surrogates(self.value[:stop]):
# this is unsafe as it may give different results
# in different runtimes
return None
value = StringEncoding.EncodedString(self.value[start:stop:step])
value.encoding = self.value.encoding
if self.bytes_value is not None:
bytes_value = StringEncoding.bytes_literal(
self.bytes_value[start:stop:step], self.bytes_value.encoding)
else:
bytes_value = None
return UnicodeNode(
self.pos, value=value, bytes_value=bytes_value,
constant_result=value)
def coerce_to(self, dst_type, env):
if dst_type is self.type:
pass
elif dst_type.is_unicode_char:
if not self.can_coerce_to_char_literal():
error(self.pos,
"Only single-character Unicode string literals or "
"surrogate pairs can be coerced into Py_UCS4/Py_UNICODE.")
return self
int_value = ord(self.value)
return IntNode(self.pos, type=dst_type, value=str(int_value),
constant_result=int_value)
elif not dst_type.is_pyobject:
if dst_type.is_string and self.bytes_value is not None:
# special case: '-3' enforced unicode literal used in a
# C char* context
return BytesNode(self.pos, value=self.bytes_value
).coerce_to(dst_type, env)
if dst_type.is_pyunicode_ptr:
node = UnicodeNode(self.pos, value=self.value)
node.type = dst_type
return node
error(self.pos,
"Unicode literals do not support coercion to C types other "
"than Py_UNICODE/Py_UCS4 (for characters) or Py_UNICODE* "
"(for strings).")
elif dst_type not in (py_object_type, Builtin.basestring_type):
self.check_for_coercion_error(dst_type, env, fail=True)
return self
def can_coerce_to_char_literal(self):
return len(self.value) == 1
## or (len(self.value) == 2
## and (0xD800 <= self.value[0] <= 0xDBFF)
## and (0xDC00 <= self.value[1] <= 0xDFFF))
def coerce_to_boolean(self, env):
bool_value = bool(self.value)
return BoolNode(self.pos, value=bool_value, constant_result=bool_value)
def contains_surrogates(self):
return StringEncoding.string_contains_surrogates(self.value)
def generate_evaluation_code(self, code):
if self.type.is_pyobject:
if self.contains_surrogates():
# surrogates are not really portable and cannot be
# decoded by the UTF-8 codec in Py3.3
self.result_code = code.get_py_const(py_object_type, 'ustring')
data_cname = code.get_pyunicode_ptr_const(self.value)
code = code.get_cached_constants_writer()
code.mark_pos(self.pos)
code.putln(
"%s = PyUnicode_FromUnicode(%s, (sizeof(%s) / sizeof(Py_UNICODE))-1); %s" % (
self.result_code,
data_cname,
data_cname,
code.error_goto_if_null(self.result_code, self.pos)))
code.put_error_if_neg(
self.pos, "__Pyx_PyUnicode_READY(%s)" % self.result_code)
else:
self.result_code = code.get_py_string_const(self.value)
else:
self.result_code = code.get_pyunicode_ptr_const(self.value)
def calculate_result_code(self):
return self.result_code
def compile_time_value(self, env):
return self.value
class StringNode(PyConstNode):
# A Python str object, i.e. a byte string in Python 2.x and a
# unicode string in Python 3.x
#
# value BytesLiteral (or EncodedString with ASCII content)
# unicode_value EncodedString or None
# is_identifier boolean
type = str_type
is_string_literal = True
is_identifier = None
unicode_value = None
def calculate_constant_result(self):
if self.unicode_value is not None:
# only the Unicode value is portable across Py2/3
self.constant_result = self.unicode_value
def analyse_as_type(self, env):
return _analyse_name_as_type(self.unicode_value or self.value.decode('ISO8859-1'), self.pos, env)
def as_sliced_node(self, start, stop, step=None):
value = type(self.value)(self.value[start:stop:step])
value.encoding = self.value.encoding
if self.unicode_value is not None:
if StringEncoding.string_contains_surrogates(self.unicode_value[:stop]):
# this is unsafe as it may give different results in different runtimes
return None
unicode_value = StringEncoding.EncodedString(
self.unicode_value[start:stop:step])
else:
unicode_value = None
return StringNode(
self.pos, value=value, unicode_value=unicode_value,
constant_result=value, is_identifier=self.is_identifier)
def coerce_to(self, dst_type, env):
if dst_type is not py_object_type and not str_type.subtype_of(dst_type):
# if dst_type is Builtin.bytes_type:
# # special case: bytes = 'str literal'
# return BytesNode(self.pos, value=self.value)
if not dst_type.is_pyobject:
return BytesNode(self.pos, value=self.value).coerce_to(dst_type, env)
if dst_type is not Builtin.basestring_type:
self.check_for_coercion_error(dst_type, env, fail=True)
return self
def can_coerce_to_char_literal(self):
return not self.is_identifier and len(self.value) == 1
def generate_evaluation_code(self, code):
self.result_code = code.get_py_string_const(
self.value, identifier=self.is_identifier, is_str=True,
unicode_value=self.unicode_value)
def get_constant_c_result_code(self):
return None
def calculate_result_code(self):
return self.result_code
def compile_time_value(self, env):
if self.value.is_unicode:
return self.value
if not IS_PYTHON3:
# use plain str/bytes object in Py2
return self.value.byteencode()
# in Py3, always return a Unicode string
if self.unicode_value is not None:
return self.unicode_value
return self.value.decode('iso8859-1')
class IdentifierStringNode(StringNode):
# A special str value that represents an identifier (bytes in Py2,
# unicode in Py3).
is_identifier = True
class ImagNode(AtomicExprNode):
# Imaginary number literal
#
# value float imaginary part
type = PyrexTypes.c_double_complex_type
def calculate_constant_result(self):
self.constant_result = complex(0.0, self.value)
def compile_time_value(self, denv):
return complex(0.0, self.value)
def analyse_types(self, env):
self.type.create_declaration_utility_code(env)
return self
def may_be_none(self):
return False
def coerce_to(self, dst_type, env):
if self.type is dst_type:
return self
node = ImagNode(self.pos, value=self.value)
if dst_type.is_pyobject:
node.is_temp = 1
node.type = PyrexTypes.py_object_type
# We still need to perform normal coerce_to processing on the
# result, because we might be coercing to an extension type,
# in which case a type test node will be needed.
return AtomicExprNode.coerce_to(node, dst_type, env)
gil_message = "Constructing complex number"
def calculate_result_code(self):
if self.type.is_pyobject:
return self.result()
else:
return "%s(0, %r)" % (self.type.from_parts, float(self.value))
def generate_result_code(self, code):
if self.type.is_pyobject:
code.putln(
"%s = PyComplex_FromDoubles(0.0, %r); %s" % (
self.result(),
float(self.value),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class NewExprNode(AtomicExprNode):
# C++ new statement
#
# cppclass node c++ class to create
type = None
def infer_type(self, env):
type = self.cppclass.analyse_as_type(env)
if type is None or not type.is_cpp_class:
error(self.pos, "new operator can only be applied to a C++ class")
self.type = error_type
return
self.cpp_check(env)
constructor = type.scope.lookup(u'<init>')
if constructor is None:
func_type = PyrexTypes.CFuncType(type, [], exception_check='+')
type.scope.declare_cfunction(u'<init>', func_type, self.pos)
constructor = type.scope.lookup(u'<init>')
self.class_type = type
self.entry = constructor
self.type = constructor.type
return self.type
def analyse_types(self, env):
if self.type is None:
self.infer_type(env)
return self
def may_be_none(self):
return False
def generate_result_code(self, code):
pass
def calculate_result_code(self):
return "new " + self.class_type.empty_declaration_code()
class NameNode(AtomicExprNode):
# Reference to a local or global variable name.
#
# name string Python name of the variable
# entry Entry Symbol table entry
# type_entry Entry For extension type names, the original type entry
# cf_is_null boolean Is uninitialized before this node
# cf_maybe_null boolean Maybe uninitialized before this node
# allow_null boolean Don't raise UnboundLocalError
# nogil boolean Whether it is used in a nogil context
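    #
    # e.g. (illustrative): after 'if c: x = 1', a read of x has
    # cf_maybe_null set, so generate_result_code() below guards the
    # access with an UnboundLocalError check via put_error_if_unbound().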
is_name = True
is_cython_module = False
cython_attribute = None
lhs_of_first_assignment = False # TODO: remove me
is_used_as_rvalue = 0
entry = None
type_entry = None
cf_maybe_null = True
cf_is_null = False
allow_null = False
nogil = False
inferred_type = None
def as_cython_attribute(self):
return self.cython_attribute
def type_dependencies(self, env):
if self.entry is None:
self.entry = env.lookup(self.name)
if self.entry is not None and self.entry.type.is_unspecified:
return (self,)
else:
return ()
def infer_type(self, env):
if self.entry is None:
self.entry = env.lookup(self.name)
if self.entry is None or self.entry.type is unspecified_type:
if self.inferred_type is not None:
return self.inferred_type
return py_object_type
elif (self.entry.type.is_extension_type or self.entry.type.is_builtin_type) and \
self.name == self.entry.type.name:
# Unfortunately the type attribute of type objects
# is used for the pointer to the type they represent.
return type_type
elif self.entry.type.is_cfunction:
if self.entry.scope.is_builtin_scope:
# special case: optimised builtin functions must be treated as Python objects
return py_object_type
else:
# special case: referring to a C function must return its pointer
return PyrexTypes.CPtrType(self.entry.type)
else:
            # If the entry's static type is a generic pyobject, it's safe
            # to use the local NameNode's inferred_type instead.
            if self.entry.type.is_pyobject and self.inferred_type:
                # An inferred C integer may overflow at runtime, so only
                # trust the inference when the entry cannot overflow.
if not (self.inferred_type.is_int and self.entry.might_overflow):
return self.inferred_type
return self.entry.type
def compile_time_value(self, denv):
try:
return denv.lookup(self.name)
except KeyError:
error(self.pos, "Compile-time name '%s' not defined" % self.name)
def get_constant_c_result_code(self):
if not self.entry or self.entry.type.is_pyobject:
return None
return self.entry.cname
def coerce_to(self, dst_type, env):
# If coercing to a generic pyobject and this is a builtin
# C function with a Python equivalent, manufacture a NameNode
# referring to the Python builtin.
#print "NameNode.coerce_to:", self.name, dst_type ###
if dst_type is py_object_type:
entry = self.entry
if entry and entry.is_cfunction:
var_entry = entry.as_variable
if var_entry:
if var_entry.is_builtin and var_entry.is_const:
var_entry = env.declare_builtin(var_entry.name, self.pos)
node = NameNode(self.pos, name = self.name)
node.entry = var_entry
node.analyse_rvalue_entry(env)
return node
return super(NameNode, self).coerce_to(dst_type, env)
def analyse_as_module(self, env):
# Try to interpret this as a reference to a cimported module.
# Returns the module scope, or None.
entry = self.entry
if not entry:
entry = env.lookup(self.name)
if entry and entry.as_module:
return entry.as_module
return None
def analyse_as_type(self, env):
if self.cython_attribute:
type = PyrexTypes.parse_basic_type(self.cython_attribute)
else:
type = PyrexTypes.parse_basic_type(self.name)
if type:
return type
entry = self.entry
if not entry:
entry = env.lookup(self.name)
if entry and entry.is_type:
return entry.type
else:
return None
def analyse_as_extension_type(self, env):
# Try to interpret this as a reference to an extension type.
# Returns the extension type, or None.
entry = self.entry
if not entry:
entry = env.lookup(self.name)
if entry and entry.is_type:
if entry.type.is_extension_type or entry.type.is_builtin_type:
return entry.type
return None
def analyse_target_declaration(self, env):
if not self.entry:
self.entry = env.lookup_here(self.name)
if not self.entry:
if env.directives['warn.undeclared']:
warning(self.pos, "implicit declaration of '%s'" % self.name, 1)
if env.directives['infer_types'] != False:
type = unspecified_type
else:
type = py_object_type
self.entry = env.declare_var(self.name, type, self.pos)
if self.entry.is_declared_generic:
self.result_ctype = py_object_type
def analyse_types(self, env):
self.initialized_check = env.directives['initializedcheck']
if self.entry is None:
self.entry = env.lookup(self.name)
if not self.entry:
self.entry = env.declare_builtin(self.name, self.pos)
if not self.entry:
self.type = PyrexTypes.error_type
return self
entry = self.entry
if entry:
entry.used = 1
if entry.type.is_buffer:
from . import Buffer
Buffer.used_buffer_aux_vars(entry)
self.analyse_rvalue_entry(env)
return self
def analyse_target_types(self, env):
self.analyse_entry(env, is_target=True)
if self.entry.is_cfunction and self.entry.as_variable:
if self.entry.is_overridable or not self.is_lvalue() and self.entry.fused_cfunction:
# We need this for assigning to cpdef names and for the fused 'def' TreeFragment
self.entry = self.entry.as_variable
self.type = self.entry.type
if self.type.is_const:
error(self.pos, "Assignment to const '%s'" % self.name)
if self.type.is_reference:
error(self.pos, "Assignment to reference '%s'" % self.name)
if not self.is_lvalue():
error(self.pos, "Assignment to non-lvalue '%s'" % self.name)
self.type = PyrexTypes.error_type
self.entry.used = 1
if self.entry.type.is_buffer:
from . import Buffer
Buffer.used_buffer_aux_vars(self.entry)
return self
def analyse_rvalue_entry(self, env):
#print "NameNode.analyse_rvalue_entry:", self.name ###
#print "Entry:", self.entry.__dict__ ###
self.analyse_entry(env)
entry = self.entry
if entry.is_declared_generic:
self.result_ctype = py_object_type
if entry.is_pyglobal or entry.is_builtin:
if entry.is_builtin and entry.is_const:
self.is_temp = 0
else:
self.is_temp = 1
self.is_used_as_rvalue = 1
elif entry.type.is_memoryviewslice:
self.is_temp = False
self.is_used_as_rvalue = True
self.use_managed_ref = True
return self
def nogil_check(self, env):
self.nogil = True
if self.is_used_as_rvalue:
entry = self.entry
if entry.is_builtin:
if not entry.is_const: # cached builtins are ok
self.gil_error()
elif entry.is_pyglobal:
self.gil_error()
gil_message = "Accessing Python global or builtin"
def analyse_entry(self, env, is_target=False):
#print "NameNode.analyse_entry:", self.name ###
self.check_identifier_kind()
entry = self.entry
type = entry.type
if (not is_target and type.is_pyobject and self.inferred_type and
self.inferred_type.is_builtin_type):
# assume that type inference is smarter than the static entry
type = self.inferred_type
self.type = type
def check_identifier_kind(self):
# Check that this is an appropriate kind of name for use in an
# expression. Also finds the variable entry associated with
# an extension type.
entry = self.entry
if entry.is_type and entry.type.is_extension_type:
self.type_entry = entry
if entry.is_type and entry.type.is_enum:
py_entry = Symtab.Entry(self.name, None, py_object_type)
py_entry.is_pyglobal = True
py_entry.scope = self.entry.scope
self.entry = py_entry
elif not (entry.is_const or entry.is_variable
or entry.is_builtin or entry.is_cfunction
or entry.is_cpp_class):
if self.entry.as_variable:
self.entry = self.entry.as_variable
else:
error(self.pos,
"'%s' is not a constant, variable or function identifier" % self.name)
def is_simple(self):
# If it's not a C variable, it'll be in a temp.
return 1
def may_be_none(self):
if self.cf_state and self.type and (self.type.is_pyobject or
self.type.is_memoryviewslice):
            # guard against infinite recursion on self-dependencies
if getattr(self, '_none_checking', False):
# self-dependency - either this node receives a None
# value from *another* node, or it can not reference
# None at this point => safe to assume "not None"
return False
self._none_checking = True
# evaluate control flow state to see if there were any
# potential None values assigned to the node so far
may_be_none = False
for assignment in self.cf_state:
if assignment.rhs.may_be_none():
may_be_none = True
break
del self._none_checking
return may_be_none
return super(NameNode, self).may_be_none()
def nonlocally_immutable(self):
if ExprNode.nonlocally_immutable(self):
return True
entry = self.entry
if not entry or entry.in_closure:
return False
return entry.is_local or entry.is_arg or entry.is_builtin or entry.is_readonly
def calculate_target_results(self, env):
pass
def check_const(self):
entry = self.entry
if entry is not None and not (entry.is_const or entry.is_cfunction or entry.is_builtin):
self.not_const()
return False
return True
def check_const_addr(self):
entry = self.entry
if not (entry.is_cglobal or entry.is_cfunction or entry.is_builtin):
self.addr_not_const()
return False
return True
def is_lvalue(self):
return (
self.entry.is_variable and
not self.entry.is_readonly
) or (
self.entry.is_cfunction and
self.entry.is_overridable
)
def is_addressable(self):
return self.entry.is_variable and not self.type.is_memoryviewslice
def is_ephemeral(self):
# Name nodes are never ephemeral, even if the
# result is in a temporary.
return 0
def calculate_result_code(self):
entry = self.entry
if not entry:
return "<error>" # There was an error earlier
return entry.cname
def generate_result_code(self, code):
assert hasattr(self, 'entry')
entry = self.entry
if entry is None:
return # There was an error earlier
if entry.is_builtin and entry.is_const:
return # Lookup already cached
elif entry.is_pyclass_attr:
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
if entry.is_builtin:
namespace = Naming.builtins_cname
else: # entry.is_pyglobal
namespace = entry.scope.namespace_cname
if not self.cf_is_null:
code.putln(
'%s = PyObject_GetItem(%s, %s);' % (
self.result(),
namespace,
interned_cname))
code.putln('if (unlikely(!%s)) {' % self.result())
code.putln('PyErr_Clear();')
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
code.putln(
'%s = __Pyx_GetModuleGlobalName(%s);' % (
self.result(),
interned_cname))
if not self.cf_is_null:
code.putln("}")
code.putln(code.error_goto_if_null(self.result(), self.pos))
code.put_gotref(self.py_result())
elif entry.is_builtin and not entry.scope.is_module_scope:
# known builtin
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetBuiltinName", "ObjectHandling.c"))
code.putln(
'%s = __Pyx_GetBuiltinName(%s); %s' % (
self.result(),
interned_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif entry.is_pyglobal or (entry.is_builtin and entry.scope.is_module_scope):
# name in class body, global name or unknown builtin
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
if entry.scope.is_module_scope:
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
code.putln(
'%s = __Pyx_GetModuleGlobalName(%s); %s' % (
self.result(),
interned_cname,
code.error_goto_if_null(self.result(), self.pos)))
else:
# FIXME: is_pyglobal is also used for class namespace
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c"))
code.putln(
'%s = __Pyx_GetNameInClass(%s, %s); %s' % (
self.result(),
entry.scope.namespace_cname,
interned_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif entry.is_local or entry.in_closure or entry.from_closure or entry.type.is_memoryviewslice:
# Raise UnboundLocalError for objects and memoryviewslices
raise_unbound = (
(self.cf_maybe_null or self.cf_is_null) and not self.allow_null)
null_code = entry.type.check_for_null_code(entry.cname)
memslice_check = entry.type.is_memoryviewslice and self.initialized_check
if null_code and raise_unbound and (entry.type.is_pyobject or memslice_check):
code.put_error_if_unbound(self.pos, entry, self.in_nogil_context)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
exception_check=None, exception_value=None):
#print "NameNode.generate_assignment_code:", self.name ###
entry = self.entry
if entry is None:
return # There was an error earlier
if (self.entry.type.is_ptr and isinstance(rhs, ListNode)
and not self.lhs_of_first_assignment and not rhs.in_module_scope):
error(self.pos, "Literal list must be assigned to pointer at time of declaration")
        # is_pyglobal seems to be True for module-level globals only.
# We use this to access class->tp_dict if necessary.
if entry.is_pyglobal:
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
namespace = self.entry.scope.namespace_cname
if entry.is_member:
# if the entry is a member we have to cheat: SetAttr does not work
# on types, so we create a descriptor which is then added to tp_dict
setter = 'PyDict_SetItem'
namespace = '%s->tp_dict' % namespace
elif entry.scope.is_module_scope:
setter = 'PyDict_SetItem'
namespace = Naming.moddict_cname
elif entry.is_pyclass_attr:
setter = 'PyObject_SetItem'
else:
assert False, repr(entry)
code.put_error_if_neg(
self.pos,
'%s(%s, %s, %s)' % (
setter,
namespace,
interned_cname,
rhs.py_result()))
if debug_disposal_code:
print("NameNode.generate_assignment_code:")
print("...generating disposal code for %s" % rhs)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
if entry.is_member:
# in Py2.6+, we need to invalidate the method cache
code.putln("PyType_Modified(%s);" %
entry.scope.parent_type.typeptr_cname)
else:
if self.type.is_memoryviewslice:
self.generate_acquire_memoryviewslice(rhs, code)
elif self.type.is_buffer:
# Generate code for doing the buffer release/acquisition.
# This might raise an exception in which case the assignment (done
# below) will not happen.
#
                # The reason this is not in a typetest-like node is that the
                # variables the acquired buffer info is stored to are allocated
                # per entry and coupled with it.
self.generate_acquire_buffer(rhs, code)
assigned = False
if self.type.is_pyobject:
#print "NameNode.generate_assignment_code: to", self.name ###
#print "...from", rhs ###
#print "...LHS type", self.type, "ctype", self.ctype() ###
#print "...RHS type", rhs.type, "ctype", rhs.ctype() ###
if self.use_managed_ref:
rhs.make_owned_reference(code)
is_external_ref = entry.is_cglobal or self.entry.in_closure or self.entry.from_closure
if is_external_ref:
if not self.cf_is_null:
if self.cf_maybe_null:
code.put_xgotref(self.py_result())
else:
code.put_gotref(self.py_result())
assigned = True
if entry.is_cglobal:
code.put_decref_set(
self.result(), rhs.result_as(self.ctype()))
else:
if not self.cf_is_null:
if self.cf_maybe_null:
code.put_xdecref_set(
self.result(), rhs.result_as(self.ctype()))
else:
code.put_decref_set(
self.result(), rhs.result_as(self.ctype()))
else:
assigned = False
if is_external_ref:
code.put_giveref(rhs.py_result())
if not self.type.is_memoryviewslice:
if not assigned:
if overloaded_assignment:
result = rhs.result()
if exception_check == '+':
translate_cpp_exception(code, self.pos, '%s = %s;' % (self.result(), result), exception_value, self.in_nogil_context)
else:
code.putln('%s = %s;' % (self.result(), result))
else:
result = rhs.result_as(self.ctype())
code.putln('%s = %s;' % (self.result(), result))
if debug_disposal_code:
print("NameNode.generate_assignment_code:")
print("...generating post-assignment code for %s" % rhs)
rhs.generate_post_assignment_code(code)
elif rhs.result_in_temp():
rhs.generate_post_assignment_code(code)
rhs.free_temps(code)
def generate_acquire_memoryviewslice(self, rhs, code):
"""
        Slices, coercions from objects, return values etc. are new
        references. We have a borrowed reference in the case of dst = src.
"""
from . import MemoryView
MemoryView.put_acquire_memoryviewslice(
lhs_cname=self.result(),
lhs_type=self.type,
lhs_pos=self.pos,
rhs=rhs,
code=code,
have_gil=not self.in_nogil_context,
first_assignment=self.cf_is_null)
def generate_acquire_buffer(self, rhs, code):
# rhstmp is only used in case the rhs is a complicated expression leading to
# the object, to avoid repeating the same C expression for every reference
# to the rhs. It does NOT hold a reference.
pretty_rhs = isinstance(rhs, NameNode) or rhs.is_temp
if pretty_rhs:
rhstmp = rhs.result_as(self.ctype())
else:
rhstmp = code.funcstate.allocate_temp(self.entry.type, manage_ref=False)
code.putln('%s = %s;' % (rhstmp, rhs.result_as(self.ctype())))
from . import Buffer
Buffer.put_assign_to_buffer(self.result(), rhstmp, self.entry,
is_initialized=not self.lhs_of_first_assignment,
pos=self.pos, code=code)
if not pretty_rhs:
code.putln("%s = 0;" % rhstmp)
code.funcstate.release_temp(rhstmp)
def generate_deletion_code(self, code, ignore_nonexisting=False):
if self.entry is None:
return # There was an error earlier
elif self.entry.is_pyclass_attr:
namespace = self.entry.scope.namespace_cname
interned_cname = code.intern_identifier(self.entry.name)
if ignore_nonexisting:
key_error_code = 'PyErr_Clear(); else'
else:
# minor hack: fake a NameError on KeyError
key_error_code = (
'{ PyErr_Clear(); PyErr_Format(PyExc_NameError, "name \'%%s\' is not defined", "%s"); }' %
self.entry.name)
code.putln(
'if (unlikely(PyObject_DelItem(%s, %s) < 0)) {'
' if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) %s'
' %s '
'}' % (namespace, interned_cname,
key_error_code,
code.error_goto(self.pos)))
elif self.entry.is_pyglobal:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
interned_cname = code.intern_identifier(self.entry.name)
del_code = '__Pyx_PyObject_DelAttrStr(%s, %s)' % (
Naming.module_cname, interned_cname)
if ignore_nonexisting:
code.putln('if (unlikely(%s < 0)) { if (likely(PyErr_ExceptionMatches(PyExc_AttributeError))) PyErr_Clear(); else %s }' % (
del_code,
code.error_goto(self.pos)))
else:
code.put_error_if_neg(self.pos, del_code)
elif self.entry.type.is_pyobject or self.entry.type.is_memoryviewslice:
if not self.cf_is_null:
if self.cf_maybe_null and not ignore_nonexisting:
code.put_error_if_unbound(self.pos, self.entry)
if self.entry.type.is_pyobject:
if self.entry.in_closure:
# generator
if ignore_nonexisting and self.cf_maybe_null:
code.put_xgotref(self.result())
else:
code.put_gotref(self.result())
if ignore_nonexisting and self.cf_maybe_null:
code.put_xdecref(self.result(), self.ctype())
else:
code.put_decref(self.result(), self.ctype())
code.putln('%s = NULL;' % self.result())
else:
code.put_xdecref_memoryviewslice(self.entry.cname,
have_gil=not self.nogil)
else:
error(self.pos, "Deletion of C names not supported")
def annotate(self, code):
if hasattr(self, 'is_called') and self.is_called:
pos = (self.pos[0], self.pos[1], self.pos[2] - len(self.name) - 1)
if self.type.is_pyobject:
style, text = 'py_call', 'python function (%s)'
else:
style, text = 'c_call', 'c function (%s)'
code.annotate(pos, AnnotationItem(style, text % self.type, size=len(self.name)))
class BackquoteNode(ExprNode):
# `expr`
#
# arg ExprNode
type = py_object_type
subexprs = ['arg']
def analyse_types(self, env):
self.arg = self.arg.analyse_types(env)
self.arg = self.arg.coerce_to_pyobject(env)
self.is_temp = 1
return self
gil_message = "Backquote expression"
def calculate_constant_result(self):
self.constant_result = repr(self.arg.constant_result)
def generate_result_code(self, code):
code.putln(
"%s = PyObject_Repr(%s); %s" % (
self.result(),
self.arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class ImportNode(ExprNode):
# Used as part of import statement implementation.
# Implements result =
# __import__(module_name, globals(), None, name_list, level)
#
# module_name StringNode dotted name of module. Empty module
# name means importing the parent package according
# to level
# name_list ListNode or None list of names to be imported
# level int relative import level:
# -1: attempt both relative import and absolute import;
# 0: absolute import;
# >0: the number of parent directories to search
# relative to the current module.
# None: decide the level according to language level and
# directives
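    #
    # Rough sketch of the mapping (module names here are illustrative):
    #   import os             -> __import__('os', globals(), None, None, level)
    #   from os import path   -> __import__('os', globals(), None, ['path'], level)
    #   from . import sibling -> __import__('', globals(), None, ['sibling'], 1)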
type = py_object_type
subexprs = ['module_name', 'name_list']
def analyse_types(self, env):
if self.level is None:
if (env.directives['py2_import'] or
Future.absolute_import not in env.global_scope().context.future_directives):
self.level = -1
else:
self.level = 0
module_name = self.module_name.analyse_types(env)
self.module_name = module_name.coerce_to_pyobject(env)
if self.name_list:
name_list = self.name_list.analyse_types(env)
self.name_list = name_list.coerce_to_pyobject(env)
self.is_temp = 1
return self
gil_message = "Python import"
def generate_result_code(self, code):
if self.name_list:
name_list_code = self.name_list.py_result()
else:
name_list_code = "0"
code.globalstate.use_utility_code(UtilityCode.load_cached("Import", "ImportExport.c"))
import_code = "__Pyx_Import(%s, %s, %d)" % (
self.module_name.py_result(),
name_list_code,
self.level)
if (self.level <= 0 and
self.module_name.is_string_literal and
self.module_name.value in utility_code_for_imports):
helper_func, code_name, code_file = utility_code_for_imports[self.module_name.value]
code.globalstate.use_utility_code(UtilityCode.load_cached(code_name, code_file))
import_code = '%s(%s)' % (helper_func, import_code)
code.putln("%s = %s; %s" % (
self.result(),
import_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class IteratorNode(ExprNode):
# Used as part of for statement implementation.
#
# Implements result = iter(sequence)
#
# sequence ExprNode
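    #
    # For a generic Python object, the C code generated below amounts
    # to roughly (names simplified, see generate_result_code()):
    #   iter = PyObject_GetIter(sequence);
    #   iternext = Py_TYPE(iter)->tp_iternext;
    # while list/tuple sequences are instead walked with a plain counter
    # over PyList_GET_ITEM()/PyTuple_GET_ITEM().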
type = py_object_type
iter_func_ptr = None
counter_cname = None
cpp_iterator_cname = None
reversed = False # currently only used for list/tuple types (see Optimize.py)
is_async = False
subexprs = ['sequence']
def analyse_types(self, env):
self.sequence = self.sequence.analyse_types(env)
if (self.sequence.type.is_array or self.sequence.type.is_ptr) and \
not self.sequence.type.is_string:
# C array iteration will be transformed later on
self.type = self.sequence.type
elif self.sequence.type.is_cpp_class:
self.analyse_cpp_types(env)
else:
self.sequence = self.sequence.coerce_to_pyobject(env)
if self.sequence.type in (list_type, tuple_type):
self.sequence = self.sequence.as_none_safe_node("'NoneType' object is not iterable")
self.is_temp = 1
return self
gil_message = "Iterating over Python object"
_func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
]))
def type_dependencies(self, env):
return self.sequence.type_dependencies(env)
def infer_type(self, env):
sequence_type = self.sequence.infer_type(env)
if sequence_type.is_array or sequence_type.is_ptr:
return sequence_type
elif sequence_type.is_cpp_class:
begin = sequence_type.scope.lookup("begin")
if begin is not None:
return begin.type.return_type
elif sequence_type.is_pyobject:
return sequence_type
return py_object_type
def analyse_cpp_types(self, env):
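        # The lookups below validate the C++ iteration protocol; the loop
        # this node ultimately expands to is roughly (illustrative):
        #   for (it = seq.begin(); it != seq.end(); ++it) { item = *it; ... }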
sequence_type = self.sequence.type
if sequence_type.is_ptr:
sequence_type = sequence_type.base_type
begin = sequence_type.scope.lookup("begin")
end = sequence_type.scope.lookup("end")
if (begin is None
or not begin.type.is_cfunction
or begin.type.args):
error(self.pos, "missing begin() on %s" % self.sequence.type)
self.type = error_type
return
if (end is None
or not end.type.is_cfunction
or end.type.args):
error(self.pos, "missing end() on %s" % self.sequence.type)
self.type = error_type
return
iter_type = begin.type.return_type
if iter_type.is_cpp_class:
if env.lookup_operator_for_types(
self.pos,
"!=",
[iter_type, end.type.return_type]) is None:
error(self.pos, "missing operator!= on result of begin() on %s" % self.sequence.type)
self.type = error_type
return
if env.lookup_operator_for_types(self.pos, '++', [iter_type]) is None:
error(self.pos, "missing operator++ on result of begin() on %s" % self.sequence.type)
self.type = error_type
return
if env.lookup_operator_for_types(self.pos, '*', [iter_type]) is None:
error(self.pos, "missing operator* on result of begin() on %s" % self.sequence.type)
self.type = error_type
return
self.type = iter_type
elif iter_type.is_ptr:
if not (iter_type == end.type.return_type):
error(self.pos, "incompatible types for begin() and end()")
self.type = iter_type
else:
error(self.pos, "result type of begin() on %s must be a C++ class or pointer" % self.sequence.type)
self.type = error_type
return
def generate_result_code(self, code):
sequence_type = self.sequence.type
if sequence_type.is_cpp_class:
if self.sequence.is_name:
# safe: C++ won't allow you to reassign to class references
begin_func = "%s.begin" % self.sequence.result()
else:
sequence_type = PyrexTypes.c_ptr_type(sequence_type)
self.cpp_iterator_cname = code.funcstate.allocate_temp(sequence_type, manage_ref=False)
code.putln("%s = &%s;" % (self.cpp_iterator_cname, self.sequence.result()))
begin_func = "%s->begin" % self.cpp_iterator_cname
# TODO: Limit scope.
code.putln("%s = %s();" % (self.result(), begin_func))
return
if sequence_type.is_array or sequence_type.is_ptr:
raise InternalError("for in carray slice not transformed")
is_builtin_sequence = sequence_type in (list_type, tuple_type)
if not is_builtin_sequence:
# reversed() not currently optimised (see Optimize.py)
assert not self.reversed, "internal error: reversed() only implemented for list/tuple objects"
self.may_be_a_sequence = not sequence_type.is_builtin_type
if self.may_be_a_sequence:
code.putln(
"if (likely(PyList_CheckExact(%s)) || PyTuple_CheckExact(%s)) {" % (
self.sequence.py_result(),
self.sequence.py_result()))
if is_builtin_sequence or self.may_be_a_sequence:
self.counter_cname = code.funcstate.allocate_temp(
PyrexTypes.c_py_ssize_t_type, manage_ref=False)
if self.reversed:
if sequence_type is list_type:
init_value = 'PyList_GET_SIZE(%s) - 1' % self.result()
else:
init_value = 'PyTuple_GET_SIZE(%s) - 1' % self.result()
else:
init_value = '0'
code.putln("%s = %s; __Pyx_INCREF(%s); %s = %s;" % (
self.result(),
self.sequence.py_result(),
self.result(),
self.counter_cname,
init_value))
if not is_builtin_sequence:
self.iter_func_ptr = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
if self.may_be_a_sequence:
code.putln("%s = NULL;" % self.iter_func_ptr)
code.putln("} else {")
code.put("%s = -1; " % self.counter_cname)
code.putln("%s = PyObject_GetIter(%s); %s" % (
self.result(),
self.sequence.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
# PyObject_GetIter() fails if "tp_iternext" is not set, but the check below
# makes it visible to the C compiler that the pointer really isn't NULL, so that
# it can distinguish between the special cases and the generic case
code.putln("%s = Py_TYPE(%s)->tp_iternext; %s" % (
self.iter_func_ptr, self.py_result(),
code.error_goto_if_null(self.iter_func_ptr, self.pos)))
if self.may_be_a_sequence:
code.putln("}")
def generate_next_sequence_item(self, test_name, result_name, code):
assert self.counter_cname, "internal error: counter_cname temp not prepared"
final_size = 'Py%s_GET_SIZE(%s)' % (test_name, self.py_result())
if self.sequence.is_sequence_constructor:
item_count = len(self.sequence.args)
if self.sequence.mult_factor is None:
final_size = item_count
elif isinstance(self.sequence.mult_factor.constant_result, _py_int_types):
final_size = item_count * self.sequence.mult_factor.constant_result
code.putln("if (%s >= %s) break;" % (self.counter_cname, final_size))
if self.reversed:
inc_dec = '--'
else:
inc_dec = '++'
code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
code.putln(
"%s = Py%s_GET_ITEM(%s, %s); __Pyx_INCREF(%s); %s%s; %s" % (
result_name,
test_name,
self.py_result(),
self.counter_cname,
result_name,
self.counter_cname,
inc_dec,
# use the error label to avoid C compiler warnings if we only use it below
code.error_goto_if_neg('0', self.pos)
))
code.putln("#else")
code.putln(
"%s = PySequence_ITEM(%s, %s); %s%s; %s" % (
result_name,
self.py_result(),
self.counter_cname,
self.counter_cname,
inc_dec,
code.error_goto_if_null(result_name, self.pos)))
code.put_gotref(result_name)
code.putln("#endif")
def generate_iter_next_result_code(self, result_name, code):
sequence_type = self.sequence.type
if self.reversed:
code.putln("if (%s < 0) break;" % self.counter_cname)
if sequence_type.is_cpp_class:
if self.cpp_iterator_cname:
end_func = "%s->end" % self.cpp_iterator_cname
else:
end_func = "%s.end" % self.sequence.result()
# TODO: Cache end() call?
code.putln("if (!(%s != %s())) break;" % (
self.result(),
end_func))
code.putln("%s = *%s;" % (
result_name,
self.result()))
code.putln("++%s;" % self.result())
return
elif sequence_type is list_type:
self.generate_next_sequence_item('List', result_name, code)
return
elif sequence_type is tuple_type:
self.generate_next_sequence_item('Tuple', result_name, code)
return
if self.may_be_a_sequence:
code.putln("if (likely(!%s)) {" % self.iter_func_ptr)
code.putln("if (likely(PyList_CheckExact(%s))) {" % self.py_result())
self.generate_next_sequence_item('List', result_name, code)
code.putln("} else {")
self.generate_next_sequence_item('Tuple', result_name, code)
code.putln("}")
code.put("} else ")
code.putln("{")
code.putln(
"%s = %s(%s);" % (
result_name,
self.iter_func_ptr,
self.py_result()))
code.putln("if (unlikely(!%s)) {" % result_name)
code.putln("PyObject* exc_type = PyErr_Occurred();")
code.putln("if (exc_type) {")
code.putln("if (likely(exc_type == PyExc_StopIteration ||"
" PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();")
code.putln("else %s" % code.error_goto(self.pos))
code.putln("}")
code.putln("break;")
code.putln("}")
code.put_gotref(result_name)
code.putln("}")
def free_temps(self, code):
if self.counter_cname:
code.funcstate.release_temp(self.counter_cname)
if self.iter_func_ptr:
code.funcstate.release_temp(self.iter_func_ptr)
self.iter_func_ptr = None
if self.cpp_iterator_cname:
code.funcstate.release_temp(self.cpp_iterator_cname)
ExprNode.free_temps(self, code)
class NextNode(AtomicExprNode):
# Used as part of for statement implementation.
# Implements result = next(iterator)
# Created during analyse_types phase.
# The iterator is not owned by this node.
#
# iterator IteratorNode
def __init__(self, iterator):
AtomicExprNode.__init__(self, iterator.pos)
self.iterator = iterator
def nogil_check(self, env):
# ignore - errors (if any) are already handled by IteratorNode
pass
def type_dependencies(self, env):
return self.iterator.type_dependencies(env)
def infer_type(self, env, iterator_type=None):
if iterator_type is None:
iterator_type = self.iterator.infer_type(env)
if iterator_type.is_ptr or iterator_type.is_array:
return iterator_type.base_type
elif iterator_type.is_cpp_class:
item_type = env.lookup_operator_for_types(self.pos, "*", [iterator_type]).type.return_type
if item_type.is_reference:
item_type = item_type.ref_base_type
if item_type.is_const:
item_type = item_type.const_base_type
return item_type
else:
# Avoid duplication of complicated logic.
fake_index_node = IndexNode(
self.pos,
base=self.iterator.sequence,
index=IntNode(self.pos, value='PY_SSIZE_T_MAX',
type=PyrexTypes.c_py_ssize_t_type))
return fake_index_node.infer_type(env)
def analyse_types(self, env):
self.type = self.infer_type(env, self.iterator.type)
self.is_temp = 1
return self
def generate_result_code(self, code):
self.iterator.generate_iter_next_result_code(self.result(), code)
class AsyncIteratorNode(ExprNode):
# Used as part of 'async for' statement implementation.
#
# Implements result = sequence.__aiter__()
#
# sequence ExprNode
subexprs = ['sequence']
is_async = True
type = py_object_type
is_temp = 1
def infer_type(self, env):
return py_object_type
def analyse_types(self, env):
self.sequence = self.sequence.analyse_types(env)
if not self.sequence.type.is_pyobject:
error(self.pos, "async for loops not allowed on C/C++ types")
self.sequence = self.sequence.coerce_to_pyobject(env)
return self
def generate_result_code(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("AsyncIter", "Coroutine.c"))
code.putln("%s = __Pyx_Coroutine_GetAsyncIter(%s); %s" % (
self.result(),
self.sequence.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
class AsyncNextNode(AtomicExprNode):
# Used as part of 'async for' statement implementation.
# Implements result = iterator.__anext__()
# Created during analyse_types phase.
# The iterator is not owned by this node.
#
# iterator IteratorNode
type = py_object_type
is_temp = 1
def __init__(self, iterator):
AtomicExprNode.__init__(self, iterator.pos)
self.iterator = iterator
def infer_type(self, env):
return py_object_type
def analyse_types(self, env):
return self
def generate_result_code(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("AsyncIter", "Coroutine.c"))
code.putln("%s = __Pyx_Coroutine_AsyncIterNext(%s); %s" % (
self.result(),
self.iterator.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
class WithExitCallNode(ExprNode):
# The __exit__() call of a 'with' statement. Used in both the
# except and finally clauses.
# with_stat WithStatNode the surrounding 'with' statement
# args TupleNode or ResultStatNode the exception info tuple
# await AwaitExprNode the await expression of an 'async with' statement
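    #
    # Illustrative: following Python's 'with' protocol, args carries
    # (None, None, None) on the success path and the live exception info
    # in an except clause; the truth value of the __exit__() call
    # (computed via __Pyx_PyObject_IsTrue below) tells the surrounding
    # code whether the exception is to be swallowed.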
subexprs = ['args', 'await']
test_if_run = True
await = None
def analyse_types(self, env):
self.args = self.args.analyse_types(env)
if self.await:
self.await = self.await.analyse_types(env)
self.type = PyrexTypes.c_bint_type
self.is_temp = True
return self
def generate_evaluation_code(self, code):
if self.test_if_run:
# call only if it was not already called (and decref-cleared)
code.putln("if (%s) {" % self.with_stat.exit_var)
self.args.generate_evaluation_code(code)
result_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
code.mark_pos(self.pos)
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCall", "ObjectHandling.c"))
code.putln("%s = __Pyx_PyObject_Call(%s, %s, NULL);" % (
result_var,
self.with_stat.exit_var,
self.args.result()))
code.put_decref_clear(self.with_stat.exit_var, type=py_object_type)
self.args.generate_disposal_code(code)
self.args.free_temps(code)
code.putln(code.error_goto_if_null(result_var, self.pos))
code.put_gotref(result_var)
if self.await:
# FIXME: result_var temp currently leaks into the closure
self.await.generate_evaluation_code(code, source_cname=result_var, decref_source=True)
code.putln("%s = %s;" % (result_var, self.await.py_result()))
self.await.generate_post_assignment_code(code)
self.await.free_temps(code)
if self.result_is_used:
self.allocate_temp_result(code)
code.putln("%s = __Pyx_PyObject_IsTrue(%s);" % (self.result(), result_var))
code.put_decref_clear(result_var, type=py_object_type)
if self.result_is_used:
code.put_error_if_neg(self.pos, self.result())
code.funcstate.release_temp(result_var)
if self.test_if_run:
code.putln("}")
class ExcValueNode(AtomicExprNode):
# Node created during analyse_types phase
# of an ExceptClauseNode to fetch the current
# exception value.
type = py_object_type
def __init__(self, pos):
ExprNode.__init__(self, pos)
def set_var(self, var):
self.var = var
def calculate_result_code(self):
return self.var
def generate_result_code(self, code):
pass
def analyse_types(self, env):
return self
class TempNode(ExprNode):
# Node created during analyse_types phase
# of some nodes to hold a temporary value.
#
# Note: One must call "allocate" and "release" on
# the node during code generation to get/release the temp.
# This is because the temp result is often used outside of
# the regular cycle.
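    #
    # Typical usage sketch (hypothetical caller):
    #   tmp = TempNode(pos, some_type)
    #   tmp.allocate(code)
    #   ... emit code using tmp.result() ...
    #   tmp.release(code)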
subexprs = []
def __init__(self, pos, type, env=None):
ExprNode.__init__(self, pos)
self.type = type
if type.is_pyobject:
self.result_ctype = py_object_type
self.is_temp = 1
def analyse_types(self, env):
return self
def analyse_target_declaration(self, env):
pass
def generate_result_code(self, code):
pass
def allocate(self, code):
self.temp_cname = code.funcstate.allocate_temp(self.type, manage_ref=True)
def release(self, code):
code.funcstate.release_temp(self.temp_cname)
self.temp_cname = None
def result(self):
try:
return self.temp_cname
except:
assert False, "Remember to call allocate/release on TempNode"
raise
# Do not participate in normal temp alloc/dealloc:
def allocate_temp_result(self, code):
pass
def release_temp_result(self, code):
pass
class PyTempNode(TempNode):
# TempNode holding a Python value.
def __init__(self, pos, env):
TempNode.__init__(self, pos, PyrexTypes.py_object_type, env)
class RawCNameExprNode(ExprNode):
subexprs = []
def __init__(self, pos, type=None, cname=None):
ExprNode.__init__(self, pos, type=type)
if cname is not None:
self.cname = cname
def analyse_types(self, env):
return self
def set_cname(self, cname):
self.cname = cname
def result(self):
return self.cname
def generate_result_code(self, code):
pass
#-------------------------------------------------------------------
#
# Parallel nodes (cython.parallel.thread(savailable|id))
#
#-------------------------------------------------------------------
class ParallelThreadsAvailableNode(AtomicExprNode):
"""
Note: this is disabled and not a valid directive at this moment
Implements cython.parallel.threadsavailable(). If we are called from the
sequential part of the application, we need to call omp_get_max_threads(),
and in the parallel part we can just call omp_get_num_threads()
"""
type = PyrexTypes.c_int_type
def analyse_types(self, env):
self.is_temp = True
# env.add_include_file("omp.h")
return self
    def generate_result_code(self, code):
        code.putln("#ifdef _OPENMP")
        # match the docstring above: in a parallel region report the
        # current team size, in the sequential part report how many
        # threads the next parallel region may use
        code.putln("if (omp_in_parallel()) %s = omp_get_num_threads();" %
                   self.temp_code)
        code.putln("else %s = omp_get_max_threads();" % self.temp_code)
        code.putln("#else")
        code.putln("%s = 1;" % self.temp_code)
        code.putln("#endif")
def result(self):
return self.temp_code
class ParallelThreadIdNode(AtomicExprNode): #, Nodes.ParallelNode):
"""
Implements cython.parallel.threadid()
"""
type = PyrexTypes.c_int_type
def analyse_types(self, env):
self.is_temp = True
# env.add_include_file("omp.h")
return self
def generate_result_code(self, code):
code.putln("#ifdef _OPENMP")
code.putln("%s = omp_get_thread_num();" % self.temp_code)
code.putln("#else")
code.putln("%s = 0;" % self.temp_code)
code.putln("#endif")
def result(self):
return self.temp_code
#-------------------------------------------------------------------
#
# Trailer nodes
#
#-------------------------------------------------------------------
class _IndexingBaseNode(ExprNode):
# Base class for indexing nodes.
#
# base ExprNode the value being indexed
def is_ephemeral(self):
# in most cases, indexing will return a safe reference to an object in a container,
# so we consider the result safe if the base object is
return self.base.is_ephemeral() or self.base.type in (
basestring_type, str_type, bytes_type, unicode_type)
def check_const_addr(self):
return self.base.check_const_addr() and self.index.check_const()
def is_lvalue(self):
# NOTE: references currently have both is_reference and is_ptr
# set. Since pointers and references have different lvalue
# rules, we must be careful to separate the two.
if self.type.is_reference:
if self.type.ref_base_type.is_array:
# fixed-sized arrays aren't l-values
return False
elif self.type.is_ptr:
# non-const pointers can always be reassigned
return True
# Just about everything else returned by the index operator
# can be an lvalue.
return True
class IndexNode(_IndexingBaseNode):
# Sequence indexing.
#
# base ExprNode
# index ExprNode
# type_indices [PyrexType]
#
# is_fused_index boolean Whether the index is used to specialize a
# c(p)def function
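    #
    # One node class covers several syntactic uses of base[index]
    # (illustrative):
    #   pylist[i]           -> Python item access
    #   carray[i]           -> C array indexing
    #   vector[int]         -> C++ template parameterisation
    #   fused_func[double]  -> fused-function specialisation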
subexprs = ['base', 'index']
type_indices = None
is_subscript = True
is_fused_index = False
def __init__(self, pos, index, **kw):
ExprNode.__init__(self, pos, index=index, **kw)
self._index = index
def calculate_constant_result(self):
self.constant_result = self.base.constant_result[self.index.constant_result]
def compile_time_value(self, denv):
base = self.base.compile_time_value(denv)
index = self.index.compile_time_value(denv)
try:
return base[index]
except Exception as e:
self.compile_time_value_error(e)
def is_simple(self):
base = self.base
return (base.is_simple() and self.index.is_simple()
and base.type and (base.type.is_ptr or base.type.is_array))
def may_be_none(self):
base_type = self.base.type
if base_type:
if base_type.is_string:
return False
if isinstance(self.index, SliceNode):
# slicing!
if base_type in (bytes_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return False
return ExprNode.may_be_none(self)
def analyse_target_declaration(self, env):
pass
def analyse_as_type(self, env):
base_type = self.base.analyse_as_type(env)
if base_type and not base_type.is_pyobject:
if base_type.is_cpp_class:
if isinstance(self.index, TupleNode):
template_values = self.index.args
else:
template_values = [self.index]
type_node = Nodes.TemplatedTypeNode(
pos=self.pos,
positional_args=template_values,
keyword_args=None)
return type_node.analyse(env, base_type=base_type)
else:
index = self.index.compile_time_value(env)
if index is not None:
return PyrexTypes.CArrayType(base_type, int(index))
error(self.pos, "Array size must be a compile time constant")
return None
def type_dependencies(self, env):
return self.base.type_dependencies(env) + self.index.type_dependencies(env)
def infer_type(self, env):
base_type = self.base.infer_type(env)
if self.index.is_slice:
# slicing!
if base_type.is_string:
# sliced C strings must coerce to Python
return bytes_type
elif base_type.is_pyunicode_ptr:
# sliced Py_UNICODE* strings must coerce to Python
return unicode_type
elif base_type in (unicode_type, bytes_type, str_type,
bytearray_type, list_type, tuple_type):
# slicing these returns the same type
return base_type
else:
# TODO: Handle buffers (hopefully without too much redundancy).
return py_object_type
index_type = self.index.infer_type(env)
if index_type and index_type.is_int or isinstance(self.index, IntNode):
# indexing!
if base_type is unicode_type:
# Py_UCS4 will automatically coerce to a unicode string
# if required, so this is safe. We only infer Py_UCS4
# when the index is a C integer type. Otherwise, we may
# need to use normal Python item access, in which case
# it's faster to return the one-char unicode string than
# to receive it, throw it away, and potentially rebuild it
# on a subsequent PyObject coercion.
return PyrexTypes.c_py_ucs4_type
elif base_type is str_type:
# always returns str - Py2: bytes, Py3: unicode
return base_type
elif base_type is bytearray_type:
return PyrexTypes.c_uchar_type
elif isinstance(self.base, BytesNode):
#if env.global_scope().context.language_level >= 3:
# # inferring 'char' can be made to work in Python 3 mode
# return PyrexTypes.c_char_type
# Py2/3 return different types on indexing bytes objects
return py_object_type
elif base_type in (tuple_type, list_type):
# if base is a literal, take a look at its values
item_type = infer_sequence_item_type(
env, self.base, self.index, seq_type=base_type)
if item_type is not None:
return item_type
elif base_type.is_ptr or base_type.is_array:
return base_type.base_type
elif base_type.is_ctuple and isinstance(self.index, IntNode):
if self.index.has_constant_result():
index = self.index.constant_result
if index < 0:
index += base_type.size
if 0 <= index < base_type.size:
return base_type.components[index]
if base_type.is_cpp_class:
class FakeOperand:
def __init__(self, **kwds):
self.__dict__.update(kwds)
operands = [
FakeOperand(pos=self.pos, type=base_type),
FakeOperand(pos=self.pos, type=index_type),
]
index_func = env.lookup_operator('[]', operands)
if index_func is not None:
return index_func.type.return_type
# may be slicing or indexing, we don't know
if base_type in (unicode_type, str_type):
            # these types always return their own type on Python indexing/slicing
return base_type
else:
# TODO: Handle buffers (hopefully without too much redundancy).
return py_object_type
def analyse_types(self, env):
return self.analyse_base_and_index_types(env, getting=True)
def analyse_target_types(self, env):
node = self.analyse_base_and_index_types(env, setting=True)
if node.type.is_const:
error(self.pos, "Assignment to const dereference")
if node is self and not node.is_lvalue():
error(self.pos, "Assignment to non-lvalue of type '%s'" % node.type)
return node
def analyse_base_and_index_types(self, env, getting=False, setting=False,
analyse_base=True):
        # Note: This might be cleaned up by having IndexNode
        # parsed in a saner way and only constructing the tuple
        # when needed.
if analyse_base:
self.base = self.base.analyse_types(env)
if self.base.type.is_error:
# Do not visit child tree if base is undeclared to avoid confusing
# error messages
self.type = PyrexTypes.error_type
return self
is_slice = self.index.is_slice
if not env.directives['wraparound']:
if is_slice:
check_negative_indices(self.index.start, self.index.stop)
else:
check_negative_indices(self.index)
# Potentially overflowing index value.
if not is_slice and isinstance(self.index, IntNode) and Utils.long_literal(self.index.value):
self.index = self.index.coerce_to_pyobject(env)
is_memslice = self.base.type.is_memoryviewslice
# Handle the case where base is a literal char* (and we expect a string, not an int)
if not is_memslice and (isinstance(self.base, BytesNode) or is_slice):
if self.base.type.is_string or not (self.base.type.is_ptr or self.base.type.is_array):
self.base = self.base.coerce_to_pyobject(env)
replacement_node = self.analyse_as_buffer_operation(env, getting)
if replacement_node is not None:
return replacement_node
self.nogil = env.nogil
base_type = self.base.type
if not base_type.is_cfunction:
self.index = self.index.analyse_types(env)
self.original_index_type = self.index.type
if base_type.is_unicode_char:
# we infer Py_UNICODE/Py_UCS4 for unicode strings in some
# cases, but indexing must still work for them
if setting:
warning(self.pos, "cannot assign to Unicode string index", level=1)
elif self.index.constant_result in (0, -1):
# uchar[0] => uchar
return self.base
self.base = self.base.coerce_to_pyobject(env)
base_type = self.base.type
if base_type.is_pyobject:
return self.analyse_as_pyobject(env, is_slice, getting, setting)
elif base_type.is_ptr or base_type.is_array:
return self.analyse_as_c_array(env, is_slice)
elif base_type.is_cpp_class:
return self.analyse_as_cpp(env, setting)
elif base_type.is_cfunction:
return self.analyse_as_c_function(env)
elif base_type.is_ctuple:
return self.analyse_as_c_tuple(env, getting, setting)
else:
error(self.pos,
"Attempting to index non-array type '%s'" %
base_type)
self.type = PyrexTypes.error_type
return self
def analyse_as_pyobject(self, env, is_slice, getting, setting):
base_type = self.base.type
if self.index.type.is_int and base_type is not dict_type:
if (getting
and (base_type in (list_type, tuple_type, bytearray_type))
and (not self.index.type.signed
or not env.directives['wraparound']
or (isinstance(self.index, IntNode) and
self.index.has_constant_result() and self.index.constant_result >= 0))
and not env.directives['boundscheck']):
self.is_temp = 0
else:
self.is_temp = 1
self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env).coerce_to_simple(env)
self.original_index_type.create_to_py_utility_code(env)
else:
self.index = self.index.coerce_to_pyobject(env)
self.is_temp = 1
if self.index.type.is_int and base_type is unicode_type:
# Py_UNICODE/Py_UCS4 will automatically coerce to a unicode string
# if required, so this is fast and safe
self.type = PyrexTypes.c_py_ucs4_type
elif self.index.type.is_int and base_type is bytearray_type:
if setting:
self.type = PyrexTypes.c_uchar_type
else:
# not using 'uchar' to enable fast and safe error reporting as '-1'
self.type = PyrexTypes.c_int_type
elif is_slice and base_type in (bytes_type, str_type, unicode_type, list_type, tuple_type):
self.type = base_type
else:
item_type = None
if base_type in (list_type, tuple_type) and self.index.type.is_int:
item_type = infer_sequence_item_type(
env, self.base, self.index, seq_type=base_type)
if item_type is None:
item_type = py_object_type
self.type = item_type
if base_type in (list_type, tuple_type, dict_type):
# do the None check explicitly (not in a helper) to allow optimising it away
self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
self.wrap_in_nonecheck_node(env, getting)
return self
def analyse_as_c_array(self, env, is_slice):
base_type = self.base.type
self.type = base_type.base_type
if is_slice:
self.type = base_type
elif self.index.type.is_pyobject:
self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
elif not self.index.type.is_int:
error(self.pos, "Invalid index type '%s'" % self.index.type)
return self
def analyse_as_cpp(self, env, setting):
base_type = self.base.type
function = env.lookup_operator("[]", [self.base, self.index])
if function is None:
error(self.pos, "Indexing '%s' not supported for index type '%s'" % (base_type, self.index.type))
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return self
func_type = function.type
if func_type.is_ptr:
func_type = func_type.base_type
self.exception_check = func_type.exception_check
self.exception_value = func_type.exception_value
if self.exception_check:
if not setting:
self.is_temp = True
if self.exception_value is None:
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
self.index = self.index.coerce_to(func_type.args[0].type, env)
self.type = func_type.return_type
if setting and not func_type.return_type.is_reference:
error(self.pos, "Can't set non-reference result '%s'" % self.type)
return self
def analyse_as_c_function(self, env):
base_type = self.base.type
if base_type.is_fused:
self.parse_indexed_fused_cdef(env)
else:
self.type_indices = self.parse_index_as_types(env)
self.index = None # FIXME: use a dedicated Node class instead of generic IndexNode
if base_type.templates is None:
error(self.pos, "Can only parameterize template functions.")
self.type = error_type
elif len(base_type.templates) != len(self.type_indices):
error(self.pos, "Wrong number of template arguments: expected %s, got %s" % (
(len(base_type.templates), len(self.type_indices))))
self.type = error_type
else:
self.type = base_type.specialize(dict(zip(base_type.templates, self.type_indices)))
# FIXME: use a dedicated Node class instead of generic IndexNode
return self
def analyse_as_c_tuple(self, env, getting, setting):
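        # e.g. (illustrative): for a ctuple typed (int, double), t[0]
        # has type int and t[-1] folds to t[1] with type double, while
        # t[2] is rejected at compile time as out of bounds.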
base_type = self.base.type
if isinstance(self.index, IntNode) and self.index.has_constant_result():
index = self.index.constant_result
if -base_type.size <= index < base_type.size:
if index < 0:
index += base_type.size
self.type = base_type.components[index]
else:
error(self.pos,
"Index %s out of bounds for '%s'" %
(index, base_type))
self.type = PyrexTypes.error_type
return self
else:
self.base = self.base.coerce_to_pyobject(env)
return self.analyse_base_and_index_types(env, getting=getting, setting=setting, analyse_base=False)
def analyse_as_buffer_operation(self, env, getting):
"""
Analyse buffer indexing and memoryview indexing/slicing
"""
if isinstance(self.index, TupleNode):
indices = self.index.args
else:
indices = [self.index]
base_type = self.base.type
replacement_node = None
if base_type.is_memoryviewslice:
# memoryviewslice indexing or slicing
from . import MemoryView
have_slices, indices, newaxes = MemoryView.unellipsify(indices, base_type.ndim)
if have_slices:
replacement_node = MemoryViewSliceNode(self.pos, indices=indices, base=self.base)
else:
replacement_node = MemoryViewIndexNode(self.pos, indices=indices, base=self.base)
elif base_type.is_buffer and len(indices) == base_type.ndim:
# Buffer indexing
is_buffer_access = True
for index in indices:
index = index.analyse_types(env)
if not index.type.is_int:
is_buffer_access = False
if is_buffer_access:
replacement_node = BufferIndexNode(self.pos, indices=indices, base=self.base)
# On cloning, indices is cloned. Otherwise, unpack index into indices.
assert not isinstance(self.index, CloneNode)
if replacement_node is not None:
replacement_node = replacement_node.analyse_types(env, getting)
return replacement_node
def wrap_in_nonecheck_node(self, env, getting):
if not env.directives['nonecheck'] or not self.base.may_be_none():
return
self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
def parse_index_as_types(self, env, required=True):
if isinstance(self.index, TupleNode):
indices = self.index.args
else:
indices = [self.index]
type_indices = []
for index in indices:
type_indices.append(index.analyse_as_type(env))
if type_indices[-1] is None:
if required:
error(index.pos, "not parsable as a type")
return None
return type_indices
def parse_indexed_fused_cdef(self, env):
"""
Interpret fused_cdef_func[specific_type1, ...]
Note that if this method is called, we are an indexed cdef function
with fused argument types, and this IndexNode will be replaced by the
NameNode with specific entry just after analysis of expressions by
AnalyseExpressionsTransform.
"""
self.type = PyrexTypes.error_type
self.is_fused_index = True
base_type = self.base.type
positions = []
if self.index.is_name or self.index.is_attribute:
positions.append(self.index.pos)
elif isinstance(self.index, TupleNode):
for arg in self.index.args:
positions.append(arg.pos)
specific_types = self.parse_index_as_types(env, required=False)
if specific_types is None:
self.index = self.index.analyse_types(env)
if not self.base.entry.as_variable:
error(self.pos, "Can only index fused functions with types")
else:
# A cpdef function indexed with Python objects
self.base.entry = self.entry = self.base.entry.as_variable
self.base.type = self.type = self.entry.type
self.base.is_temp = True
self.is_temp = True
self.entry.used = True
self.is_fused_index = False
return
for i, type in enumerate(specific_types):
specific_types[i] = type.specialize_fused(env)
fused_types = base_type.get_fused_types()
if len(specific_types) > len(fused_types):
return error(self.pos, "Too many types specified")
elif len(specific_types) < len(fused_types):
t = fused_types[len(specific_types)]
return error(self.pos, "Not enough types specified to specialize "
"the function, %s is still fused" % t)
# See if our index types form valid specializations
for pos, specific_type, fused_type in zip(positions,
specific_types,
fused_types):
if not any(specific_type.same_as(t) for t in fused_type.types):
return error(pos, "Type not in fused type")
if specific_type is None or specific_type.is_error:
return
fused_to_specific = dict(zip(fused_types, specific_types))
type = base_type.specialize(fused_to_specific)
if type.is_fused:
# Only partially specific, this is invalid
error(self.pos,
"Index operation makes function only partially specific")
else:
# Fully specific, find the signature with the specialized entry
for signature in self.base.type.get_all_specialized_function_types():
if type.same_as(signature):
self.type = signature
if self.base.is_attribute:
# Pretend to be a normal attribute, for cdef extension
# methods
self.entry = signature.entry
self.is_attribute = True
self.obj = self.base.obj
self.type.entry.used = True
self.base.type = signature
self.base.entry = signature.entry
break
else:
# This is a bug
raise InternalError("Couldn't find the right signature")
gil_message = "Indexing Python object"
def calculate_result_code(self):
if self.base.type in (list_type, tuple_type, bytearray_type):
if self.base.type is list_type:
index_code = "PyList_GET_ITEM(%s, %s)"
elif self.base.type is tuple_type:
index_code = "PyTuple_GET_ITEM(%s, %s)"
elif self.base.type is bytearray_type:
index_code = "((unsigned char)(PyByteArray_AS_STRING(%s)[%s]))"
else:
assert False, "unexpected base type in indexing: %s" % self.base.type
elif self.base.type.is_cfunction:
return "%s<%s>" % (
self.base.result(),
",".join([param.empty_declaration_code() for param in self.type_indices]))
elif self.base.type.is_ctuple:
index = self.index.constant_result
if index < 0:
index += self.base.type.size
return "%s.f%s" % (self.base.result(), index)
else:
if (self.type.is_ptr or self.type.is_array) and self.type == self.base.type:
error(self.pos, "Invalid use of pointer slice")
return
index_code = "(%s[%s])"
return index_code % (self.base.result(), self.index.result())
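# Rough sketch of the C code produced above (actual output may differ):
#     list base:    PyList_GET_ITEM(base, i)
#     tuple base:   PyTuple_GET_ITEM(base, i)
#     ctuple base:  base.f0, base.f1, ...
#     pointer base: (base[i])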
def extra_index_params(self, code):
if self.index.type.is_int:
is_list = self.base.type is list_type
wraparound = (
bool(code.globalstate.directives['wraparound']) and
self.original_index_type.signed and
not (isinstance(self.index.constant_result, _py_int_types)
and self.index.constant_result >= 0))
boundscheck = bool(code.globalstate.directives['boundscheck'])
return ", %s, %d, %s, %d, %d, %d" % (
self.original_index_type.empty_declaration_code(),
1 if self.original_index_type.signed else 0,
self.original_index_type.to_py_function,
is_list, wraparound, boundscheck)
else:
return ""
def generate_result_code(self, code):
if not self.is_temp:
# all handled in self.calculate_result_code()
return
if self.type.is_pyobject:
error_value = 'NULL'
if self.index.type.is_int:
if self.base.type is list_type:
function = "__Pyx_GetItemInt_List"
elif self.base.type is tuple_type:
function = "__Pyx_GetItemInt_Tuple"
else:
function = "__Pyx_GetItemInt"
code.globalstate.use_utility_code(
TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c"))
else:
if self.base.type is dict_type:
function = "__Pyx_PyDict_GetItem"
code.globalstate.use_utility_code(
UtilityCode.load_cached("DictGetItem", "ObjectHandling.c"))
else:
function = "PyObject_GetItem"
elif self.type.is_unicode_char and self.base.type is unicode_type:
assert self.index.type.is_int
function = "__Pyx_GetItemInt_Unicode"
error_value = '(Py_UCS4)-1'
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetItemIntUnicode", "StringTools.c"))
elif self.base.type is bytearray_type:
assert self.index.type.is_int
assert self.type.is_int
function = "__Pyx_GetItemInt_ByteArray"
error_value = '-1'
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetItemIntByteArray", "StringTools.c"))
elif not (self.base.type.is_cpp_class and self.exception_check):
assert False, "unexpected type %s and base type %s for indexing" % (
self.type, self.base.type)
if self.index.type.is_int:
index_code = self.index.result()
else:
index_code = self.index.py_result()
if self.base.type.is_cpp_class and self.exception_check:
translate_cpp_exception(code, self.pos,
"%s = %s[%s];" % (self.result(), self.base.result(),
self.index.result()),
self.exception_value, self.in_nogil_context)
else:
code.putln(
"%s = %s(%s, %s%s); if (unlikely(%s == %s)) %s;" % (
self.result(),
function,
self.base.py_result(),
index_code,
self.extra_index_params(code),
self.result(),
error_value,
code.error_goto(self.pos)))
if self.type.is_pyobject:
code.put_gotref(self.py_result())
def generate_setitem_code(self, value_code, code):
if self.index.type.is_int:
if self.base.type is bytearray_type:
code.globalstate.use_utility_code(
UtilityCode.load_cached("SetItemIntByteArray", "StringTools.c"))
function = "__Pyx_SetItemInt_ByteArray"
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("SetItemInt", "ObjectHandling.c"))
function = "__Pyx_SetItemInt"
index_code = self.index.result()
else:
index_code = self.index.py_result()
if self.base.type is dict_type:
function = "PyDict_SetItem"
# It would seem that we could specialize lists/tuples, but that
# shouldn't happen here.
# Both PyList_SetItem() and PyTuple_SetItem() take a Py_ssize_t as
# index instead of an object, and bad conversion here would give
# the wrong exception. Also, tuples are supposed to be immutable,
# and raise a TypeError when trying to set their entries
# (PyTuple_SetItem() is for creating new tuples from scratch).
else:
function = "PyObject_SetItem"
code.putln(
"if (unlikely(%s(%s, %s, %s%s) < 0)) %s" % (
function,
self.base.py_result(),
index_code,
value_code,
self.extra_index_params(code),
code.error_goto(self.pos)))
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
exception_check=None, exception_value=None):
self.generate_subexpr_evaluation_code(code)
if self.type.is_pyobject:
self.generate_setitem_code(rhs.py_result(), code)
elif self.base.type is bytearray_type:
value_code = self._check_byte_value(code, rhs)
self.generate_setitem_code(value_code, code)
elif self.base.type.is_cpp_class and self.exception_check and self.exception_check == '+':
if overloaded_assignment and exception_check and \
self.exception_value != exception_value:
# Handle the case that both the index operator and the assignment
# operator have a c++ exception handler and they are not the same.
translate_double_cpp_exception(code, self.pos, self.type,
self.result(), rhs.result(), self.exception_value,
exception_value, self.in_nogil_context)
else:
# Handle the case that only the index operator has a
# c++ exception handler, or that
# both exception handlers are the same.
translate_cpp_exception(code, self.pos,
"%s = %s;" % (self.result(), rhs.result()),
self.exception_value, self.in_nogil_context)
else:
code.putln(
"%s = %s;" % (self.result(), rhs.result()))
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
def _check_byte_value(self, code, rhs):
# TODO: should we do this generally on downcasts, or just here?
assert rhs.type.is_int, repr(rhs.type)
value_code = rhs.result()
if rhs.has_constant_result():
if 0 <= rhs.constant_result < 256:
return value_code
needs_cast = True # make at least the C compiler happy
warning(rhs.pos,
"value outside of range(0, 256)"
" when assigning to byte: %s" % rhs.constant_result,
level=1)
else:
needs_cast = rhs.type != PyrexTypes.c_uchar_type
if not self.nogil:
conditions = []
if rhs.is_literal or rhs.type.signed:
conditions.append('%s < 0' % value_code)
if (rhs.is_literal or not
(rhs.is_temp and rhs.type in (
PyrexTypes.c_uchar_type, PyrexTypes.c_char_type,
PyrexTypes.c_schar_type))):
conditions.append('%s > 255' % value_code)
if conditions:
code.putln("if (unlikely(%s)) {" % ' || '.join(conditions))
code.putln(
'PyErr_SetString(PyExc_ValueError,'
' "byte must be in range(0, 256)"); %s' %
code.error_goto(self.pos))
code.putln("}")
if needs_cast:
value_code = '((unsigned char)%s)' % value_code
return value_code
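# Hedged sketch of the guard emitted above for e.g. "ba[i] = v" with a signed
# C value (names illustrative):
#     if (unlikely(v < 0 || v > 255)) {
#         PyErr_SetString(PyExc_ValueError, "byte must be in range(0, 256)"); ...
#     }
# followed by a cast to (unsigned char) where needed.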
def generate_deletion_code(self, code, ignore_nonexisting=False):
self.generate_subexpr_evaluation_code(code)
#if self.type.is_pyobject:
if self.index.type.is_int:
function = "__Pyx_DelItemInt"
index_code = self.index.result()
code.globalstate.use_utility_code(
UtilityCode.load_cached("DelItemInt", "ObjectHandling.c"))
else:
index_code = self.index.py_result()
if self.base.type is dict_type:
function = "PyDict_DelItem"
else:
function = "PyObject_DelItem"
code.putln(
"if (%s(%s, %s%s) < 0) %s" % (
function,
self.base.py_result(),
index_code,
self.extra_index_params(code),
code.error_goto(self.pos)))
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
class BufferIndexNode(_IndexingBaseNode):
"""
Indexing of buffers and memoryviews. This node is created during type
analysis from IndexNode and replaces it.
Attributes:
base - base node being indexed
indices - list of indexing expressions
"""
subexprs = ['base', 'indices']
is_buffer_access = True
# Whether we're assigning to a buffer (in that case it needs to be writable)
writable_needed = False
def analyse_target_types(self, env):
self.analyse_types(env, getting=False)
def analyse_types(self, env, getting=True):
"""
Analyse types for buffer indexing only. Overridden by memoryview
indexing and slicing subclasses
"""
# self.indices are already analyzed
if not self.base.is_name:
error(self.pos, "Can only index buffer variables")
self.type = error_type
return self
if not getting:
if not self.base.entry.type.writable:
error(self.pos, "Writing to readonly buffer")
else:
self.writable_needed = True
if self.base.type.is_buffer:
self.base.entry.buffer_aux.writable_needed = True
self.none_error_message = "'NoneType' object is not subscriptable"
self.analyse_buffer_index(env, getting)
self.wrap_in_nonecheck_node(env)
return self
def analyse_buffer_index(self, env, getting):
self.base = self.base.coerce_to_simple(env)
self.type = self.base.type.dtype
self.buffer_type = self.base.type
if getting and self.type.is_pyobject:
self.is_temp = True
def analyse_assignment(self, rhs):
"""
Called by IndexNode when this node is assigned to,
with the rhs of the assignment
"""
def wrap_in_nonecheck_node(self, env):
if not env.directives['nonecheck'] or not self.base.may_be_none():
return
self.base = self.base.as_none_safe_node(self.none_error_message)
def nogil_check(self, env):
if self.is_buffer_access or self.is_memview_index:
if env.directives['boundscheck']:
warning(self.pos, "Use boundscheck(False) for faster access",
level=1)
if self.type.is_pyobject:
error(self.pos, "Cannot access buffer with object dtype without gil")
self.type = error_type
def calculate_result_code(self):
return "(*%s)" % self.buffer_ptr_code
def buffer_entry(self):
base = self.base
if self.base.is_nonecheck:
base = base.arg
return base.type.get_entry(base)
def buffer_lookup_code(self, code):
"""
ndarray[1, 2, 3] and memslice[1, 2, 3]
"""
# Assign indices to temps of at least (s)size_t to allow further index calculations.
index_temps = [
code.funcstate.allocate_temp(
PyrexTypes.widest_numeric_type(
ivar.type, PyrexTypes.c_ssize_t_type if ivar.type.signed else PyrexTypes.c_size_t_type),
manage_ref=False)
for ivar in self.indices]
for temp, index in zip(index_temps, self.indices):
code.putln("%s = %s;" % (temp, index.result()))
# Generate buffer access code using these temps
from . import Buffer
buffer_entry = self.buffer_entry()
if buffer_entry.type.is_buffer:
negative_indices = buffer_entry.type.negative_indices
else:
negative_indices = Buffer.buffer_defaults['negative_indices']
return buffer_entry, Buffer.put_buffer_lookup_code(
entry=buffer_entry,
index_signeds=[ivar.type.signed for ivar in self.indices],
index_cnames=index_temps,
directives=code.globalstate.directives,
pos=self.pos, code=code,
negative_indices=negative_indices,
in_nogil_context=self.in_nogil_context)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
self.generate_subexpr_evaluation_code(code)
self.generate_buffer_setitem_code(rhs, code)
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
def generate_buffer_setitem_code(self, rhs, code, op=""):
# Used from generate_assignment_code and InPlaceAssignmentNode
buffer_entry, ptrexpr = self.buffer_lookup_code(code)
if self.buffer_type.dtype.is_pyobject:
# Must manage refcounts. Decref what is already there
# and incref what we put in.
ptr = code.funcstate.allocate_temp(buffer_entry.buf_ptr_type,
manage_ref=False)
rhs_code = rhs.result()
code.putln("%s = %s;" % (ptr, ptrexpr))
code.put_gotref("*%s" % ptr)
code.putln("__Pyx_INCREF(%s); __Pyx_DECREF(*%s);" % (
rhs_code, ptr))
code.putln("*%s %s= %s;" % (ptr, op, rhs_code))
code.put_giveref("*%s" % ptr)
code.funcstate.release_temp(ptr)
else:
# Simple case
code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result()))
def generate_result_code(self, code):
buffer_entry, self.buffer_ptr_code = self.buffer_lookup_code(code)
if self.type.is_pyobject:
# is_temp is True, so must pull out value and incref it.
# NOTE: object temporary results for nodes are declared
# as PyObject *, so we need a cast
code.putln("%s = (PyObject *) *%s;" % (self.result(), self.buffer_ptr_code))
code.putln("__Pyx_INCREF((PyObject*)%s);" % self.result())
class MemoryViewIndexNode(BufferIndexNode):
is_memview_index = True
is_buffer_access = False
warned_untyped_idx = False
def analyse_types(self, env, getting=True):
# memoryviewslice indexing or slicing
from . import MemoryView
indices = self.indices
have_slices, indices, newaxes = MemoryView.unellipsify(indices, self.base.type.ndim)
self.memslice_index = (not newaxes and len(indices) == self.base.type.ndim)
axes = []
index_type = PyrexTypes.c_py_ssize_t_type
new_indices = []
if len(indices) - len(newaxes) > self.base.type.ndim:
self.type = error_type
error(indices[self.base.type.ndim].pos,
"Too many indices specified for type %s" % self.base.type)
return self
axis_idx = 0
for i, index in enumerate(indices[:]):
index = index.analyse_types(env)
if index.is_none:
self.is_memview_slice = True
new_indices.append(index)
axes.append(('direct', 'strided'))
continue
access, packing = self.base.type.axes[axis_idx]
axis_idx += 1
if index.is_slice:
self.is_memview_slice = True
if index.step.is_none:
axes.append((access, packing))
else:
axes.append((access, 'strided'))
# Coerce start, stop and step to temps of the right type
for attr in ('start', 'stop', 'step'):
value = getattr(index, attr)
if not value.is_none:
value = value.coerce_to(index_type, env)
#value = value.coerce_to_temp(env)
setattr(index, attr, value)
new_indices.append(value)
elif index.type.is_int or index.type.is_pyobject:
if index.type.is_pyobject and not self.warned_untyped_idx:
warning(index.pos, "Index should be typed for more efficient access", level=2)
MemoryViewIndexNode.warned_untyped_idx = True
self.is_memview_index = True
index = index.coerce_to(index_type, env)
indices[i] = index
new_indices.append(index)
else:
self.type = error_type
error(index.pos, "Invalid index for memoryview specified, type %s" % index.type)
return self
### FIXME: replace by MemoryViewSliceNode if is_memview_slice ?
self.is_memview_index = self.is_memview_index and not self.is_memview_slice
self.indices = new_indices
# All indices with all start/stop/step for slices.
# We need to keep this around.
self.original_indices = indices
self.nogil = env.nogil
self.analyse_operation(env, getting, axes)
self.wrap_in_nonecheck_node(env)
return self
def analyse_operation(self, env, getting, axes):
self.none_error_message = "Cannot index None memoryview slice"
self.analyse_buffer_index(env, getting)
def analyse_broadcast_operation(self, rhs):
"""
Support broadcasting for slice assignment.
E.g.
m_2d[...] = m_1d # or,
m_1d[...] = m_2d # if the leading dimension has extent 1
"""
if self.type.is_memoryviewslice:
lhs = self
if lhs.is_memview_broadcast or rhs.is_memview_broadcast:
lhs.is_memview_broadcast = True
rhs.is_memview_broadcast = True
def analyse_as_memview_scalar_assignment(self, rhs):
lhs = self.analyse_assignment(rhs)
if lhs:
rhs.is_memview_copy_assignment = lhs.is_memview_copy_assignment
return lhs
return self
class MemoryViewSliceNode(MemoryViewIndexNode):
is_memview_slice = True
# No-op slicing operation, this node will be replaced
is_ellipsis_noop = False
is_memview_scalar_assignment = False
is_memview_index = False
is_memview_broadcast = False
def analyse_ellipsis_noop(self, env, getting):
"""Slicing operations needing no evaluation, i.e. m[...] or m[:, :]"""
### FIXME: replace directly
self.is_ellipsis_noop = all(
index.is_slice and index.start.is_none and index.stop.is_none and index.step.is_none
for index in self.indices)
if self.is_ellipsis_noop:
self.type = self.base.type
def analyse_operation(self, env, getting, axes):
from . import MemoryView
if not getting:
self.is_memview_broadcast = True
self.none_error_message = "Cannot assign to None memoryview slice"
else:
self.none_error_message = "Cannot slice None memoryview slice"
self.analyse_ellipsis_noop(env, getting)
if self.is_ellipsis_noop:
return
self.index = None
self.is_temp = True
self.use_managed_ref = True
if not MemoryView.validate_axes(self.pos, axes):
self.type = error_type
return
self.type = PyrexTypes.MemoryViewSliceType(self.base.type.dtype, axes)
if not (self.base.is_simple() or self.base.result_in_temp()):
self.base = self.base.coerce_to_temp(env)
def analyse_assignment(self, rhs):
if not rhs.type.is_memoryviewslice and (
self.type.dtype.assignable_from(rhs.type) or
rhs.type.is_pyobject):
# scalar assignment
return MemoryCopyScalar(self.pos, self)
else:
return MemoryCopySlice(self.pos, self)
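# Example of the dispatch above (illustrative Cython source):
#     m[:] = 0.0    # scalar rhs -> MemoryCopyScalar
#     m[:] = other  # slice rhs  -> MemoryCopySlice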
def is_simple(self):
if self.is_ellipsis_noop:
# TODO: fix SimpleCallNode.is_simple()
return self.base.is_simple() or self.base.result_in_temp()
return self.result_in_temp()
def calculate_result_code(self):
"""This is called in case this is a no-op slicing node"""
return self.base.result()
def generate_result_code(self, code):
if self.is_ellipsis_noop:
return ### FIXME: remove
buffer_entry = self.buffer_entry()
have_gil = not self.in_nogil_context
# TODO Mark: this is insane, do it better
have_slices = False
it = iter(self.indices)
for index in self.original_indices:
if index.is_slice:
have_slices = True
if not index.start.is_none:
index.start = next(it)
if not index.stop.is_none:
index.stop = next(it)
if not index.step.is_none:
index.step = next(it)
else:
next(it)
assert not list(it)
buffer_entry.generate_buffer_slice_code(
code, self.original_indices, self.result(),
have_gil=have_gil, have_slices=have_slices,
directives=code.globalstate.directives)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
if self.is_ellipsis_noop:
self.generate_subexpr_evaluation_code(code)
else:
self.generate_evaluation_code(code)
if self.is_memview_scalar_assignment:
self.generate_memoryviewslice_assign_scalar_code(rhs, code)
else:
self.generate_memoryviewslice_setslice_code(rhs, code)
if self.is_ellipsis_noop:
self.generate_subexpr_disposal_code(code)
else:
self.generate_disposal_code(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
class MemoryCopyNode(ExprNode):
"""
Wraps a memoryview slice for slice assignment.
dst: destination memoryview slice
"""
subexprs = ['dst']
def __init__(self, pos, dst):
super(MemoryCopyNode, self).__init__(pos)
self.dst = dst
self.type = dst.type
def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
self.dst.generate_evaluation_code(code)
self._generate_assignment_code(rhs, code)
self.dst.generate_disposal_code(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
class MemoryCopySlice(MemoryCopyNode):
"""
Copy the contents of slice src to slice dst. Does not support indirect
slices.
memslice1[...] = memslice2
memslice1[:] = memslice2
"""
is_memview_copy_assignment = True
copy_slice_cname = "__pyx_memoryview_copy_contents"
def _generate_assignment_code(self, src, code):
dst = self.dst
src.type.assert_direct_dims(src.pos)
dst.type.assert_direct_dims(dst.pos)
code.putln(code.error_goto_if_neg(
"%s(%s, %s, %d, %d, %d)" % (self.copy_slice_cname,
src.result(), dst.result(),
src.type.ndim, dst.type.ndim,
dst.type.dtype.is_pyobject),
dst.pos))
class MemoryCopyScalar(MemoryCopyNode):
"""
Assign a scalar to a slice. dst must be simple; the scalar is coerced
to the slice's exact dtype, not just to something assignable to it.
memslice1[...] = 0.0
memslice1[:] = 0.0
"""
def __init__(self, pos, dst):
super(MemoryCopyScalar, self).__init__(pos, dst)
self.type = dst.type.dtype
def _generate_assignment_code(self, scalar, code):
from . import MemoryView
self.dst.type.assert_direct_dims(self.dst.pos)
dtype = self.dst.type.dtype
type_decl = dtype.declaration_code("")
slice_decl = self.dst.type.declaration_code("")
code.begin_block()
code.putln("%s __pyx_temp_scalar = %s;" % (type_decl, scalar.result()))
if self.dst.result_in_temp() or self.dst.is_simple():
dst_temp = self.dst.result()
else:
code.putln("%s __pyx_temp_slice = %s;" % (slice_decl, self.dst.result()))
dst_temp = "__pyx_temp_slice"
slice_iter_obj = MemoryView.slice_iter(self.dst.type, dst_temp,
self.dst.type.ndim, code)
p = slice_iter_obj.start_loops()
if dtype.is_pyobject:
code.putln("Py_DECREF(*(PyObject **) %s);" % p)
code.putln("*((%s *) %s) = __pyx_temp_scalar;" % (type_decl, p))
if dtype.is_pyobject:
code.putln("Py_INCREF(__pyx_temp_scalar);")
slice_iter_obj.end_loops()
code.end_block()
class SliceIndexNode(ExprNode):
# 2-element slice indexing
#
# base ExprNode
# start ExprNode or None
# stop ExprNode or None
# slice ExprNode or None constant slice object
subexprs = ['base', 'start', 'stop', 'slice']
slice = None
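# Illustrative distinction (not authoritative): SliceIndexNode covers plain
# two-element slices such as a[1:5], while a stepped slice like a[1:5:2] is
# parsed as an IndexNode whose index is a SliceNode.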
def infer_type(self, env):
base_type = self.base.infer_type(env)
if base_type.is_string or base_type.is_cpp_class:
return bytes_type
elif base_type.is_pyunicode_ptr:
return unicode_type
elif base_type in (bytes_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return base_type
elif base_type.is_ptr or base_type.is_array:
return PyrexTypes.c_array_type(base_type.base_type, None)
return py_object_type
def inferable_item_node(self, index=0):
# slicing shouldn't change the result type of the base, but the index might
if index is not not_a_constant and self.start:
if self.start.has_constant_result():
index += self.start.constant_result
else:
index = not_a_constant
return self.base.inferable_item_node(index)
def may_be_none(self):
base_type = self.base.type
if base_type:
if base_type.is_string:
return False
if base_type in (bytes_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return False
return ExprNode.may_be_none(self)
def calculate_constant_result(self):
if self.start is None:
start = None
else:
start = self.start.constant_result
if self.stop is None:
stop = None
else:
stop = self.stop.constant_result
self.constant_result = self.base.constant_result[start:stop]
def compile_time_value(self, denv):
base = self.base.compile_time_value(denv)
if self.start is None:
start = 0
else:
start = self.start.compile_time_value(denv)
if self.stop is None:
stop = None
else:
stop = self.stop.compile_time_value(denv)
try:
return base[start:stop]
except Exception as e:
self.compile_time_value_error(e)
def analyse_target_declaration(self, env):
pass
def analyse_target_types(self, env):
node = self.analyse_types(env, getting=False)
# when assigning, we must accept any Python type
if node.type.is_pyobject:
node.type = py_object_type
return node
def analyse_types(self, env, getting=True):
self.base = self.base.analyse_types(env)
if self.base.type.is_memoryviewslice:
none_node = NoneNode(self.pos)
index = SliceNode(self.pos,
start=self.start or none_node,
stop=self.stop or none_node,
step=none_node)
index_node = IndexNode(self.pos, index, base=self.base)
return index_node.analyse_base_and_index_types(
env, getting=getting, setting=not getting,
analyse_base=False)
if self.start:
self.start = self.start.analyse_types(env)
if self.stop:
self.stop = self.stop.analyse_types(env)
if not env.directives['wraparound']:
check_negative_indices(self.start, self.stop)
base_type = self.base.type
if base_type.is_array and not getting:
# cannot assign directly to C array => try to assign by making a copy
if not self.start and not self.stop:
self.type = base_type
else:
self.type = PyrexTypes.CPtrType(base_type.base_type)
elif base_type.is_string or base_type.is_cpp_string:
self.type = default_str_type(env)
elif base_type.is_pyunicode_ptr:
self.type = unicode_type
elif base_type.is_ptr:
self.type = base_type
elif base_type.is_array:
# we need a ptr type here instead of an array type, as
# array types can result in invalid type casts in the C
# code
self.type = PyrexTypes.CPtrType(base_type.base_type)
else:
self.base = self.base.coerce_to_pyobject(env)
self.type = py_object_type
if base_type.is_builtin_type:
# slicing builtin types returns something of the same type
self.type = base_type
self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
if self.type is py_object_type:
if (not self.start or self.start.is_literal) and \
(not self.stop or self.stop.is_literal):
# cache the constant slice object, in case we need it
none_node = NoneNode(self.pos)
self.slice = SliceNode(
self.pos,
start=copy.deepcopy(self.start or none_node),
stop=copy.deepcopy(self.stop or none_node),
step=none_node
).analyse_types(env)
else:
c_int = PyrexTypes.c_py_ssize_t_type
if self.start:
self.start = self.start.coerce_to(c_int, env)
if self.stop:
self.stop = self.stop.coerce_to(c_int, env)
self.is_temp = 1
return self
nogil_check = Node.gil_error
gil_message = "Slicing Python object"
get_slice_utility_code = TempitaUtilityCode.load(
"SliceObject", "ObjectHandling.c", context={'access': 'Get'})
set_slice_utility_code = TempitaUtilityCode.load(
"SliceObject", "ObjectHandling.c", context={'access': 'Set'})
def coerce_to(self, dst_type, env):
if ((self.base.type.is_string or self.base.type.is_cpp_string)
and dst_type in (bytes_type, bytearray_type, str_type, unicode_type)):
if (dst_type not in (bytes_type, bytearray_type)
and not env.directives['c_string_encoding']):
error(self.pos,
"default encoding required for conversion from '%s' to '%s'" %
(self.base.type, dst_type))
self.type = dst_type
if dst_type.is_array and self.base.type.is_array:
if not self.start and not self.stop:
# redundant slice building, copy C arrays directly
return self.base.coerce_to(dst_type, env)
# else: check array size if possible
return super(SliceIndexNode, self).coerce_to(dst_type, env)
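# Hedged example of the C-array shortcut above: when a full slice of a C
# array (src[:] with no start/stop) is coerced to the same array type, the
# slice construction is skipped and the array itself is copied/coerced
# directly.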
def generate_result_code(self, code):
if not self.type.is_pyobject:
error(self.pos,
"Slicing is not currently supported for '%s'." % self.type)
return
base_result = self.base.result()
result = self.result()
start_code = self.start_code()
stop_code = self.stop_code()
if self.base.type.is_string:
base_result = self.base.result()
if self.base.type not in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type):
base_result = '((const char*)%s)' % base_result
if self.type is bytearray_type:
type_name = 'ByteArray'
else:
type_name = self.type.name.title()
if self.stop is None:
code.putln(
"%s = __Pyx_Py%s_FromString(%s + %s); %s" % (
result,
type_name,
base_result,
start_code,
code.error_goto_if_null(result, self.pos)))
else:
code.putln(
"%s = __Pyx_Py%s_FromStringAndSize(%s + %s, %s - %s); %s" % (
result,
type_name,
base_result,
start_code,
stop_code,
start_code,
code.error_goto_if_null(result, self.pos)))
elif self.base.type.is_pyunicode_ptr:
base_result = self.base.result()
if self.base.type != PyrexTypes.c_py_unicode_ptr_type:
base_result = '((const Py_UNICODE*)%s)' % base_result
if self.stop is None:
code.putln(
"%s = __Pyx_PyUnicode_FromUnicode(%s + %s); %s" % (
result,
base_result,
start_code,
code.error_goto_if_null(result, self.pos)))
else:
code.putln(
"%s = __Pyx_PyUnicode_FromUnicodeAndLength(%s + %s, %s - %s); %s" % (
result,
base_result,
start_code,
stop_code,
start_code,
code.error_goto_if_null(result, self.pos)))
elif self.base.type is unicode_type:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyUnicode_Substring", "StringTools.c"))
code.putln(
"%s = __Pyx_PyUnicode_Substring(%s, %s, %s); %s" % (
result,
base_result,
start_code,
stop_code,
code.error_goto_if_null(result, self.pos)))
elif self.type is py_object_type:
code.globalstate.use_utility_code(self.get_slice_utility_code)
(has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice) = self.get_slice_config()
code.putln(
"%s = __Pyx_PyObject_GetSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d); %s" % (
result,
self.base.py_result(),
c_start, c_stop,
py_start, py_stop, py_slice,
has_c_start, has_c_stop,
bool(code.globalstate.directives['wraparound']),
code.error_goto_if_null(result, self.pos)))
else:
if self.base.type is list_type:
code.globalstate.use_utility_code(
TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
cfunc = '__Pyx_PyList_GetSlice'
elif self.base.type is tuple_type:
code.globalstate.use_utility_code(
TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
cfunc = '__Pyx_PyTuple_GetSlice'
else:
cfunc = 'PySequence_GetSlice'
code.putln(
"%s = %s(%s, %s, %s); %s" % (
result,
cfunc,
self.base.py_result(),
start_code,
stop_code,
code.error_goto_if_null(result, self.pos)))
code.put_gotref(self.py_result())
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
exception_check=None, exception_value=None):
self.generate_subexpr_evaluation_code(code)
if self.type.is_pyobject:
code.globalstate.use_utility_code(self.set_slice_utility_code)
(has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice) = self.get_slice_config()
code.put_error_if_neg(self.pos,
"__Pyx_PyObject_SetSlice(%s, %s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
self.base.py_result(),
rhs.py_result(),
c_start, c_stop,
py_start, py_stop, py_slice,
has_c_start, has_c_stop,
bool(code.globalstate.directives['wraparound'])))
else:
start_offset = self.start_code() if self.start else '0'
if rhs.type.is_array:
array_length = rhs.type.size
self.generate_slice_guard_code(code, array_length)
else:
array_length = '%s - %s' % (self.stop_code(), start_offset)
code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
code.putln("memcpy(&(%s[%s]), %s, sizeof(%s[0]) * (%s));" % (
self.base.result(), start_offset,
rhs.result(),
self.base.result(), array_length
))
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
def generate_deletion_code(self, code, ignore_nonexisting=False):
if not self.base.type.is_pyobject:
error(self.pos,
"Deleting slices is only supported for Python types, not '%s'." % self.type)
return
self.generate_subexpr_evaluation_code(code)
code.globalstate.use_utility_code(self.set_slice_utility_code)
(has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice) = self.get_slice_config()
code.put_error_if_neg(self.pos,
"__Pyx_PyObject_DelSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
self.base.py_result(),
c_start, c_stop,
py_start, py_stop, py_slice,
has_c_start, has_c_stop,
bool(code.globalstate.directives['wraparound'])))
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
def get_slice_config(self):
has_c_start, c_start, py_start = False, '0', 'NULL'
if self.start:
has_c_start = not self.start.type.is_pyobject
if has_c_start:
c_start = self.start.result()
else:
py_start = '&%s' % self.start.py_result()
has_c_stop, c_stop, py_stop = False, '0', 'NULL'
if self.stop:
has_c_stop = not self.stop.type.is_pyobject
if has_c_stop:
c_stop = self.stop.result()
else:
py_stop = '&%s' % self.stop.py_result()
py_slice = '&%s' % self.slice.py_result() if self.slice else 'NULL'
return (has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice)
def generate_slice_guard_code(self, code, target_size):
if not self.base.type.is_array:
return
slice_size = self.base.type.size
try:
total_length = slice_size = int(slice_size)
except ValueError:
total_length = None
start = stop = None
if self.stop:
stop = self.stop.result()
try:
stop = int(stop)
if stop < 0:
if total_length is None:
slice_size = '%s + %d' % (slice_size, stop)
else:
slice_size += stop
else:
slice_size = stop
stop = None
except ValueError:
pass
if self.start:
start = self.start.result()
try:
start = int(start)
if start < 0:
if total_length is None:
start = '%s + %d' % (self.base.type.size, start)
else:
start += total_length
if isinstance(slice_size, _py_int_types):
slice_size -= start
else:
slice_size = '%s - (%s)' % (slice_size, start)
start = None
except ValueError:
pass
runtime_check = None
compile_time_check = False
try:
int_target_size = int(target_size)
except ValueError:
int_target_size = None
else:
compile_time_check = isinstance(slice_size, _py_int_types)
if compile_time_check and slice_size < 0:
if int_target_size > 0:
error(self.pos, "Assignment to empty slice.")
elif compile_time_check and start is None and stop is None:
# we know the exact slice length
if int_target_size != slice_size:
error(self.pos, "Assignment to slice of wrong length, expected %s, got %s" % (
slice_size, target_size))
elif start is not None:
if stop is None:
stop = slice_size
runtime_check = "(%s)-(%s)" % (stop, start)
elif stop is not None:
runtime_check = stop
else:
runtime_check = slice_size
if runtime_check:
code.putln("if (unlikely((%s) != (%s))) {" % (runtime_check, target_size))
code.putln(
'PyErr_Format(PyExc_ValueError, "Assignment to slice of wrong length,'
' expected %%" CYTHON_FORMAT_SSIZE_T "d, got %%" CYTHON_FORMAT_SSIZE_T "d",'
' (Py_ssize_t)(%s), (Py_ssize_t)(%s));' % (
target_size, runtime_check))
code.putln(code.error_goto(self.pos))
code.putln("}")
def start_code(self):
if self.start:
return self.start.result()
else:
return "0"
def stop_code(self):
if self.stop:
return self.stop.result()
elif self.base.type.is_array:
return self.base.type.size
else:
return "PY_SSIZE_T_MAX"
def calculate_result_code(self):
# self.result() is not used, but this method must exist
return "<unused>"
class SliceNode(ExprNode):
# start:stop:step in subscript list
#
# start ExprNode
# stop ExprNode
# step ExprNode
subexprs = ['start', 'stop', 'step']
is_slice = True
type = slice_type
is_temp = 1
def calculate_constant_result(self):
self.constant_result = slice(
self.start.constant_result,
self.stop.constant_result,
self.step.constant_result)
def compile_time_value(self, denv):
start = self.start.compile_time_value(denv)
stop = self.stop.compile_time_value(denv)
step = self.step.compile_time_value(denv)
try:
return slice(start, stop, step)
except Exception as e:
self.compile_time_value_error(e)
def may_be_none(self):
return False
def analyse_types(self, env):
start = self.start.analyse_types(env)
stop = self.stop.analyse_types(env)
step = self.step.analyse_types(env)
self.start = start.coerce_to_pyobject(env)
self.stop = stop.coerce_to_pyobject(env)
self.step = step.coerce_to_pyobject(env)
if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
self.is_literal = True
self.is_temp = False
return self
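# Sketch of the literal-slice optimisation above: for constant bounds, as in
# obj[1:10:2], the slice(1, 10, 2) object can be created once as a cached
# module-level constant (see generate_result_code) instead of on every
# evaluation.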
gil_message = "Constructing Python slice object"
def calculate_result_code(self):
return self.result_code
def generate_result_code(self, code):
if self.is_literal:
self.result_code = code.get_py_const(py_object_type, 'slice', cleanup_level=2)
code = code.get_cached_constants_writer()
code.mark_pos(self.pos)
code.putln(
"%s = PySlice_New(%s, %s, %s); %s" % (
self.result(),
self.start.py_result(),
self.stop.py_result(),
self.step.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
if self.is_literal:
code.put_giveref(self.py_result())
class CallNode(ExprNode):
# allow overriding the default 'may_be_none' behaviour
may_return_none = None
def infer_type(self, env):
function = self.function
func_type = function.infer_type(env)
if isinstance(function, NewExprNode):
# note: needs call to infer_type() above
return PyrexTypes.CPtrType(function.class_type)
if func_type is py_object_type:
# function might have lied for safety => try to find better type
entry = getattr(function, 'entry', None)
if entry is not None:
func_type = entry.type or func_type
if func_type.is_ptr:
func_type = func_type.base_type
if func_type.is_cfunction:
return func_type.return_type
elif func_type is type_type:
if function.is_name and function.entry and function.entry.type:
result_type = function.entry.type
if result_type.is_extension_type:
return result_type
elif result_type.is_builtin_type:
if function.entry.name == 'float':
return PyrexTypes.c_double_type
elif function.entry.name in Builtin.types_that_construct_their_instance:
return result_type
return py_object_type
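# Illustrative consequences of the inference above: float(x) is typed as a
# C double rather than a Python float, and builtins listed in
# Builtin.types_that_construct_their_instance (e.g. list, dict) are inferred
# to return exactly their own type.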
def type_dependencies(self, env):
# TODO: Update when Danilo's C++ code is merged in to handle
# the case of function overloading.
return self.function.type_dependencies(env)
def is_simple(self):
# C function calls could be considered simple, but they may
# have side-effects that may hit when multiple operations must
# be effected in order, e.g. when constructing the argument
# sequence for a function call or comparing values.
return False
def may_be_none(self):
if self.may_return_none is not None:
return self.may_return_none
func_type = self.function.type
if func_type is type_type and self.function.is_name:
entry = self.function.entry
if entry.type.is_extension_type:
return False
if (entry.type.is_builtin_type and
entry.name in Builtin.types_that_construct_their_instance):
return False
return ExprNode.may_be_none(self)
def analyse_as_type_constructor(self, env):
type = self.function.analyse_as_type(env)
if type and type.is_struct_or_union:
args, kwds = self.explicit_args_kwds()
items = []
for arg, member in zip(args, type.scope.var_entries):
items.append(DictItemNode(pos=arg.pos, key=StringNode(pos=arg.pos, value=member.name), value=arg))
if kwds:
items += kwds.key_value_pairs
self.key_value_pairs = items
self.__class__ = DictNode
self.analyse_types(env) # FIXME
self.coerce_to(type, env)
return True
elif type and type.is_cpp_class:
self.args = [ arg.analyse_types(env) for arg in self.args ]
constructor = type.scope.lookup("<init>")
self.function = RawCNameExprNode(self.function.pos, constructor.type)
self.function.entry = constructor
self.function.set_cname(type.empty_declaration_code())
self.analyse_c_function_call(env)
self.type = type
return True
def is_lvalue(self):
return self.type.is_reference
def nogil_check(self, env):
func_type = self.function_type()
if func_type.is_pyobject:
self.gil_error()
elif not getattr(func_type, 'nogil', False):
self.gil_error()
gil_message = "Calling gil-requiring function"
class SimpleCallNode(CallNode):
# Function call without keyword, * or ** args.
#
# function ExprNode
# args [ExprNode]
# arg_tuple ExprNode or None used internally
# self ExprNode or None used internally
# coerced_self ExprNode or None used internally
# wrapper_call bool used internally
# has_optional_args bool used internally
# nogil bool used internally
subexprs = ['self', 'coerced_self', 'function', 'args', 'arg_tuple']
self = None
coerced_self = None
arg_tuple = None
wrapper_call = False
has_optional_args = False
nogil = False
analysed = False
def compile_time_value(self, denv):
function = self.function.compile_time_value(denv)
args = [arg.compile_time_value(denv) for arg in self.args]
try:
return function(*args)
except Exception as e:
self.compile_time_value_error(e)
def analyse_as_type(self, env):
attr = self.function.as_cython_attribute()
if attr == 'pointer':
if len(self.args) != 1:
error(self.pos, "only one type allowed.")
else:
type = self.args[0].analyse_as_type(env)
if not type:
error(self.args[0].pos, "Unknown type")
else:
return PyrexTypes.CPtrType(type)
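# For illustration: this lets cython.pointer(int) analyse as the C type
# "int *" when used in a type context.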
def explicit_args_kwds(self):
return self.args, None
def analyse_types(self, env):
if self.analyse_as_type_constructor(env):
return self
if self.analysed:
return self
self.analysed = True
self.function.is_called = 1
self.function = self.function.analyse_types(env)
function = self.function
if function.is_attribute and function.entry and function.entry.is_cmethod:
# Take ownership of the object from which the attribute
# was obtained, because we need to pass it as 'self'.
self.self = function.obj
function.obj = CloneNode(self.self)
func_type = self.function_type()
if func_type.is_pyobject:
self.arg_tuple = TupleNode(self.pos, args = self.args)
self.arg_tuple = self.arg_tuple.analyse_types(env).coerce_to_pyobject(env)
self.args = None
if func_type is Builtin.type_type and function.is_name and \
function.entry and \
function.entry.is_builtin and \
function.entry.name in Builtin.types_that_construct_their_instance:
# calling a builtin type that returns a specific object type
if function.entry.name == 'float':
# the following will come true later on in a transform
self.type = PyrexTypes.c_double_type
self.result_ctype = PyrexTypes.c_double_type
else:
self.type = Builtin.builtin_types[function.entry.name]
self.result_ctype = py_object_type
self.may_return_none = False
elif function.is_name and function.type_entry:
# We are calling an extension type constructor. As
# long as we do not support __new__(), the result type
# is clear
self.type = function.type_entry.type
self.result_ctype = py_object_type
self.may_return_none = False
else:
self.type = py_object_type
self.is_temp = 1
else:
self.args = [ arg.analyse_types(env) for arg in self.args ]
self.analyse_c_function_call(env)
if func_type.exception_check == '+':
self.is_temp = True
return self
def function_type(self):
# Return the type of the function being called, coercing a function
# pointer to a function if necessary. If the function has fused
# arguments, return the specific type.
func_type = self.function.type
if func_type.is_ptr:
func_type = func_type.base_type
return func_type
def analyse_c_function_call(self, env):
func_type = self.function.type
if func_type is error_type:
self.type = error_type
return
if func_type.is_cfunction and func_type.is_static_method:
if self.self and self.self.type.is_extension_type:
# To support this we'd need to pass self to determine whether
# it was overloaded in Python space (possibly via a Cython
# superclass turning a cdef method into a cpdef one).
error(self.pos, "Cannot call a static method on an instance variable.")
args = self.args
elif self.self:
args = [self.self] + self.args
else:
args = self.args
if func_type.is_cpp_class:
overloaded_entry = self.function.type.scope.lookup("operator()")
if overloaded_entry is None:
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return
elif hasattr(self.function, 'entry'):
overloaded_entry = self.function.entry
elif self.function.is_subscript and self.function.is_fused_index:
overloaded_entry = self.function.type.entry
else:
overloaded_entry = None
if overloaded_entry:
if self.function.type.is_fused:
functypes = self.function.type.get_all_specialized_function_types()
alternatives = [f.entry for f in functypes]
else:
alternatives = overloaded_entry.all_alternatives()
entry = PyrexTypes.best_match(args, alternatives, self.pos, env)
if not entry:
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return
entry.used = True
if not func_type.is_cpp_class:
self.function.entry = entry
self.function.type = entry.type
func_type = self.function_type()
else:
entry = None
func_type = self.function_type()
if not func_type.is_cfunction:
error(self.pos, "Calling non-function type '%s'" % func_type)
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return
# Check no. of args
max_nargs = len(func_type.args)
expected_nargs = max_nargs - func_type.optional_arg_count
actual_nargs = len(args)
if func_type.optional_arg_count and expected_nargs != actual_nargs:
self.has_optional_args = 1
self.is_temp = 1
# check 'self' argument
if entry and entry.is_cmethod and func_type.args and not func_type.is_static_method:
formal_arg = func_type.args[0]
arg = args[0]
if formal_arg.not_none:
if self.self:
self.self = self.self.as_none_safe_node(
"'NoneType' object has no attribute '%s'",
error='PyExc_AttributeError',
format_args=[entry.name])
else:
# unbound method
arg = arg.as_none_safe_node(
"descriptor '%s' requires a '%s' object but received a 'NoneType'",
format_args=[entry.name, formal_arg.type.name])
if self.self:
if formal_arg.accept_builtin_subtypes:
arg = CMethodSelfCloneNode(self.self)
else:
arg = CloneNode(self.self)
arg = self.coerced_self = arg.coerce_to(formal_arg.type, env)
elif formal_arg.type.is_builtin_type:
# special case: unbound methods of builtins accept subtypes
arg = arg.coerce_to(formal_arg.type, env)
if arg.type.is_builtin_type and isinstance(arg, PyTypeTestNode):
arg.exact_builtin_type = False
args[0] = arg
# Coerce arguments
some_args_in_temps = False
for i in range(min(max_nargs, actual_nargs)):
formal_arg = func_type.args[i]
formal_type = formal_arg.type
arg = args[i].coerce_to(formal_type, env)
if formal_arg.not_none:
# C methods must do the None checks at *call* time
arg = arg.as_none_safe_node(
"cannot pass None into a C function argument that is declared 'not None'")
if arg.is_temp:
if i > 0:
# first argument in temp doesn't impact subsequent arguments
some_args_in_temps = True
elif arg.type.is_pyobject and not env.nogil:
if i == 0 and self.self is not None:
# a method's cloned "self" argument is ok
pass
elif arg.nonlocally_immutable():
# plain local variables are ok
pass
else:
# we do not safely own the argument's reference,
# but we must make sure it cannot be collected
# before we return from the function, so we create
# an owned temp reference to it
if i > 0: # first argument doesn't matter
some_args_in_temps = True
arg = arg.coerce_to_temp(env)
args[i] = arg
# handle additional varargs parameters
for i in range(max_nargs, actual_nargs):
arg = args[i]
if arg.type.is_pyobject:
if arg.type is str_type:
arg_ctype = PyrexTypes.c_char_ptr_type
else:
arg_ctype = arg.type.default_coerced_ctype()
if arg_ctype is None:
error(self.args[i].pos,
"Python object cannot be passed as a varargs parameter")
else:
args[i] = arg = arg.coerce_to(arg_ctype, env)
if arg.is_temp and i > 0:
some_args_in_temps = True
if some_args_in_temps:
# if some args are temps and others are not, they may get
# constructed in the wrong order (temps first) => make
# sure they are either all temps or all not temps (except
# for the last argument, which is evaluated last in any
# case)
for i in range(actual_nargs-1):
if i == 0 and self.self is not None:
continue # self is ok
arg = args[i]
if arg.nonlocally_immutable():
# locals, C functions, unassignable types are safe.
pass
elif arg.type.is_cpp_class:
# Assignment has side effects, avoid.
pass
elif env.nogil and arg.type.is_pyobject:
# can't copy a Python reference into a temp in nogil
# env (this is safe: a construction would fail in
# nogil anyway)
pass
else:
#self.args[i] = arg.coerce_to_temp(env)
# instead: issue a warning
if i > 0 or (i == 1 and self.self is not None):  # skip first arg
warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
break
self.args[:] = args
# Calc result type and code fragment
if isinstance(self.function, NewExprNode):
self.type = PyrexTypes.CPtrType(self.function.class_type)
else:
self.type = func_type.return_type
if self.function.is_name or self.function.is_attribute:
if self.function.entry and self.function.entry.utility_code:
self.is_temp = 1 # currently doesn't work for self.calculate_result_code()
if self.type.is_pyobject:
self.result_ctype = py_object_type
self.is_temp = 1
elif func_type.exception_value is not None or func_type.exception_check:
self.is_temp = 1
elif self.type.is_memoryviewslice:
self.is_temp = 1
# func_type.exception_check = True
if self.is_temp and self.type.is_reference:
self.type = PyrexTypes.CFakeReferenceType(self.type.ref_base_type)
# Called in 'nogil' context?
self.nogil = env.nogil
if (self.nogil and
func_type.exception_check and
func_type.exception_check != '+'):
env.use_utility_code(pyerr_occurred_withgil_utility_code)
# C++ exception handler
if func_type.exception_check == '+':
if func_type.exception_value is None:
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
def calculate_result_code(self):
return self.c_call_code()
def c_call_code(self):
func_type = self.function_type()
if self.type is PyrexTypes.error_type or not func_type.is_cfunction:
return "<error>"
formal_args = func_type.args
arg_list_code = []
args = list(zip(formal_args, self.args))
max_nargs = len(func_type.args)
expected_nargs = max_nargs - func_type.optional_arg_count
actual_nargs = len(self.args)
for formal_arg, actual_arg in args[:expected_nargs]:
arg_code = actual_arg.result_as(formal_arg.type)
arg_list_code.append(arg_code)
if func_type.is_overridable:
arg_list_code.append(str(int(self.wrapper_call or self.function.entry.is_unbound_cmethod)))
if func_type.optional_arg_count:
if expected_nargs == actual_nargs:
optional_args = 'NULL'
else:
optional_args = "&%s" % self.opt_arg_struct
arg_list_code.append(optional_args)
for actual_arg in self.args[len(formal_args):]:
arg_list_code.append(actual_arg.result())
result = "%s(%s)" % (self.function.result(), ', '.join(arg_list_code))
return result
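# Rough shape of the call expression assembled above (illustrative only):
#     f(a, b)                 # plain C call
#     f(a, b, 0)              # overridable cpdef: trailing skip-dispatch flag
#     f(a, &__pyx_opt_args)   # optional-args struct pointer, or NULL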
def is_c_result_required(self):
func_type = self.function_type()
if not func_type.exception_value or func_type.exception_check == '+':
return False # skip allocation of unused result temp
return True
def generate_result_code(self, code):
func_type = self.function_type()
if self.function.is_name or self.function.is_attribute:
if self.function.entry and self.function.entry.utility_code:
code.globalstate.use_utility_code(self.function.entry.utility_code)
if func_type.is_pyobject:
if func_type is not type_type and not self.arg_tuple.args and self.arg_tuple.is_literal:
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCallNoArg", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_CallNoArg(%s); %s" % (
self.result(),
self.function.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
else:
arg_code = self.arg_tuple.py_result()
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCall", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
self.result(),
self.function.py_result(),
arg_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif func_type.is_cfunction:
if self.has_optional_args:
actual_nargs = len(self.args)
expected_nargs = len(func_type.args) - func_type.optional_arg_count
self.opt_arg_struct = code.funcstate.allocate_temp(
func_type.op_arg_struct.base_type, manage_ref=True)
code.putln("%s.%s = %s;" % (
self.opt_arg_struct,
Naming.pyrex_prefix + "n",
len(self.args) - expected_nargs))
args = list(zip(func_type.args, self.args))
for formal_arg, actual_arg in args[expected_nargs:actual_nargs]:
code.putln("%s.%s = %s;" % (
self.opt_arg_struct,
func_type.opt_arg_cname(formal_arg.name),
actual_arg.result_as(formal_arg.type)))
exc_checks = []
if self.type.is_pyobject and self.is_temp:
exc_checks.append("!%s" % self.result())
elif self.type.is_memoryviewslice:
assert self.is_temp
exc_checks.append(self.type.error_condition(self.result()))
else:
exc_val = func_type.exception_value
exc_check = func_type.exception_check
if exc_val is not None:
exc_checks.append("%s == %s" % (self.result(), exc_val))
if exc_check:
if self.nogil:
exc_checks.append("__Pyx_ErrOccurredWithGIL()")
else:
exc_checks.append("PyErr_Occurred()")
if self.is_temp or exc_checks:
rhs = self.c_call_code()
if self.result():
lhs = "%s = " % self.result()
if self.is_temp and self.type.is_pyobject:
#return_type = self.type # func_type.return_type
#print "SimpleCallNode.generate_result_code: casting", rhs, \
# "from", return_type, "to pyobject" ###
rhs = typecast(py_object_type, self.type, rhs)
else:
lhs = ""
if func_type.exception_check == '+':
translate_cpp_exception(code, self.pos, '%s%s;' % (lhs, rhs),
func_type.exception_value, self.nogil)
else:
if exc_checks:
goto_error = code.error_goto_if(" && ".join(exc_checks), self.pos)
else:
goto_error = ""
code.putln("%s%s; %s" % (lhs, rhs, goto_error))
if self.type.is_pyobject and self.result():
code.put_gotref(self.py_result())
if self.has_optional_args:
code.funcstate.release_temp(self.opt_arg_struct)
class PyMethodCallNode(SimpleCallNode):
# Specialised call to a (potential) PyMethodObject with non-constant argument tuple.
# Allows the self argument to be injected directly instead of repacking a tuple for it.
#
# function ExprNode the function/method object to call
# arg_tuple TupleNode the arguments for the args tuple
subexprs = ['function', 'arg_tuple']
is_temp = True
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
self.allocate_temp_result(code)
self.function.generate_evaluation_code(code)
assert self.arg_tuple.mult_factor is None
args = self.arg_tuple.args
for arg in args:
arg.generate_evaluation_code(code)
# make sure function is in temp so that we can replace the reference below if it's a method
reuse_function_temp = self.function.is_temp
if reuse_function_temp:
function = self.function.result()
else:
function = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
self.function.make_owned_reference(code)
code.put("%s = %s; " % (function, self.function.py_result()))
self.function.generate_disposal_code(code)
self.function.free_temps(code)
self_arg = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln("%s = NULL;" % self_arg)
arg_offset_cname = None
if len(args) > 1:
arg_offset_cname = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
code.putln("%s = 0;" % arg_offset_cname)
def attribute_is_likely_method(attr):
obj = attr.obj
if obj.is_name and obj.entry.is_pyglobal:
return False # more likely to be a function
return True
if self.function.is_attribute:
likely_method = 'likely' if attribute_is_likely_method(self.function) else 'unlikely'
elif self.function.is_name and self.function.cf_state:
# not an attribute itself, but might have been assigned from one (e.g. bound method)
for assignment in self.function.cf_state:
value = assignment.rhs
if value and value.is_attribute and value.obj.type.is_pyobject:
if attribute_is_likely_method(value):
likely_method = 'likely'
break
else:
likely_method = 'unlikely'
else:
likely_method = 'unlikely'
code.putln("if (CYTHON_COMPILING_IN_CPYTHON && %s(PyMethod_Check(%s))) {" % (likely_method, function))
code.putln("%s = PyMethod_GET_SELF(%s);" % (self_arg, function))
# the following is always true in Py3 (kept only for safety),
# but is false for unbound methods in Py2
code.putln("if (likely(%s)) {" % self_arg)
code.putln("PyObject* function = PyMethod_GET_FUNCTION(%s);" % function)
code.put_incref(self_arg, py_object_type)
code.put_incref("function", py_object_type)
# free the method object as early as possible to enable reuse from CPython's freelist
code.put_decref_set(function, "function")
if len(args) > 1:
code.putln("%s = 1;" % arg_offset_cname)
code.putln("}")
code.putln("}")
if not args:
# fastest special case: try to avoid tuple creation
code.putln("if (%s) {" % self_arg)
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectCallOneArg", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_CallOneArg(%s, %s); %s" % (
self.result(),
function, self_arg,
code.error_goto_if_null(self.result(), self.pos)))
code.put_decref_clear(self_arg, py_object_type)
code.funcstate.release_temp(self_arg)
code.putln("} else {")
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectCallNoArg", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_CallNoArg(%s); %s" % (
self.result(),
function,
code.error_goto_if_null(self.result(), self.pos)))
code.putln("}")
code.put_gotref(self.py_result())
else:
if len(args) == 1:
code.putln("if (!%s) {" % self_arg)
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectCallOneArg", "ObjectHandling.c"))
arg = args[0]
code.putln(
"%s = __Pyx_PyObject_CallOneArg(%s, %s); %s" % (
self.result(),
function, arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
arg.generate_disposal_code(code)
code.put_gotref(self.py_result())
code.putln("} else {")
arg_offset = 1
else:
arg_offset = arg_offset_cname
args_tuple = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln("%s = PyTuple_New(%d+%s); %s" % (
args_tuple, len(args), arg_offset,
code.error_goto_if_null(args_tuple, self.pos)))
code.put_gotref(args_tuple)
if len(args) > 1:
code.putln("if (%s) {" % self_arg)
code.putln("__Pyx_GIVEREF(%s); PyTuple_SET_ITEM(%s, 0, %s); %s = NULL;" % (
self_arg, args_tuple, self_arg, self_arg)) # stealing owned ref in this case
code.funcstate.release_temp(self_arg)
if len(args) > 1:
code.putln("}")
for i, arg in enumerate(args):
arg.make_owned_reference(code)
code.put_giveref(arg.py_result())
code.putln("PyTuple_SET_ITEM(%s, %d+%s, %s);" % (
args_tuple, i, arg_offset, arg.py_result()))
if len(args) > 1:
code.funcstate.release_temp(arg_offset_cname)
for arg in args:
arg.generate_post_assignment_code(code)
arg.free_temps(code)
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectCall", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
self.result(),
function, args_tuple,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
code.put_decref_clear(args_tuple, py_object_type)
code.funcstate.release_temp(args_tuple)
if len(args) == 1:
code.putln("}")
if reuse_function_temp:
self.function.generate_disposal_code(code)
self.function.free_temps(code)
else:
code.put_decref_clear(function, py_object_type)
code.funcstate.release_temp(function)
class InlinedDefNodeCallNode(CallNode):
# Inline call to a DefNode.
#
# function PyCFunctionNode
# function_name NameNode
# args [ExprNode]
subexprs = ['args', 'function_name']
is_temp = 1
type = py_object_type
function = None
function_name = None
def can_be_inlined(self):
func_type = self.function.def_node
if func_type.star_arg or func_type.starstar_arg:
return False
if len(func_type.args) != len(self.args):
return False
if func_type.num_kwonly_args:
return False # actually wrong number of arguments
return True
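# Illustrative example (assumption): a call like
#     def f(x): return x + 1
#     f(42)
# can be inlined as a direct C call to the generated implementation of f,
# but only if the argument count matches exactly and the signature has no
# */** arguments and no keyword-only arguments, as checked above.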
def analyse_types(self, env):
self.function_name = self.function_name.analyse_types(env)
self.args = [ arg.analyse_types(env) for arg in self.args ]
func_type = self.function.def_node
actual_nargs = len(self.args)
# Coerce arguments
some_args_in_temps = False
for i in range(actual_nargs):
formal_type = func_type.args[i].type
arg = self.args[i].coerce_to(formal_type, env)
if arg.is_temp:
if i > 0:
# first argument in temp doesn't impact subsequent arguments
some_args_in_temps = True
elif arg.type.is_pyobject and not env.nogil:
if arg.nonlocally_immutable():
# plain local variables are ok
pass
else:
# we do not safely own the argument's reference,
# but we must make sure it cannot be collected
# before we return from the function, so we create
# an owned temp reference to it
if i > 0: # first argument doesn't matter
some_args_in_temps = True
arg = arg.coerce_to_temp(env)
self.args[i] = arg
if some_args_in_temps:
# if some args are temps and others are not, they may get
# constructed in the wrong order (temps first) => make
# sure they are either all temps or all not temps (except
# for the last argument, which is evaluated last in any
# case)
for i in range(actual_nargs-1):
arg = self.args[i]
if arg.nonlocally_immutable():
# locals, C functions, unassignable types are safe.
pass
elif arg.type.is_cpp_class:
# Assignment has side effects, avoid.
pass
elif env.nogil and arg.type.is_pyobject:
# can't copy a Python reference into a temp in nogil
# env (this is safe: a construction would fail in
# nogil anyway)
pass
else:
#self.args[i] = arg.coerce_to_temp(env)
# instead: issue a warning
if i > 0:
warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
break
return self
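# Example of the evaluation-order hazard warned about above (illustrative):
#     f(g(), h())
# If g()'s result must be copied into a temp while h()'s is used in place,
# the generated C code could evaluate h() before g(), which is observable
# when both calls have side effects.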
def generate_result_code(self, code):
arg_code = [self.function_name.py_result()]
func_type = self.function.def_node
for arg, proto_arg in zip(self.args, func_type.args):
if arg.type.is_pyobject:
arg_code.append(arg.result_as(proto_arg.type))
else:
arg_code.append(arg.result())
arg_code = ', '.join(arg_code)
code.putln(
"%s = %s(%s); %s" % (
self.result(),
self.function.def_node.entry.pyfunc_cname,
arg_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class PythonCapiFunctionNode(ExprNode):
subexprs = []
def __init__(self, pos, py_name, cname, func_type, utility_code = None):
ExprNode.__init__(self, pos, name=py_name, cname=cname,
type=func_type, utility_code=utility_code)
def analyse_types(self, env):
return self
def generate_result_code(self, code):
if self.utility_code:
code.globalstate.use_utility_code(self.utility_code)
def calculate_result_code(self):
return self.cname
class PythonCapiCallNode(SimpleCallNode):
# Python C-API Function call (only created in transforms)
# By default, we assume that the call never returns None, as this
# is true for most C-API functions in CPython. If this does not
# apply to a call, set the following to True (or None to inherit
# the default behaviour).
may_return_none = False
def __init__(self, pos, function_name, func_type,
utility_code = None, py_name=None, **kwargs):
self.type = func_type.return_type
self.result_ctype = self.type
self.function = PythonCapiFunctionNode(
pos, py_name, function_name, func_type,
utility_code = utility_code)
# call this last so that we can override the constructed
# attributes above with explicit keyword arguments if required
SimpleCallNode.__init__(self, pos, **kwargs)
class GeneralCallNode(CallNode):
# General Python function call, including keyword,
# * and ** arguments.
#
# function ExprNode
# positional_args ExprNode Tuple of positional arguments
# keyword_args ExprNode or None Dict of keyword arguments
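# Handles call forms that SimpleCallNode cannot, e.g. (illustrative):
#     f(a, b, *rest, key=1, **extra)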
type = py_object_type
subexprs = ['function', 'positional_args', 'keyword_args']
nogil_check = Node.gil_error
def compile_time_value(self, denv):
function = self.function.compile_time_value(denv)
positional_args = self.positional_args.compile_time_value(denv)
keyword_args = self.keyword_args.compile_time_value(denv)
try:
return function(*positional_args, **keyword_args)
except Exception as e:
self.compile_time_value_error(e)
def explicit_args_kwds(self):
if (self.keyword_args and not self.keyword_args.is_dict_literal or
not self.positional_args.is_sequence_constructor):
raise CompileError(self.pos,
'Compile-time keyword arguments must be explicit.')
return self.positional_args.args, self.keyword_args
def analyse_types(self, env):
if self.analyse_as_type_constructor(env):
return self
self.function = self.function.analyse_types(env)
if not self.function.type.is_pyobject:
if self.function.type.is_error:
self.type = error_type
return self
if hasattr(self.function, 'entry'):
node = self.map_to_simple_call_node()
if node is not None and node is not self:
return node.analyse_types(env)
elif self.function.entry.as_variable:
self.function = self.function.coerce_to_pyobject(env)
elif node is self:
error(self.pos,
"Non-trivial keyword arguments and starred "
"arguments not allowed in cdef functions.")
else:
# error was already reported
pass
else:
self.function = self.function.coerce_to_pyobject(env)
if self.keyword_args:
self.keyword_args = self.keyword_args.analyse_types(env)
self.positional_args = self.positional_args.analyse_types(env)
self.positional_args = \
self.positional_args.coerce_to_pyobject(env)
function = self.function
if function.is_name and function.type_entry:
# We are calling an extension type constructor. As long
# as we do not support __new__(), the result type is clear
self.type = function.type_entry.type
self.result_ctype = py_object_type
self.may_return_none = False
else:
self.type = py_object_type
self.is_temp = 1
return self
def map_to_simple_call_node(self):
"""
Tries to map keyword arguments to declared positional arguments.
Returns a SimpleCallNode if the mapping succeeds, None if an error
was reported, or self to fall back to a generic Python call.
"""
if not isinstance(self.positional_args, TupleNode):
# has starred argument
return self
if not self.keyword_args.is_dict_literal:
# keywords come from arbitrary expression => nothing to do here
return self
function = self.function
entry = getattr(function, 'entry', None)
if not entry:
return self
function_type = entry.type
if function_type.is_ptr:
function_type = function_type.base_type
if not function_type.is_cfunction:
return self
pos_args = self.positional_args.args
kwargs = self.keyword_args
declared_args = function_type.args
if entry.is_cmethod:
declared_args = declared_args[1:] # skip 'self'
if len(pos_args) > len(declared_args):
error(self.pos, "function call got too many positional arguments, "
"expected %d, got %s" % (len(declared_args),
len(pos_args)))
return None
matched_args = set([ arg.name for arg in declared_args[:len(pos_args)]
if arg.name ])
unmatched_args = declared_args[len(pos_args):]
matched_kwargs_count = 0
args = list(pos_args)
# check for duplicate keywords
seen = set(matched_args)
has_errors = False
for arg in kwargs.key_value_pairs:
name = arg.key.value
if name in seen:
error(arg.pos, "argument '%s' passed twice" % name)
has_errors = True
# continue to report more errors if there are any
seen.add(name)
# match keywords that are passed in order
for decl_arg, arg in zip(unmatched_args, kwargs.key_value_pairs):
name = arg.key.value
if decl_arg.name == name:
matched_args.add(name)
matched_kwargs_count += 1
args.append(arg.value)
else:
break
# match keyword arguments that are passed out-of-order, but keep
# the evaluation of non-simple arguments in order by moving them
# into temps
from .UtilNodes import EvalWithTempExprNode, LetRefNode
temps = []
if len(kwargs.key_value_pairs) > matched_kwargs_count:
unmatched_args = declared_args[len(args):]
keywords = dict([ (arg.key.value, (i+len(pos_args), arg))
for i, arg in enumerate(kwargs.key_value_pairs) ])
first_missing_keyword = None
for decl_arg in unmatched_args:
name = decl_arg.name
if name not in keywords:
# missing keyword argument => either done or error
if not first_missing_keyword:
first_missing_keyword = name
continue
elif first_missing_keyword:
if entry.as_variable:
# we might be able to convert the function to a Python
# object, which then allows full calling semantics
# with default values in gaps - currently, we only
# support optional arguments at the end
return self
# wasn't the last keyword => gaps are not supported
error(self.pos, "C function call is missing "
"argument '%s'" % first_missing_keyword)
return None
pos, arg = keywords[name]
matched_args.add(name)
matched_kwargs_count += 1
if arg.value.is_simple():
args.append(arg.value)
else:
temp = LetRefNode(arg.value)
assert temp.is_simple()
args.append(temp)
temps.append((pos, temp))
if temps:
# may have to move preceding non-simple args into temps
final_args = []
new_temps = []
first_temp_arg = temps[0][-1]
for arg_value in args:
if arg_value is first_temp_arg:
break # done
if arg_value.is_simple():
final_args.append(arg_value)
else:
temp = LetRefNode(arg_value)
new_temps.append(temp)
final_args.append(temp)
if new_temps:
args = final_args
temps = new_temps + [ arg for i,arg in sorted(temps) ]
# check for unexpected keywords
for arg in kwargs.key_value_pairs:
name = arg.key.value
if name not in matched_args:
has_errors = True
error(arg.pos,
"C function got unexpected keyword argument '%s'" %
name)
if has_errors:
# error was reported already
return None
# all keywords mapped to positional arguments
# if we are missing arguments, SimpleCallNode will figure it out
node = SimpleCallNode(self.pos, function=function, args=args)
for temp in temps[::-1]:
node = EvalWithTempExprNode(temp, node)
return node
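# Illustrative mapping performed here (hypothetical signature):
#     cdef int add(int a, int b, int c): ...
#     add(1, c=3, b=2)   =>   equivalent SimpleCallNode for add(1, 2, 3)
# Out-of-order keyword values that are not simple expressions are hoisted
# into LetRefNode temps so they are still evaluated in the written order.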
def generate_result_code(self, code):
if self.type.is_error: return
if self.keyword_args:
kwargs = self.keyword_args.py_result()
else:
kwargs = 'NULL'
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCall", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_Call(%s, %s, %s); %s" % (
self.result(),
self.function.py_result(),
self.positional_args.py_result(),
kwargs,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class AsTupleNode(ExprNode):
# Convert argument to tuple. Used for normalising
# the * argument of a function call.
#
# arg ExprNode
subexprs = ['arg']
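# Illustrative use: for a call `f(*some_iterable)`, the starred argument is
# normalised via the equivalent of tuple(some_iterable) (PySequence_Tuple),
# unless it is already known to be a non-None tuple.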
def calculate_constant_result(self):
self.constant_result = tuple(self.arg.constant_result)
def compile_time_value(self, denv):
arg = self.arg.compile_time_value(denv)
try:
return tuple(arg)
except Exception as e:
self.compile_time_value_error(e)
def analyse_types(self, env):
self.arg = self.arg.analyse_types(env).coerce_to_pyobject(env)
if self.arg.type is tuple_type:
return self.arg.as_none_safe_node("'NoneType' object is not iterable")
self.type = tuple_type
self.is_temp = 1
return self
def may_be_none(self):
return False
nogil_check = Node.gil_error
gil_message = "Constructing Python tuple"
def generate_result_code(self, code):
code.putln(
"%s = PySequence_Tuple(%s); %s" % (
self.result(),
self.arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class MergedDictNode(ExprNode):
# Helper class for keyword arguments and other merged dicts.
#
# keyword_args [DictNode or other ExprNode]
subexprs = ['keyword_args']
is_temp = 1
type = dict_type
reject_duplicates = True
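# Illustrative uses (assumed): merging keyword dicts in a call such as
#     f(x, **a, **b)      # duplicate keys rejected with an error
# and, with reject_duplicates disabled, merged-dict constructs where later
# entries may overwrite earlier ones.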
def calculate_constant_result(self):
result = {}
reject_duplicates = self.reject_duplicates
for item in self.keyword_args:
if item.is_dict_literal:
# process items in order
items = ((key.constant_result, value.constant_result)
for key, value in item.key_value_pairs)
else:
items = item.constant_result.iteritems()
for key, value in items:
if reject_duplicates and key in result:
raise ValueError("duplicate keyword argument found: %s" % key)
result[key] = value
self.constant_result = result
def compile_time_value(self, denv):
result = {}
reject_duplicates = self.reject_duplicates
for item in self.keyword_args:
if item.is_dict_literal:
# process items in order
items = [(key.compile_time_value(denv), value.compile_time_value(denv))
for key, value in item.key_value_pairs]
else:
items = item.compile_time_value(denv).iteritems()
try:
for key, value in items:
if reject_duplicates and key in result:
raise ValueError("duplicate keyword argument found: %s" % key)
result[key] = value
except Exception as e:
self.compile_time_value_error(e)
return result
def type_dependencies(self, env):
return ()
def infer_type(self, env):
return dict_type
def analyse_types(self, env):
args = [
arg.analyse_types(env).coerce_to_pyobject(env).as_none_safe_node(
# FIXME: CPython's error message starts with the runtime function name
'argument after ** must be a mapping, not NoneType')
for arg in self.keyword_args
]
if len(args) == 1 and args[0].type is dict_type:
# strip this intermediate node and use the bare dict
arg = args[0]
if arg.is_name and arg.entry.is_arg and len(arg.entry.cf_assignments) == 1:
# passing **kwargs through to function call => allow NULL
arg.allow_null = True
return arg
self.keyword_args = args
return self
def may_be_none(self):
return False
gil_message = "Constructing Python dict"
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
self.allocate_temp_result(code)
args = iter(self.keyword_args)
item = next(args)
item.generate_evaluation_code(code)
if item.type is not dict_type:
# CPython supports calling functions with non-dicts, so do we
code.putln('if (likely(PyDict_CheckExact(%s))) {' %
item.py_result())
if item.is_dict_literal:
item.make_owned_reference(code)
code.putln("%s = %s;" % (self.result(), item.py_result()))
item.generate_post_assignment_code(code)
else:
code.putln("%s = PyDict_Copy(%s); %s" % (
self.result(),
item.py_result(),
code.error_goto_if_null(self.result(), item.pos)))
code.put_gotref(self.result())
item.generate_disposal_code(code)
if item.type is not dict_type:
code.putln('} else {')
code.putln("%s = PyObject_CallFunctionObjArgs((PyObject*)&PyDict_Type, %s, NULL); %s" % (
self.result(),
item.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
item.generate_disposal_code(code)
code.putln('}')
item.free_temps(code)
helpers = set()
for item in args:
if item.is_dict_literal:
# inline update instead of creating an intermediate dict
for arg in item.key_value_pairs:
arg.generate_evaluation_code(code)
if self.reject_duplicates:
code.putln("if (unlikely(PyDict_Contains(%s, %s))) {" % (
self.result(),
arg.key.py_result()))
helpers.add("RaiseDoubleKeywords")
# FIXME: find out function name at runtime!
code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % (
arg.key.py_result(),
code.error_goto(self.pos)))
code.putln("}")
code.put_error_if_neg(arg.key.pos, "PyDict_SetItem(%s, %s, %s)" % (
self.result(),
arg.key.py_result(),
arg.value.py_result()))
arg.generate_disposal_code(code)
arg.free_temps(code)
else:
item.generate_evaluation_code(code)
if self.reject_duplicates:
# merge mapping into kwdict one by one as we need to check for duplicates
helpers.add("MergeKeywords")
code.put_error_if_neg(item.pos, "__Pyx_MergeKeywords(%s, %s)" % (
self.result(), item.py_result()))
else:
# simple case, just add all entries
helpers.add("RaiseMappingExpected")
code.putln("if (unlikely(PyDict_Update(%s, %s) < 0)) {" % (
self.result(), item.py_result()))
code.putln("if (PyErr_ExceptionMatches(PyExc_AttributeError)) "
"__Pyx_RaiseMappingExpectedError(%s);" % item.py_result())
code.putln(code.error_goto(item.pos))
code.putln("}")
item.generate_disposal_code(code)
item.free_temps(code)
for helper in sorted(helpers):
code.globalstate.use_utility_code(UtilityCode.load_cached(helper, "FunctionArguments.c"))
def annotate(self, code):
for item in self.keyword_args:
item.annotate(code)
class AttributeNode(ExprNode):
# obj.attribute
#
# obj ExprNode
# attribute string
# needs_none_check boolean Used if obj is an extension type.
# If set to True, it is known that the type is not None.
#
# Used internally:
#
# is_py_attr boolean Is a Python getattr operation
# member string C name of struct member
# is_called boolean Function call is being done on result
# entry Entry Symbol table entry of attribute
is_attribute = 1
subexprs = ['obj']
type = PyrexTypes.error_type
entry = None
is_called = 0
needs_none_check = True
is_memslice_transpose = False
is_special_lookup = False
def as_cython_attribute(self):
if (isinstance(self.obj, NameNode) and
self.obj.is_cython_module and
self.attribute != u"parallel"):
return self.attribute
cy = self.obj.as_cython_attribute()
if cy:
return "%s.%s" % (cy, self.attribute)
return None
def coerce_to(self, dst_type, env):
# If coercing to a generic pyobject and this is a cpdef function
# we can create the corresponding attribute
if dst_type is py_object_type:
entry = self.entry
if entry and entry.is_cfunction and entry.as_variable:
# must be a cpdef function
self.is_temp = 1
self.entry = entry.as_variable
self.analyse_as_python_attribute(env)
return self
return ExprNode.coerce_to(self, dst_type, env)
def calculate_constant_result(self):
attr = self.attribute
if attr.startswith("__") and attr.endswith("__"):
return
self.constant_result = getattr(self.obj.constant_result, attr)
def compile_time_value(self, denv):
attr = self.attribute
if attr.startswith("__") and attr.endswith("__"):
error(self.pos,
"Invalid attribute name '%s' in compile-time expression" % attr)
return None
obj = self.obj.compile_time_value(denv)
try:
return getattr(obj, attr)
except Exception as e:
self.compile_time_value_error(e)
def type_dependencies(self, env):
return self.obj.type_dependencies(env)
def infer_type(self, env):
# FIXME: this is way too redundant with analyse_types()
node = self.analyse_as_cimported_attribute_node(env, target=False)
if node is not None:
return node.entry.type
node = self.analyse_as_type_attribute(env)
if node is not None:
return node.entry.type
obj_type = self.obj.infer_type(env)
self.analyse_attribute(env, obj_type=obj_type)
if obj_type.is_builtin_type and self.type.is_cfunction:
# special case: C-API replacements for C methods of
# builtin types cannot be inferred as C functions as
# that would prevent their use as bound methods
return py_object_type
return self.type
def analyse_target_declaration(self, env):
pass
def analyse_target_types(self, env):
node = self.analyse_types(env, target = 1)
if node.type.is_const:
error(self.pos, "Assignment to const attribute '%s'" % self.attribute)
if not node.is_lvalue():
error(self.pos, "Assignment to non-lvalue of type '%s'" % self.type)
return node
def analyse_types(self, env, target = 0):
self.initialized_check = env.directives['initializedcheck']
node = self.analyse_as_cimported_attribute_node(env, target)
if node is None and not target:
node = self.analyse_as_type_attribute(env)
if node is None:
node = self.analyse_as_ordinary_attribute_node(env, target)
assert node is not None
if node.entry:
node.entry.used = True
if node.is_attribute:
node.wrap_obj_in_nonecheck(env)
return node
def analyse_as_cimported_attribute_node(self, env, target):
# Try to interpret this as a reference to an imported
# C const, type, var or function. If successful, returns
# a corresponding NameNode, otherwise returns None.
module_scope = self.obj.analyse_as_module(env)
if module_scope:
entry = module_scope.lookup_here(self.attribute)
if entry and (
entry.is_cglobal or entry.is_cfunction
or entry.is_type or entry.is_const):
return self.as_name_node(env, entry, target)
return None
def analyse_as_type_attribute(self, env):
# Try to interpret this as a reference to an unbound
# C method of an extension type or builtin type. If successful,
# creates a corresponding NameNode and returns it, otherwise
# returns None.
if self.obj.is_string_literal:
return
type = self.obj.analyse_as_type(env)
if type:
if type.is_extension_type or type.is_builtin_type or type.is_cpp_class:
entry = type.scope.lookup_here(self.attribute)
if entry and (entry.is_cmethod or type.is_cpp_class and entry.type.is_cfunction):
if type.is_builtin_type:
if not self.is_called:
# must handle this as Python object
return None
ubcm_entry = entry
else:
# Create a temporary entry describing the C method
# as an ordinary function.
if entry.func_cname and not hasattr(entry.type, 'op_arg_struct'):
cname = entry.func_cname
if entry.type.is_static_method:
ctype = entry.type
elif type.is_cpp_class:
error(self.pos, "%s not a static member of %s" % (entry.name, type))
ctype = PyrexTypes.error_type
else:
# Fix self type.
ctype = copy.copy(entry.type)
ctype.args = ctype.args[:]
ctype.args[0] = PyrexTypes.CFuncTypeArg('self', type, 'self', None)
else:
cname = "%s->%s" % (type.vtabptr_cname, entry.cname)
ctype = entry.type
ubcm_entry = Symtab.Entry(entry.name, cname, ctype)
ubcm_entry.is_cfunction = 1
ubcm_entry.func_cname = entry.func_cname
ubcm_entry.is_unbound_cmethod = 1
return self.as_name_node(env, ubcm_entry, target=False)
elif type.is_enum:
if self.attribute in type.values:
return self.as_name_node(env, env.lookup(self.attribute), target=False)
else:
error(self.pos, "%s not a known value of %s" % (self.attribute, type))
return None
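# Illustrative case handled above (hypothetical type):
#     cdef class Point:
#         cdef double norm(self): ...
#     Point.norm(p)     # unbound C method, typically dispatched via the vtable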
def analyse_as_type(self, env):
module_scope = self.obj.analyse_as_module(env)
if module_scope:
return module_scope.lookup_type(self.attribute)
if not self.obj.is_string_literal:
base_type = self.obj.analyse_as_type(env)
if base_type and hasattr(base_type, 'scope') and base_type.scope is not None:
return base_type.scope.lookup_type(self.attribute)
return None
def analyse_as_extension_type(self, env):
# Try to interpret this as a reference to an extension type
# in a cimported module. Returns the extension type, or None.
module_scope = self.obj.analyse_as_module(env)
if module_scope:
entry = module_scope.lookup_here(self.attribute)
if entry and entry.is_type:
if entry.type.is_extension_type or entry.type.is_builtin_type:
return entry.type
return None
def analyse_as_module(self, env):
# Try to interpret this as a reference to a cimported module
# in another cimported module. Returns the module scope, or None.
module_scope = self.obj.analyse_as_module(env)
if module_scope:
entry = module_scope.lookup_here(self.attribute)
if entry and entry.as_module:
return entry.as_module
return None
def as_name_node(self, env, entry, target):
# Create a corresponding NameNode from this node and complete the
# analyse_types phase.
node = NameNode.from_node(self, name=self.attribute, entry=entry)
if target:
node = node.analyse_target_types(env)
else:
node = node.analyse_rvalue_entry(env)
node.entry.used = 1
return node
def analyse_as_ordinary_attribute_node(self, env, target):
self.obj = self.obj.analyse_types(env)
self.analyse_attribute(env)
if self.entry and self.entry.is_cmethod and not self.is_called:
# error(self.pos, "C method can only be called")
pass
## Reference to C array turns into pointer to first element.
#while self.type.is_array:
# self.type = self.type.element_ptr_type()
if self.is_py_attr:
if not target:
self.is_temp = 1
self.result_ctype = py_object_type
elif target and self.obj.type.is_builtin_type:
error(self.pos, "Assignment to an immutable object field")
#elif self.type.is_memoryviewslice and not target:
# self.is_temp = True
return self
def analyse_attribute(self, env, obj_type = None):
# Look up attribute and set self.type and self.member.
immutable_obj = obj_type is not None # used during type inference
self.is_py_attr = 0
self.member = self.attribute
if obj_type is None:
if self.obj.type.is_string or self.obj.type.is_pyunicode_ptr:
self.obj = self.obj.coerce_to_pyobject(env)
obj_type = self.obj.type
else:
if obj_type.is_string or obj_type.is_pyunicode_ptr:
obj_type = py_object_type
if obj_type.is_ptr or obj_type.is_array:
obj_type = obj_type.base_type
self.op = "->"
elif obj_type.is_extension_type or obj_type.is_builtin_type:
self.op = "->"
elif obj_type.is_reference and obj_type.is_fake_reference:
self.op = "->"
else:
self.op = "."
if obj_type.has_attributes:
if obj_type.attributes_known():
if (obj_type.is_memoryviewslice and not
obj_type.scope.lookup_here(self.attribute)):
if self.attribute == 'T':
self.is_memslice_transpose = True
self.is_temp = True
self.use_managed_ref = True
self.type = self.obj.type.transpose(self.pos)
return
else:
obj_type.declare_attribute(self.attribute, env, self.pos)
entry = obj_type.scope.lookup_here(self.attribute)
if entry and entry.is_member:
entry = None
else:
error(self.pos,
"Cannot select attribute of incomplete type '%s'"
% obj_type)
self.type = PyrexTypes.error_type
return
self.entry = entry
if entry:
if obj_type.is_extension_type and entry.name == "__weakref__":
error(self.pos, "Illegal use of special attribute __weakref__")
# def methods need the normal attribute lookup
# because they do not have struct entries.
# Fused functions go through assignment synthesis
# (foo = pycfunction(foo_func_obj)) and need to go through
# regular Python lookup as well.
if (entry.is_variable and not entry.fused_cfunction) or entry.is_cmethod:
self.type = entry.type
self.member = entry.cname
return
else:
# If it's not a variable or C method, it must be a Python
# method of an extension type, so we treat it like a Python
# attribute.
pass
# If we get here, the base object is not a struct/union/extension
# type, or it is an extension type and the attribute is either not
# declared or is declared as a Python method. Treat it as a Python
# attribute reference.
self.analyse_as_python_attribute(env, obj_type, immutable_obj)
def analyse_as_python_attribute(self, env, obj_type=None, immutable_obj=False):
if obj_type is None:
obj_type = self.obj.type
# mangle private '__*' Python attributes used inside of a class
self.attribute = env.mangle_class_private_name(self.attribute)
self.member = self.attribute
self.type = py_object_type
self.is_py_attr = 1
if not obj_type.is_pyobject and not obj_type.is_error:
if obj_type.can_coerce_to_pyobject(env):
if not immutable_obj:
self.obj = self.obj.coerce_to_pyobject(env)
elif (obj_type.is_cfunction and (self.obj.is_name or self.obj.is_attribute)
and self.obj.entry.as_variable
and self.obj.entry.as_variable.type.is_pyobject):
# might be an optimised builtin function => unpack it
if not immutable_obj:
self.obj = self.obj.coerce_to_pyobject(env)
else:
error(self.pos,
"Object of type '%s' has no attribute '%s'" %
(obj_type, self.attribute))
def wrap_obj_in_nonecheck(self, env):
if not env.directives['nonecheck']:
return
msg = None
format_args = ()
if (self.obj.type.is_extension_type and self.needs_none_check and not
self.is_py_attr):
msg = "'NoneType' object has no attribute '%s'"
format_args = (self.attribute,)
elif self.obj.type.is_memoryviewslice:
if self.is_memslice_transpose:
msg = "Cannot transpose None memoryview slice"
else:
entry = self.obj.type.scope.lookup_here(self.attribute)
if entry:
# copy/is_c_contig/shape/strides etc
msg = "Cannot access '%s' attribute of None memoryview slice"
format_args = (entry.name,)
if msg:
self.obj = self.obj.as_none_safe_node(msg, 'PyExc_AttributeError',
format_args=format_args)
def nogil_check(self, env):
if self.is_py_attr:
self.gil_error()
gil_message = "Accessing Python attribute"
def is_simple(self):
if self.obj:
return self.result_in_temp() or self.obj.is_simple()
else:
return NameNode.is_simple(self)
def is_lvalue(self):
if self.obj:
return True
else:
return NameNode.is_lvalue(self)
def is_ephemeral(self):
if self.obj:
return self.obj.is_ephemeral()
else:
return NameNode.is_ephemeral(self)
def calculate_result_code(self):
#print "AttributeNode.calculate_result_code:", self.member ###
#print "...obj node =", self.obj, "code", self.obj.result() ###
#print "...obj type", self.obj.type, "ctype", self.obj.ctype() ###
obj = self.obj
obj_code = obj.result_as(obj.type)
#print "...obj_code =", obj_code ###
if self.entry and self.entry.is_cmethod:
if obj.type.is_extension_type and not self.entry.is_builtin_cmethod:
if self.entry.final_func_cname:
return self.entry.final_func_cname
if self.type.from_fused:
# If the attribute was specialized through indexing, make
# sure to get the right fused name, as our entry was
# replaced by our parent index node
# (AnalyseExpressionsTransform)
self.member = self.entry.cname
return "((struct %s *)%s%s%s)->%s" % (
obj.type.vtabstruct_cname, obj_code, self.op,
obj.type.vtabslot_cname, self.member)
elif self.result_is_used:
return self.member
# Generating no code at all for unused access to optimised builtin
# methods fixes the problem that some optimisations only exist as
# macros, i.e. there is no function pointer to them, so we would
# generate invalid C code here.
return
elif obj.type.is_complex:
return "__Pyx_C%s(%s)" % (self.member.upper(), obj_code)
else:
if obj.type.is_builtin_type and self.entry and self.entry.is_variable:
# accessing a field of a builtin type, need to cast better than result_as() does
obj_code = obj.type.cast_code(obj.result(), to_object_struct = True)
return "%s%s%s" % (obj_code, self.op, self.member)
def generate_result_code(self, code):
if self.is_py_attr:
if self.is_special_lookup:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c"))
lookup_func_name = '__Pyx_PyObject_LookupSpecial'
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c"))
lookup_func_name = '__Pyx_PyObject_GetAttrStr'
code.putln(
'%s = %s(%s, %s); %s' % (
self.result(),
lookup_func_name,
self.obj.py_result(),
code.intern_identifier(self.attribute),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif self.type.is_memoryviewslice:
if self.is_memslice_transpose:
# transpose the slice
for access, packing in self.type.axes:
if access == 'ptr':
error(self.pos, "Transposing not supported for slices "
"with indirect dimensions")
return
code.putln("%s = %s;" % (self.result(), self.obj.result()))
if self.obj.is_name or (self.obj.is_attribute and
self.obj.is_memslice_transpose):
code.put_incref_memoryviewslice(self.result(), have_gil=True)
T = "__pyx_memslice_transpose(&%s) == 0"
code.putln(code.error_goto_if(T % self.result(), self.pos))
elif self.initialized_check:
code.putln(
'if (unlikely(!%s.memview)) {'
'PyErr_SetString(PyExc_AttributeError,'
'"Memoryview is not initialized");'
'%s'
'}' % (self.result(), code.error_goto(self.pos)))
else:
# result_code contains what is needed, but we may need to insert
# a check and raise an exception
if self.obj.type.is_extension_type:
pass
elif self.entry and self.entry.is_cmethod and self.entry.utility_code:
# C method implemented as function call with utility code
code.globalstate.use_utility_code(self.entry.utility_code)
def generate_disposal_code(self, code):
if self.is_temp and self.type.is_memoryviewslice and self.is_memslice_transpose:
# mirror condition for putting the memview incref here:
if self.obj.is_name or (self.obj.is_attribute and
self.obj.is_memslice_transpose):
code.put_xdecref_memoryviewslice(
self.result(), have_gil=True)
else:
ExprNode.generate_disposal_code(self, code)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
exception_check=None, exception_value=None):
self.obj.generate_evaluation_code(code)
if self.is_py_attr:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
code.put_error_if_neg(self.pos,
'__Pyx_PyObject_SetAttrStr(%s, %s, %s)' % (
self.obj.py_result(),
code.intern_identifier(self.attribute),
rhs.py_result()))
rhs.generate_disposal_code(code)
rhs.free_temps(code)
elif self.obj.type.is_complex:
code.putln("__Pyx_SET_C%s(%s, %s);" % (
self.member.upper(),
self.obj.result_as(self.obj.type),
rhs.result_as(self.ctype())))
else:
select_code = self.result()
if self.type.is_pyobject and self.use_managed_ref:
rhs.make_owned_reference(code)
code.put_giveref(rhs.py_result())
code.put_gotref(select_code)
code.put_decref(select_code, self.ctype())
elif self.type.is_memoryviewslice:
from . import MemoryView
MemoryView.put_assign_to_memviewslice(
select_code, rhs, rhs.result(), self.type, code)
if not self.type.is_memoryviewslice:
code.putln(
"%s = %s;" % (
select_code,
rhs.result_as(self.ctype())))
#rhs.result()))
rhs.generate_post_assignment_code(code)
rhs.free_temps(code)
self.obj.generate_disposal_code(code)
self.obj.free_temps(code)
def generate_deletion_code(self, code, ignore_nonexisting=False):
self.obj.generate_evaluation_code(code)
if self.is_py_attr or (self.entry.scope.is_property_scope
and u'__del__' in self.entry.scope.entries):
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
code.put_error_if_neg(self.pos,
'__Pyx_PyObject_DelAttrStr(%s, %s)' % (
self.obj.py_result(),
code.intern_identifier(self.attribute)))
else:
error(self.pos, "Cannot delete C attribute of extension type")
self.obj.generate_disposal_code(code)
self.obj.free_temps(code)
def annotate(self, code):
if self.is_py_attr:
style, text = 'py_attr', 'python attribute (%s)'
else:
style, text = 'c_attr', 'c attribute (%s)'
code.annotate(self.pos, AnnotationItem(style, text % self.type, size=len(self.attribute)))
#-------------------------------------------------------------------
#
# Constructor nodes
#
#-------------------------------------------------------------------
class StarredUnpackingNode(ExprNode):
# A starred expression like "*a"
#
# This is only allowed in sequence assignment or construction such as
#
# a, *b = (1,2,3,4) => a = 1 ; b = [2,3,4]
#
# and will be special cased during type analysis (or generate an error
# if it's found at unexpected places).
#
# target ExprNode
subexprs = ['target']
is_starred = 1
type = py_object_type
is_temp = 1
starred_expr_allowed_here = False
def __init__(self, pos, target):
ExprNode.__init__(self, pos, target=target)
def analyse_declarations(self, env):
if not self.starred_expr_allowed_here:
error(self.pos, "starred expression is not allowed here")
self.target.analyse_declarations(env)
def infer_type(self, env):
return self.target.infer_type(env)
def analyse_types(self, env):
if not self.starred_expr_allowed_here:
error(self.pos, "starred expression is not allowed here")
self.target = self.target.analyse_types(env)
self.type = self.target.type
return self
def analyse_target_declaration(self, env):
self.target.analyse_target_declaration(env)
def analyse_target_types(self, env):
self.target = self.target.analyse_target_types(env)
self.type = self.target.type
return self
def calculate_result_code(self):
return ""
def generate_result_code(self, code):
pass
class SequenceNode(ExprNode):
# Base class for list and tuple constructor nodes.
# Contains common code for performing sequence unpacking.
#
# args [ExprNode]
# unpacked_items [ExprNode] or None
# coerced_unpacked_items [ExprNode] or None
# mult_factor ExprNode the integer number of content repetitions ([1,2]*3)
subexprs = ['args', 'mult_factor']
is_sequence_constructor = 1
unpacked_items = None
mult_factor = None
slow = False # trade speed for code size (e.g. use PyTuple_Pack())
def compile_time_value_list(self, denv):
return [arg.compile_time_value(denv) for arg in self.args]
def replace_starred_target_node(self):
# replace a starred node in the targets by the contained expression
self.starred_assignment = False
args = []
for arg in self.args:
if arg.is_starred:
if self.starred_assignment:
error(arg.pos, "more than 1 starred expression in assignment")
self.starred_assignment = True
arg = arg.target
arg.is_starred = True
args.append(arg)
self.args = args
def analyse_target_declaration(self, env):
self.replace_starred_target_node()
for arg in self.args:
arg.analyse_target_declaration(env)
def analyse_types(self, env, skip_children=False):
for i, arg in enumerate(self.args):
if not skip_children:
arg = arg.analyse_types(env)
self.args[i] = arg.coerce_to_pyobject(env)
if self.mult_factor:
self.mult_factor = self.mult_factor.analyse_types(env)
if not self.mult_factor.type.is_int:
self.mult_factor = self.mult_factor.coerce_to_pyobject(env)
self.is_temp = 1
# not setting self.type here, subtypes do this
return self
def _create_merge_node_if_necessary(self, env):
self._flatten_starred_args()
if not any(arg.is_starred for arg in self.args):
return self
# convert into MergedSequenceNode by building partial sequences
args = []
values = []
for arg in self.args:
if arg.is_starred:
if values:
args.append(TupleNode(values[0].pos, args=values).analyse_types(env, skip_children=True))
values = []
args.append(arg.target)
else:
values.append(arg)
if values:
args.append(TupleNode(values[0].pos, args=values).analyse_types(env, skip_children=True))
node = MergedSequenceNode(self.pos, args, self.type)
if self.mult_factor:
node = binop_node(
self.pos, '*', node, self.mult_factor.coerce_to_pyobject(env),
inplace=True, type=self.type, is_temp=True)
return node
def _flatten_starred_args(self):
args = []
for arg in self.args:
if arg.is_starred and arg.target.is_sequence_constructor and not arg.target.mult_factor:
args.extend(arg.target.args)
else:
args.append(arg)
self.args[:] = args
def may_be_none(self):
return False
def analyse_target_types(self, env):
if self.mult_factor:
error(self.pos, "can't assign to multiplied sequence")
self.unpacked_items = []
self.coerced_unpacked_items = []
self.any_coerced_items = False
for i, arg in enumerate(self.args):
arg = self.args[i] = arg.analyse_target_types(env)
if arg.is_starred:
if not arg.type.assignable_from(list_type):
error(arg.pos,
"starred target must have Python object (list) type")
if arg.type is py_object_type:
arg.type = list_type
unpacked_item = PyTempNode(self.pos, env)
coerced_unpacked_item = unpacked_item.coerce_to(arg.type, env)
if unpacked_item is not coerced_unpacked_item:
self.any_coerced_items = True
self.unpacked_items.append(unpacked_item)
self.coerced_unpacked_items.append(coerced_unpacked_item)
self.type = py_object_type
return self
def generate_result_code(self, code):
self.generate_operation_code(code)
def generate_sequence_packing_code(self, code, target=None, plain=False):
if target is None:
target = self.result()
size_factor = c_mult = ''
mult_factor = None
if self.mult_factor and not plain:
mult_factor = self.mult_factor
if mult_factor.type.is_int:
c_mult = mult_factor.result()
if (isinstance(mult_factor.constant_result, _py_int_types) and
mult_factor.constant_result > 0):
size_factor = ' * %s' % mult_factor.constant_result
elif mult_factor.type.signed:
size_factor = ' * ((%s<0) ? 0:%s)' % (c_mult, c_mult)
else:
size_factor = ' * (%s)' % (c_mult,)
if self.type is tuple_type and (self.is_literal or self.slow) and not c_mult:
# use PyTuple_Pack() to avoid generating huge amounts of one-time code
code.putln('%s = PyTuple_Pack(%d, %s); %s' % (
target,
len(self.args),
', '.join(arg.py_result() for arg in self.args),
code.error_goto_if_null(target, self.pos)))
code.put_gotref(target)
elif self.type.is_ctuple:
for i, arg in enumerate(self.args):
code.putln("%s.f%s = %s;" % (
target, i, arg.result()))
else:
# build the tuple/list step by step, potentially multiplying it as we go
if self.type is list_type:
create_func, set_item_func = 'PyList_New', 'PyList_SET_ITEM'
elif self.type is tuple_type:
create_func, set_item_func = 'PyTuple_New', 'PyTuple_SET_ITEM'
else:
raise InternalError("sequence packing for unexpected type %s" % self.type)
arg_count = len(self.args)
code.putln("%s = %s(%s%s); %s" % (
target, create_func, arg_count, size_factor,
code.error_goto_if_null(target, self.pos)))
code.put_gotref(target)
if c_mult:
# FIXME: can't use a temp variable here as the code may
# end up in the constant building function. Temps
# currently don't work there.
#counter = code.funcstate.allocate_temp(mult_factor.type, manage_ref=False)
counter = Naming.quick_temp_cname
code.putln('{ Py_ssize_t %s;' % counter)
if arg_count == 1:
offset = counter
else:
offset = '%s * %s' % (counter, arg_count)
code.putln('for (%s=0; %s < %s; %s++) {' % (
counter, counter, c_mult, counter
))
else:
offset = ''
for i in range(arg_count):
arg = self.args[i]
if c_mult or not arg.result_in_temp():
code.put_incref(arg.result(), arg.ctype())
code.put_giveref(arg.py_result())
code.putln("%s(%s, %s, %s);" % (
set_item_func,
target,
(offset and i) and ('%s + %s' % (offset, i)) or (offset or i),
arg.py_result()))
if c_mult:
code.putln('}')
#code.funcstate.release_temp(counter)
code.putln('}')
if mult_factor is not None and mult_factor.type.is_pyobject:
code.putln('{ PyObject* %s = PyNumber_InPlaceMultiply(%s, %s); %s' % (
Naming.quick_temp_cname, target, mult_factor.py_result(),
code.error_goto_if_null(Naming.quick_temp_cname, self.pos)
))
code.put_gotref(Naming.quick_temp_cname)
code.put_decref(target, py_object_type)
code.putln('%s = %s;' % (target, Naming.quick_temp_cname))
code.putln('}')
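# Multiplied sequence packing (illustrative):
#     [a, b] * 3          # PyList_New(2 * 3), filled by a C loop of 3 rounds
#     [a, b] * n          # C integer n: runtime-sized loop; Python object n:
#                         # built once, then PyNumber_InPlaceMultiply above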
def generate_subexpr_disposal_code(self, code):
if self.mult_factor and self.mult_factor.type.is_int:
super(SequenceNode, self).generate_subexpr_disposal_code(code)
elif self.type is tuple_type and (self.is_literal or self.slow):
super(SequenceNode, self).generate_subexpr_disposal_code(code)
else:
# We call generate_post_assignment_code here instead
# of generate_disposal_code, because values were stored
# in the tuple using a reference-stealing operation.
for arg in self.args:
arg.generate_post_assignment_code(code)
# Should NOT call free_temps -- this is invoked by the default
# generate_evaluation_code which will do that.
if self.mult_factor:
self.mult_factor.generate_disposal_code(code)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
exception_check=None, exception_value=None):
if self.starred_assignment:
self.generate_starred_assignment_code(rhs, code)
else:
self.generate_parallel_assignment_code(rhs, code)
for item in self.unpacked_items:
item.release(code)
rhs.free_temps(code)
_func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
]))
def generate_parallel_assignment_code(self, rhs, code):
# Need to work around the fact that generate_evaluation_code
# allocates the temps in a rather hacky way -- the assignment
# is evaluated twice, within each if-block.
for item in self.unpacked_items:
item.allocate(code)
special_unpack = (rhs.type is py_object_type
or rhs.type in (tuple_type, list_type)
or not rhs.type.is_builtin_type)
long_enough_for_a_loop = len(self.unpacked_items) > 3
if special_unpack:
self.generate_special_parallel_unpacking_code(
code, rhs, use_loop=long_enough_for_a_loop)
else:
code.putln("{")
self.generate_generic_parallel_unpacking_code(
code, rhs, self.unpacked_items, use_loop=long_enough_for_a_loop)
code.putln("}")
for value_node in self.coerced_unpacked_items:
value_node.generate_evaluation_code(code)
for i in range(len(self.args)):
self.args[i].generate_assignment_code(
self.coerced_unpacked_items[i], code)
def generate_special_parallel_unpacking_code(self, code, rhs, use_loop):
sequence_type_test = '1'
none_check = "likely(%s != Py_None)" % rhs.py_result()
if rhs.type is list_type:
sequence_types = ['List']
if rhs.may_be_none():
sequence_type_test = none_check
elif rhs.type is tuple_type:
sequence_types = ['Tuple']
if rhs.may_be_none():
sequence_type_test = none_check
else:
sequence_types = ['Tuple', 'List']
tuple_check = 'likely(PyTuple_CheckExact(%s))' % rhs.py_result()
list_check = 'PyList_CheckExact(%s)' % rhs.py_result()
sequence_type_test = "(%s) || (%s)" % (tuple_check, list_check)
code.putln("if (%s) {" % sequence_type_test)
code.putln("PyObject* sequence = %s;" % rhs.py_result())
# list/tuple => check size
code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
code.putln("Py_ssize_t size = Py_SIZE(sequence);")
code.putln("#else")
code.putln("Py_ssize_t size = PySequence_Size(sequence);") # < 0 => exception
code.putln("#endif")
code.putln("if (unlikely(size != %d)) {" % len(self.args))
code.globalstate.use_utility_code(raise_too_many_values_to_unpack)
code.putln("if (size > %d) __Pyx_RaiseTooManyValuesError(%d);" % (
len(self.args), len(self.args)))
code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
code.putln("else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);")
code.putln(code.error_goto(self.pos))
code.putln("}")
code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
# unpack items from list/tuple in unrolled loop (can't fail)
if len(sequence_types) == 2:
code.putln("if (likely(Py%s_CheckExact(sequence))) {" % sequence_types[0])
for i, item in enumerate(self.unpacked_items):
code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
item.result(), sequence_types[0], i))
if len(sequence_types) == 2:
code.putln("} else {")
for i, item in enumerate(self.unpacked_items):
code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
item.result(), sequence_types[1], i))
code.putln("}")
for item in self.unpacked_items:
code.put_incref(item.result(), item.ctype())
code.putln("#else")
# in non-CPython, use the PySequence protocol (which can fail)
if not use_loop:
for i, item in enumerate(self.unpacked_items):
code.putln("%s = PySequence_ITEM(sequence, %d); %s" % (
item.result(), i,
code.error_goto_if_null(item.result(), self.pos)))
code.put_gotref(item.result())
else:
code.putln("{")
code.putln("Py_ssize_t i;")
code.putln("PyObject** temps[%s] = {%s};" % (
len(self.unpacked_items),
','.join(['&%s' % item.result() for item in self.unpacked_items])))
code.putln("for (i=0; i < %s; i++) {" % len(self.unpacked_items))
code.putln("PyObject* item = PySequence_ITEM(sequence, i); %s" % (
code.error_goto_if_null('item', self.pos)))
code.put_gotref('item')
code.putln("*(temps[i]) = item;")
code.putln("}")
code.putln("}")
code.putln("#endif")
rhs.generate_disposal_code(code)
if sequence_type_test == '1':
code.putln("}") # all done
elif sequence_type_test == none_check:
# either tuple/list or None => save some code by generating the error directly
code.putln("} else {")
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseNoneIterError", "ObjectHandling.c"))
code.putln("__Pyx_RaiseNoneNotIterableError(); %s" % code.error_goto(self.pos))
code.putln("}") # all done
else:
code.putln("} else {") # needs iteration fallback code
self.generate_generic_parallel_unpacking_code(
code, rhs, self.unpacked_items, use_loop=use_loop)
code.putln("}")
def generate_generic_parallel_unpacking_code(self, code, rhs, unpacked_items, use_loop, terminate=True):
code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
code.globalstate.use_utility_code(UtilityCode.load_cached("IterFinish", "ObjectHandling.c"))
code.putln("Py_ssize_t index = -1;") # must be at the start of a C block!
if use_loop:
code.putln("PyObject** temps[%s] = {%s};" % (
len(self.unpacked_items),
','.join(['&%s' % item.result() for item in unpacked_items])))
iterator_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln(
"%s = PyObject_GetIter(%s); %s" % (
iterator_temp,
rhs.py_result(),
code.error_goto_if_null(iterator_temp, self.pos)))
code.put_gotref(iterator_temp)
rhs.generate_disposal_code(code)
iternext_func = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
code.putln("%s = Py_TYPE(%s)->tp_iternext;" % (
iternext_func, iterator_temp))
unpacking_error_label = code.new_label('unpacking_failed')
unpack_code = "%s(%s)" % (iternext_func, iterator_temp)
if use_loop:
code.putln("for (index=0; index < %s; index++) {" % len(unpacked_items))
code.put("PyObject* item = %s; if (unlikely(!item)) " % unpack_code)
code.put_goto(unpacking_error_label)
code.put_gotref("item")
code.putln("*(temps[index]) = item;")
code.putln("}")
else:
for i, item in enumerate(unpacked_items):
code.put(
"index = %d; %s = %s; if (unlikely(!%s)) " % (
i,
item.result(),
unpack_code,
item.result()))
code.put_goto(unpacking_error_label)
code.put_gotref(item.py_result())
if terminate:
code.globalstate.use_utility_code(
UtilityCode.load_cached("UnpackItemEndCheck", "ObjectHandling.c"))
code.put_error_if_neg(self.pos, "__Pyx_IternextUnpackEndCheck(%s, %d)" % (
unpack_code,
len(unpacked_items)))
code.putln("%s = NULL;" % iternext_func)
code.put_decref_clear(iterator_temp, py_object_type)
unpacking_done_label = code.new_label('unpacking_done')
code.put_goto(unpacking_done_label)
code.put_label(unpacking_error_label)
code.put_decref_clear(iterator_temp, py_object_type)
code.putln("%s = NULL;" % iternext_func)
code.putln("if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);")
code.putln(code.error_goto(self.pos))
code.put_label(unpacking_done_label)
code.funcstate.release_temp(iternext_func)
if terminate:
code.funcstate.release_temp(iterator_temp)
iterator_temp = None
return iterator_temp
def generate_starred_assignment_code(self, rhs, code):
for i, arg in enumerate(self.args):
if arg.is_starred:
starred_target = self.unpacked_items[i]
unpacked_fixed_items_left = self.unpacked_items[:i]
unpacked_fixed_items_right = self.unpacked_items[i+1:]
break
else:
assert False
iterator_temp = None
if unpacked_fixed_items_left:
for item in unpacked_fixed_items_left:
item.allocate(code)
code.putln('{')
iterator_temp = self.generate_generic_parallel_unpacking_code(
code, rhs, unpacked_fixed_items_left,
use_loop=True, terminate=False)
for i, item in enumerate(unpacked_fixed_items_left):
value_node = self.coerced_unpacked_items[i]
value_node.generate_evaluation_code(code)
code.putln('}')
starred_target.allocate(code)
target_list = starred_target.result()
code.putln("%s = PySequence_List(%s); %s" % (
target_list,
iterator_temp or rhs.py_result(),
code.error_goto_if_null(target_list, self.pos)))
code.put_gotref(target_list)
if iterator_temp:
code.put_decref_clear(iterator_temp, py_object_type)
code.funcstate.release_temp(iterator_temp)
else:
rhs.generate_disposal_code(code)
if unpacked_fixed_items_right:
code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
length_temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
code.putln('%s = PyList_GET_SIZE(%s);' % (length_temp, target_list))
code.putln("if (unlikely(%s < %d)) {" % (length_temp, len(unpacked_fixed_items_right)))
code.putln("__Pyx_RaiseNeedMoreValuesError(%d+%s); %s" % (
len(unpacked_fixed_items_left), length_temp,
code.error_goto(self.pos)))
code.putln('}')
for item in unpacked_fixed_items_right[::-1]:
item.allocate(code)
for i, (item, coerced_arg) in enumerate(zip(unpacked_fixed_items_right[::-1],
self.coerced_unpacked_items[::-1])):
code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
code.putln("%s = PyList_GET_ITEM(%s, %s-%d); " % (
item.py_result(), target_list, length_temp, i+1))
# resize the list the hard way
code.putln("((PyVarObject*)%s)->ob_size--;" % target_list)
code.putln('#else')
code.putln("%s = PySequence_ITEM(%s, %s-%d); " % (
item.py_result(), target_list, length_temp, i+1))
code.putln('#endif')
code.put_gotref(item.py_result())
coerced_arg.generate_evaluation_code(code)
code.putln('#if !CYTHON_COMPILING_IN_CPYTHON')
sublist_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln('%s = PySequence_GetSlice(%s, 0, %s-%d); %s' % (
sublist_temp, target_list, length_temp, len(unpacked_fixed_items_right),
code.error_goto_if_null(sublist_temp, self.pos)))
code.put_gotref(sublist_temp)
code.funcstate.release_temp(length_temp)
code.put_decref(target_list, py_object_type)
code.putln('%s = %s; %s = NULL;' % (target_list, sublist_temp, sublist_temp))
code.putln('#else')
code.putln('%s = %s;' % (sublist_temp, sublist_temp)) # avoid warning about unused variable
code.funcstate.release_temp(sublist_temp)
code.putln('#endif')
for i, arg in enumerate(self.args):
arg.generate_assignment_code(self.coerced_unpacked_items[i], code)
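# Illustrative flow for `a, *b, c = rhs` (assumed example):
#   1. unpack `a` from an iterator over rhs
#   2. collect the remainder into a list via PySequence_List => b
#   3. check the list has at least one trailing item, pop it off the end => c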
def annotate(self, code):
for arg in self.args:
arg.annotate(code)
if self.unpacked_items:
for arg in self.unpacked_items:
arg.annotate(code)
for arg in self.coerced_unpacked_items:
arg.annotate(code)
class TupleNode(SequenceNode):
# Tuple constructor.
type = tuple_type
is_partly_literal = False
gil_message = "Constructing Python tuple"
def infer_type(self, env):
if self.mult_factor or not self.args:
return tuple_type
arg_types = [arg.infer_type(env) for arg in self.args]
if any(type.is_pyobject or type.is_unspecified or type.is_fused for type in arg_types):
return tuple_type
else:
return env.declare_tuple_type(self.pos, arg_types).type
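# e.g. `(1, 2.0)` with purely C-typed, non-fused arguments infers a ctuple
# type like (long, double) rather than a Python tuple (illustrative).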
def analyse_types(self, env, skip_children=False):
if len(self.args) == 0:
self.is_temp = False
self.is_literal = True
return self
if not skip_children:
for i, arg in enumerate(self.args):
if arg.is_starred:
arg.starred_expr_allowed_here = True
self.args[i] = arg.analyse_types(env)
if (not self.mult_factor and
not any((arg.is_starred or arg.type.is_pyobject or arg.type.is_fused) for arg in self.args)):
self.type = env.declare_tuple_type(self.pos, (arg.type for arg in self.args)).type
self.is_temp = 1
return self
node = SequenceNode.analyse_types(self, env, skip_children=True)
node = node._create_merge_node_if_necessary(env)
if not node.is_sequence_constructor:
return node
if not all(child.is_literal for child in node.args):
return node
if not node.mult_factor or (
node.mult_factor.is_literal and
isinstance(node.mult_factor.constant_result, _py_int_types)):
node.is_temp = False
node.is_literal = True
else:
if not node.mult_factor.type.is_pyobject:
node.mult_factor = node.mult_factor.coerce_to_pyobject(env)
node.is_temp = True
node.is_partly_literal = True
return node
def coerce_to(self, dst_type, env):
if self.type.is_ctuple:
if dst_type.is_ctuple and self.type.size == dst_type.size:
if self.type == dst_type:
return self
coerced_args = [arg.coerce_to(type, env) for arg, type in zip(self.args, dst_type.components)]
return TupleNode(self.pos, args=coerced_args, type=dst_type, is_temp=1)
elif dst_type is tuple_type or dst_type is py_object_type:
coerced_args = [arg.coerce_to_pyobject(env) for arg in self.args]
return TupleNode(self.pos, args=coerced_args, type=tuple_type, is_temp=1).analyse_types(env, skip_children=True)
else:
return self.coerce_to_pyobject(env).coerce_to(dst_type, env)
else:
return SequenceNode.coerce_to(self, dst_type, env)
def as_list(self):
t = ListNode(self.pos, args=self.args, mult_factor=self.mult_factor)
if isinstance(self.constant_result, tuple):
t.constant_result = list(self.constant_result)
return t
def is_simple(self):
# either temp or constant => always simple
return True
def nonlocally_immutable(self):
# either temp or constant => always safe
return True
def calculate_result_code(self):
if len(self.args) > 0:
return self.result_code
else:
return Naming.empty_tuple
def calculate_constant_result(self):
self.constant_result = tuple([
arg.constant_result for arg in self.args])
def compile_time_value(self, denv):
values = self.compile_time_value_list(denv)
try:
return tuple(values)
except Exception as e:
self.compile_time_value_error(e)
def generate_operation_code(self, code):
if len(self.args) == 0:
# result_code is Naming.empty_tuple
return
if self.is_partly_literal:
# underlying tuple is const, but factor is not
tuple_target = code.get_py_const(py_object_type, 'tuple', cleanup_level=2)
const_code = code.get_cached_constants_writer()
const_code.mark_pos(self.pos)
self.generate_sequence_packing_code(const_code, tuple_target, plain=True)
const_code.put_giveref(tuple_target)
code.putln('%s = PyNumber_Multiply(%s, %s); %s' % (
self.result(), tuple_target, self.mult_factor.py_result(),
code.error_goto_if_null(self.result(), self.pos)
))
code.put_gotref(self.py_result())
elif self.is_literal:
# non-empty cached tuple => result is global constant,
# creation code goes into separate code writer
self.result_code = code.get_py_const(py_object_type, 'tuple', cleanup_level=2)
code = code.get_cached_constants_writer()
code.mark_pos(self.pos)
self.generate_sequence_packing_code(code)
code.put_giveref(self.py_result())
else:
self.type.entry.used = True
self.generate_sequence_packing_code(code)
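# Illustrative sketch (not part of the compiler): the "partly literal" case
# above corresponds to source like ``(1, 2) * n`` with a non-literal ``n``.
# The constant tuple is built once into the cached-constants section; only
# the multiplication runs at runtime, roughly like this Python-level model:
_EXAMPLE_CACHED_TUPLE = (1, 2)  # hypothetical cached constant

def _example_partly_literal_tuple(n):
    # corresponds to PyNumber_Multiply(cached_tuple, n) in the generated code
    return _EXAMPLE_CACHED_TUPLE * n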
class ListNode(SequenceNode):
# List constructor.
# obj_conversion_errors [PyrexError] used internally
# original_args [ExprNode] used internally
obj_conversion_errors = []
type = list_type
in_module_scope = False
gil_message = "Constructing Python list"
def type_dependencies(self, env):
return ()
def infer_type(self, env):
# TODO: Infer non-object list arrays.
return list_type
def analyse_expressions(self, env):
for arg in self.args:
if arg.is_starred:
arg.starred_expr_allowed_here = True
node = SequenceNode.analyse_expressions(self, env)
return node.coerce_to_pyobject(env)
def analyse_types(self, env):
hold_errors()
self.original_args = list(self.args)
node = SequenceNode.analyse_types(self, env)
node.obj_conversion_errors = held_errors()
release_errors(ignore=True)
if env.is_module_scope:
self.in_module_scope = True
node = node._create_merge_node_if_necessary(env)
return node
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject:
for err in self.obj_conversion_errors:
report_error(err)
self.obj_conversion_errors = []
if not self.type.subtype_of(dst_type):
error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
elif (dst_type.is_array or dst_type.is_ptr) and dst_type.base_type is not PyrexTypes.c_void_type:
array_length = len(self.args)
if self.mult_factor:
if isinstance(self.mult_factor.constant_result, _py_int_types):
if self.mult_factor.constant_result <= 0:
error(self.pos, "Cannot coerce non-positively multiplied list to '%s'" % dst_type)
else:
array_length *= self.mult_factor.constant_result
else:
error(self.pos, "Cannot coerce dynamically multiplied list to '%s'" % dst_type)
base_type = dst_type.base_type
self.type = PyrexTypes.CArrayType(base_type, array_length)
for i in range(len(self.original_args)):
arg = self.args[i]
if isinstance(arg, CoerceToPyTypeNode):
arg = arg.arg
self.args[i] = arg.coerce_to(base_type, env)
elif dst_type.is_cpp_class:
# TODO(robertwb): Avoid object conversion for vector/list/set.
return TypecastNode(self.pos, operand=self, type=PyrexTypes.py_object_type).coerce_to(dst_type, env)
elif self.mult_factor:
error(self.pos, "Cannot coerce multiplied list to '%s'" % dst_type)
elif dst_type.is_struct:
if len(self.args) > len(dst_type.scope.var_entries):
error(self.pos, "Too many members for '%s'" % dst_type)
else:
if len(self.args) < len(dst_type.scope.var_entries):
warning(self.pos, "Too few members for '%s'" % dst_type, 1)
for i, (arg, member) in enumerate(zip(self.original_args, dst_type.scope.var_entries)):
if isinstance(arg, CoerceToPyTypeNode):
arg = arg.arg
self.args[i] = arg.coerce_to(member.type, env)
self.type = dst_type
else:
self.type = error_type
error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
return self
def as_list(self): # dummy for compatibility with TupleNode
return self
def as_tuple(self):
t = TupleNode(self.pos, args=self.args, mult_factor=self.mult_factor)
if isinstance(self.constant_result, list):
t.constant_result = tuple(self.constant_result)
return t
def allocate_temp_result(self, code):
if self.type.is_array and self.in_module_scope:
self.temp_code = code.funcstate.allocate_temp(
self.type, manage_ref=False, static=True)
else:
SequenceNode.allocate_temp_result(self, code)
def release_temp_result(self, env):
if self.type.is_array:
# To be valid C++, we must allocate the memory on the stack
# manually and be sure not to reuse it for something else.
# Yes, this means that we leak a temp array variable.
pass
else:
SequenceNode.release_temp_result(self, env)
def calculate_constant_result(self):
if self.mult_factor:
raise ValueError() # may exceed the compile time memory
self.constant_result = [
arg.constant_result for arg in self.args]
def compile_time_value(self, denv):
l = self.compile_time_value_list(denv)
if self.mult_factor:
l *= self.mult_factor.compile_time_value(denv)
return l
def generate_operation_code(self, code):
if self.type.is_pyobject:
for err in self.obj_conversion_errors:
report_error(err)
self.generate_sequence_packing_code(code)
elif self.type.is_array:
if self.mult_factor:
code.putln("{")
code.putln("Py_ssize_t %s;" % Naming.quick_temp_cname)
code.putln("for ({i} = 0; {i} < {count}; {i}++) {{".format(
i=Naming.quick_temp_cname, count=self.mult_factor.result()))
offset = '+ (%d * %s)' % (len(self.args), Naming.quick_temp_cname)
else:
offset = ''
for i, arg in enumerate(self.args):
if arg.type.is_array:
code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
code.putln("memcpy(&(%s[%s%s]), %s, sizeof(%s[0]));" % (
self.result(), i, offset,
arg.result(), self.result()
))
else:
code.putln("%s[%s%s] = %s;" % (
self.result(),
i,
offset,
arg.result()))
if self.mult_factor:
code.putln("}")
code.putln("}")
elif self.type.is_struct:
for arg, member in zip(self.args, self.type.scope.var_entries):
code.putln("%s.%s = %s;" % (
self.result(),
member.cname,
arg.result()))
else:
raise InternalError("List type never specified")
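# Illustrative Python-level model (assumption, not the generated C): coercing
# a possibly multiplied list literal to a C array computes the total length
# and copies each element into a fixed-size buffer, mirroring the offset
# arithmetic in generate_operation_code() above.
def _example_array_from_list(args, mult_factor=1):
    if mult_factor <= 0:
        raise ValueError("cannot coerce non-positively multiplied list")
    array = [None] * (len(args) * mult_factor)  # CArrayType(base_type, n)
    for repeat in range(mult_factor):           # the quick_temp loop
        for i, value in enumerate(args):
            array[i + repeat * len(args)] = value
    return array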
class ScopedExprNode(ExprNode):
# Abstract base class for ExprNodes that have their own local
# scope, such as generator expressions.
#
# expr_scope Scope the inner scope of the expression
subexprs = []
expr_scope = None
# does this node really have a local scope, e.g. does it leak loop
# variables or not? non-leaking Py3 behaviour is default, except
# for list comprehensions where the behaviour differs in Py2 and
# Py3 (set in Parsing.py based on parser context)
has_local_scope = True
def init_scope(self, outer_scope, expr_scope=None):
if expr_scope is not None:
self.expr_scope = expr_scope
elif self.has_local_scope:
self.expr_scope = Symtab.GeneratorExpressionScope(outer_scope)
else:
self.expr_scope = None
def analyse_declarations(self, env):
self.init_scope(env)
def analyse_scoped_declarations(self, env):
# this is called with the expr_scope as env
pass
def analyse_types(self, env):
# no recursion here, the children will be analysed separately below
return self
def analyse_scoped_expressions(self, env):
# this is called with the expr_scope as env
return self
def generate_evaluation_code(self, code):
# set up local variables and free their references on exit
generate_inner_evaluation_code = super(ScopedExprNode, self).generate_evaluation_code
if not self.has_local_scope or not self.expr_scope.var_entries:
# no local variables => delegate, done
generate_inner_evaluation_code(code)
return
code.putln('{ /* enter inner scope */')
py_entries = []
for entry in self.expr_scope.var_entries:
if not entry.in_closure:
code.put_var_declaration(entry)
if entry.type.is_pyobject and entry.used:
py_entries.append(entry)
if not py_entries:
# no local Python references => no cleanup required
generate_inner_evaluation_code(code)
code.putln('} /* exit inner scope */')
return
# must free all local Python references at each exit point
old_loop_labels = tuple(code.new_loop_labels())
old_error_label = code.new_error_label()
generate_inner_evaluation_code(code)
# normal (non-error) exit
for entry in py_entries:
code.put_var_decref(entry)
# error/loop body exit points
exit_scope = code.new_label('exit_scope')
code.put_goto(exit_scope)
for label, old_label in ([(code.error_label, old_error_label)] +
list(zip(code.get_loop_labels(), old_loop_labels))):
if code.label_used(label):
code.put_label(label)
for entry in py_entries:
code.put_var_decref(entry)
code.put_goto(old_label)
code.put_label(exit_scope)
code.putln('} /* exit inner scope */')
code.set_loop_labels(old_loop_labels)
code.error_label = old_error_label
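# Illustrative behaviour note (plain Python, not generated code): the inner
# scope managed above is what keeps comprehension and generator-expression
# loop variables from leaking into the enclosing function, matching Py3
# semantics:
def _example_non_leaking_scope():
    values = [i for i in range(3)]
    # 'i' lived only in the comprehension's inner scope
    return values, 'i' not in locals()  # ([0, 1, 2], True)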
class ComprehensionNode(ScopedExprNode):
# A list/set/dict comprehension
child_attrs = ["loop"]
is_temp = True
def infer_type(self, env):
return self.type
def analyse_declarations(self, env):
self.append.target = self # this is used in the PyList_Append of the inner loop
self.init_scope(env)
def analyse_scoped_declarations(self, env):
self.loop.analyse_declarations(env)
def analyse_types(self, env):
if not self.has_local_scope:
self.loop = self.loop.analyse_expressions(env)
return self
def analyse_scoped_expressions(self, env):
if self.has_local_scope:
self.loop = self.loop.analyse_expressions(env)
return self
def may_be_none(self):
return False
def generate_result_code(self, code):
self.generate_operation_code(code)
def generate_operation_code(self, code):
if self.type is Builtin.list_type:
create_code = 'PyList_New(0)'
elif self.type is Builtin.set_type:
create_code = 'PySet_New(NULL)'
elif self.type is Builtin.dict_type:
create_code = 'PyDict_New()'
else:
raise InternalError("illegal type for comprehension: %s" % self.type)
code.putln('%s = %s; %s' % (
self.result(), create_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
self.loop.generate_execution_code(code)
def annotate(self, code):
self.loop.annotate(code)
class ComprehensionAppendNode(Node):
# Need to be careful to avoid infinite recursion:
# target must not be in child_attrs/subexprs
child_attrs = ['expr']
target = None
type = PyrexTypes.c_int_type
def analyse_expressions(self, env):
self.expr = self.expr.analyse_expressions(env)
if not self.expr.type.is_pyobject:
self.expr = self.expr.coerce_to_pyobject(env)
return self
def generate_execution_code(self, code):
if self.target.type is list_type:
code.globalstate.use_utility_code(
UtilityCode.load_cached("ListCompAppend", "Optimize.c"))
function = "__Pyx_ListComp_Append"
elif self.target.type is set_type:
function = "PySet_Add"
else:
raise InternalError(
"Invalid type for comprehension node: %s" % self.target.type)
self.expr.generate_evaluation_code(code)
code.putln(code.error_goto_if("%s(%s, (PyObject*)%s)" % (
function,
self.target.result(),
self.expr.result()
), self.pos))
self.expr.generate_disposal_code(code)
self.expr.free_temps(code)
def generate_function_definitions(self, env, code):
self.expr.generate_function_definitions(env, code)
def annotate(self, code):
self.expr.annotate(code)
class DictComprehensionAppendNode(ComprehensionAppendNode):
child_attrs = ['key_expr', 'value_expr']
def analyse_expressions(self, env):
self.key_expr = self.key_expr.analyse_expressions(env)
if not self.key_expr.type.is_pyobject:
self.key_expr = self.key_expr.coerce_to_pyobject(env)
self.value_expr = self.value_expr.analyse_expressions(env)
if not self.value_expr.type.is_pyobject:
self.value_expr = self.value_expr.coerce_to_pyobject(env)
return self
def generate_execution_code(self, code):
self.key_expr.generate_evaluation_code(code)
self.value_expr.generate_evaluation_code(code)
code.putln(code.error_goto_if("PyDict_SetItem(%s, (PyObject*)%s, (PyObject*)%s)" % (
self.target.result(),
self.key_expr.result(),
self.value_expr.result()
), self.pos))
self.key_expr.generate_disposal_code(code)
self.key_expr.free_temps(code)
self.value_expr.generate_disposal_code(code)
self.value_expr.free_temps(code)
def generate_function_definitions(self, env, code):
self.key_expr.generate_function_definitions(env, code)
self.value_expr.generate_function_definitions(env, code)
def annotate(self, code):
self.key_expr.annotate(code)
self.value_expr.annotate(code)
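# Illustrative Python-level model (assumption, not the generated C): a dict
# comprehension is created empty and filled one key/value pair at a time,
# matching ComprehensionNode plus DictComprehensionAppendNode above.
def _example_dict_comprehension_model(pairs):
    result = {}               # PyDict_New()
    for key, value in pairs:  # the comprehension loop
        result[key] = value   # PyDict_SetItem() per appended pair
    return result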
class InlinedGeneratorExpressionNode(ExprNode):
# An inlined generator expression for which the result is calculated
# inside the loop and returned as the single, first and only return
# value of the generator.
# This will only be created by transforms when replacing safe builtin
# calls on generator expressions.
#
# gen GeneratorExpressionNode the generator, not containing any YieldExprNodes
# orig_func String the name of the builtin function this node replaces
# target ExprNode or None a 'target' for a ComprehensionAppend node
subexprs = ["gen"]
orig_func = None
target = None
is_temp = True
type = py_object_type
def __init__(self, pos, gen, comprehension_type=None, **kwargs):
gbody = gen.def_node.gbody
gbody.is_inlined = True
if comprehension_type is not None:
assert comprehension_type in (list_type, set_type, dict_type), comprehension_type
gbody.inlined_comprehension_type = comprehension_type
kwargs.update(
target=RawCNameExprNode(pos, comprehension_type, Naming.retval_cname),
type=comprehension_type,
)
super(InlinedGeneratorExpressionNode, self).__init__(pos, gen=gen, **kwargs)
def may_be_none(self):
return self.orig_func not in ('any', 'all', 'sorted')
def infer_type(self, env):
return self.type
def analyse_types(self, env):
self.gen = self.gen.analyse_expressions(env)
return self
def generate_result_code(self, code):
code.putln("%s = __Pyx_Generator_Next(%s); %s" % (
self.result(), self.gen.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
class MergedSequenceNode(ExprNode):
"""
Merge a sequence of iterables into a set/list/tuple.
The target collection is determined by self.type, which must be set externally.
args [ExprNode]
"""
subexprs = ['args']
is_temp = True
gil_message = "Constructing Python collection"
def __init__(self, pos, args, type):
if type in (list_type, tuple_type) and args and args[0].is_sequence_constructor:
# construct a list directly from the first argument that we can then extend
if args[0].type is not list_type:
args[0] = ListNode(args[0].pos, args=args[0].args, is_temp=True)
ExprNode.__init__(self, pos, args=args, type=type)
def calculate_constant_result(self):
result = []
for item in self.args:
if item.is_sequence_constructor and item.mult_factor:
if item.mult_factor.constant_result <= 0:
continue
# otherwise, adding each item once should be enough
if item.is_set_literal or item.is_sequence_constructor:
# process items in order
items = (arg.constant_result for arg in item.args)
else:
items = item.constant_result
result.extend(items)
if self.type is set_type:
result = set(result)
elif self.type is tuple_type:
result = tuple(result)
else:
assert self.type is list_type
self.constant_result = result
def compile_time_value(self, denv):
result = []
for item in self.args:
if item.is_sequence_constructor and item.mult_factor:
if item.mult_factor.compile_time_value(denv) <= 0:
continue
if item.is_set_literal or item.is_sequence_constructor:
# process items in order
items = (arg.compile_time_value(denv) for arg in item.args)
else:
items = item.compile_time_value(denv)
result.extend(items)
if self.type is set_type:
try:
result = set(result)
except Exception as e:
self.compile_time_value_error(e)
elif self.type is tuple_type:
result = tuple(result)
else:
assert self.type is list_type
return result
def type_dependencies(self, env):
return ()
def infer_type(self, env):
return self.type
def analyse_types(self, env):
args = [
arg.analyse_types(env).coerce_to_pyobject(env).as_none_safe_node(
# FIXME: CPython's error message starts with the runtime function name
'argument after * must be an iterable, not NoneType')
for arg in self.args
]
if len(args) == 1 and args[0].type is self.type:
# strip this intermediate node and use the bare collection
return args[0]
assert self.type in (set_type, list_type, tuple_type)
self.args = args
return self
def may_be_none(self):
return False
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
self.allocate_temp_result(code)
is_set = self.type is set_type
args = iter(self.args)
item = next(args)
item.generate_evaluation_code(code)
if (is_set and item.is_set_literal or
not is_set and item.is_sequence_constructor and item.type is list_type):
code.putln("%s = %s;" % (self.result(), item.py_result()))
item.generate_post_assignment_code(code)
else:
code.putln("%s = %s(%s); %s" % (
self.result(),
'PySet_New' if is_set else 'PySequence_List',
item.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
item.generate_disposal_code(code)
item.free_temps(code)
helpers = set()
if is_set:
add_func = "PySet_Add"
extend_func = "__Pyx_PySet_Update"
else:
add_func = "__Pyx_ListComp_Append"
extend_func = "__Pyx_PyList_Extend"
for item in args:
if (is_set and (item.is_set_literal or item.is_sequence_constructor) or
(item.is_sequence_constructor and not item.mult_factor)):
if not is_set and item.args:
helpers.add(("ListCompAppend", "Optimize.c"))
for arg in item.args:
arg.generate_evaluation_code(code)
code.put_error_if_neg(arg.pos, "%s(%s, %s)" % (
add_func,
self.result(),
arg.py_result()))
arg.generate_disposal_code(code)
arg.free_temps(code)
continue
if is_set:
helpers.add(("PySet_Update", "Builtins.c"))
else:
helpers.add(("ListExtend", "Optimize.c"))
item.generate_evaluation_code(code)
code.put_error_if_neg(item.pos, "%s(%s, %s)" % (
extend_func,
self.result(),
item.py_result()))
item.generate_disposal_code(code)
item.free_temps(code)
if self.type is tuple_type:
code.putln("{")
code.putln("PyObject *%s = PyList_AsTuple(%s);" % (
Naming.quick_temp_cname,
self.result()))
code.put_decref(self.result(), py_object_type)
code.putln("%s = %s; %s" % (
self.result(),
Naming.quick_temp_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
code.putln("}")
for helper in sorted(helpers):
code.globalstate.use_utility_code(UtilityCode.load_cached(*helper))
def annotate(self, code):
for item in self.args:
item.annotate(code)
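# Illustrative Python-level model (assumption): MergedSequenceNode implements
# iterable unpacking into a single collection, e.g. ``[*a, *b]``. The first
# item seeds the result; every further item extends it.
def _example_merged_list(first, *rest):
    result = list(first)     # PySequence_List() on the first item
    for item in rest:
        result.extend(item)  # __Pyx_PyList_Extend() per further item
    return result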
class SetNode(ExprNode):
"""
Set constructor.
"""
subexprs = ['args']
type = set_type
is_set_literal = True
gil_message = "Constructing Python set"
def analyse_types(self, env):
for i in range(len(self.args)):
arg = self.args[i]
arg = arg.analyse_types(env)
self.args[i] = arg.coerce_to_pyobject(env)
self.type = set_type
self.is_temp = 1
return self
def may_be_none(self):
return False
def calculate_constant_result(self):
self.constant_result = set([arg.constant_result for arg in self.args])
def compile_time_value(self, denv):
values = [arg.compile_time_value(denv) for arg in self.args]
try:
return set(values)
except Exception as e:
self.compile_time_value_error(e)
def generate_evaluation_code(self, code):
for arg in self.args:
arg.generate_evaluation_code(code)
self.allocate_temp_result(code)
code.putln(
"%s = PySet_New(0); %s" % (
self.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
for arg in self.args:
code.put_error_if_neg(
self.pos,
"PySet_Add(%s, %s)" % (self.result(), arg.py_result()))
arg.generate_disposal_code(code)
arg.free_temps(code)
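# Illustrative Python-level model (assumption): a set literal is created
# empty and filled element by element, matching the PySet_New()/PySet_Add()
# sequence generated above.
def _example_set_literal_model(values):
    result = set()     # PySet_New(0)
    for v in values:
        result.add(v)  # PySet_Add() per element
    return result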
class DictNode(ExprNode):
# Dictionary constructor.
#
# key_value_pairs [DictItemNode]
# exclude_null_values [boolean] Do not add NULL values to dict
#
# obj_conversion_errors [PyrexError] used internally
subexprs = ['key_value_pairs']
is_temp = 1
exclude_null_values = False
type = dict_type
is_dict_literal = True
reject_duplicates = False
obj_conversion_errors = []
@classmethod
def from_pairs(cls, pos, pairs):
return cls(pos, key_value_pairs=[
DictItemNode(pos, key=k, value=v) for k, v in pairs])
def calculate_constant_result(self):
self.constant_result = dict([
item.constant_result for item in self.key_value_pairs])
def compile_time_value(self, denv):
pairs = [(item.key.compile_time_value(denv), item.value.compile_time_value(denv))
for item in self.key_value_pairs]
try:
return dict(pairs)
except Exception as e:
self.compile_time_value_error(e)
def type_dependencies(self, env):
return ()
def infer_type(self, env):
# TODO: Infer struct constructors.
return dict_type
def analyse_types(self, env):
hold_errors()
self.key_value_pairs = [ item.analyse_types(env)
for item in self.key_value_pairs ]
self.obj_conversion_errors = held_errors()
release_errors(ignore=True)
return self
def may_be_none(self):
return False
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject:
self.release_errors()
if self.type.is_struct_or_union:
if not dict_type.subtype_of(dst_type):
error(self.pos, "Cannot interpret struct as non-dict type '%s'" % dst_type)
return DictNode(self.pos, key_value_pairs=[
DictItemNode(item.pos, key=item.key.coerce_to_pyobject(env),
value=item.value.coerce_to_pyobject(env))
for item in self.key_value_pairs])
if not self.type.subtype_of(dst_type):
error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
elif dst_type.is_struct_or_union:
self.type = dst_type
if not dst_type.is_struct and len(self.key_value_pairs) != 1:
error(self.pos, "Exactly one field must be specified to convert to union '%s'" % dst_type)
elif dst_type.is_struct and len(self.key_value_pairs) < len(dst_type.scope.var_entries):
warning(self.pos, "Not all members given for struct '%s'" % dst_type, 1)
for item in self.key_value_pairs:
if isinstance(item.key, CoerceToPyTypeNode):
item.key = item.key.arg
if not item.key.is_string_literal:
error(item.key.pos, "Invalid struct field identifier")
item.key = StringNode(item.key.pos, value="<error>")
else:
key = str(item.key.value) # converts string literals to unicode in Py3
member = dst_type.scope.lookup_here(key)
if not member:
error(item.key.pos, "struct '%s' has no field '%s'" % (dst_type, key))
else:
value = item.value
if isinstance(value, CoerceToPyTypeNode):
value = value.arg
item.value = value.coerce_to(member.type, env)
else:
self.type = error_type
error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
return self
def release_errors(self):
for err in self.obj_conversion_errors:
report_error(err)
self.obj_conversion_errors = []
gil_message = "Constructing Python dict"
def generate_evaluation_code(self, code):
# Custom method used here because key-value
# pairs are evaluated and used one at a time.
code.mark_pos(self.pos)
self.allocate_temp_result(code)
is_dict = self.type.is_pyobject
if is_dict:
self.release_errors()
code.putln(
"%s = PyDict_New(); %s" % (
self.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
keys_seen = set()
key_type = None
needs_error_helper = False
for item in self.key_value_pairs:
item.generate_evaluation_code(code)
if is_dict:
if self.exclude_null_values:
code.putln('if (%s) {' % item.value.py_result())
key = item.key
if self.reject_duplicates:
if keys_seen is not None:
# avoid runtime 'in' checks for literals that we can do at compile time
if not key.is_string_literal:
keys_seen = None
elif key.value in keys_seen:
# FIXME: this could be a compile time error, at least in Cython code
keys_seen = None
elif key_type is not type(key.value):
if key_type is None:
key_type = type(key.value)
keys_seen.add(key.value)
else:
# different types => may not be able to compare at compile time
keys_seen = None
else:
keys_seen.add(key.value)
if keys_seen is None:
code.putln('if (unlikely(PyDict_Contains(%s, %s))) {' % (
self.result(), key.py_result()))
# currently only used in function calls
needs_error_helper = True
code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % (
key.py_result(),
code.error_goto(item.pos)))
code.putln("} else {")
code.put_error_if_neg(self.pos, "PyDict_SetItem(%s, %s, %s)" % (
self.result(),
item.key.py_result(),
item.value.py_result()))
if self.reject_duplicates and keys_seen is None:
code.putln('}')
if self.exclude_null_values:
code.putln('}')
else:
code.putln("%s.%s = %s;" % (
self.result(),
item.key.value,
item.value.result()))
item.generate_disposal_code(code)
item.free_temps(code)
if needs_error_helper:
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseDoubleKeywords", "FunctionArguments.c"))
def annotate(self, code):
for item in self.key_value_pairs:
item.annotate(code)
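# Illustrative sketch (assumption): with reject_duplicates, repeated
# string-literal keys can be detected at compile time via 'keys_seen';
# anything else falls back to the runtime PyDict_Contains() check, roughly:
def _example_reject_duplicates(pairs):
    result = {}
    for key, value in pairs:
        if key in result:  # runtime PyDict_Contains() fallback
            raise TypeError(
                "function() got multiple values for keyword argument %r" % key)
        result[key] = value
    return result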
class DictItemNode(ExprNode):
# Represents a single item in a DictNode
#
# key ExprNode
# value ExprNode
subexprs = ['key', 'value']
nogil_check = None # Parent DictNode takes care of it
def calculate_constant_result(self):
self.constant_result = (
self.key.constant_result, self.value.constant_result)
def analyse_types(self, env):
self.key = self.key.analyse_types(env)
self.value = self.value.analyse_types(env)
self.key = self.key.coerce_to_pyobject(env)
self.value = self.value.coerce_to_pyobject(env)
return self
def generate_evaluation_code(self, code):
self.key.generate_evaluation_code(code)
self.value.generate_evaluation_code(code)
def generate_disposal_code(self, code):
self.key.generate_disposal_code(code)
self.value.generate_disposal_code(code)
def free_temps(self, code):
self.key.free_temps(code)
self.value.free_temps(code)
def __iter__(self):
return iter([self.key, self.value])
class SortedDictKeysNode(ExprNode):
# build sorted list of dict keys, e.g. for dir()
subexprs = ['arg']
is_temp = True
def __init__(self, arg):
ExprNode.__init__(self, arg.pos, arg=arg)
self.type = Builtin.list_type
def analyse_types(self, env):
arg = self.arg.analyse_types(env)
if arg.type is Builtin.dict_type:
arg = arg.as_none_safe_node(
"'NoneType' object is not iterable")
self.arg = arg
return self
def may_be_none(self):
return False
def generate_result_code(self, code):
dict_result = self.arg.py_result()
if self.arg.type is Builtin.dict_type:
code.putln('%s = PyDict_Keys(%s); %s' % (
self.result(), dict_result,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
else:
# originally used PyMapping_Keys() here, but that may return a tuple
code.globalstate.use_utility_code(UtilityCode.load_cached(
'PyObjectCallMethod0', 'ObjectHandling.c'))
keys_cname = code.intern_identifier(StringEncoding.EncodedString("keys"))
code.putln('%s = __Pyx_PyObject_CallMethod0(%s, %s); %s' % (
self.result(), dict_result, keys_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
code.putln("if (unlikely(!PyList_Check(%s))) {" % self.result())
code.put_decref_set(self.result(), "PySequence_List(%s)" % self.result())
code.putln(code.error_goto_if_null(self.result(), self.pos))
code.put_gotref(self.py_result())
code.putln("}")
code.put_error_if_neg(
self.pos, 'PyList_Sort(%s)' % self.py_result())
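# Illustrative Python-level model (assumption): SortedDictKeysNode behaves
# like collecting a mapping's keys into a list and sorting it in place,
# e.g. for dir():
def _example_sorted_dict_keys(mapping):
    keys = list(mapping.keys())  # PyDict_Keys() or the keys() fallback
    keys.sort()                  # PyList_Sort()
    return keys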
class ModuleNameMixin(object):
def get_py_mod_name(self, code):
return code.get_py_string_const(
self.module_name, identifier=True)
def get_py_qualified_name(self, code):
return code.get_py_string_const(
self.qualname, identifier=True)
class ClassNode(ExprNode, ModuleNameMixin):
# Helper class used in the implementation of Python
# class definitions. Constructs a class object given
# a name, tuple of bases and class dictionary.
#
# name EncodedString Name of the class
# bases ExprNode Base class tuple
# dict ExprNode Class dict (not owned by this node)
# doc ExprNode or None Doc string
# module_name EncodedString Name of defining module
subexprs = ['bases', 'doc']
type = py_object_type
is_temp = True
def infer_type(self, env):
# TODO: could return 'type' in some cases
return py_object_type
def analyse_types(self, env):
self.bases = self.bases.analyse_types(env)
if self.doc:
self.doc = self.doc.analyse_types(env)
self.doc = self.doc.coerce_to_pyobject(env)
env.use_utility_code(UtilityCode.load_cached("CreateClass", "ObjectHandling.c"))
return self
def may_be_none(self):
return True
gil_message = "Constructing Python class"
def generate_result_code(self, code):
cname = code.intern_identifier(self.name)
if self.doc:
code.put_error_if_neg(self.pos,
'PyDict_SetItem(%s, %s, %s)' % (
self.dict.py_result(),
code.intern_identifier(
StringEncoding.EncodedString("__doc__")),
self.doc.py_result()))
py_mod_name = self.get_py_mod_name(code)
qualname = self.get_py_qualified_name(code)
code.putln(
'%s = __Pyx_CreateClass(%s, %s, %s, %s, %s); %s' % (
self.result(),
self.bases.py_result(),
self.dict.py_result(),
cname,
qualname,
py_mod_name,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class Py3ClassNode(ExprNode):
# Helper class used in the implementation of Python3+
# class definitions. Constructs a class object given
# a name, tuple of bases and class dictionary.
#
# name EncodedString Name of the class
# dict ExprNode Class dict (not owned by this node)
# module_name EncodedString Name of defining module
# calculate_metaclass bool should call CalculateMetaclass()
# allow_py2_metaclass bool should look for Py2 metaclass
subexprs = []
type = py_object_type
is_temp = True
def infer_type(self, env):
# TODO: could return 'type' in some cases
return py_object_type
def analyse_types(self, env):
return self
def may_be_none(self):
return True
gil_message = "Constructing Python class"
def generate_result_code(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("Py3ClassCreate", "ObjectHandling.c"))
cname = code.intern_identifier(self.name)
if self.mkw:
mkw = self.mkw.py_result()
else:
mkw = 'NULL'
if self.metaclass:
metaclass = self.metaclass.result()
else:
metaclass = "((PyObject*)&__Pyx_DefaultClassType)"
code.putln(
'%s = __Pyx_Py3ClassCreate(%s, %s, %s, %s, %s, %d, %d); %s' % (
self.result(),
metaclass,
cname,
self.bases.py_result(),
self.dict.py_result(),
mkw,
self.calculate_metaclass,
self.allow_py2_metaclass,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
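# Illustrative Python-level model (assumption): Py3 class creation resolves a
# metaclass, prepares a namespace, executes the class body into it, and then
# calls the metaclass, roughly mirroring __Pyx_Py3MetaclassPrepare() and
# __Pyx_Py3ClassCreate() above. 'fill_body' is a hypothetical callable that
# stands in for the compiled class body.
def _example_py3_class_create(name, bases, fill_body):
    metaclass = type  # stand-in for __Pyx_DefaultClassType or a computed one
    namespace = metaclass.__prepare__(name, bases)
    fill_body(namespace)
    return metaclass(name, bases, dict(namespace))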
class PyClassMetaclassNode(ExprNode):
# Helper class that holds the Python3 metaclass object
#
# bases ExprNode Base class tuple (not owned by this node)
# mkw ExprNode Class keyword arguments (not owned by this node)
subexprs = []
def analyse_types(self, env):
self.type = py_object_type
self.is_temp = True
return self
def may_be_none(self):
return True
def generate_result_code(self, code):
if self.mkw:
code.globalstate.use_utility_code(
UtilityCode.load_cached("Py3MetaclassGet", "ObjectHandling.c"))
call = "__Pyx_Py3MetaclassGet(%s, %s)" % (
self.bases.result(),
self.mkw.result())
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("CalculateMetaclass", "ObjectHandling.c"))
call = "__Pyx_CalculateMetaclass(NULL, %s)" % (
self.bases.result())
code.putln(
"%s = %s; %s" % (
self.result(), call,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class PyClassNamespaceNode(ExprNode, ModuleNameMixin):
# Helper class that holds the Python3 namespace object
#
# All of these are not owned by this node, except 'doc':
# metaclass ExprNode Metaclass object
# bases ExprNode Base class tuple
# mkw ExprNode Class keyword arguments
# doc ExprNode or None Doc string (owned)
subexprs = ['doc']
def analyse_types(self, env):
if self.doc:
self.doc = self.doc.analyse_types(env)
self.doc = self.doc.coerce_to_pyobject(env)
self.type = py_object_type
self.is_temp = 1
return self
def may_be_none(self):
return True
def generate_result_code(self, code):
cname = code.intern_identifier(self.name)
py_mod_name = self.get_py_mod_name(code)
qualname = self.get_py_qualified_name(code)
if self.doc:
doc_code = self.doc.result()
else:
doc_code = '(PyObject *) NULL'
if self.mkw:
mkw = self.mkw.py_result()
else:
mkw = '(PyObject *) NULL'
if self.metaclass:
metaclass = self.metaclass.result()
else:
metaclass = "(PyObject *) NULL"
code.putln(
"%s = __Pyx_Py3MetaclassPrepare(%s, %s, %s, %s, %s, %s, %s); %s" % (
self.result(),
metaclass,
self.bases.result(),
cname,
qualname,
mkw,
py_mod_name,
doc_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class ClassCellInjectorNode(ExprNode):
# Initialize CyFunction.func_classobj
is_temp = True
type = py_object_type
subexprs = []
is_active = False
def analyse_expressions(self, env):
if self.is_active:
env.use_utility_code(
UtilityCode.load_cached("CyFunctionClassCell", "CythonFunction.c"))
return self
def generate_evaluation_code(self, code):
if self.is_active:
self.allocate_temp_result(code)
code.putln(
'%s = PyList_New(0); %s' % (
self.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
def generate_injection_code(self, code, classobj_cname):
if self.is_active:
code.put_error_if_neg(self.pos, '__Pyx_CyFunction_InitClassCell(%s, %s)' % (
self.result(), classobj_cname))
class ClassCellNode(ExprNode):
# Class Cell for noargs super()
subexprs = []
is_temp = True
is_generator = False
type = py_object_type
def analyse_types(self, env):
return self
def generate_result_code(self, code):
if not self.is_generator:
code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % (
self.result(),
Naming.self_cname))
else:
code.putln('%s = %s->classobj;' % (
self.result(), Naming.generator_cname))
code.putln(
'if (!%s) { PyErr_SetString(PyExc_SystemError, '
'"super(): empty __class__ cell"); %s }' % (
self.result(),
code.error_goto(self.pos)))
code.put_incref(self.result(), py_object_type)
class BoundMethodNode(ExprNode):
# Helper class used in the implementation of Python
# class definitions. Constructs a bound method
# object from a function and a self object.
#
# function ExprNode Function object
# self_object ExprNode self object
subexprs = ['function']
def analyse_types(self, env):
self.function = self.function.analyse_types(env)
self.type = py_object_type
self.is_temp = 1
return self
gil_message = "Constructing a bound method"
def generate_result_code(self, code):
code.putln(
"%s = __Pyx_PyMethod_New(%s, %s, (PyObject*)%s->ob_type); %s" % (
self.result(),
self.function.py_result(),
self.self_object.py_result(),
self.self_object.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class UnboundMethodNode(ExprNode):
# Helper class used in the implementation of Python
# class definitions. Constructs an unbound method
# object from a class and a function.
#
# function ExprNode Function object
type = py_object_type
is_temp = 1
subexprs = ['function']
def analyse_types(self, env):
self.function = self.function.analyse_types(env)
return self
def may_be_none(self):
return False
gil_message = "Constructing an unbound method"
def generate_result_code(self, code):
class_cname = code.pyclass_stack[-1].classobj.result()
code.putln(
"%s = __Pyx_PyMethod_New(%s, 0, %s); %s" % (
self.result(),
self.function.py_result(),
class_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class PyCFunctionNode(ExprNode, ModuleNameMixin):
# Helper class used in the implementation of Python
# functions. Constructs a PyCFunction object
# from a PyMethodDef struct.
#
# pymethdef_cname string PyMethodDef structure
# self_object ExprNode or None
# binding bool
# def_node DefNode the Python function node
# module_name EncodedString Name of defining module
# code_object CodeObjectNode the PyCodeObject creator node
subexprs = ['code_object', 'defaults_tuple', 'defaults_kwdict',
'annotations_dict']
self_object = None
code_object = None
binding = False
def_node = None
defaults = None
defaults_struct = None
defaults_pyobjects = 0
defaults_tuple = None
defaults_kwdict = None
annotations_dict = None
type = py_object_type
is_temp = 1
specialized_cpdefs = None
is_specialization = False
@classmethod
def from_defnode(cls, node, binding):
return cls(node.pos,
def_node=node,
pymethdef_cname=node.entry.pymethdef_cname,
binding=binding or node.specialized_cpdefs,
specialized_cpdefs=node.specialized_cpdefs,
code_object=CodeObjectNode(node))
def analyse_types(self, env):
if self.binding:
self.analyse_default_args(env)
return self
def analyse_default_args(self, env):
"""
Handle a function's non-literal default arguments.
"""
nonliteral_objects = []
nonliteral_other = []
default_args = []
default_kwargs = []
annotations = []
for arg in self.def_node.args:
if arg.default:
if not arg.default.is_literal:
arg.is_dynamic = True
if arg.type.is_pyobject:
nonliteral_objects.append(arg)
else:
nonliteral_other.append(arg)
else:
arg.default = DefaultLiteralArgNode(arg.pos, arg.default)
if arg.kw_only:
default_kwargs.append(arg)
else:
default_args.append(arg)
if arg.annotation:
arg.annotation = arg.annotation.analyse_types(env)
if not arg.annotation.type.is_pyobject:
arg.annotation = arg.annotation.coerce_to_pyobject(env)
annotations.append((arg.pos, arg.name, arg.annotation))
for arg in (self.def_node.star_arg, self.def_node.starstar_arg):
if arg and arg.annotation:
arg.annotation = arg.annotation.analyse_types(env)
if not arg.annotation.type.is_pyobject:
arg.annotation = arg.annotation.coerce_to_pyobject(env)
annotations.append((arg.pos, arg.name, arg.annotation))
if self.def_node.return_type_annotation:
annotations.append((self.def_node.return_type_annotation.pos,
StringEncoding.EncodedString("return"),
self.def_node.return_type_annotation))
if nonliteral_objects or nonliteral_other:
module_scope = env.global_scope()
cname = module_scope.next_id(Naming.defaults_struct_prefix)
scope = Symtab.StructOrUnionScope(cname)
self.defaults = []
for arg in nonliteral_objects:
entry = scope.declare_var(arg.name, arg.type, None,
Naming.arg_prefix + arg.name,
allow_pyobject=True)
self.defaults.append((arg, entry))
for arg in nonliteral_other:
entry = scope.declare_var(arg.name, arg.type, None,
Naming.arg_prefix + arg.name,
allow_pyobject=False)
self.defaults.append((arg, entry))
entry = module_scope.declare_struct_or_union(
None, 'struct', scope, 1, None, cname=cname)
self.defaults_struct = scope
self.defaults_pyobjects = len(nonliteral_objects)
for arg, entry in self.defaults:
arg.default_value = '%s->%s' % (
Naming.dynamic_args_cname, entry.cname)
self.def_node.defaults_struct = self.defaults_struct.name
if default_args or default_kwargs:
if self.defaults_struct is None:
if default_args:
defaults_tuple = TupleNode(self.pos, args=[
arg.default for arg in default_args])
self.defaults_tuple = defaults_tuple.analyse_types(env).coerce_to_pyobject(env)
if default_kwargs:
defaults_kwdict = DictNode(self.pos, key_value_pairs=[
DictItemNode(
arg.pos,
key=IdentifierStringNode(arg.pos, value=arg.name),
value=arg.default)
for arg in default_kwargs])
self.defaults_kwdict = defaults_kwdict.analyse_types(env)
else:
if default_args:
defaults_tuple = DefaultsTupleNode(
self.pos, default_args, self.defaults_struct)
else:
defaults_tuple = NoneNode(self.pos)
if default_kwargs:
defaults_kwdict = DefaultsKwDictNode(
self.pos, default_kwargs, self.defaults_struct)
else:
defaults_kwdict = NoneNode(self.pos)
defaults_getter = Nodes.DefNode(
self.pos, args=[], star_arg=None, starstar_arg=None,
body=Nodes.ReturnStatNode(
self.pos, return_type=py_object_type,
value=TupleNode(
self.pos, args=[defaults_tuple, defaults_kwdict])),
decorators=None,
name=StringEncoding.EncodedString("__defaults__"))
defaults_getter.analyse_declarations(env)
defaults_getter = defaults_getter.analyse_expressions(env)
defaults_getter.body = defaults_getter.body.analyse_expressions(
defaults_getter.local_scope)
defaults_getter.py_wrapper_required = False
defaults_getter.pymethdef_required = False
self.def_node.defaults_getter = defaults_getter
if annotations:
annotations_dict = DictNode(self.pos, key_value_pairs=[
DictItemNode(
pos, key=IdentifierStringNode(pos, value=name),
value=value)
for pos, name, value in annotations])
self.annotations_dict = annotations_dict.analyse_types(env)
def may_be_none(self):
return False
gil_message = "Constructing Python function"
def self_result_code(self):
if self.self_object is None:
self_result = "NULL"
else:
self_result = self.self_object.py_result()
return self_result
def generate_result_code(self, code):
if self.binding:
self.generate_cyfunction_code(code)
else:
self.generate_pycfunction_code(code)
def generate_pycfunction_code(self, code):
py_mod_name = self.get_py_mod_name(code)
code.putln(
'%s = PyCFunction_NewEx(&%s, %s, %s); %s' % (
self.result(),
self.pymethdef_cname,
self.self_result_code(),
py_mod_name,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
def generate_cyfunction_code(self, code):
if self.specialized_cpdefs:
def_node = self.specialized_cpdefs[0]
else:
def_node = self.def_node
if self.specialized_cpdefs or self.is_specialization:
code.globalstate.use_utility_code(
UtilityCode.load_cached("FusedFunction", "CythonFunction.c"))
constructor = "__pyx_FusedFunction_NewEx"
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("CythonFunction", "CythonFunction.c"))
constructor = "__Pyx_CyFunction_NewEx"
if self.code_object:
code_object_result = self.code_object.py_result()
else:
code_object_result = 'NULL'
flags = []
if def_node.is_staticmethod:
flags.append('__Pyx_CYFUNCTION_STATICMETHOD')
elif def_node.is_classmethod:
flags.append('__Pyx_CYFUNCTION_CLASSMETHOD')
if def_node.local_scope.parent_scope.is_c_class_scope:
flags.append('__Pyx_CYFUNCTION_CCLASS')
if flags:
flags = ' | '.join(flags)
else:
flags = '0'
code.putln(
'%s = %s(&%s, %s, %s, %s, %s, %s, %s); %s' % (
self.result(),
constructor,
self.pymethdef_cname,
flags,
self.get_py_qualified_name(code),
self.self_result_code(),
self.get_py_mod_name(code),
Naming.moddict_cname,
code_object_result,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
if def_node.requires_classobj:
assert code.pyclass_stack, "pyclass_stack is empty"
class_node = code.pyclass_stack[-1]
code.put_incref(self.py_result(), py_object_type)
code.putln(
'PyList_Append(%s, %s);' % (
class_node.class_cell.result(),
self.result()))
code.put_giveref(self.py_result())
if self.defaults:
code.putln(
'if (!__Pyx_CyFunction_InitDefaults(%s, sizeof(%s), %d)) %s' % (
self.result(), self.defaults_struct.name,
self.defaults_pyobjects, code.error_goto(self.pos)))
defaults = '__Pyx_CyFunction_Defaults(%s, %s)' % (
self.defaults_struct.name, self.result())
for arg, entry in self.defaults:
arg.generate_assignment_code(code, target='%s->%s' % (
defaults, entry.cname))
if self.defaults_tuple:
code.putln('__Pyx_CyFunction_SetDefaultsTuple(%s, %s);' % (
self.result(), self.defaults_tuple.py_result()))
if self.defaults_kwdict:
code.putln('__Pyx_CyFunction_SetDefaultsKwDict(%s, %s);' % (
self.result(), self.defaults_kwdict.py_result()))
if def_node.defaults_getter:
code.putln('__Pyx_CyFunction_SetDefaultsGetter(%s, %s);' % (
self.result(), def_node.defaults_getter.entry.pyfunc_cname))
if self.annotations_dict:
code.putln('__Pyx_CyFunction_SetAnnotationsDict(%s, %s);' % (
self.result(), self.annotations_dict.py_result()))
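# Illustrative behaviour note (plain Python): the defaults handling above
# mirrors how CPython exposes default values on function objects, with
# positional defaults in __defaults__ and keyword-only defaults in
# __kwdefaults__ (cf. defaults_tuple and defaults_kwdict).
def _example_defaults_introspection():
    def f(a, b=1, *, c=2):
        return a + b + c
    return f.__defaults__, f.__kwdefaults__  # ((1,), {'c': 2})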
class InnerFunctionNode(PyCFunctionNode):
# Special PyCFunctionNode that depends on a closure class
#
binding = True
needs_self_code = True
def self_result_code(self):
if self.needs_self_code:
return "((PyObject*)%s)" % Naming.cur_scope_cname
return "NULL"
class CodeObjectNode(ExprNode):
# Create a PyCodeObject for a CyFunction instance.
#
# def_node DefNode the Python function node
# varnames TupleNode a tuple with all local variable names
subexprs = ['varnames']
is_temp = False
result_code = None
def __init__(self, def_node):
ExprNode.__init__(self, def_node.pos, def_node=def_node)
args = list(def_node.args)
# if we have args/kwargs, then the first two in var_entries are those
local_vars = [arg for arg in def_node.local_scope.var_entries if arg.name]
self.varnames = TupleNode(
def_node.pos,
args=[IdentifierStringNode(arg.pos, value=arg.name)
for arg in args + local_vars],
is_temp=0,
is_literal=1)
def may_be_none(self):
return False
def calculate_result_code(self, code=None):
if self.result_code is None:
self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2)
return self.result_code
def generate_result_code(self, code):
if self.result_code is None:
self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2)
code = code.get_cached_constants_writer()
code.mark_pos(self.pos)
func = self.def_node
func_name = code.get_py_string_const(
func.name, identifier=True, is_str=False, unicode_value=func.name)
# FIXME: better way to get the module file path at module init time? Encoding to use?
file_path = StringEncoding.bytes_literal(func.pos[0].get_filenametable_entry().encode('utf8'), 'utf8')
file_path_const = code.get_py_string_const(file_path, identifier=False, is_str=True)
flags = []
if self.def_node.star_arg:
flags.append('CO_VARARGS')
if self.def_node.starstar_arg:
flags.append('CO_VARKEYWORDS')
code.putln("%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, 0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s" % (
self.result_code,
len(func.args) - func.num_kwonly_args, # argcount
func.num_kwonly_args, # kwonlyargcount (Py3 only)
len(self.varnames.args), # nlocals
'|'.join(flags) or '0', # flags
Naming.empty_bytes, # code
Naming.empty_tuple, # consts
Naming.empty_tuple, # names (FIXME)
self.varnames.result(), # varnames
Naming.empty_tuple, # freevars (FIXME)
Naming.empty_tuple, # cellvars (FIXME)
file_path_const, # filename
func_name, # name
self.pos[1], # firstlineno
Naming.empty_bytes, # lnotab
code.error_goto_if_null(self.result_code, self.pos),
))
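# Illustrative check (plain Python): arguments come first in co_varnames,
# followed by the other local variables, which is the 'args + local_vars'
# ordering used when building the varnames tuple above.
def _example_varnames_order():
    def f(a, b):
        c = a + b
        return c
    return f.__code__.co_varnames  # ('a', 'b', 'c')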
class DefaultLiteralArgNode(ExprNode):
# CyFunction's literal argument default value
#
# Evaluate literal only once.
subexprs = []
is_literal = True
is_temp = False
def __init__(self, pos, arg):
super(DefaultLiteralArgNode, self).__init__(pos)
self.arg = arg
self.type = self.arg.type
self.evaluated = False
def analyse_types(self, env):
return self
def generate_result_code(self, code):
pass
def generate_evaluation_code(self, code):
if not self.evaluated:
self.arg.generate_evaluation_code(code)
self.evaluated = True
def result(self):
return self.type.cast_code(self.arg.result())
class DefaultNonLiteralArgNode(ExprNode):
# CyFunction's non-literal argument default value
subexprs = []
def __init__(self, pos, arg, defaults_struct):
super(DefaultNonLiteralArgNode, self).__init__(pos)
self.arg = arg
self.defaults_struct = defaults_struct
def analyse_types(self, env):
self.type = self.arg.type
self.is_temp = False
return self
def generate_result_code(self, code):
pass
def result(self):
return '__Pyx_CyFunction_Defaults(%s, %s)->%s' % (
self.defaults_struct.name, Naming.self_cname,
self.defaults_struct.lookup(self.arg.name).cname)
class DefaultsTupleNode(TupleNode):
# CyFunction's __defaults__ tuple
def __init__(self, pos, defaults, defaults_struct):
args = []
for arg in defaults:
if not arg.default.is_literal:
arg = DefaultNonLiteralArgNode(pos, arg, defaults_struct)
else:
arg = arg.default
args.append(arg)
super(DefaultsTupleNode, self).__init__(pos, args=args)
def analyse_types(self, env, skip_children=False):
return super(DefaultsTupleNode, self).analyse_types(env, skip_children).coerce_to_pyobject(env)
class DefaultsKwDictNode(DictNode):
# CyFunction's __kwdefaults__ dict
def __init__(self, pos, defaults, defaults_struct):
items = []
for arg in defaults:
name = IdentifierStringNode(arg.pos, value=arg.name)
if not arg.default.is_literal:
arg = DefaultNonLiteralArgNode(pos, arg, defaults_struct)
else:
arg = arg.default
items.append(DictItemNode(arg.pos, key=name, value=arg))
super(DefaultsKwDictNode, self).__init__(pos, key_value_pairs=items)
class LambdaNode(InnerFunctionNode):
# Lambda expression node (only used as a function reference)
#
# args [CArgDeclNode] formal arguments
# star_arg PyArgDeclNode or None * argument
# starstar_arg PyArgDeclNode or None ** argument
# lambda_name string a module-globally unique lambda name
# result_expr ExprNode
# def_node DefNode the underlying function 'def' node
child_attrs = ['def_node']
name = StringEncoding.EncodedString('<lambda>')
def analyse_declarations(self, env):
self.lambda_name = self.def_node.lambda_name = env.next_id('lambda')
self.def_node.no_assignment_synthesis = True
self.def_node.pymethdef_required = True
self.def_node.analyse_declarations(env)
self.def_node.is_cyfunction = True
self.pymethdef_cname = self.def_node.entry.pymethdef_cname
env.add_lambda_def(self.def_node)
def analyse_types(self, env):
self.def_node = self.def_node.analyse_expressions(env)
return super(LambdaNode, self).analyse_types(env)
def generate_result_code(self, code):
self.def_node.generate_execution_code(code)
super(LambdaNode, self).generate_result_code(code)
class GeneratorExpressionNode(LambdaNode):
# A generator expression, e.g. (i for i in range(10))
#
# Result is a generator.
#
# loop ForStatNode the for-loop, containing a YieldExprNode
# def_node DefNode the underlying generator 'def' node
name = StringEncoding.EncodedString('genexpr')
binding = False
def analyse_declarations(self, env):
self.genexpr_name = env.next_id('genexpr')
super(GeneratorExpressionNode, self).analyse_declarations(env)
# No pymethdef required
self.def_node.pymethdef_required = False
self.def_node.py_wrapper_required = False
self.def_node.is_cyfunction = False
# Force genexpr signature
self.def_node.entry.signature = TypeSlots.pyfunction_noargs
def generate_result_code(self, code):
code.putln(
'%s = %s(%s); %s' % (
self.result(),
self.def_node.entry.pyfunc_cname,
self.self_result_code(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class YieldExprNode(ExprNode):
# Yield expression node
#
# arg ExprNode the value to return from the generator
# label_num integer yield label number
# is_yield_from boolean is a YieldFromExprNode delegating to another generator
subexprs = ['arg']
type = py_object_type
label_num = 0
is_yield_from = False
is_await = False
expr_keyword = 'yield'
def analyse_types(self, env):
if not self.label_num:
error(self.pos, "'%s' not supported here" % self.expr_keyword)
self.is_temp = 1
if self.arg is not None:
self.arg = self.arg.analyse_types(env)
if not self.arg.type.is_pyobject:
self.coerce_yield_argument(env)
return self
def coerce_yield_argument(self, env):
self.arg = self.arg.coerce_to_pyobject(env)
def generate_evaluation_code(self, code):
if self.arg:
self.arg.generate_evaluation_code(code)
self.arg.make_owned_reference(code)
code.putln(
"%s = %s;" % (
Naming.retval_cname,
self.arg.result_as(py_object_type)))
self.arg.generate_post_assignment_code(code)
self.arg.free_temps(code)
else:
code.put_init_to_py_none(Naming.retval_cname, py_object_type)
self.generate_yield_code(code)
def generate_yield_code(self, code):
"""
Generate the code to return the argument in 'Naming.retval_cname'
and to continue at the yield label.
"""
label_num, label_name = code.new_yield_label()
code.use_label(label_name)
saved = []
code.funcstate.closure_temps.reset()
for cname, type, manage_ref in code.funcstate.temps_in_use():
save_cname = code.funcstate.closure_temps.allocate_temp(type)
saved.append((cname, save_cname, type))
if type.is_pyobject:
code.put_xgiveref(cname)
code.putln('%s->%s = %s;' % (Naming.cur_scope_cname, save_cname, cname))
code.put_xgiveref(Naming.retval_cname)
code.put_finish_refcount_context()
code.putln("/* return from generator, yielding value */")
code.putln("%s->resume_label = %d;" % (
Naming.generator_cname, label_num))
code.putln("return %s;" % Naming.retval_cname)
code.put_label(label_name)
for cname, save_cname, type in saved:
code.putln('%s = %s->%s;' % (cname, Naming.cur_scope_cname, save_cname))
if type.is_pyobject:
code.putln('%s->%s = 0;' % (Naming.cur_scope_cname, save_cname))
code.put_xgotref(cname)
code.putln(code.error_goto_if_null(Naming.sent_value_cname, self.pos))
if self.result_is_used:
self.allocate_temp_result(code)
code.put('%s = %s; ' % (self.result(), Naming.sent_value_cname))
code.put_incref(self.result(), py_object_type)
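# Illustrative behaviour note (plain Python): the retval/sent_value dance
# above implements ordinary generator semantics, where the yielded value
# goes out to the caller and the sent value becomes the result of the
# yield expression on resumption:
def _example_yield_semantics():
    def gen():
        sent = yield 1
        yield sent
    g = gen()
    assert next(g) == 1      # first yield hands 1 to the caller
    assert g.send(42) == 42  # 42 becomes the value of 'yield 1'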
class YieldFromExprNode(YieldExprNode):
# "yield from GEN" expression
is_yield_from = True
expr_keyword = 'yield from'
def coerce_yield_argument(self, env):
if not self.arg.type.is_string:
# FIXME: support C arrays and C++ iterators?
error(self.pos, "yielding from non-Python object not supported")
self.arg = self.arg.coerce_to_pyobject(env)
def yield_from_func(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("GeneratorYieldFrom", "Coroutine.c"))
return "__Pyx_Generator_Yield_From"
def generate_evaluation_code(self, code, source_cname=None, decref_source=False):
if source_cname is None:
self.arg.generate_evaluation_code(code)
code.putln("%s = %s(%s, %s);" % (
Naming.retval_cname,
self.yield_from_func(code),
Naming.generator_cname,
self.arg.py_result() if source_cname is None else source_cname))
if source_cname is None:
self.arg.generate_disposal_code(code)
self.arg.free_temps(code)
elif decref_source:
code.put_decref_clear(source_cname, py_object_type)
code.put_xgotref(Naming.retval_cname)
code.putln("if (likely(%s)) {" % Naming.retval_cname)
self.generate_yield_code(code)
code.putln("} else {")
# either error or sub-generator has normally terminated: return value => node result
if self.result_is_used:
self.fetch_iteration_result(code)
else:
self.handle_iteration_exception(code)
code.putln("}")
def fetch_iteration_result(self, code):
# YieldExprNode has allocated the result temp for us
code.putln("%s = NULL;" % self.result())
code.put_error_if_neg(self.pos, "__Pyx_PyGen_FetchStopIterationValue(&%s)" % self.result())
code.put_gotref(self.result())
def handle_iteration_exception(self, code):
code.putln("PyObject* exc_type = PyErr_Occurred();")
code.putln("if (exc_type) {")
code.putln("if (likely(exc_type == PyExc_StopIteration ||"
" PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();")
code.putln("else %s" % code.error_goto(self.pos))
code.putln("}")
class AwaitExprNode(YieldFromExprNode):
# 'await' expression node
#
# arg ExprNode the Awaitable value to await
# label_num integer yield label number
is_await = True
expr_keyword = 'await'
def coerce_yield_argument(self, env):
if self.arg is not None:
# FIXME: use same check as in YieldFromExprNode.coerce_yield_argument() ?
self.arg = self.arg.coerce_to_pyobject(env)
def yield_from_func(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("CoroutineYieldFrom", "Coroutine.c"))
return "__Pyx_Coroutine_Yield_From"
class AwaitIterNextExprNode(AwaitExprNode):
# 'await' expression node as part of 'async for' iteration
#
# Breaks out of loop on StopAsyncIteration exception.
def fetch_iteration_result(self, code):
assert code.break_label, "AwaitIterNextExprNode outside of 'async for' loop"
code.globalstate.use_utility_code(UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
code.putln("PyObject* exc_type = PyErr_Occurred();")
code.putln("if (exc_type && likely(exc_type == __Pyx_PyExc_StopAsyncIteration ||"
" PyErr_GivenExceptionMatches(exc_type, __Pyx_PyExc_StopAsyncIteration))) {")
code.putln("PyErr_Clear();")
code.putln("break;")
code.putln("}")
super(AwaitIterNextExprNode, self).fetch_iteration_result(code)
class GlobalsExprNode(AtomicExprNode):
type = dict_type
is_temp = 1
def analyse_types(self, env):
env.use_utility_code(Builtin.globals_utility_code)
return self
gil_message = "Constructing globals dict"
def may_be_none(self):
return False
def generate_result_code(self, code):
code.putln('%s = __Pyx_Globals(); %s' % (
self.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
class LocalsDictItemNode(DictItemNode):
def analyse_types(self, env):
self.key = self.key.analyse_types(env)
self.value = self.value.analyse_types(env)
self.key = self.key.coerce_to_pyobject(env)
if self.value.type.can_coerce_to_pyobject(env):
self.value = self.value.coerce_to_pyobject(env)
else:
self.value = None
return self
class FuncLocalsExprNode(DictNode):
def __init__(self, pos, env):
local_vars = sorted([
entry.name for entry in env.entries.values() if entry.name])
items = [LocalsDictItemNode(
pos, key=IdentifierStringNode(pos, value=var),
value=NameNode(pos, name=var, allow_null=True))
for var in local_vars]
DictNode.__init__(self, pos, key_value_pairs=items,
exclude_null_values=True)
def analyse_types(self, env):
node = super(FuncLocalsExprNode, self).analyse_types(env)
node.key_value_pairs = [ i for i in node.key_value_pairs
if i.value is not None ]
return node
class PyClassLocalsExprNode(AtomicExprNode):
def __init__(self, pos, pyclass_dict):
AtomicExprNode.__init__(self, pos)
self.pyclass_dict = pyclass_dict
def analyse_types(self, env):
self.type = self.pyclass_dict.type
self.is_temp = False
return self
def may_be_none(self):
return False
def result(self):
return self.pyclass_dict.result()
def generate_result_code(self, code):
pass
def LocalsExprNode(pos, scope_node, env):
if env.is_module_scope:
return GlobalsExprNode(pos)
if env.is_py_class_scope:
return PyClassLocalsExprNode(pos, scope_node.dict)
return FuncLocalsExprNode(pos, env)
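# Illustrative note (plain Python): at module scope, locals() and globals()
# are the same mapping, which is why LocalsExprNode short-circuits to
# GlobalsExprNode there.
def _example_module_scope_locals():
    namespace = {}
    exec("is_same = locals() is globals()", namespace)
    return namespace["is_same"]  # True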
#-------------------------------------------------------------------
#
# Unary operator nodes
#
#-------------------------------------------------------------------
compile_time_unary_operators = {
'not': operator.not_,
'~': operator.inv,
'-': operator.neg,
'+': operator.pos,
}
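# Illustrative sketch: how the table above folds a unary operation at
# compile time, e.g. turning the literal expression '-3' into the
# constant -3.
def _example_fold_unary(op, value):
    func = compile_time_unary_operators[op]
    return func(value)  # _example_fold_unary('-', 3) == -3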
class UnopNode(ExprNode):
# operator string
# operand ExprNode
#
# Processing during analyse_expressions phase:
#
# analyse_c_operation
# Called when the operand is not a pyobject.
# - Check operand type and coerce if needed.
# - Determine result type and result code fragment.
# - Allocate temporary for result if needed.
subexprs = ['operand']
infix = True
def calculate_constant_result(self):
func = compile_time_unary_operators[self.operator]
self.constant_result = func(self.operand.constant_result)
def compile_time_value(self, denv):
func = compile_time_unary_operators.get(self.operator)
if not func:
error(self.pos,
"Unary '%s' not supported in compile-time expression"
% self.operator)
operand = self.operand.compile_time_value(denv)
try:
return func(operand)
except Exception as e:
self.compile_time_value_error(e)
def infer_type(self, env):
operand_type = self.operand.infer_type(env)
if operand_type.is_cpp_class or operand_type.is_ptr:
cpp_type = operand_type.find_cpp_operation_type(self.operator)
if cpp_type is not None:
return cpp_type
return self.infer_unop_type(env, operand_type)
def infer_unop_type(self, env, operand_type):
if operand_type.is_pyobject:
return py_object_type
else:
return operand_type
def may_be_none(self):
if self.operand.type and self.operand.type.is_builtin_type:
if self.operand.type is not type_type:
return False
return ExprNode.may_be_none(self)
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
if self.is_py_operation():
self.coerce_operand_to_pyobject(env)
self.type = py_object_type
self.is_temp = 1
elif self.is_cpp_operation():
self.analyse_cpp_operation(env)
else:
self.analyse_c_operation(env)
return self
def check_const(self):
return self.operand.check_const()
def is_py_operation(self):
return self.operand.type.is_pyobject or self.operand.type.is_ctuple
def nogil_check(self, env):
if self.is_py_operation():
self.gil_error()
def is_cpp_operation(self):
type = self.operand.type
return type.is_cpp_class
def coerce_operand_to_pyobject(self, env):
self.operand = self.operand.coerce_to_pyobject(env)
def generate_result_code(self, code):
if self.operand.type.is_pyobject:
self.generate_py_operation_code(code)
elif self.is_temp:
if self.is_cpp_operation() and self.exception_check == '+':
translate_cpp_exception(code, self.pos,
"%s = %s %s;" % (self.result(), self.operator, self.operand.result()),
self.exception_value, self.in_nogil_context)
else:
code.putln("%s = %s %s;" % (self.result(), self.operator, self.operand.result()))
def generate_py_operation_code(self, code):
function = self.py_operation_function(code)
code.putln(
"%s = %s(%s); %s" % (
self.result(),
function,
self.operand.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
def type_error(self):
if not self.operand.type.is_error:
error(self.pos, "Invalid operand type for '%s' (%s)" %
(self.operator, self.operand.type))
self.type = PyrexTypes.error_type
def analyse_cpp_operation(self, env, overload_check=True):
entry = env.lookup_operator(self.operator, [self.operand])
if overload_check and not entry:
self.type_error()
return
if entry:
self.exception_check = entry.type.exception_check
self.exception_value = entry.type.exception_value
if self.exception_check == '+':
self.is_temp = True
if self.exception_value is None:
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
else:
self.exception_check = ''
self.exception_value = ''
cpp_type = self.operand.type.find_cpp_operation_type(self.operator)
if overload_check and cpp_type is None:
error(self.pos, "'%s' operator not defined for %s" % (
self.operator, type))
self.type_error()
return
self.type = cpp_type
class NotNode(UnopNode):
# 'not' operator
#
# operand ExprNode
operator = '!'
type = PyrexTypes.c_bint_type
def calculate_constant_result(self):
self.constant_result = not self.operand.constant_result
def compile_time_value(self, denv):
operand = self.operand.compile_time_value(denv)
try:
return not operand
except Exception as e:
self.compile_time_value_error(e)
def infer_unop_type(self, env, operand_type):
return PyrexTypes.c_bint_type
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
operand_type = self.operand.type
if operand_type.is_cpp_class:
self.analyse_cpp_operation(env)
else:
self.operand = self.operand.coerce_to_boolean(env)
return self
def calculate_result_code(self):
return "(!%s)" % self.operand.result()
class UnaryPlusNode(UnopNode):
# unary '+' operator
operator = '+'
def analyse_c_operation(self, env):
self.type = PyrexTypes.widest_numeric_type(
self.operand.type, PyrexTypes.c_int_type)
def py_operation_function(self, code):
return "PyNumber_Positive"
def calculate_result_code(self):
if self.is_cpp_operation():
return "(+%s)" % self.operand.result()
else:
return self.operand.result()
class UnaryMinusNode(UnopNode):
# unary '-' operator
operator = '-'
def analyse_c_operation(self, env):
if self.operand.type.is_numeric:
self.type = PyrexTypes.widest_numeric_type(
self.operand.type, PyrexTypes.c_int_type)
elif self.operand.type.is_enum:
self.type = PyrexTypes.c_int_type
else:
self.type_error()
if self.type.is_complex:
self.infix = False
def py_operation_function(self, code):
return "PyNumber_Negative"
def calculate_result_code(self):
if self.infix:
return "(-%s)" % self.operand.result()
else:
return "%s(%s)" % (self.operand.type.unary_op('-'), self.operand.result())
def get_constant_c_result_code(self):
value = self.operand.get_constant_c_result_code()
if value:
return "(-%s)" % value
class TildeNode(UnopNode):
# unary '~' operator
def analyse_c_operation(self, env):
if self.operand.type.is_int:
self.type = PyrexTypes.widest_numeric_type(
self.operand.type, PyrexTypes.c_int_type)
elif self.operand.type.is_enum:
self.type = PyrexTypes.c_int_type
else:
self.type_error()
def py_operation_function(self, code):
return "PyNumber_Invert"
def calculate_result_code(self):
return "(~%s)" % self.operand.result()
class CUnopNode(UnopNode):
def is_py_operation(self):
return False
class DereferenceNode(CUnopNode):
# unary * operator
operator = '*'
def infer_unop_type(self, env, operand_type):
if operand_type.is_ptr:
return operand_type.base_type
else:
return PyrexTypes.error_type
def analyse_c_operation(self, env):
if self.operand.type.is_ptr:
self.type = self.operand.type.base_type
else:
self.type_error()
def calculate_result_code(self):
return "(*%s)" % self.operand.result()
class DecrementIncrementNode(CUnopNode):
# unary ++/-- operator
def analyse_c_operation(self, env):
if self.operand.type.is_numeric:
self.type = PyrexTypes.widest_numeric_type(
self.operand.type, PyrexTypes.c_int_type)
elif self.operand.type.is_ptr:
self.type = self.operand.type
else:
self.type_error()
def calculate_result_code(self):
if self.is_prefix:
return "(%s%s)" % (self.operator, self.operand.result())
else:
return "(%s%s)" % (self.operand.result(), self.operator)
def inc_dec_constructor(is_prefix, operator):
return lambda pos, **kwds: DecrementIncrementNode(pos, is_prefix=is_prefix, operator=operator, **kwds)
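# Sketch of intended use (hypothetical call site): the parser can bind the
# fixedness and operator once and reuse the constructor, e.g.
#   preinc = inc_dec_constructor(True, '++')
#   node = preinc(pos, operand=some_operand)    # renders as (++x)
#   postdec = inc_dec_constructor(False, '--')  # renders as (x--)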
class AmpersandNode(CUnopNode):
# The C address-of operator.
#
# operand ExprNode
operator = '&'
def infer_unop_type(self, env, operand_type):
return PyrexTypes.c_ptr_type(operand_type)
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
argtype = self.operand.type
if argtype.is_cpp_class:
self.analyse_cpp_operation(env, overload_check=False)
if not (argtype.is_cfunction or argtype.is_reference or self.operand.is_addressable()):
if argtype.is_memoryviewslice:
self.error("Cannot take address of memoryview slice")
else:
self.error("Taking address of non-lvalue (type %s)" % argtype)
return self
if argtype.is_pyobject:
self.error("Cannot take address of Python variable")
return self
if not argtype.is_cpp_class or not self.type:
self.type = PyrexTypes.c_ptr_type(argtype)
return self
def check_const(self):
return self.operand.check_const_addr()
def error(self, mess):
error(self.pos, mess)
self.type = PyrexTypes.error_type
self.result_code = "<error>"
def calculate_result_code(self):
return "(&%s)" % self.operand.result()
def generate_result_code(self, code):
if (self.operand.type.is_cpp_class and self.exception_check == '+'):
translate_cpp_exception(code, self.pos,
"%s = %s %s;" % (self.result(), self.operator, self.operand.result()),
self.exception_value, self.in_nogil_context)
unop_node_classes = {
"+": UnaryPlusNode,
"-": UnaryMinusNode,
"~": TildeNode,
}
def unop_node(pos, operator, operand):
# Construct unop node of appropriate class for
# given operator.
if isinstance(operand, IntNode) and operator == '-':
return IntNode(pos=operand.pos, value=str(-Utils.str_to_number(operand.value)),
longness=operand.longness, unsigned=operand.unsigned)
elif isinstance(operand, UnopNode) and operand.operator == operator and operator in '+-':
warning(pos, "Python has no increment/decrement operator: %s%sx == %s(%sx) == x" % ((operator,)*4), 5)
return unop_node_classes[operator](pos,
operator = operator,
operand = operand)
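# Examples (illustrative):
#   unop_node(pos, '-', IntNode(value='5')) folds straight into an
#   IntNode with value '-5' instead of building a UnaryMinusNode.
#   unop_node(pos, '-', <node for -x>) emits the "Python has no
#   increment/decrement operator" warning, since --x parses as -(-x).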
class TypecastNode(ExprNode):
# C type cast
#
# operand ExprNode
# base_type CBaseTypeNode
# declarator CDeclaratorNode
# typecheck boolean
#
# If used from a transform, one can, if desired, specify the attribute
# "type" directly and leave base_type and declarator as None
subexprs = ['operand']
base_type = declarator = type = None
def type_dependencies(self, env):
return ()
def infer_type(self, env):
if self.type is None:
base_type = self.base_type.analyse(env)
_, self.type = self.declarator.analyse(base_type, env)
return self.type
def analyse_types(self, env):
if self.type is None:
base_type = self.base_type.analyse(env)
_, self.type = self.declarator.analyse(base_type, env)
if self.operand.has_constant_result():
# Must be done after self.type is resolved.
self.calculate_constant_result()
if self.type.is_cfunction:
error(self.pos,
"Cannot cast to a function type")
self.type = PyrexTypes.error_type
self.operand = self.operand.analyse_types(env)
if self.type is PyrexTypes.c_bint_type:
# short circuit this to a coercion
return self.operand.coerce_to_boolean(env)
to_py = self.type.is_pyobject
from_py = self.operand.type.is_pyobject
if from_py and not to_py and self.operand.is_ephemeral():
if not self.type.is_numeric and not self.type.is_cpp_class:
error(self.pos, "Casting temporary Python object to non-numeric non-Python type")
if to_py and not from_py:
if self.type is bytes_type and self.operand.type.is_int:
return CoerceIntToBytesNode(self.operand, env)
elif self.operand.type.can_coerce_to_pyobject(env):
self.result_ctype = py_object_type
base_type = self.base_type.analyse(env)
self.operand = self.operand.coerce_to(base_type, env)
else:
if self.operand.type.is_ptr:
if not (self.operand.type.base_type.is_void or self.operand.type.base_type.is_struct):
error(self.pos, "Python objects cannot be cast from pointers of primitive types")
else:
# Should this be an error?
warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.operand.type, self.type))
self.operand = self.operand.coerce_to_simple(env)
elif from_py and not to_py:
if self.type.create_from_py_utility_code(env):
self.operand = self.operand.coerce_to(self.type, env)
elif self.type.is_ptr:
if not (self.type.base_type.is_void or self.type.base_type.is_struct):
error(self.pos, "Python objects cannot be cast to pointers of primitive types")
else:
warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.type, self.operand.type))
elif from_py and to_py:
if self.typecheck:
self.operand = PyTypeTestNode(self.operand, self.type, env, notnone=True)
elif isinstance(self.operand, SliceIndexNode):
# This cast can influence the created type of string slices.
self.operand = self.operand.coerce_to(self.type, env)
elif self.type.is_complex and self.operand.type.is_complex:
self.operand = self.operand.coerce_to_simple(env)
elif self.operand.type.is_fused:
self.operand = self.operand.coerce_to(self.type, env)
#self.type = self.operand.type
return self
def is_simple(self):
# either temp or a C cast => no side effects other than the operand's
return self.operand.is_simple()
def is_ephemeral(self):
# either temp or a C cast => no side effects other than the operand's
return self.operand.is_ephemeral()
def nonlocally_immutable(self):
return self.is_temp or self.operand.nonlocally_immutable()
def nogil_check(self, env):
if self.type and self.type.is_pyobject and self.is_temp:
self.gil_error()
def check_const(self):
return self.operand.check_const()
def calculate_constant_result(self):
self.constant_result = self.calculate_result_code(self.operand.constant_result)
def calculate_result_code(self, operand_result = None):
if operand_result is None:
operand_result = self.operand.result()
if self.type.is_complex:
operand_result = self.operand.result()
if self.operand.type.is_complex:
real_part = self.type.real_type.cast_code("__Pyx_CREAL(%s)" % operand_result)
imag_part = self.type.real_type.cast_code("__Pyx_CIMAG(%s)" % operand_result)
else:
real_part = self.type.real_type.cast_code(operand_result)
imag_part = "0"
return "%s(%s, %s)" % (
self.type.from_parts,
real_part,
imag_part)
else:
return self.type.cast_code(operand_result)
def get_constant_c_result_code(self):
operand_result = self.operand.get_constant_c_result_code()
if operand_result:
return self.type.cast_code(operand_result)
def result_as(self, type):
if self.type.is_pyobject and not self.is_temp:
# Optimise away some unnecessary casting
return self.operand.result_as(type)
else:
return ExprNode.result_as(self, type)
def generate_result_code(self, code):
if self.is_temp:
code.putln(
"%s = (PyObject *)%s;" % (
self.result(),
self.operand.result()))
code.put_incref(self.result(), self.ctype())
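# Example (approximate generated C): a cast to a Python object type that
# needs a temp is emitted as
#   __pyx_t_1 = (PyObject *)__pyx_v_p;
# followed by an incref of the result, while a plain C cast such as
# `<long>x` is rendered inline by calculate_result_code() with no temp.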
ERR_START = "Start may not be given"
ERR_NOT_STOP = "Stop must be provided to indicate shape"
ERR_STEPS = ("Strides may only be given to indicate contiguity. "
"Consider slicing it after conversion")
ERR_NOT_POINTER = "Can only create cython.array from pointer or array"
ERR_BASE_TYPE = "Pointer base type does not match cython.array base type"
class CythonArrayNode(ExprNode):
"""
Used when a pointer of base_type is cast to a memoryviewslice with that
base type. i.e.
<int[:M:1, :N]> p
creates a fortran-contiguous cython.array.
We leave the type set to object so coercions to object are more efficient
and less work. Acquiring a memoryviewslice from this will be just as
efficient. ExprNode.coerce_to() will do the additional typecheck on
self.compile_time_type.
This also handles <int[:, :]> my_c_array
operand ExprNode the thing we're casting
base_type_node MemoryViewSliceTypeNode the cast expression node
"""
subexprs = ['operand', 'shapes']
shapes = None
is_temp = True
mode = "c"
array_dtype = None
shape_type = PyrexTypes.c_py_ssize_t_type
def analyse_types(self, env):
from . import MemoryView
self.operand = self.operand.analyse_types(env)
if self.array_dtype:
array_dtype = self.array_dtype
else:
array_dtype = self.base_type_node.base_type_node.analyse(env)
axes = self.base_type_node.axes
self.type = error_type
self.shapes = []
ndim = len(axes)
# Base type of the pointer or C array we are converting
base_type = self.operand.type
if not self.operand.type.is_ptr and not self.operand.type.is_array:
error(self.operand.pos, ERR_NOT_POINTER)
return self
# Dimension sizes of C array
array_dimension_sizes = []
if base_type.is_array:
while base_type.is_array:
array_dimension_sizes.append(base_type.size)
base_type = base_type.base_type
elif base_type.is_ptr:
base_type = base_type.base_type
else:
error(self.pos, "unexpected base type %s found" % base_type)
return self
if not (base_type.same_as(array_dtype) or base_type.is_void):
error(self.operand.pos, ERR_BASE_TYPE)
return self
elif self.operand.type.is_array and len(array_dimension_sizes) != ndim:
error(self.operand.pos,
"Expected %d dimensions, array has %d dimensions" %
(ndim, len(array_dimension_sizes)))
return self
# Verify the start, stop and step values
# In case of a C array, use the size of C array in each dimension to
# get an automatic cast
for axis_no, axis in enumerate(axes):
if not axis.start.is_none:
error(axis.start.pos, ERR_START)
return self
if axis.stop.is_none:
if array_dimension_sizes:
dimsize = array_dimension_sizes[axis_no]
axis.stop = IntNode(self.pos, value=str(dimsize),
constant_result=dimsize,
type=PyrexTypes.c_int_type)
else:
error(axis.pos, ERR_NOT_STOP)
return self
axis.stop = axis.stop.analyse_types(env)
shape = axis.stop.coerce_to(self.shape_type, env)
if not shape.is_literal:
shape.coerce_to_temp(env)
self.shapes.append(shape)
first_or_last = axis_no in (0, ndim - 1)
if not axis.step.is_none and first_or_last:
# '1' in the first or last dimension denotes F or C contiguity
axis.step = axis.step.analyse_types(env)
if (not axis.step.type.is_int and axis.step.is_literal and not
axis.step.type.is_error):
error(axis.step.pos, "Expected an integer literal")
return self
if axis.step.compile_time_value(env) != 1:
error(axis.step.pos, ERR_STEPS)
return self
if axis_no == 0:
self.mode = "fortran"
elif not axis.step.is_none and not first_or_last:
# step provided in some other dimension
error(axis.step.pos, ERR_STEPS)
return self
if not self.operand.is_name:
self.operand = self.operand.coerce_to_temp(env)
axes = [('direct', 'follow')] * len(axes)
if self.mode == "fortran":
axes[0] = ('direct', 'contig')
else:
axes[-1] = ('direct', 'contig')
self.coercion_type = PyrexTypes.MemoryViewSliceType(array_dtype, axes)
self.coercion_type.validate_memslice_dtype(self.pos)
self.type = self.get_cython_array_type(env)
MemoryView.use_cython_array_utility_code(env)
env.use_utility_code(MemoryView.typeinfo_to_format_code)
return self
def allocate_temp_result(self, code):
if self.temp_code:
raise RuntimeError("temp allocated mulitple times")
self.temp_code = code.funcstate.allocate_temp(self.type, True)
def infer_type(self, env):
return self.get_cython_array_type(env)
def get_cython_array_type(self, env):
return env.global_scope().context.cython_scope.viewscope.lookup("array").type
def generate_result_code(self, code):
from . import Buffer
shapes = [self.shape_type.cast_code(shape.result())
for shape in self.shapes]
dtype = self.coercion_type.dtype
shapes_temp = code.funcstate.allocate_temp(py_object_type, True)
format_temp = code.funcstate.allocate_temp(py_object_type, True)
itemsize = "sizeof(%s)" % dtype.empty_declaration_code()
type_info = Buffer.get_type_information_cname(code, dtype)
if self.operand.type.is_ptr:
code.putln("if (!%s) {" % self.operand.result())
code.putln('PyErr_SetString(PyExc_ValueError,'
'"Cannot create cython.array from NULL pointer");')
code.putln(code.error_goto(self.operand.pos))
code.putln("}")
code.putln("%s = __pyx_format_from_typeinfo(&%s);" %
(format_temp, type_info))
buildvalue_fmt = " __PYX_BUILD_PY_SSIZE_T " * len(shapes)
code.putln('%s = Py_BuildValue((char*) "(" %s ")", %s);' % (
shapes_temp, buildvalue_fmt, ", ".join(shapes)))
err = "!%s || !%s || !PyBytes_AsString(%s)" % (format_temp,
shapes_temp,
format_temp)
code.putln(code.error_goto_if(err, self.pos))
code.put_gotref(format_temp)
code.put_gotref(shapes_temp)
tup = (self.result(), shapes_temp, itemsize, format_temp,
self.mode, self.operand.result())
code.putln('%s = __pyx_array_new('
'%s, %s, PyBytes_AS_STRING(%s), '
'(char *) "%s", (char *) %s);' % tup)
code.putln(code.error_goto_if_null(self.result(), self.pos))
code.put_gotref(self.result())
def dispose(temp):
code.put_decref_clear(temp, py_object_type)
code.funcstate.release_temp(temp)
dispose(shapes_temp)
dispose(format_temp)
@classmethod
def from_carray(cls, src_node, env):
"""
Given a C array type, return a CythonArrayNode
"""
pos = src_node.pos
base_type = src_node.type
none_node = NoneNode(pos)
axes = []
while base_type.is_array:
axes.append(SliceNode(pos, start=none_node, stop=none_node,
step=none_node))
base_type = base_type.base_type
axes[-1].step = IntNode(pos, value="1", is_c_literal=True)
memslicenode = Nodes.MemoryViewSliceTypeNode(pos, axes=axes,
base_type_node=base_type)
result = CythonArrayNode(pos, base_type_node=memslicenode,
operand=src_node, array_dtype=base_type)
result = result.analyse_types(env)
return result
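# Example (illustrative): given `cdef int arr[3][4]`, from_carray() builds
# one slice per array dimension and marks the innermost step as 1, as if
# the user had written `<int[:, ::1]>arr`; the missing stop values (3 and
# 4) are then filled in from the declared sizes during analyse_types().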
class SizeofNode(ExprNode):
# Abstract base class for sizeof(x) expression nodes.
type = PyrexTypes.c_size_t_type
def check_const(self):
return True
def generate_result_code(self, code):
pass
class SizeofTypeNode(SizeofNode):
# C sizeof function applied to a type
#
# base_type CBaseTypeNode
# declarator CDeclaratorNode
subexprs = []
arg_type = None
def analyse_types(self, env):
# We may have incorrectly interpreted a dotted name as a type rather than
# an attribute; this could be better handled by more uniformly treating
# types as runtime-available objects. (This branch is currently disabled.)
if 0 and self.base_type.module_path:
path = self.base_type.module_path
obj = env.lookup(path[0])
if obj.as_module is None:
operand = NameNode(pos=self.pos, name=path[0])
for attr in path[1:]:
operand = AttributeNode(pos=self.pos, obj=operand, attribute=attr)
operand = AttributeNode(pos=self.pos, obj=operand, attribute=self.base_type.name)
self.operand = operand
self.__class__ = SizeofVarNode
node = self.analyse_types(env)
return node
if self.arg_type is None:
base_type = self.base_type.analyse(env)
_, arg_type = self.declarator.analyse(base_type, env)
self.arg_type = arg_type
self.check_type()
return self
def check_type(self):
arg_type = self.arg_type
if arg_type.is_pyobject and not arg_type.is_extension_type:
error(self.pos, "Cannot take sizeof Python object")
elif arg_type.is_void:
error(self.pos, "Cannot take sizeof void")
elif not arg_type.is_complete():
error(self.pos, "Cannot take sizeof incomplete type '%s'" % arg_type)
def calculate_result_code(self):
if self.arg_type.is_extension_type:
# the size of the pointer is boring
# we want the size of the actual struct
arg_code = self.arg_type.declaration_code("", deref=1)
else:
arg_code = self.arg_type.empty_declaration_code()
return "(sizeof(%s))" % arg_code
class SizeofVarNode(SizeofNode):
# C sizeof function applied to a variable
#
# operand ExprNode
subexprs = ['operand']
def analyse_types(self, env):
# We may actually be looking at a type rather than a variable...
# If we are, traditional analysis would fail...
operand_as_type = self.operand.analyse_as_type(env)
if operand_as_type:
self.arg_type = operand_as_type
if self.arg_type.is_fused:
self.arg_type = self.arg_type.specialize(env.fused_to_specific)
self.__class__ = SizeofTypeNode
self.check_type()
else:
self.operand = self.operand.analyse_types(env)
return self
def calculate_result_code(self):
return "(sizeof(%s))" % self.operand.result()
def generate_result_code(self, code):
pass
class TypeofNode(ExprNode):
# Compile-time type of an expression, as a string.
#
# operand ExprNode
# literal StringNode # internal
literal = None
type = py_object_type
subexprs = ['literal'] # 'operand' will be ignored after type analysis!
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
value = StringEncoding.EncodedString(str(self.operand.type))  # self.operand.type.typeof_name()
literal = StringNode(self.pos, value=value)
literal = literal.analyse_types(env)
self.literal = literal.coerce_to_pyobject(env)
return self
def may_be_none(self):
return False
def generate_evaluation_code(self, code):
self.literal.generate_evaluation_code(code)
def calculate_result_code(self):
return self.literal.calculate_result_code()
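# Example (illustrative): with `cdef double x`, `cython.typeof(x)` simply
# evaluates the pre-computed string literal "double"; the operand is
# analysed for its type but never evaluated at runtime.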
#-------------------------------------------------------------------
#
# Binary operator nodes
#
#-------------------------------------------------------------------
try:
matmul_operator = operator.matmul
except AttributeError:
def matmul_operator(a, b):
try:
func = a.__matmul__
except AttributeError:
func = b.__rmatmul__
return func(a, b)
compile_time_binary_operators = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
'is': operator.is_,
'is_not': operator.is_not,
'+': operator.add,
'&': operator.and_,
'/': operator.truediv,
'//': operator.floordiv,
'<<': operator.lshift,
'%': operator.mod,
'*': operator.mul,
'|': operator.or_,
'**': operator.pow,
'>>': operator.rshift,
'-': operator.sub,
'^': operator.xor,
'@': matmul_operator,
'in': lambda x, seq: x in seq,
'not_in': lambda x, seq: x not in seq,
}
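# Example (illustrative): with `DEF N = 3`, the compile-time expression
# `N * 4 + 1` is folded via operator.mul and operator.add to 13 before
# any C code is generated.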
def get_compile_time_binop(node):
func = compile_time_binary_operators.get(node.operator)
if not func:
error(node.pos,
"Binary '%s' not supported in compile-time expression"
% node.operator)
return func
class BinopNode(ExprNode):
# operator string
# operand1 ExprNode
# operand2 ExprNode
#
# Processing during analyse_expressions phase:
#
# analyse_c_operation
# Called when neither operand is a pyobject.
# - Check operand types and coerce if needed.
# - Determine result type and result code fragment.
# - Allocate temporary for result if needed.
subexprs = ['operand1', 'operand2']
inplace = False
def calculate_constant_result(self):
func = compile_time_binary_operators[self.operator]
self.constant_result = func(
self.operand1.constant_result,
self.operand2.constant_result)
def compile_time_value(self, denv):
func = get_compile_time_binop(self)
operand1 = self.operand1.compile_time_value(denv)
operand2 = self.operand2.compile_time_value(denv)
try:
return func(operand1, operand2)
except Exception as e:
self.compile_time_value_error(e)
def infer_type(self, env):
return self.result_type(self.operand1.infer_type(env),
self.operand2.infer_type(env))
def analyse_types(self, env):
self.operand1 = self.operand1.analyse_types(env)
self.operand2 = self.operand2.analyse_types(env)
self.analyse_operation(env)
return self
def analyse_operation(self, env):
if self.is_py_operation():
self.coerce_operands_to_pyobjects(env)
self.type = self.result_type(self.operand1.type,
self.operand2.type)
assert self.type.is_pyobject
self.is_temp = 1
elif self.is_cpp_operation():
self.analyse_cpp_operation(env)
else:
self.analyse_c_operation(env)
def is_py_operation(self):
return self.is_py_operation_types(self.operand1.type, self.operand2.type)
def is_py_operation_types(self, type1, type2):
return type1.is_pyobject or type2.is_pyobject or type1.is_ctuple or type2.is_ctuple
def is_cpp_operation(self):
return (self.operand1.type.is_cpp_class
or self.operand2.type.is_cpp_class)
def analyse_cpp_operation(self, env):
entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
if not entry:
self.type_error()
return
func_type = entry.type
self.exception_check = func_type.exception_check
self.exception_value = func_type.exception_value
if self.exception_check == '+':
# Used by NumBinopNodes to break up expressions involving multiple
# operators so that exceptions can be handled properly.
self.is_temp = 1
if self.exception_value is None:
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
if func_type.is_ptr:
func_type = func_type.base_type
if len(func_type.args) == 1:
self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
else:
self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
self.type = func_type.return_type
def result_type(self, type1, type2):
if self.is_py_operation_types(type1, type2):
if type2.is_string:
type2 = Builtin.bytes_type
elif type2.is_pyunicode_ptr:
type2 = Builtin.unicode_type
if type1.is_string:
type1 = Builtin.bytes_type
elif type1.is_pyunicode_ptr:
type1 = Builtin.unicode_type
if type1.is_builtin_type or type2.is_builtin_type:
if type1 is type2 and self.operator in '**%+|&^':
# FIXME: at least these operators should be safe - others?
return type1
result_type = self.infer_builtin_types_operation(type1, type2)
if result_type is not None:
return result_type
return py_object_type
else:
return self.compute_c_result_type(type1, type2)
def infer_builtin_types_operation(self, type1, type2):
return None
def nogil_check(self, env):
if self.is_py_operation():
self.gil_error()
def coerce_operands_to_pyobjects(self, env):
self.operand1 = self.operand1.coerce_to_pyobject(env)
self.operand2 = self.operand2.coerce_to_pyobject(env)
def check_const(self):
return self.operand1.check_const() and self.operand2.check_const()
def is_ephemeral(self):
return (super(BinopNode, self).is_ephemeral() or
self.operand1.is_ephemeral() or self.operand2.is_ephemeral())
def generate_result_code(self, code):
#print "BinopNode.generate_result_code:", self.operand1, self.operand2 ###
if self.operand1.type.is_pyobject:
function = self.py_operation_function(code)
if self.operator == '**':
extra_args = ", Py_None"
else:
extra_args = ""
code.putln(
"%s = %s(%s, %s%s); %s" % (
self.result(),
function,
self.operand1.py_result(),
self.operand2.py_result(),
extra_args,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif self.is_temp:
# C++ overloaded operators with exception values are currently all
# handled through temporaries.
if self.is_cpp_operation() and self.exception_check == '+':
translate_cpp_exception(code, self.pos,
"%s = %s;" % (self.result(), self.calculate_result_code()),
self.exception_value, self.in_nogil_context)
else:
code.putln("%s = %s;" % (self.result(), self.calculate_result_code()))
def type_error(self):
if not (self.operand1.type.is_error
or self.operand2.type.is_error):
error(self.pos, "Invalid operand types for '%s' (%s; %s)" %
(self.operator, self.operand1.type,
self.operand2.type))
self.type = PyrexTypes.error_type
class CBinopNode(BinopNode):
def analyse_types(self, env):
node = BinopNode.analyse_types(self, env)
if node.is_py_operation():
node.type = PyrexTypes.error_type
return node
def py_operation_function(self, code):
return ""
def calculate_result_code(self):
return "(%s %s %s)" % (
self.operand1.result(),
self.operator,
self.operand2.result())
def compute_c_result_type(self, type1, type2):
cpp_type = None
if type1.is_cpp_class or type1.is_ptr:
cpp_type = type1.find_cpp_operation_type(self.operator, type2)
# FIXME: handle the reversed case?
#if cpp_type is None and (type2.is_cpp_class or type2.is_ptr):
# cpp_type = type2.find_cpp_operation_type(self.operator, type1)
# FIXME: do we need to handle other cases here?
return cpp_type
def c_binop_constructor(operator):
def make_binop_node(pos, **operands):
return CBinopNode(pos, operator=operator, **operands)
return make_binop_node
class NumBinopNode(BinopNode):
# Binary operation taking numeric arguments.
infix = True
overflow_check = False
overflow_bit_node = None
def analyse_c_operation(self, env):
type1 = self.operand1.type
type2 = self.operand2.type
self.type = self.compute_c_result_type(type1, type2)
if not self.type:
self.type_error()
return
if self.type.is_complex:
self.infix = False
if (self.type.is_int
and env.directives['overflowcheck']
and self.operator in self.overflow_op_names):
if (self.operator in ('+', '*')
and self.operand1.has_constant_result()
and not self.operand2.has_constant_result()):
self.operand1, self.operand2 = self.operand2, self.operand1
self.overflow_check = True
self.overflow_fold = env.directives['overflowcheck.fold']
self.func = self.type.overflow_check_binop(
self.overflow_op_names[self.operator],
env,
const_rhs = self.operand2.has_constant_result())
self.is_temp = True
if not self.infix or (type1.is_numeric and type2.is_numeric):
self.operand1 = self.operand1.coerce_to(self.type, env)
self.operand2 = self.operand2.coerce_to(self.type, env)
def compute_c_result_type(self, type1, type2):
if self.c_types_okay(type1, type2):
widest_type = PyrexTypes.widest_numeric_type(type1, type2)
if widest_type is PyrexTypes.c_bint_type:
if self.operator not in '|^&':
# False + False == 0 # not False!
widest_type = PyrexTypes.c_int_type
else:
widest_type = PyrexTypes.widest_numeric_type(
widest_type, PyrexTypes.c_int_type)
return widest_type
else:
return None
def may_be_none(self):
if self.type and self.type.is_builtin_type:
# if we know the result type, we know the operation, so it can't be None
return False
type1 = self.operand1.type
type2 = self.operand2.type
if type1 and type1.is_builtin_type and type2 and type2.is_builtin_type:
# XXX: I can't think of any case where a binary operation
# on builtin types evaluates to None - add a special case
# here if there is one.
return False
return super(NumBinopNode, self).may_be_none()
def get_constant_c_result_code(self):
value1 = self.operand1.get_constant_c_result_code()
value2 = self.operand2.get_constant_c_result_code()
if value1 and value2:
return "(%s %s %s)" % (value1, self.operator, value2)
else:
return None
def c_types_okay(self, type1, type2):
#print "NumBinopNode.c_types_okay:", type1, type2 ###
return (type1.is_numeric or type1.is_enum) \
and (type2.is_numeric or type2.is_enum)
def generate_evaluation_code(self, code):
if self.overflow_check:
self.overflow_bit_node = self
self.overflow_bit = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
code.putln("%s = 0;" % self.overflow_bit)
super(NumBinopNode, self).generate_evaluation_code(code)
if self.overflow_check:
code.putln("if (unlikely(%s)) {" % self.overflow_bit)
code.putln('PyErr_SetString(PyExc_OverflowError, "value too large");')
code.putln(code.error_goto(self.pos))
code.putln("}")
code.funcstate.release_temp(self.overflow_bit)
def calculate_result_code(self):
if self.overflow_bit_node is not None:
return "%s(%s, %s, &%s)" % (
self.func,
self.operand1.result(),
self.operand2.result(),
self.overflow_bit_node.overflow_bit)
elif self.type.is_cpp_class or self.infix:
return "(%s %s %s)" % (
self.operand1.result(),
self.operator,
self.operand2.result())
else:
func = self.type.binary_op(self.operator)
if func is None:
error(self.pos, "binary operator %s not supported for %s" % (self.operator, self.type))
return "%s(%s, %s)" % (
func,
self.operand1.result(),
self.operand2.result())
def is_py_operation_types(self, type1, type2):
return (type1.is_unicode_char or
type2.is_unicode_char or
BinopNode.is_py_operation_types(self, type1, type2))
def py_operation_function(self, code):
function_name = self.py_functions[self.operator]
if self.inplace:
function_name = function_name.replace('PyNumber_', 'PyNumber_InPlace')
return function_name
py_functions = {
"|": "PyNumber_Or",
"^": "PyNumber_Xor",
"&": "PyNumber_And",
"<<": "PyNumber_Lshift",
">>": "PyNumber_Rshift",
"+": "PyNumber_Add",
"-": "PyNumber_Subtract",
"*": "PyNumber_Multiply",
"@": "__Pyx_PyNumber_MatrixMultiply",
"/": "__Pyx_PyNumber_Divide",
"//": "PyNumber_FloorDivide",
"%": "PyNumber_Remainder",
"**": "PyNumber_Power",
}
overflow_op_names = {
"+": "add",
"-": "sub",
"*": "mul",
"<<": "lshift",
}
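# Example (approximate generated C, helper name illustrative; the real
# name comes from type.overflow_check_binop()): with the `overflowcheck`
# directive enabled, `c = a + b` on C ints is emitted roughly as
#   __pyx_t_ov = 0;
#   __pyx_v_c = __Pyx_add_int_checking_overflow(__pyx_v_a, __pyx_v_b, &__pyx_t_ov);
#   if (unlikely(__pyx_t_ov)) {
#       PyErr_SetString(PyExc_OverflowError, "value too large"); <error goto>
#   }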
class IntBinopNode(NumBinopNode):
# Binary operation taking integer arguments.
def c_types_okay(self, type1, type2):
#print "IntBinopNode.c_types_okay:", type1, type2 ###
return (type1.is_int or type1.is_enum) \
and (type2.is_int or type2.is_enum)
class AddNode(NumBinopNode):
# '+' operator.
def is_py_operation_types(self, type1, type2):
if type1.is_string and type2.is_string or type1.is_pyunicode_ptr and type2.is_pyunicode_ptr:
return 1
else:
return NumBinopNode.is_py_operation_types(self, type1, type2)
def infer_builtin_types_operation(self, type1, type2):
# b'abc' + 'abc' raises an exception in Py3,
# so we can safely infer the Py2 type for bytes here
string_types = (bytes_type, str_type, basestring_type, unicode_type)
if type1 in string_types and type2 in string_types:
return string_types[max(string_types.index(type1),
string_types.index(type2))]
return None
def compute_c_result_type(self, type1, type2):
#print "AddNode.compute_c_result_type:", type1, self.operator, type2 ###
if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
return type1
elif (type2.is_ptr or type2.is_array) and (type1.is_int or type1.is_enum):
return type2
else:
return NumBinopNode.compute_c_result_type(
self, type1, type2)
def py_operation_function(self, code):
type1, type2 = self.operand1.type, self.operand2.type
if type1 is unicode_type or type2 is unicode_type:
if type1.is_builtin_type and type2.is_builtin_type:
if self.operand1.may_be_none() or self.operand2.may_be_none():
return '__Pyx_PyUnicode_ConcatSafe'
else:
return '__Pyx_PyUnicode_Concat'
return super(AddNode, self).py_operation_function(code)
class SubNode(NumBinopNode):
# '-' operator.
def compute_c_result_type(self, type1, type2):
if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
return type1
elif (type1.is_ptr or type1.is_array) and (type2.is_ptr or type2.is_array):
return PyrexTypes.c_ptrdiff_t_type
else:
return NumBinopNode.compute_c_result_type(
self, type1, type2)
class MulNode(NumBinopNode):
# '*' operator.
def is_py_operation_types(self, type1, type2):
if ((type1.is_string and type2.is_int) or
(type2.is_string and type1.is_int)):
return 1
else:
return NumBinopNode.is_py_operation_types(self, type1, type2)
def infer_builtin_types_operation(self, type1, type2):
# let's assume that whatever builtin type you multiply a string with
# will either return a string of the same type or fail with an exception
string_types = (bytes_type, str_type, basestring_type, unicode_type)
if type1 in string_types and type2.is_builtin_type:
return type1
if type2 in string_types and type1.is_builtin_type:
return type2
# multiplication of containers/numbers with an integer value
# always (?) returns the same type
if type1.is_int:
return type2
if type2.is_int:
return type1
return None
class MatMultNode(NumBinopNode):
# '@' operator.
def is_py_operation_types(self, type1, type2):
return True
def generate_evaluation_code(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("MatrixMultiply", "ObjectHandling.c"))
super(MatMultNode, self).generate_evaluation_code(code)
class DivNode(NumBinopNode):
# '/' or '//' operator.
cdivision = None
truedivision = None # == "unknown" if operator == '/'
ctruedivision = False
cdivision_warnings = False
zerodivision_check = None
def find_compile_time_binary_operator(self, op1, op2):
func = compile_time_binary_operators[self.operator]
if self.operator == '/' and self.truedivision is None:
# => true div for floats, floor div for integers
if isinstance(op1, _py_int_types) and isinstance(op2, _py_int_types):
func = compile_time_binary_operators['//']
return func
def calculate_constant_result(self):
op1 = self.operand1.constant_result
op2 = self.operand2.constant_result
func = self.find_compile_time_binary_operator(op1, op2)
self.constant_result = func(
self.operand1.constant_result,
self.operand2.constant_result)
def compile_time_value(self, denv):
operand1 = self.operand1.compile_time_value(denv)
operand2 = self.operand2.compile_time_value(denv)
try:
func = self.find_compile_time_binary_operator(
operand1, operand2)
return func(operand1, operand2)
except Exception as e:
self.compile_time_value_error(e)
def _check_truedivision(self, env):
if self.cdivision or env.directives['cdivision']:
self.ctruedivision = False
else:
self.ctruedivision = self.truedivision
def infer_type(self, env):
self._check_truedivision(env)
return self.result_type(
self.operand1.infer_type(env),
self.operand2.infer_type(env))
def analyse_operation(self, env):
self._check_truedivision(env)
NumBinopNode.analyse_operation(self, env)
if self.is_cpp_operation():
self.cdivision = True
if not self.type.is_pyobject:
self.zerodivision_check = (
self.cdivision is None and not env.directives['cdivision']
and (not self.operand2.has_constant_result() or
self.operand2.constant_result == 0))
if self.zerodivision_check or env.directives['cdivision_warnings']:
# Need to check ahead of time to warn or raise zero division error
self.operand1 = self.operand1.coerce_to_simple(env)
self.operand2 = self.operand2.coerce_to_simple(env)
def compute_c_result_type(self, type1, type2):
if self.operator == '/' and self.ctruedivision:
if not type1.is_float and not type2.is_float:
widest_type = PyrexTypes.widest_numeric_type(type1, PyrexTypes.c_double_type)
widest_type = PyrexTypes.widest_numeric_type(type2, widest_type)
return widest_type
return NumBinopNode.compute_c_result_type(self, type1, type2)
def zero_division_message(self):
if self.type.is_int:
return "integer division or modulo by zero"
else:
return "float division"
def generate_evaluation_code(self, code):
if not self.type.is_pyobject and not self.type.is_complex:
if self.cdivision is None:
self.cdivision = (code.globalstate.directives['cdivision']
or not self.type.signed
or self.type.is_float)
if not self.cdivision:
code.globalstate.use_utility_code(
UtilityCode.load_cached("DivInt", "CMath.c").specialize(self.type))
NumBinopNode.generate_evaluation_code(self, code)
self.generate_div_warning_code(code)
def generate_div_warning_code(self, code):
in_nogil = self.in_nogil_context
if not self.type.is_pyobject:
if self.zerodivision_check:
if not self.infix:
zero_test = "%s(%s)" % (self.type.unary_op('zero'), self.operand2.result())
else:
zero_test = "%s == 0" % self.operand2.result()
code.putln("if (unlikely(%s)) {" % zero_test)
if in_nogil:
code.put_ensure_gil()
code.putln('PyErr_SetString(PyExc_ZeroDivisionError, "%s");' % self.zero_division_message())
if in_nogil:
code.put_release_ensured_gil()
code.putln(code.error_goto(self.pos))
code.putln("}")
if self.type.is_int and self.type.signed and self.operator != '%':
code.globalstate.use_utility_code(UtilityCode.load_cached("UnaryNegOverflows", "Overflow.c"))
if self.operand2.type.signed == 2:
# explicitly signed, no runtime check needed
minus1_check = 'unlikely(%s == -1)' % self.operand2.result()
else:
type_of_op2 = self.operand2.type.empty_declaration_code()
minus1_check = '(!(((%s)-1) > 0)) && unlikely(%s == (%s)-1)' % (
type_of_op2, self.operand2.result(), type_of_op2)
code.putln("else if (sizeof(%s) == sizeof(long) && %s "
" && unlikely(UNARY_NEG_WOULD_OVERFLOW(%s))) {" % (
self.type.empty_declaration_code(),
minus1_check,
self.operand1.result()))
if in_nogil:
code.put_ensure_gil()
code.putln('PyErr_SetString(PyExc_OverflowError, "value too large to perform division");')
if in_nogil:
code.put_release_ensured_gil()
code.putln(code.error_goto(self.pos))
code.putln("}")
if code.globalstate.directives['cdivision_warnings'] and self.operator != '/':
code.globalstate.use_utility_code(
UtilityCode.load_cached("CDivisionWarning", "CMath.c"))
code.putln("if (unlikely((%s < 0) ^ (%s < 0))) {" % (
self.operand1.result(),
self.operand2.result()))
warning_code = "__Pyx_cdivision_warning(%(FILENAME)s, %(LINENO)s)" % {
'FILENAME': Naming.filename_cname,
'LINENO': Naming.lineno_cname,
}
if in_nogil:
result_code = 'result'
code.putln("int %s;" % result_code)
code.put_ensure_gil()
code.putln(code.set_error_info(self.pos, used=True))
code.putln("%s = %s;" % (result_code, warning_code))
code.put_release_ensured_gil()
else:
result_code = warning_code
code.putln(code.set_error_info(self.pos, used=True))
code.put("if (unlikely(%s)) " % result_code)
code.put_goto(code.error_label)
code.putln("}")
def calculate_result_code(self):
if self.type.is_complex:
return NumBinopNode.calculate_result_code(self)
elif self.type.is_float and self.operator == '//':
return "floor(%s / %s)" % (
self.operand1.result(),
self.operand2.result())
elif self.truedivision or self.cdivision:
op1 = self.operand1.result()
op2 = self.operand2.result()
if self.truedivision:
if self.type != self.operand1.type:
op1 = self.type.cast_code(op1)
if self.type != self.operand2.type:
op2 = self.type.cast_code(op2)
return "(%s / %s)" % (op1, op2)
else:
return "__Pyx_div_%s(%s, %s)" % (
self.type.specialization_name(),
self.operand1.result(),
self.operand2.result())
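# Example (illustrative): for `cdef int a, b`,
#   a // b  with cdivision=True   ->  (a / b)             (C semantics)
#   a // b  with cdivision=False  ->  __Pyx_div_int(a, b) (floor semantics
#                                      plus the zero-division check above)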
class ModNode(DivNode):
# '%' operator.
def is_py_operation_types(self, type1, type2):
return (type1.is_string
or type2.is_string
or NumBinopNode.is_py_operation_types(self, type1, type2))
def infer_builtin_types_operation(self, type1, type2):
# b'%s' % xyz raises an exception in Py3, so it's safe to infer the type for Py2
if type1 is unicode_type:
# None + xyz may be implemented by RHS
if type2.is_builtin_type or not self.operand1.may_be_none():
return type1
elif type1 in (bytes_type, str_type, basestring_type):
if type2 is unicode_type:
return type2
elif type2.is_numeric:
return type1
elif type1 is bytes_type and not type2.is_builtin_type:
return None # RHS might implement '%' operator differently in Py3
else:
return basestring_type # either str or unicode, can't tell
return None
def zero_division_message(self):
if self.type.is_int:
return "integer division or modulo by zero"
else:
return "float divmod()"
def analyse_operation(self, env):
DivNode.analyse_operation(self, env)
if not self.type.is_pyobject:
if self.cdivision is None:
self.cdivision = env.directives['cdivision'] or not self.type.signed
if not self.cdivision and not self.type.is_int and not self.type.is_float:
error(self.pos, "mod operator not supported for type '%s'" % self.type)
def generate_evaluation_code(self, code):
if not self.type.is_pyobject and not self.cdivision:
if self.type.is_int:
code.globalstate.use_utility_code(
UtilityCode.load_cached("ModInt", "CMath.c").specialize(self.type))
else: # float
code.globalstate.use_utility_code(
UtilityCode.load_cached("ModFloat", "CMath.c").specialize(
self.type, math_h_modifier=self.type.math_h_modifier))
# NOTE: skipping over DivNode here
NumBinopNode.generate_evaluation_code(self, code)
self.generate_div_warning_code(code)
def calculate_result_code(self):
if self.cdivision:
if self.type.is_float:
return "fmod%s(%s, %s)" % (
self.type.math_h_modifier,
self.operand1.result(),
self.operand2.result())
else:
return "(%s %% %s)" % (
self.operand1.result(),
self.operand2.result())
else:
return "__Pyx_mod_%s(%s, %s)" % (
self.type.specialization_name(),
self.operand1.result(),
self.operand2.result())
def py_operation_function(self, code):
if self.operand1.type is unicode_type:
if self.operand1.may_be_none():
return '__Pyx_PyUnicode_FormatSafe'
else:
return 'PyUnicode_Format'
elif self.operand1.type is str_type:
if self.operand1.may_be_none():
return '__Pyx_PyString_FormatSafe'
else:
return '__Pyx_PyString_Format'
return super(ModNode, self).py_operation_function(code)
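# Example (illustrative): for `cdef int a, b`,
#   a % b  with cdivision=True   ->  (a % b)             (C truncation)
#   a % b  with cdivision=False  ->  __Pyx_mod_int(a, b) (sign follows the
#                                     divisor, matching Python semantics)
# while `u'%s' % x` at the Python level dispatches to PyUnicode_Format.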
class PowNode(NumBinopNode):
# '**' operator.
def analyse_c_operation(self, env):
NumBinopNode.analyse_c_operation(self, env)
if self.type.is_complex:
if self.type.real_type.is_float:
self.operand1 = self.operand1.coerce_to(self.type, env)
self.operand2 = self.operand2.coerce_to(self.type, env)
self.pow_func = "__Pyx_c_pow" + self.type.real_type.math_h_modifier
else:
error(self.pos, "complex int powers not supported")
self.pow_func = "<error>"
elif self.type.is_float:
self.pow_func = "pow" + self.type.math_h_modifier
elif self.type.is_int:
self.pow_func = "__Pyx_pow_%s" % self.type.empty_declaration_code().replace(' ', '_')
env.use_utility_code(
UtilityCode.load_cached("IntPow", "CMath.c").specialize(
func_name=self.pow_func,
type=self.type.empty_declaration_code(),
signed=self.type.signed and 1 or 0))
elif not self.type.is_error:
error(self.pos, "got unexpected types for C power operator: %s, %s" %
(self.operand1.type, self.operand2.type))
def calculate_result_code(self):
# Work around MSVC overloading ambiguity.
def typecast(operand):
if self.type == operand.type:
return operand.result()
else:
return self.type.cast_code(operand.result())
return "%s(%s, %s)" % (
self.pow_func,
typecast(self.operand1),
typecast(self.operand2))
def py_operation_function(self, code):
if (self.type.is_pyobject and
self.operand1.constant_result == 2 and
isinstance(self.operand1.constant_result, _py_int_types) and
self.operand2.type is py_object_type):
code.globalstate.use_utility_code(UtilityCode.load_cached('PyNumberPow2', 'Optimize.c'))
if self.inplace:
return '__Pyx_PyNumber_InPlacePowerOf2'
else:
return '__Pyx_PyNumber_PowerOf2'
return super(PowNode, self).py_operation_function(code)
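# Example (illustrative): `2 ** n` with a Python-object exponent is routed
# to __Pyx_PyNumber_PowerOf2 (from Optimize.c) so that small powers of two
# can bypass the generic PyNumber_Power call.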
class BoolBinopNode(ExprNode):
"""
Short-circuiting boolean operation.
Note that this node provides the same code generation method as
BoolBinopResultNode to simplify expression nesting.
operator string "and"/"or"
operand1 BoolBinopNode/BoolBinopResultNode left operand
operand2 BoolBinopNode/BoolBinopResultNode right operand
"""
subexprs = ['operand1', 'operand2']
is_temp = True
operator = None
operand1 = None
operand2 = None
def infer_type(self, env):
type1 = self.operand1.infer_type(env)
type2 = self.operand2.infer_type(env)
return PyrexTypes.independent_spanning_type(type1, type2)
def may_be_none(self):
if self.operator == 'or':
return self.operand2.may_be_none()
else:
return self.operand1.may_be_none() or self.operand2.may_be_none()
def calculate_constant_result(self):
operand1 = self.operand1.constant_result
operand2 = self.operand2.constant_result
if self.operator == 'and':
self.constant_result = operand1 and operand2
else:
self.constant_result = operand1 or operand2
def compile_time_value(self, denv):
operand1 = self.operand1.compile_time_value(denv)
operand2 = self.operand2.compile_time_value(denv)
if self.operator == 'and':
return operand1 and operand2
else:
return operand1 or operand2
def is_ephemeral(self):
return self.operand1.is_ephemeral() or self.operand2.is_ephemeral()
def analyse_types(self, env):
# Note: we do not do any coercion here as we most likely do not know the final type anyway.
# We even accept to set self.type to ErrorType if both operands do not have a spanning type.
# The coercion to the final type and to a "simple" value is left to coerce_to().
operand1 = self.operand1.analyse_types(env)
operand2 = self.operand2.analyse_types(env)
self.type = PyrexTypes.independent_spanning_type(
operand1.type, operand2.type)
self.operand1 = self._wrap_operand(operand1, env)
self.operand2 = self._wrap_operand(operand2, env)
return self
def _wrap_operand(self, operand, env):
if not isinstance(operand, (BoolBinopNode, BoolBinopResultNode)):
operand = BoolBinopResultNode(operand, self.type, env)
return operand
def wrap_operands(self, env):
"""
Must get called by transforms that want to create a correct BoolBinopNode
after the type analysis phase.
"""
self.operand1 = self._wrap_operand(self.operand1, env)
self.operand2 = self._wrap_operand(self.operand2, env)
def coerce_to_boolean(self, env):
return self.coerce_to(PyrexTypes.c_bint_type, env)
def coerce_to(self, dst_type, env):
operand1 = self.operand1.coerce_to(dst_type, env)
operand2 = self.operand2.coerce_to(dst_type, env)
return BoolBinopNode.from_node(
self, type=dst_type,
operator=self.operator,
operand1=operand1, operand2=operand2)
def generate_bool_evaluation_code(self, code, final_result_temp, and_label, or_label, end_label, fall_through):
code.mark_pos(self.pos)
outer_labels = (and_label, or_label)
if self.operator == 'and':
my_label = and_label = code.new_label('next_and')
else:
my_label = or_label = code.new_label('next_or')
self.operand1.generate_bool_evaluation_code(
code, final_result_temp, and_label, or_label, end_label, my_label)
and_label, or_label = outer_labels
code.put_label(my_label)
self.operand2.generate_bool_evaluation_code(
code, final_result_temp, and_label, or_label, end_label, fall_through)
def generate_evaluation_code(self, code):
self.allocate_temp_result(code)
or_label = and_label = None
end_label = code.new_label('bool_binop_done')
self.generate_bool_evaluation_code(code, self.result(), and_label, or_label, end_label, end_label)
code.put_label(end_label)
gil_message = "Truth-testing Python object"
def check_const(self):
return self.operand1.check_const() and self.operand2.check_const()
def generate_subexpr_disposal_code(self, code):
pass # nothing to do here, all done in generate_evaluation_code()
def free_subexpr_temps(self, code):
pass # nothing to do here, all done in generate_evaluation_code()
def generate_operand1_test(self, code):
# Generate code to test the truth of the first operand.
if self.type.is_pyobject:
test_result = code.funcstate.allocate_temp(
PyrexTypes.c_bint_type, manage_ref=False)
code.putln(
"%s = __Pyx_PyObject_IsTrue(%s); %s" % (
test_result,
self.operand1.py_result(),
code.error_goto_if_neg(test_result, self.pos)))
else:
test_result = self.operand1.result()
return (test_result, self.type.is_pyobject)
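# Example (approximate generated C): `x = a and b` over Python objects
# evaluates `a`, tests it with __Pyx_PyObject_IsTrue(), and only evaluates
# `b` on the true branch; both branches jump to the shared
# 'bool_binop_done' label so exactly one owned reference reaches the
# result temp.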
class BoolBinopResultNode(ExprNode):
"""
Intermediate result of a short-circuiting and/or expression.
Tests the result for 'truthiness' and takes care of coercing the final result
of the overall expression to the target type.
Note that this node provides the same code generation method as
BoolBinopNode to simplify expression nesting.
arg ExprNode the argument to test
value ExprNode the coerced result value node
"""
subexprs = ['arg', 'value']
is_temp = True
arg = None
value = None
def __init__(self, arg, result_type, env):
# using 'arg' multiple times, so it must be a simple/temp value
arg = arg.coerce_to_simple(env)
# wrap in ProxyNode, in case a transform wants to replace self.arg later
arg = ProxyNode(arg)
super(BoolBinopResultNode, self).__init__(
arg.pos, arg=arg, type=result_type,
value=CloneNode(arg).coerce_to(result_type, env))
def coerce_to_boolean(self, env):
return self.coerce_to(PyrexTypes.c_bint_type, env)
def coerce_to(self, dst_type, env):
# unwrap, coerce, rewrap
arg = self.arg.arg
if dst_type is PyrexTypes.c_bint_type:
arg = arg.coerce_to_boolean(env)
# TODO: unwrap more coercion nodes?
return BoolBinopResultNode(arg, dst_type, env)
def nogil_check(self, env):
# let's leave all errors to BoolBinopNode
pass
def generate_operand_test(self, code):
# Generate code to test the truth of the first operand.
if self.arg.type.is_pyobject:
test_result = code.funcstate.allocate_temp(
PyrexTypes.c_bint_type, manage_ref=False)
code.putln(
"%s = __Pyx_PyObject_IsTrue(%s); %s" % (
test_result,
self.arg.py_result(),
code.error_goto_if_neg(test_result, self.pos)))
else:
test_result = self.arg.result()
return (test_result, self.arg.type.is_pyobject)
def generate_bool_evaluation_code(self, code, final_result_temp, and_label, or_label, end_label, fall_through):
code.mark_pos(self.pos)
# x => x
# x and ... or ... => next 'and' / 'or'
# False ... or x => next 'or'
# True and x => next 'and'
# True or x => True (operand)
self.arg.generate_evaluation_code(code)
if and_label or or_label:
test_result, uses_temp = self.generate_operand_test(code)
if uses_temp and (and_label and or_label):
# cannot become final result => free early
# disposal: uses_temp and (and_label and or_label)
self.arg.generate_disposal_code(code)
sense = '!' if or_label else ''
code.putln("if (%s%s) {" % (sense, test_result))
if uses_temp:
code.funcstate.release_temp(test_result)
if not uses_temp or not (and_label and or_label):
# disposal: (not uses_temp) or {not (and_label and or_label) [if]}
self.arg.generate_disposal_code(code)
if or_label and or_label != fall_through:
# value is false => short-circuit to next 'or'
code.put_goto(or_label)
if and_label:
# value is true => go to next 'and'
if or_label:
code.putln("} else {")
if not uses_temp:
# disposal: (not uses_temp) and {(and_label and or_label) [else]}
self.arg.generate_disposal_code(code)
if and_label != fall_through:
code.put_goto(and_label)
if not and_label or not or_label:
# if no next 'and' or 'or', we provide the result
if and_label or or_label:
code.putln("} else {")
self.value.generate_evaluation_code(code)
self.value.make_owned_reference(code)
code.putln("%s = %s;" % (final_result_temp, self.value.result()))
self.value.generate_post_assignment_code(code)
# disposal: {not (and_label and or_label) [else]}
self.arg.generate_disposal_code(code)
self.value.free_temps(code)
if end_label != fall_through:
code.put_goto(end_label)
if and_label or or_label:
code.putln("}")
self.arg.free_temps(code)
class CondExprNode(ExprNode):
# Short-circuiting conditional expression.
#
# test ExprNode
# true_val ExprNode
# false_val ExprNode
true_val = None
false_val = None
subexprs = ['test', 'true_val', 'false_val']
def type_dependencies(self, env):
return self.true_val.type_dependencies(env) + self.false_val.type_dependencies(env)
def infer_type(self, env):
return PyrexTypes.independent_spanning_type(
self.true_val.infer_type(env),
self.false_val.infer_type(env))
def calculate_constant_result(self):
if self.test.constant_result:
self.constant_result = self.true_val.constant_result
else:
self.constant_result = self.false_val.constant_result
def is_ephemeral(self):
return self.true_val.is_ephemeral() or self.false_val.is_ephemeral()
def analyse_types(self, env):
self.test = self.test.analyse_types(env).coerce_to_boolean(env)
self.true_val = self.true_val.analyse_types(env)
self.false_val = self.false_val.analyse_types(env)
self.is_temp = 1
return self.analyse_result_type(env)
def analyse_result_type(self, env):
self.type = PyrexTypes.independent_spanning_type(
self.true_val.type, self.false_val.type)
if self.type.is_reference:
self.type = PyrexTypes.CFakeReferenceType(self.type.ref_base_type)
if self.type.is_pyobject:
self.result_ctype = py_object_type
elif self.true_val.is_ephemeral() or self.false_val.is_ephemeral():
error(self.pos, "Unsafe C derivative of temporary Python reference used in conditional expression")
if self.true_val.type.is_pyobject or self.false_val.type.is_pyobject:
self.true_val = self.true_val.coerce_to(self.type, env)
self.false_val = self.false_val.coerce_to(self.type, env)
if self.type.is_error:
self.type_error()
return self
def coerce_to(self, dst_type, env):
self.true_val = self.true_val.coerce_to(dst_type, env)
self.false_val = self.false_val.coerce_to(dst_type, env)
self.result_ctype = None
return self.analyse_result_type(env)
def type_error(self):
if not (self.true_val.type.is_error or self.false_val.type.is_error):
error(self.pos, "Incompatible types in conditional expression (%s; %s)" %
(self.true_val.type, self.false_val.type))
self.type = PyrexTypes.error_type
def check_const(self):
return (self.test.check_const()
and self.true_val.check_const()
and self.false_val.check_const())
def generate_evaluation_code(self, code):
# Because subexprs may not be evaluated we can use a more optimal
# subexpr allocation strategy than the default, so override evaluation_code.
code.mark_pos(self.pos)
self.allocate_temp_result(code)
self.test.generate_evaluation_code(code)
code.putln("if (%s) {" % self.test.result())
self.eval_and_get(code, self.true_val)
code.putln("} else {")
self.eval_and_get(code, self.false_val)
code.putln("}")
self.test.generate_disposal_code(code)
self.test.free_temps(code)
def eval_and_get(self, code, expr):
expr.generate_evaluation_code(code)
if self.type.is_memoryviewslice:
expr.make_owned_memoryviewslice(code)
else:
expr.make_owned_reference(code)
code.putln('%s = %s;' % (self.result(), expr.result_as(self.ctype())))
expr.generate_post_assignment_code(code)
expr.free_temps(code)
def generate_subexpr_disposal_code(self, code):
pass # done explicitly above (cleanup must separately happen within the if/else blocks)
def free_subexpr_temps(self, code):
pass # done explicitly above (cleanup must separately happen within the if/else blocks)
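# Illustrative sketch (not part of the original source): for
# "x = a if cond else b", generate_evaluation_code() above evaluates
# only the selected branch, so the emitted C is roughly:
#
#   <evaluate cond>;
#   if (cond) { <evaluate a>; x = a; } else { <evaluate b>; x = b; }
#   <dispose of cond>;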
richcmp_constants = {
"<" : "Py_LT",
"<=": "Py_LE",
"==": "Py_EQ",
"!=": "Py_NE",
"<>": "Py_NE",
">" : "Py_GT",
">=": "Py_GE",
# the following are faked by special compare functions
"in" : "Py_EQ",
"not_in": "Py_NE",
}
class CmpNode(object):
# Mixin class containing code common to PrimaryCmpNodes
# and CascadedCmpNodes.
special_bool_cmp_function = None
special_bool_cmp_utility_code = None
def infer_type(self, env):
# TODO: Actually implement this (after merging with -unstable).
return py_object_type
def calculate_cascaded_constant_result(self, operand1_result):
func = compile_time_binary_operators[self.operator]
operand2_result = self.operand2.constant_result
if (isinstance(operand1_result, any_string_type) and
isinstance(operand2_result, any_string_type) and
type(operand1_result) != type(operand2_result)):
# string comparison of different types isn't portable
return
if self.operator in ('in', 'not_in'):
if isinstance(self.operand2, (ListNode, TupleNode, SetNode)):
if not self.operand2.args:
self.constant_result = self.operator == 'not_in'
return
elif isinstance(self.operand2, ListNode) and not self.cascade:
# tuples are more efficient to store than lists
self.operand2 = self.operand2.as_tuple()
elif isinstance(self.operand2, DictNode):
if not self.operand2.key_value_pairs:
self.constant_result = self.operator == 'not_in'
return
self.constant_result = func(operand1_result, operand2_result)
def cascaded_compile_time_value(self, operand1, denv):
func = get_compile_time_binop(self)
operand2 = self.operand2.compile_time_value(denv)
try:
result = func(operand1, operand2)
except Exception as e:
self.compile_time_value_error(e)
result = None
if result:
cascade = self.cascade
if cascade:
result = result and cascade.cascaded_compile_time_value(operand2, denv)
return result
def is_cpp_comparison(self):
return self.operand1.type.is_cpp_class or self.operand2.type.is_cpp_class
def find_common_int_type(self, env, op, operand1, operand2):
# type1 != type2 and at least one of the types is not a C int
type1 = operand1.type
type2 = operand2.type
type1_can_be_int = False
type2_can_be_int = False
if operand1.is_string_literal and operand1.can_coerce_to_char_literal():
type1_can_be_int = True
if operand2.is_string_literal and operand2.can_coerce_to_char_literal():
type2_can_be_int = True
if type1.is_int:
if type2_can_be_int:
return type1
elif type2.is_int:
if type1_can_be_int:
return type2
elif type1_can_be_int:
if type2_can_be_int:
if Builtin.unicode_type in (type1, type2):
return PyrexTypes.c_py_ucs4_type
else:
return PyrexTypes.c_uchar_type
return None
def find_common_type(self, env, op, operand1, common_type=None):
operand2 = self.operand2
type1 = operand1.type
type2 = operand2.type
new_common_type = None
# catch general errors
if type1 == str_type and (type2.is_string or type2 in (bytes_type, unicode_type)) or \
type2 == str_type and (type1.is_string or type1 in (bytes_type, unicode_type)):
error(self.pos, "Comparisons between bytes/unicode and str are not portable to Python 3")
new_common_type = error_type
# try to use numeric comparisons where possible
elif type1.is_complex or type2.is_complex:
if op not in ('==', '!=') \
and (type1.is_complex or type1.is_numeric) \
and (type2.is_complex or type2.is_numeric):
error(self.pos, "complex types are unordered")
new_common_type = error_type
elif type1.is_pyobject:
new_common_type = type1
elif type2.is_pyobject:
new_common_type = type2
else:
new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
elif type1.is_numeric and type2.is_numeric:
new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
elif common_type is None or not common_type.is_pyobject:
new_common_type = self.find_common_int_type(env, op, operand1, operand2)
if new_common_type is None:
# fall back to generic type compatibility tests
if type1.is_ctuple or type2.is_ctuple:
new_common_type = py_object_type
elif type1 == type2:
new_common_type = type1
elif type1.is_pyobject or type2.is_pyobject:
if type2.is_numeric or type2.is_string:
if operand2.check_for_coercion_error(type1, env):
new_common_type = error_type
else:
new_common_type = py_object_type
elif type1.is_numeric or type1.is_string:
if operand1.check_for_coercion_error(type2, env):
new_common_type = error_type
else:
new_common_type = py_object_type
elif py_object_type.assignable_from(type1) and py_object_type.assignable_from(type2):
new_common_type = py_object_type
else:
# one Python type and one non-Python type, not assignable
self.invalid_types_error(operand1, op, operand2)
new_common_type = error_type
elif type1.assignable_from(type2):
new_common_type = type1
elif type2.assignable_from(type1):
new_common_type = type2
else:
# C types that we couldn't handle up to here are an error
self.invalid_types_error(operand1, op, operand2)
new_common_type = error_type
if new_common_type.is_string and (isinstance(operand1, BytesNode) or
isinstance(operand2, BytesNode)):
# special case when comparing char* to bytes literal: must
# compare string values!
new_common_type = bytes_type
# recursively merge types
if common_type is None or new_common_type.is_error:
common_type = new_common_type
else:
# we could do a lot better by splitting the comparison
# into a non-Python part and a Python part, but this is
# safer for now
common_type = PyrexTypes.spanning_type(common_type, new_common_type)
if self.cascade:
common_type = self.cascade.find_common_type(env, self.operator, operand2, common_type)
return common_type
def invalid_types_error(self, operand1, op, operand2):
error(self.pos, "Invalid types for '%s' (%s, %s)" %
(op, operand1.type, operand2.type))
def is_python_comparison(self):
return (not self.is_ptr_contains()
and not self.is_c_string_contains()
and (self.has_python_operands()
or (self.cascade and self.cascade.is_python_comparison())
or self.operator in ('in', 'not_in')))
def coerce_operands_to(self, dst_type, env):
operand2 = self.operand2
if operand2.type != dst_type:
self.operand2 = operand2.coerce_to(dst_type, env)
if self.cascade:
self.cascade.coerce_operands_to(dst_type, env)
def is_python_result(self):
return ((self.has_python_operands() and
self.special_bool_cmp_function is None and
self.operator not in ('is', 'is_not', 'in', 'not_in') and
not self.is_c_string_contains() and
not self.is_ptr_contains())
or (self.cascade and self.cascade.is_python_result()))
def is_c_string_contains(self):
return self.operator in ('in', 'not_in') and \
((self.operand1.type.is_int
and (self.operand2.type.is_string or self.operand2.type is bytes_type)) or
(self.operand1.type.is_unicode_char
and self.operand2.type is unicode_type))
def is_ptr_contains(self):
if self.operator in ('in', 'not_in'):
container_type = self.operand2.type
return (container_type.is_ptr or container_type.is_array) \
and not container_type.is_string
def find_special_bool_compare_function(self, env, operand1, result_is_bool=False):
# note: currently operand1 must get coerced to a Python object if we succeed here!
if self.operator in ('==', '!='):
type1, type2 = operand1.type, self.operand2.type
if result_is_bool or (type1.is_builtin_type and type2.is_builtin_type):
if type1 is Builtin.unicode_type or type2 is Builtin.unicode_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
return True
elif type1 is Builtin.bytes_type or type2 is Builtin.bytes_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("BytesEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyBytes_Equals"
return True
elif type1 is Builtin.basestring_type or type2 is Builtin.basestring_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
return True
elif type1 is Builtin.str_type or type2 is Builtin.str_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("StrEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyString_Equals"
return True
elif self.operator in ('in', 'not_in'):
if self.operand2.type is Builtin.dict_type:
self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyDictContains", "ObjectHandling.c")
self.special_bool_cmp_function = "__Pyx_PyDict_ContainsTF"
return True
elif self.operand2.type is Builtin.unicode_type:
self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyUnicodeContains", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyUnicode_ContainsTF"
return True
else:
if not self.operand2.type.is_pyobject:
self.operand2 = self.operand2.coerce_to_pyobject(env)
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySequenceContains", "ObjectHandling.c")
self.special_bool_cmp_function = "__Pyx_PySequence_ContainsTF"
return True
return False
def generate_operation_code(self, code, result_code,
operand1, op, operand2):
if self.type.is_pyobject:
error_clause = code.error_goto_if_null
got_ref = "__Pyx_XGOTREF(%s); " % result_code
if self.special_bool_cmp_function:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyBoolOrNullFromLong", "ObjectHandling.c"))
coerce_result = "__Pyx_PyBoolOrNull_FromLong"
else:
coerce_result = "__Pyx_PyBool_FromLong"
else:
error_clause = code.error_goto_if_neg
got_ref = ""
coerce_result = ""
if self.special_bool_cmp_function:
if operand1.type.is_pyobject:
result1 = operand1.py_result()
else:
result1 = operand1.result()
if operand2.type.is_pyobject:
result2 = operand2.py_result()
else:
result2 = operand2.result()
if self.special_bool_cmp_utility_code:
code.globalstate.use_utility_code(self.special_bool_cmp_utility_code)
code.putln(
"%s = %s(%s(%s, %s, %s)); %s%s" % (
result_code,
coerce_result,
self.special_bool_cmp_function,
result1, result2, richcmp_constants[op],
got_ref,
error_clause(result_code, self.pos)))
elif operand1.type.is_pyobject and op not in ('is', 'is_not'):
assert op not in ('in', 'not_in'), op
code.putln("%s = PyObject_RichCompare(%s, %s, %s); %s%s" % (
result_code,
operand1.py_result(),
operand2.py_result(),
richcmp_constants[op],
got_ref,
error_clause(result_code, self.pos)))
elif operand1.type.is_complex:
code.putln("%s = %s(%s%s(%s, %s));" % (
result_code,
coerce_result,
op == "!=" and "!" or "",
operand1.type.unary_op('eq'),
operand1.result(),
operand2.result()))
else:
type1 = operand1.type
type2 = operand2.type
if (type1.is_extension_type or type2.is_extension_type) \
and not type1.same_as(type2):
common_type = py_object_type
elif type1.is_numeric:
common_type = PyrexTypes.widest_numeric_type(type1, type2)
else:
common_type = type1
code1 = operand1.result_as(common_type)
code2 = operand2.result_as(common_type)
statement = "%s = %s(%s %s %s);" % (
result_code,
coerce_result,
code1,
self.c_operator(op),
code2)
if self.is_cpp_comparison() and self.exception_check == '+':
translate_cpp_exception(code, self.pos, statement, self.exception_value, self.in_nogil_context)
code.putln(statement)
def c_operator(self, op):
if op == 'is':
return "=="
elif op == 'is_not':
return "!="
else:
return op
class PrimaryCmpNode(ExprNode, CmpNode):
# Non-cascaded comparison or first comparison of
# a cascaded sequence.
#
# operator string
# operand1 ExprNode
# operand2 ExprNode
# cascade CascadedCmpNode
# We don't use the subexprs mechanism, because
# things here are too complicated for it to handle.
# Instead, we override all the framework methods
# which use it.
child_attrs = ['operand1', 'operand2', 'coerced_operand2', 'cascade']
cascade = None
coerced_operand2 = None
is_memslice_nonecheck = False
def infer_type(self, env):
# TODO: Actually implement this (after merging with -unstable).
return py_object_type
def type_dependencies(self, env):
return ()
def calculate_constant_result(self):
assert not self.cascade
self.calculate_cascaded_constant_result(self.operand1.constant_result)
def compile_time_value(self, denv):
operand1 = self.operand1.compile_time_value(denv)
return self.cascaded_compile_time_value(operand1, denv)
def analyse_types(self, env):
self.operand1 = self.operand1.analyse_types(env)
self.operand2 = self.operand2.analyse_types(env)
if self.is_cpp_comparison():
self.analyse_cpp_comparison(env)
if self.cascade:
error(self.pos, "Cascading comparison not yet supported for cpp types.")
return self
if self.analyse_memoryviewslice_comparison(env):
return self
if self.cascade:
self.cascade = self.cascade.analyse_types(env)
if self.operator in ('in', 'not_in'):
if self.is_c_string_contains():
self.is_pycmp = False
common_type = None
if self.cascade:
error(self.pos, "Cascading comparison not yet supported for 'int_val in string'.")
return self
if self.operand2.type is unicode_type:
env.use_utility_code(UtilityCode.load_cached("PyUCS4InUnicode", "StringTools.c"))
else:
if self.operand1.type is PyrexTypes.c_uchar_type:
self.operand1 = self.operand1.coerce_to(PyrexTypes.c_char_type, env)
if self.operand2.type is not bytes_type:
self.operand2 = self.operand2.coerce_to(bytes_type, env)
env.use_utility_code(UtilityCode.load_cached("BytesContains", "StringTools.c"))
self.operand2 = self.operand2.as_none_safe_node(
"argument of type 'NoneType' is not iterable")
elif self.is_ptr_contains():
if self.cascade:
error(self.pos, "Cascading comparison not supported for 'val in sliced pointer'.")
self.type = PyrexTypes.c_bint_type
# Will be transformed by IterationTransform
return self
elif self.find_special_bool_compare_function(env, self.operand1):
if not self.operand1.type.is_pyobject:
self.operand1 = self.operand1.coerce_to_pyobject(env)
common_type = None # if coercion needed, the method call above has already done it
self.is_pycmp = False # result is bint
else:
common_type = py_object_type
self.is_pycmp = True
elif self.find_special_bool_compare_function(env, self.operand1):
if not self.operand1.type.is_pyobject:
self.operand1 = self.operand1.coerce_to_pyobject(env)
common_type = None # if coercion needed, the method call above has already done it
self.is_pycmp = False # result is bint
else:
common_type = self.find_common_type(env, self.operator, self.operand1)
self.is_pycmp = common_type.is_pyobject
if common_type is not None and not common_type.is_error:
if self.operand1.type != common_type:
self.operand1 = self.operand1.coerce_to(common_type, env)
self.coerce_operands_to(common_type, env)
if self.cascade:
self.operand2 = self.operand2.coerce_to_simple(env)
self.cascade.coerce_cascaded_operands_to_temp(env)
operand2 = self.cascade.optimise_comparison(self.operand2, env)
if operand2 is not self.operand2:
self.coerced_operand2 = operand2
if self.is_python_result():
self.type = PyrexTypes.py_object_type
else:
self.type = PyrexTypes.c_bint_type
cdr = self.cascade
while cdr:
cdr.type = self.type
cdr = cdr.cascade
if self.is_pycmp or self.cascade or self.special_bool_cmp_function:
# 1) owned reference, 2) reused value, 3) potential function error return value
self.is_temp = 1
return self
def analyse_cpp_comparison(self, env):
type1 = self.operand1.type
type2 = self.operand2.type
self.is_pycmp = False
entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
if entry is None:
error(self.pos, "Invalid types for '%s' (%s, %s)" %
(self.operator, type1, type2))
self.type = PyrexTypes.error_type
self.result_code = "<error>"
return
func_type = entry.type
if func_type.is_ptr:
func_type = func_type.base_type
self.exception_check = func_type.exception_check
self.exception_value = func_type.exception_value
if self.exception_check == '+':
self.is_temp = True
if self.exception_value is None:
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
if len(func_type.args) == 1:
self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
else:
self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
self.type = func_type.return_type
def analyse_memoryviewslice_comparison(self, env):
have_none = self.operand1.is_none or self.operand2.is_none
have_slice = (self.operand1.type.is_memoryviewslice or
self.operand2.type.is_memoryviewslice)
ops = ('==', '!=', 'is', 'is_not')
if have_slice and have_none and self.operator in ops:
self.is_pycmp = False
self.type = PyrexTypes.c_bint_type
self.is_memslice_nonecheck = True
return True
return False
def coerce_to_boolean(self, env):
if self.is_pycmp:
# coercing to bool => may allow for more efficient comparison code
if self.find_special_bool_compare_function(
env, self.operand1, result_is_bool=True):
self.is_pycmp = False
self.type = PyrexTypes.c_bint_type
self.is_temp = 1
if self.cascade:
operand2 = self.cascade.optimise_comparison(
self.operand2, env, result_is_bool=True)
if operand2 is not self.operand2:
self.coerced_operand2 = operand2
return self
# TODO: check if we can optimise parts of the cascade here
return ExprNode.coerce_to_boolean(self, env)
def has_python_operands(self):
return (self.operand1.type.is_pyobject
or self.operand2.type.is_pyobject)
def check_const(self):
if self.cascade:
self.not_const()
return False
else:
return self.operand1.check_const() and self.operand2.check_const()
def calculate_result_code(self):
if self.operand1.type.is_complex:
if self.operator == "!=":
negation = "!"
else:
negation = ""
return "(%s%s(%s, %s))" % (
negation,
self.operand1.type.binary_op('=='),
self.operand1.result(),
self.operand2.result())
elif self.is_c_string_contains():
if self.operand2.type is unicode_type:
method = "__Pyx_UnicodeContainsUCS4"
else:
method = "__Pyx_BytesContains"
if self.operator == "not_in":
negation = "!"
else:
negation = ""
return "(%s%s(%s, %s))" % (
negation,
method,
self.operand2.result(),
self.operand1.result())
else:
result1 = self.operand1.result()
result2 = self.operand2.result()
if self.is_memslice_nonecheck:
if self.operand1.type.is_memoryviewslice:
result1 = "((PyObject *) %s.memview)" % result1
else:
result2 = "((PyObject *) %s.memview)" % result2
return "(%s %s %s)" % (
result1,
self.c_operator(self.operator),
result2)
def generate_evaluation_code(self, code):
self.operand1.generate_evaluation_code(code)
self.operand2.generate_evaluation_code(code)
if self.is_temp:
self.allocate_temp_result(code)
self.generate_operation_code(code, self.result(),
self.operand1, self.operator, self.operand2)
if self.cascade:
self.cascade.generate_evaluation_code(
code, self.result(), self.coerced_operand2 or self.operand2,
needs_evaluation=self.coerced_operand2 is not None)
self.operand1.generate_disposal_code(code)
self.operand1.free_temps(code)
self.operand2.generate_disposal_code(code)
self.operand2.free_temps(code)
def generate_subexpr_disposal_code(self, code):
# If this is called, it is a non-cascaded cmp,
# so only need to dispose of the two main operands.
self.operand1.generate_disposal_code(code)
self.operand2.generate_disposal_code(code)
def free_subexpr_temps(self, code):
# If this is called, it is a non-cascaded cmp,
# so only need to dispose of the two main operands.
self.operand1.free_temps(code)
self.operand2.free_temps(code)
def annotate(self, code):
self.operand1.annotate(code)
self.operand2.annotate(code)
if self.cascade:
self.cascade.annotate(code)
class CascadedCmpNode(Node, CmpNode):
# A CascadedCmpNode is not a complete expression node. It
# hangs off the side of another comparison node, shares
# its left operand with that node, and shares its result
# with the PrimaryCmpNode at the head of the chain.
#
# operator string
# operand2 ExprNode
# cascade CascadedCmpNode
child_attrs = ['operand2', 'coerced_operand2', 'cascade']
cascade = None
coerced_operand2 = None
constant_result = constant_value_not_set # FIXME: where to calculate this?
def infer_type(self, env):
# TODO: Actually implement this (after merging with -unstable).
return py_object_type
def type_dependencies(self, env):
return ()
def has_constant_result(self):
return self.constant_result is not constant_value_not_set and \
self.constant_result is not not_a_constant
def analyse_types(self, env):
self.operand2 = self.operand2.analyse_types(env)
if self.cascade:
self.cascade = self.cascade.analyse_types(env)
return self
def has_python_operands(self):
return self.operand2.type.is_pyobject
def is_cpp_comparison(self):
# cascaded comparisons aren't currently implemented for C++ classes.
return False
def optimise_comparison(self, operand1, env, result_is_bool=False):
if self.find_special_bool_compare_function(env, operand1, result_is_bool):
self.is_pycmp = False
self.type = PyrexTypes.c_bint_type
if not operand1.type.is_pyobject:
operand1 = operand1.coerce_to_pyobject(env)
if self.cascade:
operand2 = self.cascade.optimise_comparison(self.operand2, env, result_is_bool)
if operand2 is not self.operand2:
self.coerced_operand2 = operand2
return operand1
def coerce_operands_to_pyobjects(self, env):
self.operand2 = self.operand2.coerce_to_pyobject(env)
if self.operand2.type is dict_type and self.operator in ('in', 'not_in'):
self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
if self.cascade:
self.cascade.coerce_operands_to_pyobjects(env)
def coerce_cascaded_operands_to_temp(self, env):
if self.cascade:
#self.operand2 = self.operand2.coerce_to_temp(env) #CTT
self.operand2 = self.operand2.coerce_to_simple(env)
self.cascade.coerce_cascaded_operands_to_temp(env)
def generate_evaluation_code(self, code, result, operand1, needs_evaluation=False):
if self.type.is_pyobject:
code.putln("if (__Pyx_PyObject_IsTrue(%s)) {" % result)
code.put_decref(result, self.type)
else:
code.putln("if (%s) {" % result)
if needs_evaluation:
operand1.generate_evaluation_code(code)
self.operand2.generate_evaluation_code(code)
self.generate_operation_code(code, result,
operand1, self.operator, self.operand2)
if self.cascade:
self.cascade.generate_evaluation_code(
code, result, self.coerced_operand2 or self.operand2,
needs_evaluation=self.coerced_operand2 is not None)
if needs_evaluation:
operand1.generate_disposal_code(code)
operand1.free_temps(code)
# Cascaded cmp result is always temp
self.operand2.generate_disposal_code(code)
self.operand2.free_temps(code)
code.putln("}")
def annotate(self, code):
self.operand2.annotate(code)
if self.cascade:
self.cascade.annotate(code)
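# Illustrative sketch (not part of the original source): a cascaded
# comparison such as "a < b < c" shares the middle operand and
# short-circuits, so PrimaryCmpNode plus CascadedCmpNode emit C of
# roughly this shape:
#
#   t = <evaluate b>;                 /* coerced to a simple/temp value */
#   result = (<evaluate a> < t);
#   if (result) {                     /* CascadedCmpNode body */
#       result = (t < <evaluate c>);
#   }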
binop_node_classes = {
"or": BoolBinopNode,
"and": BoolBinopNode,
"|": IntBinopNode,
"^": IntBinopNode,
"&": IntBinopNode,
"<<": IntBinopNode,
">>": IntBinopNode,
"+": AddNode,
"-": SubNode,
"*": MulNode,
"@": MatMultNode,
"/": DivNode,
"//": DivNode,
"%": ModNode,
"**": PowNode,
}
def binop_node(pos, operator, operand1, operand2, inplace=False, **kwargs):
# Construct binop node of appropriate class for
# given operator.
return binop_node_classes[operator](
pos,
operator=operator,
operand1=operand1,
operand2=operand2,
inplace=inplace,
**kwargs)
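# Illustrative usage sketch (not part of the original source): the
# factory dispatches purely on the operator string, e.g.
#   binop_node(pos, '+', left, right)   -> AddNode instance
#   binop_node(pos, '<<', left, right)  -> IntBinopNode instance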
#-------------------------------------------------------------------
#
# Coercion nodes
#
# Coercion nodes are special in that they are created during
# the analyse_types phase of parse tree processing.
# Their __init__ methods consequently incorporate some aspects
# of that phase.
#
#-------------------------------------------------------------------
class CoercionNode(ExprNode):
# Abstract base class for coercion nodes.
#
# arg ExprNode node being coerced
subexprs = ['arg']
constant_result = not_a_constant
def __init__(self, arg):
super(CoercionNode, self).__init__(arg.pos)
self.arg = arg
if debug_coercion:
print("%s Coercing %s" % (self, self.arg))
def calculate_constant_result(self):
# constant folding can break type coercion, so this is disabled
pass
def annotate(self, code):
self.arg.annotate(code)
if self.arg.type != self.type:
file, line, col = self.pos
code.annotate((file, line, col-1), AnnotationItem(
style='coerce', tag='coerce', text='[%s] to [%s]' % (self.arg.type, self.type)))
class CoerceToMemViewSliceNode(CoercionNode):
"""
Coerce an object to a memoryview slice. This holds a new reference in
a managed temp.
"""
def __init__(self, arg, dst_type, env):
assert dst_type.is_memoryviewslice
assert not arg.type.is_memoryviewslice
CoercionNode.__init__(self, arg)
self.type = dst_type
self.is_temp = 1
self.env = env
self.use_managed_ref = True
self.arg = arg
def generate_result_code(self, code):
self.type.create_from_py_utility_code(self.env)
code.putln("%s = %s(%s);" % (self.result(),
self.type.from_py_function,
self.arg.py_result()))
error_cond = self.type.error_condition(self.result())
code.putln(code.error_goto_if(error_cond, self.pos))
class CastNode(CoercionNode):
# Wrap a node in a C type cast.
def __init__(self, arg, new_type):
CoercionNode.__init__(self, arg)
self.type = new_type
def may_be_none(self):
return self.arg.may_be_none()
def calculate_result_code(self):
return self.arg.result_as(self.type)
def generate_result_code(self, code):
self.arg.generate_result_code(code)
class PyTypeTestNode(CoercionNode):
# This node is used to check that a generic Python
# object is an instance of a particular extension type.
# This node borrows the result of its argument node.
exact_builtin_type = True
def __init__(self, arg, dst_type, env, notnone=False):
# The arg is known to be a Python object, and
# the dst_type is known to be an extension type.
assert dst_type.is_extension_type or dst_type.is_builtin_type, "PyTypeTest on non extension type"
CoercionNode.__init__(self, arg)
self.type = dst_type
self.result_ctype = arg.ctype()
self.notnone = notnone
nogil_check = Node.gil_error
gil_message = "Python type test"
def analyse_types(self, env):
return self
def may_be_none(self):
if self.notnone:
return False
return self.arg.may_be_none()
def is_simple(self):
return self.arg.is_simple()
def result_in_temp(self):
return self.arg.result_in_temp()
def is_ephemeral(self):
return self.arg.is_ephemeral()
def nonlocally_immutable(self):
return self.arg.nonlocally_immutable()
def calculate_constant_result(self):
# FIXME
pass
def calculate_result_code(self):
return self.arg.result()
def generate_result_code(self, code):
if self.type.typeobj_is_available():
if self.type.is_builtin_type:
type_test = self.type.type_test_code(
self.arg.py_result(),
self.notnone, exact=self.exact_builtin_type)
else:
type_test = self.type.type_test_code(
self.arg.py_result(), self.notnone)
code.globalstate.use_utility_code(
UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c"))
code.putln("if (!(%s)) %s" % (
type_test, code.error_goto(self.pos)))
else:
error(self.pos, "Cannot test type of extern C class "
"without type object name specification")
def generate_post_assignment_code(self, code):
self.arg.generate_post_assignment_code(code)
def free_temps(self, code):
self.arg.free_temps(code)
class NoneCheckNode(CoercionNode):
# This node is used to check that a Python object is not None and
# raises an appropriate exception (as specified by the creating
# transform).
is_nonecheck = True
def __init__(self, arg, exception_type_cname, exception_message,
exception_format_args):
CoercionNode.__init__(self, arg)
self.type = arg.type
self.result_ctype = arg.ctype()
self.exception_type_cname = exception_type_cname
self.exception_message = exception_message
self.exception_format_args = tuple(exception_format_args or ())
nogil_check = None # this node only guards an operation that would fail already
def analyse_types(self, env):
return self
def may_be_none(self):
return False
def is_simple(self):
return self.arg.is_simple()
def result_in_temp(self):
return self.arg.result_in_temp()
def nonlocally_immutable(self):
return self.arg.nonlocally_immutable()
def calculate_result_code(self):
return self.arg.result()
def condition(self):
if self.type.is_pyobject:
return self.arg.py_result()
elif self.type.is_memoryviewslice:
return "((PyObject *) %s.memview)" % self.arg.result()
else:
raise Exception("unsupported type")
def put_nonecheck(self, code):
code.putln(
"if (unlikely(%s == Py_None)) {" % self.condition())
if self.in_nogil_context:
code.put_ensure_gil()
escape = StringEncoding.escape_byte_string
if self.exception_format_args:
code.putln('PyErr_Format(%s, "%s", %s);' % (
self.exception_type_cname,
StringEncoding.escape_byte_string(
self.exception_message.encode('UTF-8')),
', '.join([ '"%s"' % escape(str(arg).encode('UTF-8'))
for arg in self.exception_format_args ])))
else:
code.putln('PyErr_SetString(%s, "%s");' % (
self.exception_type_cname,
escape(self.exception_message.encode('UTF-8'))))
if self.in_nogil_context:
code.put_release_ensured_gil()
code.putln(code.error_goto(self.pos))
code.putln("}")
def generate_result_code(self, code):
self.put_nonecheck(code)
def generate_post_assignment_code(self, code):
self.arg.generate_post_assignment_code(code)
def free_temps(self, code):
self.arg.free_temps(code)
class CoerceToPyTypeNode(CoercionNode):
# This node is used to convert a C data type
# to a Python object.
type = py_object_type
target_type = py_object_type
is_temp = 1
def __init__(self, arg, env, type=py_object_type):
if not arg.type.create_to_py_utility_code(env):
error(arg.pos, "Cannot convert '%s' to Python object" % arg.type)
elif arg.type.is_complex:
# special case: complex coercion is so complex that it
# uses a macro ("__pyx_PyComplex_FromComplex()"), for
# which the argument must be simple
arg = arg.coerce_to_simple(env)
CoercionNode.__init__(self, arg)
if type is py_object_type:
# be specific about some known types
if arg.type.is_string or arg.type.is_cpp_string:
self.type = default_str_type(env)
elif arg.type.is_pyunicode_ptr or arg.type.is_unicode_char:
self.type = unicode_type
elif arg.type.is_complex:
self.type = Builtin.complex_type
self.target_type = self.type
elif arg.type.is_string or arg.type.is_cpp_string:
if (type not in (bytes_type, bytearray_type)
and not env.directives['c_string_encoding']):
error(arg.pos,
"default encoding required for conversion from '%s' to '%s'" %
(arg.type, type))
self.type = self.target_type = type
else:
# FIXME: check that the target type and the resulting type are compatible
self.target_type = type
gil_message = "Converting to Python object"
def may_be_none(self):
# FIXME: is this always safe?
return False
def coerce_to_boolean(self, env):
arg_type = self.arg.type
if (arg_type == PyrexTypes.c_bint_type or
(arg_type.is_pyobject and arg_type.name == 'bool')):
return self.arg.coerce_to_temp(env)
else:
return CoerceToBooleanNode(self, env)
def coerce_to_integer(self, env):
# If not already some C integer type, coerce to longint.
if self.arg.type.is_int:
return self.arg
else:
return self.arg.coerce_to(PyrexTypes.c_long_type, env)
def analyse_types(self, env):
# The arg is always already analysed
return self
def generate_result_code(self, code):
code.putln('%s; %s' % (
self.arg.type.to_py_call_code(
self.arg.result(),
self.result(),
self.target_type),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class CoerceIntToBytesNode(CoerceToPyTypeNode):
# This node is used to convert a C int type to a Python bytes
# object.
is_temp = 1
def __init__(self, arg, env):
arg = arg.coerce_to_simple(env)
CoercionNode.__init__(self, arg)
self.type = Builtin.bytes_type
def generate_result_code(self, code):
arg = self.arg
arg_result = arg.result()
if arg.type not in (PyrexTypes.c_char_type,
PyrexTypes.c_uchar_type,
PyrexTypes.c_schar_type):
if arg.type.signed:
code.putln("if ((%s < 0) || (%s > 255)) {" % (
arg_result, arg_result))
else:
code.putln("if (%s > 255) {" % arg_result)
code.putln('PyErr_SetString(PyExc_OverflowError, '
'"value too large to pack into a byte"); %s' % (
code.error_goto(self.pos)))
code.putln('}')
temp = None
if arg.type is not PyrexTypes.c_char_type:
temp = code.funcstate.allocate_temp(PyrexTypes.c_char_type, manage_ref=False)
code.putln("%s = (char)%s;" % (temp, arg_result))
arg_result = temp
code.putln('%s = PyBytes_FromStringAndSize(&%s, 1); %s' % (
self.result(),
arg_result,
code.error_goto_if_null(self.result(), self.pos)))
if temp is not None:
code.funcstate.release_temp(temp)
code.put_gotref(self.py_result())
class CoerceFromPyTypeNode(CoercionNode):
# This node is used to convert a Python object
# to a C data type.
def __init__(self, result_type, arg, env):
CoercionNode.__init__(self, arg)
self.type = result_type
self.is_temp = 1
if not result_type.create_from_py_utility_code(env):
error(arg.pos,
"Cannot convert Python object to '%s'" % result_type)
if self.type.is_string or self.type.is_pyunicode_ptr:
if self.arg.is_name and self.arg.entry and self.arg.entry.is_pyglobal:
warning(arg.pos,
"Obtaining '%s' from externally modifiable global Python value" % result_type,
level=1)
def analyse_types(self, env):
# The arg is always already analysed
return self
def is_ephemeral(self):
return (self.type.is_ptr and not self.type.is_array) and self.arg.is_ephemeral()
def generate_result_code(self, code):
code.putln(self.type.from_py_call_code(
self.arg.py_result(), self.result(), self.pos, code))
if self.type.is_pyobject:
code.put_gotref(self.py_result())
def nogil_check(self, env):
error(self.pos, "Coercion from Python not allowed without the GIL")
class CoerceToBooleanNode(CoercionNode):
# This node is used when a result needs to be used
# in a boolean context.
type = PyrexTypes.c_bint_type
_special_builtins = {
Builtin.list_type: 'PyList_GET_SIZE',
Builtin.tuple_type: 'PyTuple_GET_SIZE',
Builtin.set_type: 'PySet_GET_SIZE',
Builtin.frozenset_type: 'PySet_GET_SIZE',
Builtin.bytes_type: 'PyBytes_GET_SIZE',
Builtin.unicode_type: 'PyUnicode_GET_SIZE',
}
def __init__(self, arg, env):
CoercionNode.__init__(self, arg)
if arg.type.is_pyobject:
self.is_temp = 1
def nogil_check(self, env):
if self.arg.type.is_pyobject and self._special_builtins.get(self.arg.type) is None:
self.gil_error()
gil_message = "Truth-testing Python object"
def check_const(self):
if self.is_temp:
self.not_const()
return False
return self.arg.check_const()
def calculate_result_code(self):
return "(%s != 0)" % self.arg.result()
def generate_result_code(self, code):
if not self.is_temp:
return
test_func = self._special_builtins.get(self.arg.type)
if test_func is not None:
code.putln("%s = (%s != Py_None) && (%s(%s) != 0);" % (
self.result(),
self.arg.py_result(),
test_func,
self.arg.py_result()))
else:
code.putln(
"%s = __Pyx_PyObject_IsTrue(%s); %s" % (
self.result(),
self.arg.py_result(),
code.error_goto_if_neg(self.result(), self.pos)))
class CoerceToComplexNode(CoercionNode):
def __init__(self, arg, dst_type, env):
if arg.type.is_complex:
arg = arg.coerce_to_simple(env)
self.type = dst_type
CoercionNode.__init__(self, arg)
dst_type.create_declaration_utility_code(env)
def calculate_result_code(self):
if self.arg.type.is_complex:
real_part = "__Pyx_CREAL(%s)" % self.arg.result()
imag_part = "__Pyx_CIMAG(%s)" % self.arg.result()
else:
real_part = self.arg.result()
imag_part = "0"
return "%s(%s, %s)" % (
self.type.from_parts,
real_part,
imag_part)
def generate_result_code(self, code):
pass
class CoerceToTempNode(CoercionNode):
# This node is used to force the result of another node
# to be stored in a temporary. It is only used if the
# argument node's result is not already in a temporary.
def __init__(self, arg, env):
CoercionNode.__init__(self, arg)
self.type = self.arg.type.as_argument_type()
self.constant_result = self.arg.constant_result
self.is_temp = 1
if self.type.is_pyobject:
self.result_ctype = py_object_type
gil_message = "Creating temporary Python reference"
def analyse_types(self, env):
# The arg is always already analysed
return self
def coerce_to_boolean(self, env):
self.arg = self.arg.coerce_to_boolean(env)
if self.arg.is_simple():
return self.arg
self.type = self.arg.type
self.result_ctype = self.type
return self
def generate_result_code(self, code):
#self.arg.generate_evaluation_code(code) # Already done
# by generic generate_subexpr_evaluation_code!
code.putln("%s = %s;" % (
self.result(), self.arg.result_as(self.ctype())))
if self.use_managed_ref:
if self.type.is_pyobject:
code.put_incref(self.result(), self.ctype())
elif self.type.is_memoryviewslice:
code.put_incref_memoryviewslice(self.result(),
not self.in_nogil_context)
class ProxyNode(CoercionNode):
"""
A node that should not be replaced by transforms or other means,
and hence can be useful to wrap the argument to a clone node
MyNode -> ProxyNode -> ArgNode
CloneNode -^
"""
nogil_check = None
def __init__(self, arg):
super(ProxyNode, self).__init__(arg)
self.constant_result = arg.constant_result
self._proxy_type()
def analyse_types(self, env):
self.arg = self.arg.analyse_expressions(env)
self._proxy_type()
return self
def infer_type(self, env):
return self.arg.infer_type(env)
def _proxy_type(self):
if hasattr(self.arg, 'type'):
self.type = self.arg.type
self.result_ctype = self.arg.result_ctype
if hasattr(self.arg, 'entry'):
self.entry = self.arg.entry
def generate_result_code(self, code):
self.arg.generate_result_code(code)
def result(self):
return self.arg.result()
def is_simple(self):
return self.arg.is_simple()
def may_be_none(self):
return self.arg.may_be_none()
def generate_evaluation_code(self, code):
self.arg.generate_evaluation_code(code)
def generate_disposal_code(self, code):
self.arg.generate_disposal_code(code)
def free_temps(self, code):
self.arg.free_temps(code)
class CloneNode(CoercionNode):
# This node is employed when the result of another node needs
# to be used multiple times. The argument node's result must
# be in a temporary. This node "borrows" the result from the
# argument node, and does not generate any evaluation or
# disposal code for it. The original owner of the argument
# node is responsible for doing those things.
subexprs = [] # Arg is not considered a subexpr
nogil_check = None
def __init__(self, arg):
CoercionNode.__init__(self, arg)
self.constant_result = arg.constant_result
if hasattr(arg, 'type'):
self.type = arg.type
self.result_ctype = arg.result_ctype
if hasattr(arg, 'entry'):
self.entry = arg.entry
def result(self):
return self.arg.result()
def may_be_none(self):
return self.arg.may_be_none()
def type_dependencies(self, env):
return self.arg.type_dependencies(env)
def infer_type(self, env):
return self.arg.infer_type(env)
def analyse_types(self, env):
self.type = self.arg.type
self.result_ctype = self.arg.result_ctype
self.is_temp = 1
if hasattr(self.arg, 'entry'):
self.entry = self.arg.entry
return self
def coerce_to(self, dest_type, env):
if self.arg.is_literal:
return self.arg.coerce_to(dest_type, env)
return super(CloneNode, self).coerce_to(dest_type, env)
def is_simple(self):
return True # result is always in a temp (or a name)
def generate_evaluation_code(self, code):
pass
def generate_result_code(self, code):
pass
def generate_disposal_code(self, code):
pass
def free_temps(self, code):
pass
class CMethodSelfCloneNode(CloneNode):
# Special CloneNode for the self argument of builtin C methods
# that accepts subtypes of the builtin type. This is safe only
# for 'final' subtypes, as subtypes of the declared type may
# override the C method.
def coerce_to(self, dst_type, env):
if dst_type.is_builtin_type and self.type.subtype_of(dst_type):
return self
return CloneNode.coerce_to(self, dst_type, env)
class ModuleRefNode(ExprNode):
# Simply returns the module object
type = py_object_type
is_temp = False
subexprs = []
def analyse_types(self, env):
return self
def may_be_none(self):
return False
def calculate_result_code(self):
return Naming.module_cname
def generate_result_code(self, code):
pass
class DocstringRefNode(ExprNode):
# Extracts the docstring of the body element
subexprs = ['body']
type = py_object_type
is_temp = True
def __init__(self, pos, body):
ExprNode.__init__(self, pos)
assert body.type.is_pyobject
self.body = body
def analyse_types(self, env):
return self
def generate_result_code(self, code):
code.putln('%s = __Pyx_GetAttr(%s, %s); %s' % (
self.result(), self.body.result(),
code.intern_identifier(StringEncoding.EncodedString("__doc__")),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
#------------------------------------------------------------------------------------
#
# Runtime support code
#
#------------------------------------------------------------------------------------
pyerr_occurred_withgil_utility_code= UtilityCode(
proto = """
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void); /* proto */
""",
impl = """
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void) {
int err;
#ifdef WITH_THREAD
PyGILState_STATE _save = PyGILState_Ensure();
#endif
err = !!PyErr_Occurred();
#ifdef WITH_THREAD
PyGILState_Release(_save);
#endif
return err;
}
"""
)
#------------------------------------------------------------------------------------
raise_unbound_local_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
""")
raise_closure_name_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname);
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname) {
PyErr_Format(PyExc_NameError, "free variable '%s' referenced before assignment in enclosing scope", varname);
}
""")
# Don't inline the function, it should really never be called in production
raise_unbound_memoryview_utility_code_nogil = UtilityCode(
proto = """
static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname);
""",
impl = """
static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname) {
#ifdef WITH_THREAD
PyGILState_STATE gilstate = PyGILState_Ensure();
#endif
__Pyx_RaiseUnboundLocalError(varname);
#ifdef WITH_THREAD
PyGILState_Release(gilstate);
#endif
}
""",
requires = [raise_unbound_local_error_utility_code])
#------------------------------------------------------------------------------------
raise_too_many_values_to_unpack = UtilityCode.load_cached("RaiseTooManyValuesToUnpack", "ObjectHandling.c")
raise_need_more_values_to_unpack = UtilityCode.load_cached("RaiseNeedMoreValuesToUnpack", "ObjectHandling.c")
tuple_unpacking_error_code = UtilityCode.load_cached("UnpackTupleError", "ObjectHandling.c")
|
fperez/cython
|
Cython/Compiler/ExprNodes.py
|
Python
|
apache-2.0
| 497,288
|
[
"VisIt"
] |
0883554ab3a6441e281b52858fdbf5c8ab786e14c2d11e1c0e706ca40ab10dcd
|
"""
Utilities for constructing node features or bond features.
Some functions are based on chainer-chemistry or dgl-lifesci.
Repositories:
- https://github.com/chainer/chainer-chemistry
- https://github.com/awslabs/dgl-lifesci
"""
import os
import logging
from typing import List, Union, Tuple
import numpy as np
from deepchem.utils.typing import RDKitAtom, RDKitBond, RDKitMol
logger = logging.getLogger(__name__)
DEFAULT_ATOM_TYPE_SET = [
"C",
"N",
"O",
"F",
"P",
"S",
"Cl",
"Br",
"I",
]
DEFAULT_HYBRIDIZATION_SET = ["SP", "SP2", "SP3"]
DEFAULT_TOTAL_NUM_Hs_SET = [0, 1, 2, 3, 4]
DEFAULT_FORMAL_CHARGE_SET = [-2, -1, 0, 1, 2]
DEFAULT_TOTAL_DEGREE_SET = [0, 1, 2, 3, 4, 5]
DEFAULT_RING_SIZE_SET = [3, 4, 5, 6, 7, 8]
DEFAULT_BOND_TYPE_SET = ["SINGLE", "DOUBLE", "TRIPLE", "AROMATIC"]
DEFAULT_BOND_STEREO_SET = ["STEREONONE", "STEREOANY", "STEREOZ", "STEREOE"]
DEFAULT_GRAPH_DISTANCE_SET = [1, 2, 3, 4, 5, 6, 7]
DEFAULT_ATOM_IMPLICIT_VALENCE_SET = [0, 1, 2, 3, 4, 5, 6]
DEFAULT_ATOM_EXPLICIT_VALENCE_SET = [1, 2, 3, 4, 5, 6]
class _ChemicalFeaturesFactory:
"""This is a singleton class for RDKit base features."""
_instance = None
@classmethod
def get_instance(cls):
try:
from rdkit import RDConfig
from rdkit.Chem import ChemicalFeatures
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
if not cls._instance:
fdefName = os.path.join(RDConfig.RDDataDir, 'BaseFeatures.fdef')
cls._instance = ChemicalFeatures.BuildFeatureFactory(fdefName)
return cls._instance
def one_hot_encode(val: Union[int, str],
allowable_set: Union[List[str], List[int]],
include_unknown_set: bool = False) -> List[float]:
"""One hot encoder for elements of a provided set.
Examples
--------
>>> one_hot_encode("a", ["a", "b", "c"])
[1.0, 0.0, 0.0]
>>> one_hot_encode(2, [0, 1, 2])
[0.0, 0.0, 1.0]
>>> one_hot_encode(3, [0, 1, 2])
[0.0, 0.0, 0.0]
>>> one_hot_encode(3, [0, 1, 2], True)
[0.0, 0.0, 0.0, 1.0]
Parameters
----------
val: int or str
The value must be present in `allowable_set`.
allowable_set: List[int] or List[str]
List of allowable quantities.
include_unknown_set: bool, default False
If true, the index of all values not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A one-hot vector of `val`.
If `include_unknown_set` is False, the length is `len(allowable_set)`.
If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
Raises
------
ValueError
If include_unknown_set is False and `val` is not in `allowable_set`.
"""
if include_unknown_set is False:
if val not in allowable_set:
logger.info("input {0} not in allowable set {1}:".format(
val, allowable_set))
# initialize a one-hot vector
if include_unknown_set is False:
one_hot_length = len(allowable_set)
else:
one_hot_length = len(allowable_set) + 1
one_hot = [0.0 for _ in range(one_hot_length)]
try:
one_hot[allowable_set.index(val)] = 1.0 # type: ignore
except ValueError:
if include_unknown_set:
# If include_unknown_set is True, set the last index to 1.
one_hot[-1] = 1.0
return one_hot
#################################################################
# atom (node) featurization
#################################################################
def get_atom_type_one_hot(atom: RDKitAtom,
allowable_set: List[str] = DEFAULT_ATOM_TYPE_SET,
include_unknown_set: bool = True) -> List[float]:
"""Get an one-hot feature of an atom type.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
allowable_set: List[str]
The atom types to consider. The default set is
`["C", "N", "O", "F", "P", "S", "Cl", "Br", "I"]`.
include_unknown_set: bool, default True
If true, the index of all atom not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A one-hot vector of atom types.
If `include_unknown_set` is False, the length is `len(allowable_set)`.
If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
"""
return one_hot_encode(atom.GetSymbol(), allowable_set, include_unknown_set)
def construct_hydrogen_bonding_info(mol: RDKitMol) -> List[Tuple[int, str]]:
"""Construct hydrogen bonding infos about a molecule.
Parameters
---------
mol: rdkit.Chem.rdchem.Mol
RDKit mol object
Returns
-------
List[Tuple[int, str]]
A list of tuple `(atom_index, hydrogen_bonding_type)`.
The `hydrogen_bonding_type` value is "Acceptor" or "Donor".
"""
factory = _ChemicalFeaturesFactory.get_instance()
feats = factory.GetFeaturesForMol(mol)
hydrogen_bonding = []
for f in feats:
hydrogen_bonding.append((f.GetAtomIds()[0], f.GetFamily()))
return hydrogen_bonding
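# Illustrative usage sketch (not part of the original source; assumes
# RDKit is installed). The exact tuples depend on RDKit's BaseFeatures.fdef:
# >>> from rdkit import Chem
# >>> mol = Chem.MolFromSmiles('CCO')
# >>> construct_hydrogen_bonding_info(mol)  # doctest: +SKIP
# [(2, 'Donor'), (2, 'Acceptor')]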
def get_atom_hydrogen_bonding_one_hot(
atom: RDKitAtom, hydrogen_bonding: List[Tuple[int, str]]) -> List[float]:
"""Get an one-hot feat about whether an atom accepts electrons or donates electrons.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
hydrogen_bonding: List[Tuple[int, str]]
The return value of `construct_hydrogen_bonding_info`.
The value is a list of tuple `(atom_index, hydrogen_bonding)` like (1, "Acceptor").
Returns
-------
List[float]
A one-hot vector of the hydrogen bonding type. The first element
indicates "Donor", and the second element indicates "Acceptor".
"""
one_hot = [0.0, 0.0]
atom_idx = atom.GetIdx()
for hydrogen_bonding_tuple in hydrogen_bonding:
if hydrogen_bonding_tuple[0] == atom_idx:
if hydrogen_bonding_tuple[1] == "Donor":
one_hot[0] = 1.0
elif hydrogen_bonding_tuple[1] == "Acceptor":
one_hot[1] = 1.0
return one_hot
def get_atom_is_in_aromatic_one_hot(atom: RDKitAtom) -> List[float]:
"""Get ans one-hot feature about whether an atom is in aromatic system or not.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
Returns
-------
List[float]
A vector of whether an atom is in an aromatic system or not.
"""
return [float(atom.GetIsAromatic())]
def get_atom_hybridization_one_hot(
atom: RDKitAtom,
allowable_set: List[str] = DEFAULT_HYBRIDIZATION_SET,
include_unknown_set: bool = False) -> List[float]:
"""Get an one-hot feature of hybridization type.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
allowable_set: List[str]
The hybridization types to consider. The default set is `["SP", "SP2", "SP3"]`
include_unknown_set: bool, default False
If true, the index of all types not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A one-hot vector of the hybridization type.
If `include_unknown_set` is False, the length is `len(allowable_set)`.
If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
"""
return one_hot_encode(
str(atom.GetHybridization()), allowable_set, include_unknown_set)
def get_atom_total_num_Hs_one_hot(
atom: RDKitAtom,
allowable_set: List[int] = DEFAULT_TOTAL_NUM_Hs_SET,
include_unknown_set: bool = True) -> List[float]:
"""Get an one-hot feature of the number of hydrogens which an atom has.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
allowable_set: List[int]
The number of hydrogens to consider. The default set is `[0, 1, ..., 4]`
include_unknown_set: bool, default True
If true, the index of all types not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A one-hot vector of the number of hydrogens which an atom has.
If `include_unknown_set` is False, the length is `len(allowable_set)`.
If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
"""
return one_hot_encode(atom.GetTotalNumHs(), allowable_set,
include_unknown_set)
def get_atom_chirality_one_hot(atom: RDKitAtom) -> List[float]:
"""Get an one-hot feature about an atom chirality type.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
Returns
-------
List[float]
A one-hot vector of the chirality type. The first element
indicates "R", and the second element indicates "S".
"""
one_hot = [0.0, 0.0]
try:
chiral_type = atom.GetProp('_CIPCode')
if chiral_type == "R":
one_hot[0] = 1.0
elif chiral_type == "S":
one_hot[1] = 1.0
except:
pass
return one_hot
def get_atom_formal_charge(atom: RDKitAtom) -> List[float]:
"""Get a formal charge of an atom.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
Returns
-------
List[float]
A vector of the formal charge.
"""
return [float(atom.GetFormalCharge())]
def get_atom_formal_charge_one_hot(
atom: RDKitAtom,
allowable_set: List[int] = DEFAULT_FORMAL_CHARGE_SET,
include_unknown_set: bool = True) -> List[float]:
"""Get one hot encoding of formal charge of an atom.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
allowable_set: List[int]
The degree to consider. The default set is `[-2, -1, ..., 2]`
include_unknown_set: bool, default True
If true, the index of all types not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A one-hot vector of the formal charge.
"""
return one_hot_encode(atom.GetFormalCharge(), allowable_set,
include_unknown_set)
def get_atom_partial_charge(atom: RDKitAtom) -> List[float]:
"""Get a partial charge of an atom.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
Returns
-------
List[float]
A vector of the partial charge.
Notes
-----
Before using this function, you must calculate `GasteigerCharge`
like `AllChem.ComputeGasteigerCharges(mol)`.
"""
gasteiger_charge = atom.GetProp('_GasteigerCharge')
if gasteiger_charge in ['-nan', 'nan', '-inf', 'inf']:
gasteiger_charge = 0.0
return [float(gasteiger_charge)]
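# Illustrative usage sketch (not part of the original source; assumes
# RDKit is installed). Gasteiger charges must be computed beforehand:
# >>> from rdkit import Chem
# >>> from rdkit.Chem import AllChem
# >>> mol = Chem.MolFromSmiles('CCO')
# >>> AllChem.ComputeGasteigerCharges(mol)
# >>> get_atom_partial_charge(mol.GetAtomWithIdx(0))  # doctest: +SKIP
# [-0.04...]  # a one-element list; the exact value depends on the RDKit version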
def get_atom_total_degree_one_hot(
atom: RDKitAtom,
allowable_set: List[int] = DEFAULT_TOTAL_DEGREE_SET,
include_unknown_set: bool = True) -> List[float]:
"""Get an one-hot feature of the degree which an atom has.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
allowable_set: List[int]
The degree to consider. The default set is `[0, 1, ..., 5]`
include_unknown_set: bool, default True
If true, the index of all types not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A one-hot vector of the degree which an atom has.
If `include_unknown_set` is False, the length is `len(allowable_set)`.
If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
"""
return one_hot_encode(atom.GetTotalDegree(), allowable_set,
include_unknown_set)
def get_atom_implicit_valence_one_hot(
atom: RDKitAtom,
allowable_set: List[int] = DEFAULT_ATOM_IMPLICIT_VALENCE_SET,
include_unknown_set: bool = True) -> List[float]:
"""Get an one-hot feature of implicit valence of an atom.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
allowable_set: List[int]
Atom implicit valence to consider. The default set is `[0, 1, ..., 6]`
include_unknown_set: bool, default True
If true, the index of all types not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A one-hot vector of implicit valence an atom has.
If `include_unknown_set` is False, the length is `len(allowable_set)`.
If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
"""
return one_hot_encode(atom.GetImplicitValence(), allowable_set,
include_unknown_set)
def get_atom_explicit_valence_one_hot(
atom: RDKitAtom,
allowable_set: List[int] = DEFAULT_ATOM_EXPLICIT_VALENCE_SET,
include_unknown_set: bool = True) -> List[float]:
"""Get an one-hot feature of explicit valence of an atom.
Parameters
---------
atom: rdkit.Chem.rdchem.Atom
RDKit atom object
allowable_set: List[int]
Atom explicit valence to consider. The default set is `[1, ..., 6]`
include_unknown_set: bool, default True
If true, the index of all types not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A one-hot vector of explicit valence an atom has.
If `include_unknown_set` is False, the length is `len(allowable_set)`.
If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
"""
return one_hot_encode(atom.GetExplicitValence(), allowable_set,
include_unknown_set)
#################################################################
# bond (edge) featurization
#################################################################
def get_bond_type_one_hot(bond: RDKitBond,
allowable_set: List[str] = DEFAULT_BOND_TYPE_SET,
include_unknown_set: bool = False) -> List[float]:
"""Get an one-hot feature of bond type.
Parameters
---------
bond: rdkit.Chem.rdchem.Bond
RDKit bond object
allowable_set: List[str]
The bond types to consider. The default set is `["SINGLE", "DOUBLE", "TRIPLE", "AROMATIC"]`.
include_unknown_set: bool, default False
If true, the index of all types not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A one-hot vector of the bond type.
If `include_unknown_set` is False, the length is `len(allowable_set)`.
If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
"""
return one_hot_encode(
str(bond.GetBondType()), allowable_set, include_unknown_set)
def get_bond_is_in_same_ring_one_hot(bond: RDKitBond) -> List[float]:
"""Get an one-hot feature about whether atoms of a bond is in the same ring or not.
Parameters
---------
bond: rdkit.Chem.rdchem.Bond
RDKit bond object
Returns
-------
List[float]
A one-hot vector of whether a bond is in the same ring or not.
"""
return [float(bond.IsInRing())]  # float, to match the List[float] annotation
def get_bond_is_conjugated_one_hot(bond: RDKitBond) -> List[float]:
"""Get an one-hot feature about whether a bond is conjugated or not.
Parameters
---------
bond: rdkit.Chem.rdchem.Bond
RDKit bond object
Returns
-------
List[float]
    A one-hot vector of whether the bond is conjugated.
"""
return [int(bond.GetIsConjugated())]
def get_bond_stereo_one_hot(bond: RDKitBond,
allowable_set: List[str] = DEFAULT_BOND_STEREO_SET,
include_unknown_set: bool = True) -> List[float]:
"""Get an one-hot feature of the stereo configuration of a bond.
Parameters
---------
bond: rdkit.Chem.rdchem.Bond
RDKit bond object
allowable_set: List[str]
The stereo configuration types to consider.
The default set is `["STEREONONE", "STEREOANY", "STEREOZ", "STEREOE"]`.
include_unknown_set: bool, default True
If true, the index of all types not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A one-hot vector of the stereo configuration of a bond.
If `include_unknown_set` is False, the length is `len(allowable_set)`.
If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
"""
return one_hot_encode(
str(bond.GetStereo()), allowable_set, include_unknown_set)
def get_bond_graph_distance_one_hot(
bond: RDKitBond,
graph_dist_matrix: np.ndarray,
allowable_set: List[int] = DEFAULT_GRAPH_DISTANCE_SET,
include_unknown_set: bool = True) -> List[float]:
"""Get an one-hot feature of graph distance.
Parameters
---------
bond: rdkit.Chem.rdchem.Bond
RDKit bond object
graph_dist_matrix: np.ndarray
The return value of `Chem.GetDistanceMatrix(mol)`. The shape is `(num_atoms, num_atoms)`.
allowable_set: List[int]
The graph distance types to consider. The default set is `[1, 2, ..., 7]`.
  include_unknown_set: bool, default True
If true, the index of all types not in `allowable_set` is `len(allowable_set)`.
Returns
-------
List[float]
A one-hot vector of the graph distance.
If `include_unknown_set` is False, the length is `len(allowable_set)`.
If `include_unknown_set` is True, the length is `len(allowable_set) + 1`.
"""
graph_dist = graph_dist_matrix[bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()]
return one_hot_encode(graph_dist, allowable_set, include_unknown_set)
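# A minimal usage sketch for the bond featurizers above (hypothetical molecule;
# assumes RDKit is installed):
#
#   from rdkit import Chem
#   mol = Chem.MolFromSmiles('C=CC')
#   bond = mol.GetBondWithIdx(0)                   # the double bond
#   get_bond_type_one_hot(bond)                    # 1 at the "DOUBLE" index
#   dist = Chem.GetDistanceMatrix(mol)
#   get_bond_graph_distance_one_hot(bond, dist)    # adjacent atoms -> distance 1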
|
peastman/deepchem
|
deepchem/utils/molecule_feature_utils.py
|
Python
|
mit
| 16,782
|
[
"RDKit"
] |
d5f95a0cc0e444bb331ef930bed251fa9c821f398a74b4b94f1134e31fab106e
|
import cv2, sqlite3, time, os.path, pandas, math, sys, pickle, logging, picamera
import numpy as np
logger = logging.getLogger('dispenser.recognizer')
logger.setLevel(logging.DEBUG)
# settings
TARGET_PIXEL_AREA = 1000000.0
class Recogizer:
def __init__(self):
# init camera
self.camera = picamera.PiCamera()
# read db
db = sqlite3.connect('example.db')
db.text_factory = str
c = db.cursor()
with open('mysql_create_engine_data_20150511.txt', 'r') as schema:
command = schema.read()
time.sleep(1)
# if the database hasn't been created, create it
if not os.path.isfile('example.db'):
c.execute(command)
# read into database from tab file
with open('pillbox_engine_20150511.tab', 'r') as pill_data:
df = pandas.read_csv(pill_data, delimiter='\t', low_memory=False)
df.to_sql('engine_data', db, if_exists='replace', index=False)
self.df = df
# read in pickle
if not os.path.isfile("pill_df_dict.p"):
self.pill_df_dict = self.process_img_db(df)
pickle.dump(self.pill_df_dict, open("pill_df_dict.p", "wb"))
else:
self.pill_df_dict = pickle.load(open("pill_df_dict.p", "rb"))
def take_picture(self):
filename = 'stage_image.jpg'
self.camera.capture(filename)
return cv2.imread(filename)
    def get_confidence(self, pill_name):
        compare_image = self.take_picture()
        try:
            ID_num = self.df[self.df['medicine_name'] == pill_name]['ID'].iloc[0]
        except (KeyError, IndexError):
            logger.error('Pill input name not found')
            raise
        compare_df = self.process_image(compare_image)
        if compare_df.empty:
            raise Exception("CompareDF empty")
        # pass the looked-up ID through so calc_results can rank it
        return self.calc_results(ID_num, compare_df, self.pill_df_dict)
@staticmethod
def seg_image(img):
'''
        Return the contours of the image using Canny edge detection and cv2.findContours.
'''
# grayscale and gaussian blur for adaptive thresholding
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (3, 3), 3)
# Edge Detection
edge = cv2.Canny(gray, 0, 15)
# dilate image to close contours
dil_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
dilated = cv2.dilate(edge, dil_kernel, iterations=1)
# find image contours
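        # NOTE: the 3-value unpacking below matches the OpenCV 3.x API;
        # OpenCV 4.x findContours returns only (contours, hierarchy).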
im2, contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
return (contours, hierarchy)
@staticmethod
def extract_features(img, contours, debug=False):
'''
        Filter the list of contours, keeping only those that fit potential pill
        criteria, then use each one to mask img and extract a histogram, mean
        color, and aspect ratio.
'''
return_dict = {}
height, width, depth = img.shape
# close all contours
for cnt in contours:
cnt[-1] = cnt[0]
contours = sorted(contours, key=cv2.contourArea, reverse=True)[:5]
for idx, cnt in enumerate(contours):
# re-init mask
mask = np.zeros((height, width), np.uint8)
area = cv2.contourArea(cnt)
# acquire size features of contour using extreme points rectangle
x, y, w, h = cv2.boundingRect(cnt)
aspect_ratio = float(w) / h
# if contour area is too small, discard
if area < 1500:
continue
# # if contour has too few points, discard
if len(cnt) < 5:
continue
# get aspect ratio of contour from bounding rectangle.
if aspect_ratio > 5 or aspect_ratio < 0.1:
continue
# Use convex hull to close the contour
approx = cv2.convexHull(cnt, returnPoints=True)
cnt = approx
            # fill the convex hull to build the mask
cv2.fillPoly(mask, pts=[cnt], color=(255, 255, 255))
masked_data = cv2.bitwise_and(img, img, mask=mask)
if debug:
cv2.imshow('debug', masked_data)
# image has been masked, time to collect data
mean_color = cv2.mean(masked_data, mask=mask)
hist = cv2.calcHist([img], [0], mask, [256], [0, 256])
hist = cv2.normalize(hist, hist).flatten()
# Add the data to the dataframe
return_dict[idx] = pandas.Series([aspect_ratio, mean_color, hist],
index=['aspect_ratio', 'mean_color', 'histogram'])
return pandas.DataFrame(return_dict).transpose()
# resize the image while maintaining aspect ratio
@staticmethod
def resize_img(img):
ratio = float(img.shape[1]) / float(img.shape[0])
new_h = int(math.sqrt(TARGET_PIXEL_AREA / ratio) + 0.5)
new_w = int((new_h * ratio) + 0.5)
img = cv2.resize(img, (new_w, new_h))
return img
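    # With r = w/h, new_h = sqrt(A/r) and new_w = new_h*r, so new_w*new_h ~= A:
    # every image is normalized to roughly TARGET_PIXEL_AREA pixels while
    # keeping its aspect ratio (the +0.5 rounds to the nearest integer).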
def process_image(self, img):
# trim the bottom watermark off dumbly
img = img[0:-1 - 13, 0:-1]
# resize the image to a standard area while maintaining aspect ratio
img = self.resize_img(img)
contours, hierarchy = self.seg_image(img)
# create the dataframe holding all views for a single pill product
pill_dataframe = self.extract_features(img, contours)
return pill_dataframe
def process_img_db(self, db_dataframe):
pill_df_dict = {}
# for all pills that have files
for index, row in db_dataframe.iterrows():
# read in image
filename = 'images_test/' + str(row['splimage']) + '.jpg'
img = cv2.imread(filename)
if img is None:
continue
pill_df_dict[row['ID']] = self.process_image(img)
return pill_df_dict
    def calc_results(self, ID_num, compare_df, database_df):
        # one comparison Series per pill, keyed by pill ID (built outside the loop)
        results = {}
        for idx, pill in database_df.items():
# ensure DataFrame exists
if pill.empty:
continue
# initialize comparison parameters to max
hist_corr = sys.maxsize
aspect_difference = sys.maxsize
mean_color_distance = sys.maxsize
# iterate over each pill view in image, selecting one that best fits the comparison image
for index, view in pill.iterrows():
# compare histogram correlation
comp_hist = math.fabs(cv2.compareHist(compare_df.iloc[0]['histogram'], view['histogram'], method=0) - 1)
if comp_hist < hist_corr:
hist_corr = comp_hist
# compare aspect ratio difference
comp_asp = math.fabs(compare_df.iloc[0]['aspect_ratio'] - view['aspect_ratio'])
if comp_asp < aspect_difference:
aspect_difference = comp_asp
# compare mean color distance
comp_mean = np.linalg.norm(
np.array(compare_df.iloc[0]['mean_color'][:3]) - np.array(view['mean_color'][:3]))
if comp_mean < mean_color_distance:
mean_color_distance = comp_mean
# store result as an indexed series
pill_result = pandas.Series(
[hist_corr, aspect_difference, mean_color_distance],
index=['hist_corr', 'aspect_diff', 'mean_color_diff'], dtype=np.float16)
# add result to dataframe
results[idx] = pill_result
        # build a metrics-by-pill frame and normalize each metric (row) across pills
        results = pandas.DataFrame(results)
        results_norm = results.div(results.sum(axis=1), axis=0)
# calculate net error for each pill
error_dict = {}
for idx, pill in results_norm.items():
error_dict[idx] = pill.sum(axis=0)
        # sort pill IDs by increasing error
        conf_list = [x[0] for x in sorted(error_dict.items(), key=lambda x: x[1])]
# Print confidence of "dispensed" pill as percentile
confidence = 100 * (1.00 - conf_list.index(ID_num) / float(len(conf_list)))
logger.info("Confidence is " + str(confidence) + " for pill ID " + str(ID_num))
return confidence
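# A hypothetical usage sketch (assumes a Raspberry Pi camera plus the pillbox
# database files loaded in __init__; 'Aspirin' is only an illustrative name):
#
#   recognizer = Recogizer()
#   recognizer.get_confidence('Aspirin')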
|
samclane/pill-dispenser
|
pill_recog.py
|
Python
|
mit
| 8,253
|
[
"Gaussian"
] |
74ed4db8b47ea41a53fced5797498e1bd6f07318a058147069f58d468b43305f
|
from __future__ import absolute_import, print_function
from . import common_info
from . import c_spec
#----------------------------------------------------------------------------
# The "standard" conversion classes
#----------------------------------------------------------------------------
default = [c_spec.int_converter(),
c_spec.float_converter(),
c_spec.complex_converter(),
c_spec.unicode_converter(),
c_spec.string_converter(),
c_spec.list_converter(),
c_spec.dict_converter(),
c_spec.tuple_converter(),
c_spec.file_converter(),
c_spec.instance_converter(),]
#----------------------------------------------------------------------------
# add numpy array converters to the default
# converter list.
#----------------------------------------------------------------------------
try:
from . import standard_array_spec
default.append(standard_array_spec.array_converter())
except ImportError:
pass
#----------------------------------------------------------------------------
# add numpy scalar converters to the default
# converter list.
#----------------------------------------------------------------------------
try:
from . import numpy_scalar_spec
default.append(numpy_scalar_spec.numpy_complex_scalar_converter())
except ImportError:
pass
#----------------------------------------------------------------------------
# Add VTK support
#----------------------------------------------------------------------------
try:
from . import vtk_spec
    default.insert(0, vtk_spec.vtk_converter())
except ImportError:
pass
#----------------------------------------------------------------------------
# Add "sentinal" catchall converter
#
# if everything else fails, this one is the last hope (it always works)
#----------------------------------------------------------------------------
default.append(c_spec.catchall_converter())
standard_info = [common_info.basic_module_info()]
standard_info += [x.generate_build_info() for x in default]
#----------------------------------------------------------------------------
# Blitz conversion classes
#
# same as default, but will convert numpy arrays to blitz C++ classes
#----------------------------------------------------------------------------
try:
from . import blitz_spec
blitz = [blitz_spec.array_converter()] + default
#-----------------------------------
# Add "sentinal" catchall converter
#
# if everything else fails, this one
# is the last hope (it always works)
#-----------------------------------
blitz.append(c_spec.catchall_converter())
except ImportError:
pass
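# The `default` list is consumed in order, so more specific converters (e.g.
# the VTK converter inserted at position 0) are tried before generic ones, and
# the catchall appended last always matches. A sketch of the lookup pattern,
# assuming each converter exposes weave's type_match() predicate:
#
#   def find_converter(value, converters=default):
#       for conv in converters:
#           if conv.type_match(value):
#               return conv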
|
valexandersaulys/prudential_insurance_kaggle
|
venv/lib/python2.7/site-packages/scipy/weave/converters.py
|
Python
|
gpl-2.0
| 2,703
|
[
"VTK"
] |
5456e2af18a9dba21ffa9242e32551f78925772944c943d15e2f2f6f24fa1f16
|
"""
Copyright (C) 2007-2010 Martin Laprise (mlaprise@gmail.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 dated June, 1991.
This software is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from numpy import *
from scipy import *
from scipy import integrate
from scipy import stats
from core import *
# Planck constant [J*s]
h = 6.626069E-34
# Speed of light in vacuum [m/s]
c = 2.998E8
# Boltzmann constant [J/K]
k = 1.3807E-23
# Ambient temp. in Kelvin
T = 294.2
# Yb-doped glass transition energy: E = h*c*nu for nu ~ 10000 cm^-1 (~1 um)
epsYb = h*c*(10000*100)
class linearCavity():
'''
Class representing a generic linear Rare-Earth doped fiber laser
* fiber: Doped fiber
* pumpWL: List of pump wavelength [wl1 forward, wl1 backward, wl2 forward ...]
* pumpPower: List of pump power [pw1 forward, pw1 backward, pw2 forward ...]
    * nbrSections: Number of longitudinal sections
* aseRes: Resolution of the ASE spectrum
'''
def __init__(self, fiber, pumpWL, pumpPower, fbg1, fbg2, nbrSections = 100, aseRes = 100):
self.dopedFiber = fiber
        if isinstance(fiber, (YbDopedFiber, YbDopedDCOF)) or (isinstance(fiber, ErDopedFiber) and min(pumpWL) > 1.450):  # in-band (>1.45 um) Er pumping is treated as a signal
self.nbrSignal = len(pumpWL)
self.nbrPump = 0
self.pumpWL = 0.980
self.signalWL = pumpWL
elif isinstance(fiber, ErDopedFiber):
self.nbrSignal = 0
self.nbrPump = len(pumpWL)
self.pumpWL = pumpWL
self.signalWL = 1.55
else:
raise TypeError
self.nbrAse = aseRes
self.aseWL = linspace(self.dopedFiber.wlMin, self.dopedFiber.wlMax, aseRes)
self.aseDeltaLambda = self.aseWL[1] - self.aseWL[0]
self.delta_nu = abs(-c/(pow(self.aseWL*1E-6,2))*self.aseDeltaLambda*1e-6)
self.fbg1 = fbg1
self.fbg2 = fbg2
#self.R1 = fbg1.reflectivity(self.aseWL)
#self.R2 = fbg2.reflectivity(self.aseWL)
# Construct the reflectivity curve with the different FBGs
self.R1 = zeros(aseRes) + 0.04
if isinstance(fbg1, list):
for fbg in fbg1:
wlDiff1 = abs(self.aseWL-fbg.braggWavelength)
self.peakR1 = where(wlDiff1==wlDiff1.min())
self.R1[self.peakR1] = fbg.maxReflectivity()
else:
wlDiff1 = abs(self.aseWL-fbg1.braggWavelength)
self.peakR1 = where(wlDiff1==wlDiff1.min())
self.R1[self.peakR1] = fbg1.maxReflectivity()
self.R2 = zeros(aseRes) + 0.04
if isinstance(fbg2, list):
self.peakR2 = self.peakR1
for fbg in fbg2:
wlDiff2 = abs(self.aseWL-fbg.braggWavelength)
self.peakR2 = where(wlDiff2==wlDiff2.min())
self.R2[self.peakR2] = fbg.maxReflectivity()
else:
wlDiff2 = abs(self.aseWL-fbg2.braggWavelength)
self.peakR2 = where(wlDiff2==wlDiff2.min())
self.R2[self.peakR2] = fbg2.maxReflectivity()
[self.sigma_em_p, self.sigma_abs_p] = fiber.crossSection(self.pumpWL)
[self.sigma_em_s, self.sigma_abs_s] = fiber.crossSection(self.signalWL)
[self.sigma_em_ase, self.sigma_abs_ase] = fiber.crossSection(self.aseWL)
self.alpha_s = fiber.bgLoss(self.signalWL)
self.alpha_p = fiber.bgLoss(self.pumpWL)
self.alpha_ase = fiber.bgLoss(self.aseWL)
self.nbrSections = nbrSections
self.z = linspace(0,fiber.length,nbrSections)
self.dz = self.dopedFiber.length / nbrSections
self.P_ase_f = zeros([self.nbrAse, nbrSections])
self.P_ase_b = zeros([self.nbrAse, nbrSections])
self.P_p_f = zeros([self.nbrPump, nbrSections])
self.P_p_b = zeros([self.nbrPump, nbrSections])
self.P_s_f = zeros([self.nbrSignal, nbrSections])
self.P_s_b = zeros([self.nbrSignal, nbrSections])
self.output = zeros(self.nbrAse)
self.N2 = zeros(nbrSections)
self.N1 = zeros(nbrSections)
        # Initial conditions
self.P_ase_f[:,0] = 0.0
self.P_ase_b[:,-1] = 0.0
        if isinstance(fiber, (YbDopedFiber, YbDopedDCOF)) or (isinstance(fiber, ErDopedFiber) and min(pumpWL) > 1.450):
self.P_s_f[:,0] = pumpPower[0:self.nbrSignal]
self.P_s_b[:,-1] = pumpPower[self.nbrSignal:2*self.nbrSignal]
elif isinstance(fiber, ErDopedFiber):
self.P_p_f[:,0] = pumpPower[0:self.nbrPump]
self.P_p_b[:,-1] = pumpPower[self.nbrPump:2*self.nbrPump]
else:
raise TypeError
self.error = 1.0
def info(self):
print
def __repr__(self):
return "Generic linear Rare-Earth doped fiber laser"
def set_init_pumpPower(self, pumpPower):
'''
Set the initial condition for the pumpPower for each pump
'''
self.P_p_f[:,0] = pumpPower
def get_pumpPower(self):
'''
Get the pump power
'''
return [self.P_p_f, self.P_p_b]
def get_outputPower(self,units='linear'):
integral = integrate.simps(self.output)
outputPower = {
'linear': lambda: integral,
'dBm': lambda:10*log10(integral),
}[units]()
return outputPower
def get_signalPower(self):
'''
Get the signal power
'''
return [self.P_s_f, self.P_s_b]
def get_asePower(self):
'''
Get the ase power
'''
return [self.P_ase_f, self.P_ase_b]
def get_outputSpectrum(self, units='linear'):
'''
        Integrate the ASE signal and return the spectrum in both directions
'''
[ase_f, ase_b] = {
'linear': lambda: [self.P_ase_f[:,-1]*(1-self.R2), self.P_ase_b[:,0]*(1-self.R1)],
'dBm': lambda:[10*log10(self.P_ase_f[:,-1]*(1-self.R2)), 10*log10(self.P_ase_b[:,0]*(1-self.R1))],
}[units]()
return [ase_f, ase_b]
def set_init_signalPower(self, signalPower):
'''
Set the initial condition for the pumpPower for each signal
'''
self.P_s_f[:,0] = signalPower
def set_init_asePower(self, asePower):
'''
Set the initial condition for the pumpPower for each ASE signal
'''
self.P_ase_f[:,0] = asePower
def set_wl_range(self, minWL, maxWL):
'''
Set the wavelength range of the simulation
'''
self.aseWL = linspace(minWL, maxWL, self.nbrAse)
self.aseDeltaLambda = self.aseWL[1] - self.aseWL[0]
self.R1 = self.fbg1.reflectivity(self.aseWL)
self.R2 = self.fbg2.reflectivity(self.aseWL)
def invSptProfil(self):
'''
        Compute the population inversion spatial profile
'''
N2 = zeros(self.nbrSections)
N1 = zeros(self.nbrSections)
pWL = self.pumpWL
sWL = self.signalWL
aseWL = self.aseWL
# Construct the transition rate factor 1->3
W13 = zeros(self.nbrSections)
for m in arange(self.nbrPump):
W13 += (self.sigma_abs_p[m] * (self.P_p_f[m,:]/(self.dopedFiber.width(pWL[m])*1E-12))) / (h*c/(pWL[m]*1E-6))
for v in arange(self.nbrPump):
W13 += (self.sigma_abs_p[v] * (self.P_p_b[v,:]/(self.dopedFiber.width(pWL[v])*1E-12))) / (h*c/(pWL[v]*1E-6))
# Construct the transition rate factor 2->1
W21 = 0.0
for l in arange(self.nbrSignal):
W21 += (self.sigma_em_s[l] * (self.P_s_f[l,:]/(self.dopedFiber.width(sWL[l])*1E-12))) / (h*c/(sWL[l]*1E-6))
for u in arange(self.nbrSignal):
W21 += (self.sigma_em_s[u] * (self.P_s_b[u,:]/(self.dopedFiber.width(sWL[u])*1E-12))) / (h*c/(sWL[u]*1E-6))
for n in arange(self.nbrAse):
W21 += (self.sigma_em_ase[n] * (self.P_ase_f[n,:]/(self.dopedFiber.width(aseWL[n])*1E-12))) / (h*c/(aseWL[n]*1E-6))
for v in arange(self.nbrAse):
W21 += (self.sigma_em_ase[v] * (self.P_ase_b[v,:]/(self.dopedFiber.width(aseWL[v])*1E-12))) / (h*c/(aseWL[v]*1E-6))
# Construct the transition rate factor 1->2
W12 = 0.0
for l in arange(self.nbrSignal):
W12 += (self.sigma_abs_s[l] * (self.P_s_f[l,:]/(self.dopedFiber.width(sWL[l])*1E-12))) / (h*c/(sWL[l]*1E-6))
for u in arange(self.nbrSignal):
W12 += (self.sigma_abs_s[u] * (self.P_s_b[u,:]/(self.dopedFiber.width(sWL[u])*1E-12))) / (h*c/(sWL[u]*1E-6))
for n in arange(self.nbrAse):
W12 += (self.sigma_abs_ase[n] * (self.P_ase_f[n,:]/(self.dopedFiber.width(aseWL[n])*1E-12))) / (h*c/(aseWL[n]*1E-6))
for v in arange(self.nbrAse):
W12 += (self.sigma_abs_ase[v] * (self.P_ase_b[v,:]/(self.dopedFiber.width(aseWL[v])*1E-12))) / (h*c/(aseWL[v]*1E-6))
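        # Steady state of the two-level rate equations,
        #   dN2/dt = (W12 + W13)*N1 - (W21 + 1/tau)*N2 = 0 with N1 + N2 = concDopant,
        # gives N2 = concDopant*(W12 + W13)/(1/tau + W21 + W12 + W13), computed below.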
# Compute the level population
N2 = self.dopedFiber.concDopant * ( (W13 + W12) / ((1/self.dopedFiber.tau) + W21 + W12 + W13) )
N1 = self.dopedFiber.concDopant - N2
self.N1 = N1
self.N2 = N2
return [N2, N1]
def simulate(self, direction=1, backwardOutput=False):
def dPdz(w, z, sigma_abs_p, sigma_em_s, sigma_abs_s,
sigma_abs_ase, sigma_em_ase, Fiber, pWL, sWL, aseWL,
alpha_s, alpha_p, alpha_ase, delta_nu):
'''
            RHS of the ODE system
'''
P_s_f = w[0:self.nbrSignal]
P_s_b = w[self.nbrSignal:2*self.nbrSignal]
P_p_f = w[2*self.nbrSignal:2*self.nbrSignal+self.nbrPump]
P_p_b = w[2*self.nbrSignal+self.nbrPump:2*self.nbrSignal+2*self.nbrPump]
P_ase_f = w[2*self.nbrSignal+2*self.nbrPump:2*self.nbrSignal+2*self.nbrPump+self.nbrAse]
P_ase_b = w[2*self.nbrSignal+2*self.nbrPump+self.nbrAse:2*self.nbrSignal+2*self.nbrPump+2*self.nbrAse]
# Construct the transition rate factor 1->3
W13 = 0.0
for m in arange(self.nbrPump):
W13 += (sigma_abs_p[m] * (P_p_f[m]/(Fiber.width(pWL[m])*1E-12))) / (h*c/(pWL[m]*1E-6))
for v in arange(self.nbrPump):
W13 += (sigma_abs_p[v] * (P_p_b[v]/(Fiber.width(pWL[v])*1E-12))) / (h*c/(pWL[v]*1E-6))
# Construct the transition rate factor 2->1
W21 = 0.0
for l in arange(self.nbrSignal):
W21 += (sigma_em_s[l] * (P_s_f[l]/(Fiber.width(sWL[l])*1E-12))) / (h*c/(sWL[l]*1E-6))
for u in arange(self.nbrSignal):
W21 += (sigma_em_s[u] * (P_s_b[u]/(Fiber.width(sWL[u])*1E-12))) / (h*c/(sWL[u]*1E-6))
for n in arange(self.nbrAse):
W21 += (sigma_em_ase[n] * (P_ase_f[n]/(Fiber.width(aseWL[n])*1E-12))) / (h*c/(aseWL[n]*1E-6))
for v in arange(self.nbrAse):
W21 += (sigma_em_ase[v] * (P_ase_b[v]/(Fiber.width(aseWL[v])*1E-12))) / (h*c/(aseWL[v]*1E-6))
# Construct the transition rate factor 1->2
W12 = 0.0
for l in arange(self.nbrSignal):
W12 += (sigma_abs_s[l] * (P_s_f[l]/(Fiber.width(sWL[l])*1E-12))) / (h*c/(sWL[l]*1E-6))
for u in arange(self.nbrSignal):
W12 += (sigma_abs_s[u] * (P_s_b[u]/(Fiber.width(sWL[u])*1E-12))) / (h*c/(sWL[u]*1E-6))
for n in arange(self.nbrAse):
W12 += (sigma_abs_ase[n] * (P_ase_f[n]/(Fiber.width(aseWL[n])*1E-12))) / (h*c/(aseWL[n]*1E-6))
for v in arange(self.nbrAse):
W12 += (sigma_abs_ase[v] * (P_ase_b[v]/(Fiber.width(aseWL[v])*1E-12))) / (h*c/(aseWL[v]*1E-6))
# Compute the level population
N2 = Fiber.concDopant * ( (W13 + W12) / ((1/Fiber.tau) + W21 + W12 + W13) )
N1 = Fiber.concDopant - N2
P = zeros(2*self.nbrSignal+2*self.nbrPump+2*self.nbrAse)
i = 0
# Signal Power
for l in arange(self.nbrSignal):
P[i] = sign(direction)*(sigma_em_s[l]*N2 - sigma_abs_s[l]*N1 - alpha_s) * P_s_f[l] * Fiber.pumpOverlap(sWL[l])
i += 1
for u in arange(self.nbrSignal):
P[i] = -sign(direction)*(sigma_em_s[u]*N2 - sigma_abs_s[u]*N1 - alpha_s) * P_s_b[u] * Fiber.pumpOverlap(sWL[u])
i += 1
# Pump Power
for m in arange(self.nbrPump):
P[i] = sign(direction)*(-sigma_abs_p[m]*N1 - alpha_p) * P_p_f[m] * Fiber.pumpOverlap(pWL[m])
i += 1
for v in arange(self.nbrPump):
P[i] = -sign(direction)*(-sigma_abs_p[v]*N1 - alpha_p) * P_p_b[v] * Fiber.pumpOverlap(pWL[v])
i += 1
# ASE Power
for n in arange(self.nbrAse):
P[i] = sign(direction)*(sigma_em_ase[n]*N2 - sigma_abs_ase[n]*N1 - alpha_ase) * P_ase_f[n] * Fiber.modeOverlap(aseWL[n])
P[i] += sign(direction)*2*(h*c/(aseWL[n]*1E-6)) * delta_nu[n] * sigma_em_ase[n]*N2 * Fiber.modeOverlap(aseWL[n])
i += 1
for v in arange(self.nbrAse):
P[i] = -sign(direction)*(sigma_em_ase[v]*N2 - sigma_abs_ase[v]*N1 - alpha_ase) * P_ase_b[v] * Fiber.modeOverlap(aseWL[v])
P[i] += -sign(direction)*2*(h*c/(aseWL[v]*1E-6)) * delta_nu[v] * sigma_em_ase[v]*N2 * Fiber.modeOverlap(aseWL[v])
i += 1
return P
arguments = (self.sigma_abs_p, self.sigma_em_s, self.sigma_abs_s,
self.sigma_abs_ase, self.sigma_em_ase, self.dopedFiber,
self.pumpWL, self.signalWL, self.aseWL,
self.alpha_s, self.alpha_p, self.alpha_ase, self.delta_nu)
        # Set the initial conditions and solve the ODE system
if sign(direction) == 1:
w0 = r_[self.P_s_f[:,0],self.P_s_b[:,0],self.P_p_f[:,0],self.P_p_b[:,0],self.P_ase_b[:,0]*self.R1,self.P_ase_b[:,0]]
else:
w0 = r_[self.P_s_f[:,-1],self.P_s_b[:,-1],self.P_p_f[:,-1],self.P_p_b[:,-1],self.P_ase_f[:,-1],self.P_ase_b[:,-1]]
solution = integrate.odeint(dPdz, w0, self.z, args=arguments)
self.P_s_f = solution[:,0:self.nbrSignal].T
self.P_p_f = solution[:,2*self.nbrSignal:2*self.nbrSignal+self.nbrPump].T
self.P_ase_f = solution[:,2*self.nbrSignal+2*self.nbrPump:2*self.nbrSignal+2*self.nbrPump+self.nbrAse].T
def simulateBackward(self, direction=1):
'''
        Propagate the signal in the backward direction using the populations
        found in the previous forward iteration. Since N2 and N1 are held
        constant, each equation can be solved by direct integration.
'''
        # Get the initial conditions
Pp_ini = self.P_p_b[:,-1]
Ps_ini = self.P_s_b[:,-1]
Pase_ini = self.P_ase_f[:,-1]*self.R2
self.invSptProfil()
for m in arange(self.nbrPump):
integrant = sign(direction)*(-self.sigma_abs_p[m]*self.N1[::-1] - self.alpha_p) * self.dopedFiber.pumpOverlap(self.pumpWL[m])
self.P_p_b[m,::-1] = r_[Pp_ini[m], Pp_ini[m]*exp(integrate.cumtrapz(integrant, self.z))]
for l in arange(self.nbrSignal):
integrant = sign(direction)*(self.sigma_em_s[l]*self.N2[::-1] - self.sigma_abs_s[l]*self.N1[::-1] - self.alpha_s)
integrant *= self.dopedFiber.pumpOverlap(self.signalWL[l])
self.P_s_b[l,::-1] = r_[Ps_ini[l], Ps_ini[l]*exp(integrate.cumtrapz(integrant, self.z))]
for v in arange(self.nbrAse):
integrant = sign(direction)*(self.sigma_em_ase[v]*self.N2[::-1] - self.sigma_abs_ase[v]*self.N1[::-1] - self.alpha_ase)
integrant *= self.dopedFiber.modeOverlap(self.aseWL[v])
integrant2 = sign(direction)*2*(h*c/(self.aseWL[v]*1E-6)) * self.delta_nu[v] * self.sigma_em_ase[v]*self.N2[::-1]
integrant2 *= self.dopedFiber.modeOverlap(self.aseWL[v])
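            # dP/dz = a(z)*P + b(z) solved with an integrating factor:
            #   P(z) = exp(int a dz)*(P0 + int b*exp(-int a) dz);
            # solTerme1 is the homogeneous factor, solTerme2 the particular term.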
sol = integrate.cumtrapz(integrant, self.z)
solTerme1 = exp(sol)
solTerme1b = r_[1.0, exp(-sol)]
solTerme2 = solTerme1 * integrate.cumtrapz(integrant2*solTerme1b, self.z)
self.P_ase_b[v,::-1] = r_[Pase_ini[v], Pase_ini[v]*solTerme1 + solTerme2]
def run(self, errorTol, nbrItrMax, avgVar = 5, errorOutput = False, verbose = False):
'''
        Compute the laser output by solving the ODE system with a relaxation method
'''
i = 0
normVar = 1.0 + errorTol
if nbrItrMax < avgVar:
nbrItrMax = avgVar
outputError = zeros(nbrItrMax)
self.simulate()
for itr in arange(avgVar):
self.simulateBackward()
self.simulate()
previousOutput = self.output
self.output = self.P_ase_f[:,-1]*(1-self.R2)
outputError[i] = chi2(previousOutput, self.output)
if verbose:
print outputError[i]
i += 1
        while i < nbrItrMax and outputError[i-1] > errorTol:  # iterate until converged or out of budget
self.simulateBackward()
self.simulate()
previousOutput = self.output
self.output = self.P_ase_f[:,-1]*(1-self.R2)
outputError[i] = chi2(previousOutput, self.output)
if verbose:
print outputError[i]
i += 1
if errorOutput:
return outputError
|
mlaprise/PyOFTK
|
PyOFTK/cavity.py
|
Python
|
gpl-2.0
| 15,373
|
[
"ASE"
] |
3a6e59c2bd28ab478761086c03dc196cf661e44dcc40db3e2fca0aad90b8af13
|
#-------------------------------------------------------------------------------
# rbtlib: root.py
#
# Get the Root List Resource from a Review Board instance.
#-------------------------------------------------------------------------------
# The MIT License (MIT)
# Copyright (c) 2016 Brian Minard
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#-------------------------------------------------------------------------------
import collections
from resource import ResourceFactory
class Root(ResourceFactory):
"""The Root List Resource for the Review Board instance.
A helper class, requiring the fully-qualified domain name and URI scheme
used by the Review Board instance to query. The caller needn't specify the
entire URL to the Root List Resource.
Other Web API resources should rely upon the parent class.
Attributes:
session: the HTTP session.
url: the URL defining the resource location.
name: the resource name.
"""
name = 'root'
def __init__(self, session, url):
"""Construct a Root List Resource."""
super(Root, self).__init__(session, self.name, url + '/api/', 'GET')
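# A minimal usage sketch (hypothetical host; `session` is whatever HTTP session
# object ResourceFactory expects, e.g. a requests.Session):
#
#   root = Root(session, 'https://reviews.example.com')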
|
bminard/rbtlib
|
rbtlib/root.py
|
Python
|
mit
| 2,173
|
[
"Brian"
] |
d97fa5c2e707e2eb71f21e8258fb07729fcd84edcdbc2ce131bbc76f1b17f090
|
#
# plotting.py
#
# Some useful, not very generic plotting functions
#
# Copyright (C) 2012 Lukas Solanka <l.solanka@sms.ed.ac.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from matplotlib.pyplot import *
from brian import *
from analysis.signal import butterHighPass
small_plot_figsize = (3.75, 2.75)
small_plot_axsize = [0.3, 0.15, 0.65, 0.80]
small_plot_fontsize = 16
small_plot_texsize = 25
raster_bin_size = 2e-3
rcParams['font.size'] = small_plot_fontsize
def set_axis_params(ax):
ax.tick_params(direction='out', length=6, zorder=0)
ax.tick_params(bottom=True, top=False, left=True, right=False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.margins(0.05, tight=False)
def phaseFigTemplate():
f = figure(figsize=small_plot_figsize)
axes(small_plot_axsize)
set_axis_params(gca())
xlim([-np.pi, np.pi])
xticks([-np.pi, 0, np.pi], ('$-\pi$', '', '$\pi$'), fontsize=25)
return f
def rasterPhasePlot(phases, trials, ntrials):
f = phaseFigTemplate()
plot(phases, trials, 'k|', markeredgewidth=3)
ylabel('Trial')
ylim([-1, ntrials+1])
yticks([0, ntrials])
return f
def firingRateBarPlot(stim_freq_list, F_mean_vec, F_std_vec):
    f = figure(figsize=(2.5, 4))
ax = axes(small_plot_axsize)
bar(range(len(stim_freq_list)), F_mean_vec, color='k',
yerr=F_std_vec, ecolor='k', align='center', width=0.8)
xticks(range(len(stim_freq_list)), stim_freq_list)
gca().tick_params(bottom=True, top=False, left=True, right=False)
gca().spines['top'].set_visible(False)
gca().spines['right'].set_visible(False)
xlabel('Stim. freq. (Hz)')
ylabel('F. rate (Hz)')
ylim([0, max(F_std_vec+F_mean_vec)+10])
return f
def printAndSaveTraces(spikeMon_e, spikeMon_i, stateMon_e, stateMon_i,
stateMon_Iclamp_e, stateMon_Iclamp_i, stateMon_Iext_e, stateMon_Iext_i,
options, output_fname, x_lim):
figure()
ax = subplot(211)
plot(stateMon_e.times, stateMon_e.values[0:2].T/mV)
ylabel('E membrane potential (mV)')
subplot(212, sharex=ax)
plot(stateMon_i.times, stateMon_i.values[0:2].T/mV)
xlabel('Time (s)')
ylabel('I membrane potential (mV)')
xlim(x_lim)
savefig(output_fname + '_Vm.pdf')
figure()
ax = subplot(211)
plot(stateMon_Iclamp_e.times, stateMon_Iclamp_e.values[0:2].T/pA + \
stateMon_Iext_e.values[0:2].T/pA)
ylabel('E synaptic current (pA)')
subplot(212, sharex=ax)
plot(stateMon_Iclamp_i.times, stateMon_Iclamp_i.values[0:2].T/pA + \
stateMon_Iext_i.values[0:2].T/pA)
xlabel('Time (s)')
ylabel('I synaptic current (pA)')
xlim(x_lim)
savefig(output_fname + '_Isyn.pdf')
# High pass filter these signals
figure()
ax = subplot(211)
plot(stateMon_Iclamp_e.times, butterHighPass(stateMon_Iclamp_e.values[0].T/pA, options.sim_dt, 40))
#plot(stateMon_Iclamp_e.times, stateMon_Iext_e.values[0]/pA)
ylabel('E current (pA)')
ylim([-500, 500])
subplot(212, sharex=ax)
plot(stateMon_Iclamp_i.times, butterHighPass(stateMon_Iclamp_i.values[0].T/pA, options.sim_dt, 40))
#plot(stateMon_Iclamp_i.times, stateMon_Iext_i.values[0]/pA)
xlabel('Time (s)')
ylabel('I current (pA)')
xlim(x_lim)
ylim([-500, 500])
savefig(output_fname + '_Isyn_filt.pdf')
def printFiringRatesBar(Favg_e, Favg_i, mean_e, mean_i, output_fname):
f = figure()
subplot(121)
h = hist(Favg_e, 20)
xlabel('E f. rate (Hz)')
ylabel('Count')
title('Average: ' + str(mean_e) + ' Hz')
subplot(122)
hist(Favg_i, 20)
xlabel('I f. rate (Hz)')
title('Average: ' + str(mean_i) + ' Hz')
savefig(output_fname + '_Fhist.pdf')
|
MattNolanLab/ei-attractor
|
grid_cell_model/otherpkg/plotting.py
|
Python
|
gpl-3.0
| 4,458
|
[
"Brian"
] |
d773b9e597290d54970293c14ac775543cf2cd9078dc517c3b14a4a88fc58277
|
# Zulip's OpenAPI-based API documentation system is documented at
# https://zulip.readthedocs.io/en/latest/documentation/api.html
#
# This file defines the Python code examples that appear in Zulip's
# REST API documentation, and also contains a system for running the
# example code as part of the `tools/test-api` test suite.
#
# The actual documentation appears within these blocks:
# # {code_example|start}
# Code here
# # {code_example|end}
#
# The surrounding code is test setup logic.
import json
import os
import sys
from functools import wraps
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, TypeVar, cast
from zulip import Client
from zerver.lib import mdiff
from zerver.models import get_realm, get_user
from zerver.openapi.openapi import validate_against_openapi_schema
ZULIP_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
TEST_FUNCTIONS: Dict[str, Callable[..., object]] = {}
REGISTERED_TEST_FUNCTIONS: Set[str] = set()
CALLED_TEST_FUNCTIONS: Set[str] = set()
FuncT = TypeVar("FuncT", bound=Callable[..., object])
def openapi_test_function(endpoint: str) -> Callable[[FuncT], FuncT]:
"""This decorator is used to register an OpenAPI test function with
its endpoint. Example usage:
@openapi_test_function("/messages/render:post")
def ...
"""
def wrapper(test_func: FuncT) -> FuncT:
@wraps(test_func)
def _record_calls_wrapper(*args: object, **kwargs: object) -> object:
CALLED_TEST_FUNCTIONS.add(test_func.__name__)
return test_func(*args, **kwargs)
REGISTERED_TEST_FUNCTIONS.add(test_func.__name__)
TEST_FUNCTIONS[endpoint] = _record_calls_wrapper
return cast(FuncT, _record_calls_wrapper) # https://github.com/python/mypy/issues/1927
return wrapper
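# The registry maps "endpoint:method" keys to the wrapped test functions, so a
# runner can invoke the example for a single endpoint directly (sketch):
#
#   TEST_FUNCTIONS['/users/me/subscriptions:post'](client)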
def ensure_users(ids_list: List[int], user_names: List[str]) -> None:
# Ensure that the list of user ids (ids_list)
# matches the users we want to refer to (user_names).
realm = get_realm("zulip")
user_ids = [get_user(name + '@zulip.com', realm).id for name in user_names]
assert ids_list == user_ids
@openapi_test_function("/users/me/subscriptions:post")
def add_subscriptions(client: Client) -> None:
# {code_example|start}
# Subscribe to the stream "new stream"
result = client.add_subscriptions(
streams=[
{
'name': 'new stream',
'description': 'New stream for testing',
},
],
)
# {code_example|end}
validate_against_openapi_schema(result, '/users/me/subscriptions', 'post',
'200_0')
# {code_example|start}
# To subscribe other users to a stream, you may pass
# the `principals` argument, like so:
user_id = 25
result = client.add_subscriptions(
streams=[
{'name': 'new stream', 'description': 'New stream for testing'},
],
principals=[user_id],
)
# {code_example|end}
assert result['result'] == 'success'
assert 'newbie@zulip.com' in result['subscribed']
def test_add_subscriptions_already_subscribed(client: Client) -> None:
result = client.add_subscriptions(
streams=[
{'name': 'new stream', 'description': 'New stream for testing'},
],
principals=['newbie@zulip.com'],
)
validate_against_openapi_schema(result, '/users/me/subscriptions', 'post',
'200_1')
def test_authorization_errors_fatal(client: Client, nonadmin_client: Client) -> None:
client.add_subscriptions(
streams=[
{'name': 'private_stream'},
],
)
stream_id = client.get_stream_id('private_stream')['stream_id']
client.call_endpoint(
f'streams/{stream_id}',
method='PATCH',
request={'is_private': True},
)
result = nonadmin_client.add_subscriptions(
streams=[
{'name': 'private_stream'},
],
authorization_errors_fatal=False,
)
validate_against_openapi_schema(result, '/users/me/subscriptions', 'post',
'400_0')
result = nonadmin_client.add_subscriptions(
streams=[
{'name': 'private_stream'},
],
authorization_errors_fatal=True,
)
validate_against_openapi_schema(result, '/users/me/subscriptions', 'post',
'400_1')
@openapi_test_function("/users/{email}/presence:get")
def get_user_presence(client: Client) -> None:
# {code_example|start}
# Get presence information for "iago@zulip.com"
result = client.get_user_presence('iago@zulip.com')
# {code_example|end}
validate_against_openapi_schema(result, '/users/{email}/presence', 'get', '200')
@openapi_test_function("/users/me/presence:post")
def update_presence(client: Client) -> None:
request = {
'status': 'active',
'ping_only': False,
'new_user_input': False,
}
result = client.update_presence(request)
assert result['result'] == 'success'
@openapi_test_function("/users:post")
def create_user(client: Client) -> None:
# {code_example|start}
# Create a user
request = {
'email': 'newbie@zulip.com',
'password': 'temp',
'full_name': 'New User',
}
result = client.create_user(request)
# {code_example|end}
validate_against_openapi_schema(result, '/users', 'post', '200')
# Test "Email already used error"
result = client.create_user(request)
validate_against_openapi_schema(result, '/users', 'post', '400')
@openapi_test_function("/users:get")
def get_members(client: Client) -> None:
# {code_example|start}
# Get all users in the realm
result = client.get_members()
# {code_example|end}
validate_against_openapi_schema(result, '/users', 'get', '200')
members = [m for m in result['members'] if m['email'] == 'newbie@zulip.com']
assert len(members) == 1
newbie = members[0]
assert not newbie['is_admin']
assert newbie['full_name'] == 'New User'
# {code_example|start}
# You may pass the `client_gravatar` query parameter as follows:
result = client.get_members({'client_gravatar': True})
# {code_example|end}
validate_against_openapi_schema(result, '/users', 'get', '200')
assert result['members'][0]['avatar_url'] is None
# {code_example|start}
# You may pass the `include_custom_profile_fields` query parameter as follows:
result = client.get_members({'include_custom_profile_fields': True})
# {code_example|end}
validate_against_openapi_schema(result, '/users', 'get', '200')
for member in result['members']:
if member["is_bot"]:
assert member.get('profile_data', None) is None
else:
assert member.get('profile_data', None) is not None
@openapi_test_function("/users/{user_id}:get")
def get_single_user(client: Client) -> None:
# {code_example|start}
# Fetch details on a user given a user ID
user_id = 8
result = client.get_user_by_id(user_id)
# {code_example|end}
validate_against_openapi_schema(result, '/users/{user_id}', 'get', '200')
# {code_example|start}
# If you'd like data on custom profile fields, you can request them as follows:
result = client.get_user_by_id(user_id, include_custom_profile_fields=True)
# {code_example|end}
validate_against_openapi_schema(result, '/users/{user_id}', 'get', '200')
@openapi_test_function("/users/{user_id}:delete")
def deactivate_user(client: Client) -> None:
# {code_example|start}
# Deactivate a user
user_id = 8
result = client.deactivate_user_by_id(user_id)
# {code_example|end}
validate_against_openapi_schema(result, '/users/{user_id}', 'delete', '200')
@openapi_test_function("/users/{user_id}/reactivate:post")
def reactivate_user(client: Client) -> None:
# {code_example|start}
# Reactivate a user
user_id = 8
result = client.reactivate_user_by_id(user_id)
# {code_example|end}
validate_against_openapi_schema(result, '/users/{user_id}/reactivate', 'post', '200')
@openapi_test_function("/users/{user_id}:patch")
def update_user(client: Client) -> None:
# {code_example|start}
# Change a user's full name.
user_id = 10
result = client.update_user_by_id(user_id, full_name = "New Name")
# {code_example|end}
validate_against_openapi_schema(result, '/users/{user_id}', 'patch', '200')
# {code_example|start}
# Change value of the custom profile field with ID 9.
user_id = 8
result = client.update_user_by_id(user_id, profile_data = [{'id': 9, 'value': 'some data'}])
# {code_example|end}
validate_against_openapi_schema(result, '/users/{user_id}', 'patch', '400')
@openapi_test_function("/users/{user_id}/subscriptions/{stream_id}:get")
def get_subscription_status(client: Client) -> None:
# {code_example|start}
# Check whether a user is a subscriber to a given stream.
user_id = 7
stream_id = 1
result = client.call_endpoint(
url=f'/users/{user_id}/subscriptions/{stream_id}',
method='GET',
)
# {code_example|end}
validate_against_openapi_schema(result, '/users/{user_id}/subscriptions/{stream_id}', 'get', '200')
@openapi_test_function("/realm/filters:get")
def get_realm_filters(client: Client) -> None:
# {code_example|start}
# Fetch all the filters in this organization
result = client.get_realm_filters()
# {code_example|end}
validate_against_openapi_schema(result, '/realm/filters', 'get', '200')
@openapi_test_function("/realm/profile_fields:get")
def get_realm_profile_fields(client: Client) -> None:
# {code_example|start}
# Fetch all the custom profile fields in the user's organization.
result = client.call_endpoint(
url='/realm/profile_fields',
method='GET',
)
# {code_example|end}
validate_against_openapi_schema(result, '/realm/profile_fields', 'get', '200')
@openapi_test_function("/realm/profile_fields:patch")
def reorder_realm_profile_fields(client: Client) -> None:
# {code_example|start}
# Reorder the custom profile fields in the user's organization.
order = [8, 7, 6, 5, 4, 3, 2, 1]
request = {
'order': json.dumps(order)
}
result = client.call_endpoint(
url='/realm/profile_fields',
method='PATCH',
request=request
)
# {code_example|end}
validate_against_openapi_schema(result, '/realm/profile_fields', 'patch', '200')
@openapi_test_function("/realm/profile_fields:post")
def create_realm_profile_field(client: Client) -> None:
# {code_example|start}
# Create a custom profile field in the user's organization.
request = {
'name': 'Phone',
'hint': 'Contact No.',
'field_type': 1
}
result = client.call_endpoint(
url='/realm/profile_fields',
method='POST',
request=request
)
# {code_example|end}
validate_against_openapi_schema(result, '/realm/profile_fields', 'post', '200')
@openapi_test_function("/realm/filters:post")
def add_realm_filter(client: Client) -> None:
# {code_example|start}
# Add a filter to automatically linkify #<number> to the corresponding
# issue in Zulip's server repo
result = client.add_realm_filter('#(?P<id>[0-9]+)',
'https://github.com/zulip/zulip/issues/%(id)s')
# {code_example|end}
validate_against_openapi_schema(result, '/realm/filters', 'post', '200')
@openapi_test_function("/realm/filters/{filter_id}:delete")
def remove_realm_filter(client: Client) -> None:
# {code_example|start}
# Remove the linkifier (realm_filter) with ID 1
result = client.remove_realm_filter(1)
# {code_example|end}
validate_against_openapi_schema(result, '/realm/filters/{filter_id}', 'delete', '200')
@openapi_test_function("/users/me:get")
def get_profile(client: Client) -> None:
# {code_example|start}
# Get the profile of the user/bot that requests this endpoint,
# which is `client` in this case:
result = client.get_profile()
# {code_example|end}
validate_against_openapi_schema(result, '/users/me', 'get', '200')
@openapi_test_function("/get_stream_id:get")
def get_stream_id(client: Client) -> int:
# {code_example|start}
# Get the ID of a given stream
stream_name = 'new stream'
result = client.get_stream_id(stream_name)
# {code_example|end}
validate_against_openapi_schema(result, '/get_stream_id', 'get', '200')
return result['stream_id']
@openapi_test_function("/streams/{stream_id}:delete")
def delete_stream(client: Client, stream_id: int) -> None:
result = client.add_subscriptions(
streams=[
{
'name': 'stream to be deleted',
'description': 'New stream for testing',
},
],
)
# {code_example|start}
# Delete the stream named 'new stream'
stream_id = client.get_stream_id('stream to be deleted')['stream_id']
result = client.delete_stream(stream_id)
# {code_example|end}
validate_against_openapi_schema(result, '/streams/{stream_id}', 'delete', '200')
assert result['result'] == 'success'
@openapi_test_function("/streams:get")
def get_streams(client: Client) -> None:
# {code_example|start}
# Get all streams that the user has access to
result = client.get_streams()
# {code_example|end}
validate_against_openapi_schema(result, '/streams', 'get', '200')
streams = [s for s in result['streams'] if s['name'] == 'new stream']
assert streams[0]['description'] == 'New stream for testing'
# {code_example|start}
# You may pass in one or more of the query parameters mentioned above
# as keyword arguments, like so:
result = client.get_streams(include_public=False)
# {code_example|end}
validate_against_openapi_schema(result, '/streams', 'get', '200')
assert len(result['streams']) == 4
@openapi_test_function("/streams/{stream_id}:patch")
def update_stream(client: Client, stream_id: int) -> None:
# {code_example|start}
# Update the stream by a given ID
request = {
'stream_id': stream_id,
'stream_post_policy': 2,
'is_private': True,
}
result = client.update_stream(request)
# {code_example|end}
validate_against_openapi_schema(result, '/streams/{stream_id}', 'patch', '200')
assert result['result'] == 'success'
@openapi_test_function("/user_groups:get")
def get_user_groups(client: Client) -> int:
# {code_example|start}
# Get all user groups of the realm
result = client.get_user_groups()
# {code_example|end}
validate_against_openapi_schema(result, '/user_groups', 'get', '200')
hamlet_user_group = [u for u in result['user_groups']
if u['name'] == "hamletcharacters"][0]
assert hamlet_user_group['description'] == 'Characters of Hamlet'
marketing_user_group = [u for u in result['user_groups']
if u['name'] == "marketing"][0]
return marketing_user_group['id']
def test_user_not_authorized_error(nonadmin_client: Client) -> None:
result = nonadmin_client.get_streams(include_all_active=True)
validate_against_openapi_schema(result, '/rest-error-handling', 'post', '400_2')
def get_subscribers(client: Client) -> None:
result = client.get_subscribers(stream='new stream')
assert result['subscribers'] == ['iago@zulip.com', 'newbie@zulip.com']
def get_user_agent(client: Client) -> None:
result = client.get_user_agent()
assert result.startswith('ZulipPython/')
@openapi_test_function("/users/me/subscriptions:get")
def list_subscriptions(client: Client) -> None:
# {code_example|start}
# Get all streams that the user is subscribed to
result = client.list_subscriptions()
# {code_example|end}
validate_against_openapi_schema(result, '/users/me/subscriptions',
'get', '200')
streams = [s for s in result['subscriptions'] if s['name'] == 'new stream']
assert streams[0]['description'] == 'New stream for testing'
@openapi_test_function("/users/me/subscriptions:delete")
def remove_subscriptions(client: Client) -> None:
# {code_example|start}
# Unsubscribe from the stream "new stream"
result = client.remove_subscriptions(
['new stream'],
)
# {code_example|end}
validate_against_openapi_schema(result, '/users/me/subscriptions',
'delete', '200')
# test it was actually removed
result = client.list_subscriptions()
assert result['result'] == 'success'
streams = [s for s in result['subscriptions'] if s['name'] == 'new stream']
assert len(streams) == 0
# {code_example|start}
# Unsubscribe another user from the stream "new stream"
result = client.remove_subscriptions(
['new stream'],
principals=['newbie@zulip.com'],
)
# {code_example|end}
validate_against_openapi_schema(result, '/users/me/subscriptions',
'delete', '200')
@openapi_test_function("/users/me/subscriptions/muted_topics:patch")
def toggle_mute_topic(client: Client) -> None:
# Send a test message
message = {
'type': 'stream',
'to': 'Denmark',
'topic': 'boat party',
}
client.call_endpoint(
url='messages',
method='POST',
request=message,
)
# {code_example|start}
# Mute the topic "boat party" in the stream "Denmark"
request = {
'stream': 'Denmark',
'topic': 'boat party',
'op': 'add',
}
result = client.mute_topic(request)
# {code_example|end}
validate_against_openapi_schema(result,
'/users/me/subscriptions/muted_topics',
'patch', '200')
# {code_example|start}
# Unmute the topic "boat party" in the stream "Denmark"
request = {
'stream': 'Denmark',
'topic': 'boat party',
'op': 'remove',
}
result = client.mute_topic(request)
# {code_example|end}
validate_against_openapi_schema(result,
'/users/me/subscriptions/muted_topics',
'patch', '200')
@openapi_test_function("/mark_all_as_read:post")
def mark_all_as_read(client: Client) -> None:
# {code_example|start}
# Mark all of the user's unread messages as read
result = client.mark_all_as_read()
# {code_example|end}
validate_against_openapi_schema(result, '/mark_all_as_read', 'post', '200')
@openapi_test_function("/mark_stream_as_read:post")
def mark_stream_as_read(client: Client) -> None:
# {code_example|start}
# Mark the unread messages in stream with ID "1" as read
result = client.mark_stream_as_read(1)
# {code_example|end}
validate_against_openapi_schema(result, '/mark_stream_as_read', 'post', '200')
@openapi_test_function("/mark_topic_as_read:post")
def mark_topic_as_read(client: Client) -> None:
# Grab an existing topic name
topic_name = client.get_stream_topics(1)['topics'][0]['name']
# {code_example|start}
# Mark the unread messages in stream 1's topic "topic_name" as read
result = client.mark_topic_as_read(1, topic_name)
# {code_example|end}
    validate_against_openapi_schema(result, '/mark_topic_as_read', 'post', '200')
@openapi_test_function("/users/me/subscriptions/properties:post")
def update_subscription_settings(client: Client) -> None:
# {code_example|start}
# Update the user's subscription in stream #1 to pin it to the top of the
# stream list; and in stream #3 to have the hex color "f00"
request = [{
'stream_id': 1,
'property': 'pin_to_top',
'value': True,
}, {
'stream_id': 3,
'property': 'color',
'value': '#f00f00',
}]
result = client.update_subscription_settings(request)
# {code_example|end}
validate_against_openapi_schema(result,
'/users/me/subscriptions/properties',
                                    'post', '200')
@openapi_test_function("/messages/render:post")
def render_message(client: Client) -> None:
# {code_example|start}
# Render a message
request = {
'content': '**foo**',
}
result = client.render_message(request)
# {code_example|end}
validate_against_openapi_schema(result, '/messages/render', 'post', '200')
@openapi_test_function("/messages:get")
def get_messages(client: Client) -> None:
# {code_example|start}
# Get the 100 last messages sent by "iago@zulip.com" to the stream "Verona"
request: Dict[str, Any] = {
'anchor': 'newest',
'num_before': 100,
'num_after': 0,
'narrow': [{'operator': 'sender', 'operand': 'iago@zulip.com'},
{'operator': 'stream', 'operand': 'Verona'}],
}
result = client.get_messages(request)
# {code_example|end}
validate_against_openapi_schema(result, '/messages', 'get', '200')
assert len(result['messages']) <= request['num_before']
@openapi_test_function("/messages/matches_narrow:get")
def check_messages_match_narrow(client: Client) -> None:
message = {
"type": "stream",
"to": "Verona",
"topic": "test_topic",
"content": "http://foo.com"
}
msg_ids = []
response = client.send_message(message)
msg_ids.append(response['id'])
message['content'] = "no link here"
response = client.send_message(message)
msg_ids.append(response['id'])
# {code_example|start}
# Check which messages within an array match a narrow.
request = {
'msg_ids': msg_ids,
'narrow': [{'operator': 'has', 'operand': 'link'}],
}
result = client.call_endpoint(
url='messages/matches_narrow',
method='GET',
request=request
)
# {code_example|end}
validate_against_openapi_schema(result, '/messages/matches_narrow', 'get', '200')
@openapi_test_function("/messages/{message_id}:get")
def get_raw_message(client: Client, message_id: int) -> None:
assert int(message_id)
# {code_example|start}
# Get the raw content of the message with ID "message_id"
result = client.get_raw_message(message_id)
# {code_example|end}
validate_against_openapi_schema(result, '/messages/{message_id}', 'get',
'200')
@openapi_test_function("/attachments:get")
def get_attachments(client: Client) -> None:
# {code_example|start}
# Get your attachments.
result = client.get_attachments()
# {code_example|end}
validate_against_openapi_schema(result, '/attachments', 'get', '200')
@openapi_test_function("/messages:post")
def send_message(client: Client) -> int:
request: Dict[str, Any] = {}
# {code_example|start}
# Send a stream message
request = {
"type": "stream",
"to": "Denmark",
"topic": "Castle",
"content": "I come not, friends, to steal away your hearts.",
}
result = client.send_message(request)
# {code_example|end}
validate_against_openapi_schema(result, '/messages', 'post', '200')
# test that the message was actually sent
message_id = result['id']
url = 'messages/' + str(message_id)
result = client.call_endpoint(
url=url,
method='GET',
)
assert result['result'] == 'success'
assert result['raw_content'] == request['content']
ensure_users([10], ['hamlet'])
# {code_example|start}
# Send a private message
user_id = 10
request = {
"type": "private",
"to": [user_id],
"content": "With mirth and laughter let old wrinkles come.",
}
result = client.send_message(request)
# {code_example|end}
validate_against_openapi_schema(result, '/messages', 'post', '200')
# test that the message was actually sent
message_id = result['id']
url = 'messages/' + str(message_id)
result = client.call_endpoint(
url=url,
method='GET',
)
assert result['result'] == 'success'
assert result['raw_content'] == request['content']
return message_id
@openapi_test_function("/messages/{message_id}/reactions:post")
def add_reaction(client: Client, message_id: int) -> None:
# {code_example|start}
# Add an emoji reaction
request = {
'message_id': str(message_id),
'emoji_name': 'octopus',
}
result = client.add_reaction(request)
# {code_example|end}
validate_against_openapi_schema(result, '/messages/{message_id}/reactions', 'post', '200')
@openapi_test_function("/messages/{message_id}/reactions:delete")
def remove_reaction(client: Client, message_id: int) -> None:
# {code_example|start}
# Remove an emoji reaction
request = {
'message_id': str(message_id),
'emoji_name': 'octopus',
}
result = client.remove_reaction(request)
# {code_example|end}
validate_against_openapi_schema(result, '/messages/{message_id}/reactions', 'delete', '200')
def test_nonexistent_stream_error(client: Client) -> None:
request = {
"type": "stream",
"to": "nonexistent_stream",
"topic": "Castle",
"content": "I come not, friends, to steal away your hearts.",
}
result = client.send_message(request)
validate_against_openapi_schema(result, '/messages', 'post',
'400_0')
def test_private_message_invalid_recipient(client: Client) -> None:
request = {
"type": "private",
"to": "eeshan@zulip.com",
"content": "With mirth and laughter let old wrinkles come.",
}
result = client.send_message(request)
validate_against_openapi_schema(result, '/messages', 'post',
'400_1')
@openapi_test_function("/messages/{message_id}:patch")
def update_message(client: Client, message_id: int) -> None:
assert int(message_id)
# {code_example|start}
# Edit a message
# (make sure that message_id below is set to the ID of the
# message you wish to update)
request = {
"message_id": message_id,
"content": "New content",
}
result = client.update_message(request)
# {code_example|end}
validate_against_openapi_schema(result, '/messages/{message_id}', 'patch',
'200')
# test it was actually updated
url = 'messages/' + str(message_id)
result = client.call_endpoint(
url=url,
method='GET',
)
assert result['result'] == 'success'
assert result['raw_content'] == request['content']
def test_update_message_edit_permission_error(client: Client, nonadmin_client: Client) -> None:
request = {
"type": "stream",
"to": "Denmark",
"topic": "Castle",
"content": "I come not, friends, to steal away your hearts.",
}
result = client.send_message(request)
request = {
"message_id": result["id"],
"content": "New content",
}
result = nonadmin_client.update_message(request)
validate_against_openapi_schema(result, '/messages/{message_id}', 'patch', '400')
@openapi_test_function("/messages/{message_id}:delete")
def delete_message(client: Client, message_id: int) -> None:
# {code_example|start}
# Delete the message with ID "message_id"
result = client.delete_message(message_id)
# {code_example|end}
validate_against_openapi_schema(result, '/messages/{message_id}', 'delete',
'200')
def test_delete_message_edit_permission_error(client: Client, nonadmin_client: Client) -> None:
request = {
"type": "stream",
"to": "Denmark",
"topic": "Castle",
"content": "I come not, friends, to steal away your hearts.",
}
result = client.send_message(request)
result = nonadmin_client.delete_message(result['id'])
validate_against_openapi_schema(result, '/messages/{message_id}', 'delete',
'400_1')
@openapi_test_function("/messages/{message_id}/history:get")
def get_message_history(client: Client, message_id: int) -> None:
# {code_example|start}
# Get the edit history for message with ID "message_id"
result = client.get_message_history(message_id)
# {code_example|end}
validate_against_openapi_schema(result, '/messages/{message_id}/history',
'get', '200')
@openapi_test_function("/realm/emoji:get")
def get_realm_emoji(client: Client) -> None:
# {code_example|start}
result = client.get_realm_emoji()
# {code_example|end}
validate_against_openapi_schema(result, '/realm/emoji', 'GET', '200')
@openapi_test_function("/messages/flags:post")
def update_message_flags(client: Client) -> None:
# Send a few test messages
request: Dict[str, Any] = {
"type": "stream",
"to": "Denmark",
"topic": "Castle",
"content": "I come not, friends, to steal away your hearts.",
}
message_ids = []
for i in range(0, 3):
message_ids.append(client.send_message(request)['id'])
# {code_example|start}
# Add the "read" flag to the messages with IDs in "message_ids"
request = {
'messages': message_ids,
'op': 'add',
'flag': 'read',
}
result = client.update_message_flags(request)
# {code_example|end}
validate_against_openapi_schema(result, '/messages/flags', 'post',
'200')
# {code_example|start}
# Remove the "starred" flag from the messages with IDs in "message_ids"
request = {
'messages': message_ids,
'op': 'remove',
'flag': 'starred',
}
result = client.update_message_flags(request)
# {code_example|end}
validate_against_openapi_schema(result, '/messages/flags', 'post',
'200')
def register_queue_all_events(client: Client) -> str:
# Register the queue and get all events
# Mainly for verifying schema of /register.
result = client.register()
validate_against_openapi_schema(result, '/register', 'post', '200')
return result['queue_id']
@openapi_test_function("/register:post")
def register_queue(client: Client) -> str:
# {code_example|start}
# Register the queue
result = client.register(
event_types=['message', 'realm_emoji'],
)
# {code_example|end}
validate_against_openapi_schema(result, '/register', 'post', '200')
return result['queue_id']
@openapi_test_function("/events:delete")
def deregister_queue(client: Client, queue_id: str) -> None:
# {code_example|start}
# Delete a queue (queue_id is the ID of the queue
# to be removed)
result = client.deregister(queue_id)
# {code_example|end}
validate_against_openapi_schema(result, '/events', 'delete', '200')
# Test "BAD_EVENT_QUEUE_ID" error
result = client.deregister(queue_id)
validate_against_openapi_schema(result, '/events', 'delete', '400')
@openapi_test_function("/server_settings:get")
def get_server_settings(client: Client) -> None:
# {code_example|start}
# Fetch the settings for this server
result = client.get_server_settings()
# {code_example|end}
validate_against_openapi_schema(result, '/server_settings', 'get', '200')
@openapi_test_function("/settings/notifications:patch")
def update_notification_settings(client: Client) -> None:
# {code_example|start}
# Enable push notifications even when online
request = {
'enable_offline_push_notifications': True,
'enable_online_push_notifications': True,
}
result = client.update_notification_settings(request)
# {code_example|end}
validate_against_openapi_schema(result, '/settings/notifications', 'patch', '200')
@openapi_test_function("/user_uploads:post")
def upload_file(client: Client) -> None:
path_to_file = os.path.join(ZULIP_DIR, 'zerver', 'tests', 'images', 'img.jpg')
# {code_example|start}
# Upload a file
with open(path_to_file, 'rb') as fp:
result = client.call_endpoint(
'user_uploads',
method='POST',
files=[fp],
)
client.send_message({
"type": "stream",
"to": "Denmark",
"topic": "Castle",
"content": "Check out [this picture]({}) of my castle!".format(result['uri']),
})
# {code_example|end}
validate_against_openapi_schema(result, '/user_uploads', 'post', '200')
@openapi_test_function("/users/me/{stream_id}/topics:get")
def get_stream_topics(client: Client, stream_id: int) -> None:
# {code_example|start}
result = client.get_stream_topics(stream_id)
# {code_example|end}
validate_against_openapi_schema(result, '/users/me/{stream_id}/topics',
'get', '200')
@openapi_test_function("/typing:post")
def set_typing_status(client: Client) -> None:
ensure_users([10, 11], ['hamlet', 'iago'])
# {code_example|start}
    # The user has started to type in the group PM with Hamlet and Iago
user_id1 = 10
user_id2 = 11
request = {
'op': 'start',
'to': [user_id1, user_id2],
}
result = client.set_typing_status(request)
# {code_example|end}
validate_against_openapi_schema(result, '/typing', 'post', '200')
# {code_example|start}
    # The user has finished typing in the group PM with Hamlet and Iago
user_id1 = 10
user_id2 = 11
request = {
'op': 'stop',
'to': [user_id1, user_id2],
}
result = client.set_typing_status(request)
# {code_example|end}
validate_against_openapi_schema(result, '/typing', 'post', '200')
@openapi_test_function("/realm/emoji/{emoji_name}:post")
def upload_custom_emoji(client: Client) -> None:
emoji_path = os.path.join(ZULIP_DIR, 'zerver', 'tests', 'images', 'img.jpg')
# {code_example|start}
# Upload a custom emoji; assume `emoji_path` is the path to your image.
with open(emoji_path, 'rb') as fp:
emoji_name = 'my_custom_emoji'
result = client.call_endpoint(
f'realm/emoji/{emoji_name}',
method='POST',
files=[fp],
)
# {code_example|end}
validate_against_openapi_schema(result,
'/realm/emoji/{emoji_name}',
'post', '200')
@openapi_test_function("/users/me/alert_words:get")
def get_alert_words(client: Client) -> None:
result = client.get_alert_words()
assert result['result'] == 'success'
@openapi_test_function("/users/me/alert_words:post")
def add_alert_words(client: Client) -> None:
word = ['foo', 'bar']
result = client.add_alert_words(word)
assert result['result'] == 'success'
@openapi_test_function("/users/me/alert_words:delete")
def remove_alert_words(client: Client) -> None:
word = ['foo']
result = client.remove_alert_words(word)
assert result['result'] == 'success'
@openapi_test_function("/user_groups/create:post")
def create_user_group(client: Client) -> None:
ensure_users([6, 7, 8, 10], ['aaron', 'zoe', 'cordelia', 'hamlet'])
# {code_example|start}
request = {
'name': 'marketing',
'description': 'The marketing team.',
'members': [6, 7, 8, 10],
}
result = client.create_user_group(request)
# {code_example|end}
validate_against_openapi_schema(result, '/user_groups/create', 'post', '200')
assert result['result'] == 'success'
@openapi_test_function("/user_groups/{user_group_id}:patch")
def update_user_group(client: Client, user_group_id: int) -> None:
# {code_example|start}
request = {
'group_id': user_group_id,
'name': 'marketing',
'description': 'The marketing team.',
}
result = client.update_user_group(request)
# {code_example|end}
assert result['result'] == 'success'
@openapi_test_function("/user_groups/{user_group_id}:delete")
def remove_user_group(client: Client, user_group_id: int) -> None:
# {code_example|start}
result = client.remove_user_group(user_group_id)
# {code_example|end}
validate_against_openapi_schema(result, '/user_groups/{user_group_id}', 'delete', '200')
assert result['result'] == 'success'
@openapi_test_function("/user_groups/{user_group_id}/members:post")
def update_user_group_members(client: Client, user_group_id: int) -> None:
ensure_users([8, 10, 11], ['cordelia', 'hamlet', 'iago'])
# {code_example|start}
request = {
'group_id': user_group_id,
'delete': [8, 10],
'add': [11],
}
result = client.update_user_group_members(request)
# {code_example|end}
validate_against_openapi_schema(result, '/user_groups/{group_id}/members', 'post', '200')
def test_invalid_api_key(client_with_invalid_key: Client) -> None:
result = client_with_invalid_key.list_subscriptions()
validate_against_openapi_schema(result, '/rest-error-handling', 'post', '400_0')
def test_missing_request_argument(client: Client) -> None:
result = client.render_message({})
validate_against_openapi_schema(result, '/rest-error-handling', 'post', '400_1')
def test_invalid_stream_error(client: Client) -> None:
result = client.get_stream_id('nonexistent')
validate_against_openapi_schema(result, '/get_stream_id', 'get', '400')
# SETUP METHODS FOLLOW
def test_against_fixture(result: Dict[str, Any], fixture: Dict[str, Any], check_if_equal: Optional[Iterable[str]] = None, check_if_exists: Optional[Iterable[str]] = None) -> None:
assertLength(result, fixture)
if check_if_equal is None and check_if_exists is None:
for key, value in fixture.items():
assertEqual(key, result, fixture)
if check_if_equal is not None:
for key in check_if_equal:
assertEqual(key, result, fixture)
if check_if_exists is not None:
for key in check_if_exists:
assertIn(key, result)
def assertEqual(key: str, result: Dict[str, Any], fixture: Dict[str, Any]) -> None:
if result[key] != fixture[key]:
first = f"{key} = {result[key]}"
second = f"{key} = {fixture[key]}"
raise AssertionError("Actual and expected outputs do not match; showing diff:\n" +
mdiff.diff_strings(first, second))
else:
assert result[key] == fixture[key]
def assertLength(result: Dict[str, Any], fixture: Dict[str, Any]) -> None:
if len(result) != len(fixture):
result_string = json.dumps(result, indent=4, sort_keys=True)
fixture_string = json.dumps(fixture, indent=4, sort_keys=True)
raise AssertionError("The lengths of the actual and expected outputs do not match; showing diff:\n" +
mdiff.diff_strings(result_string, fixture_string))
else:
assert len(result) == len(fixture)
def assertIn(key: str, result: Dict[str, Any]) -> None:
if key not in result.keys():
raise AssertionError(
f"The actual output does not contain the the key `{key}`.",
)
else:
assert key in result
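# A minimal usage sketch for the fixture helpers above (illustrative only;
# the fixture filename and the checked fields are assumptions, not part of
# this suite):
#
#     result = client.get_server_settings()
#     with open('server_settings.fixture.json') as f:
#         fixture = json.load(f)
#     test_against_fixture(result, fixture,
#                          check_if_equal=['realm_uri'],
#                          check_if_exists=['zulip_version'])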
def test_messages(client: Client, nonadmin_client: Client) -> None:
render_message(client)
message_id = send_message(client)
add_reaction(client, message_id)
remove_reaction(client, message_id)
update_message(client, message_id)
get_raw_message(client, message_id)
get_messages(client)
check_messages_match_narrow(client)
get_message_history(client, message_id)
delete_message(client, message_id)
mark_all_as_read(client)
mark_stream_as_read(client)
mark_topic_as_read(client)
update_message_flags(client)
test_nonexistent_stream_error(client)
test_private_message_invalid_recipient(client)
test_update_message_edit_permission_error(client, nonadmin_client)
test_delete_message_edit_permission_error(client, nonadmin_client)
def test_users(client: Client) -> None:
create_user(client)
get_members(client)
get_single_user(client)
deactivate_user(client)
reactivate_user(client)
update_user(client)
get_subscription_status(client)
get_profile(client)
update_notification_settings(client)
upload_file(client)
get_attachments(client)
set_typing_status(client)
update_presence(client)
get_user_presence(client)
create_user_group(client)
user_group_id = get_user_groups(client)
update_user_group(client, user_group_id)
update_user_group_members(client, user_group_id)
remove_user_group(client, user_group_id)
get_alert_words(client)
add_alert_words(client)
remove_alert_words(client)
def test_streams(client: Client, nonadmin_client: Client) -> None:
add_subscriptions(client)
test_add_subscriptions_already_subscribed(client)
list_subscriptions(client)
stream_id = get_stream_id(client)
update_stream(client, stream_id)
get_streams(client)
get_subscribers(client)
remove_subscriptions(client)
toggle_mute_topic(client)
update_subscription_settings(client)
update_notification_settings(client)
get_stream_topics(client, 1)
delete_stream(client, stream_id)
test_user_not_authorized_error(nonadmin_client)
test_authorization_errors_fatal(client, nonadmin_client)
def test_queues(client: Client) -> None:
    # Note that the example for api/get-events is not tested here.
    # Methods such as client.get_events() and client.call_on_each_message
    # are blocking calls, and the event queue backend is already thoroughly
    # tested in zerver/tests/test_event_queue.py, so it is not worth the
    # effort to write asynchronous logic for testing those here.
queue_id = register_queue(client)
deregister_queue(client, queue_id)
register_queue_all_events(client)
def test_server_organizations(client: Client) -> None:
get_realm_filters(client)
add_realm_filter(client)
get_server_settings(client)
remove_realm_filter(client)
get_realm_emoji(client)
upload_custom_emoji(client)
get_realm_profile_fields(client)
reorder_realm_profile_fields(client)
create_realm_profile_field(client)
def test_errors(client: Client) -> None:
test_missing_request_argument(client)
test_invalid_stream_error(client)
def test_the_api(client: Client, nonadmin_client: Client) -> None:
get_user_agent(client)
test_users(client)
test_streams(client, nonadmin_client)
test_messages(client, nonadmin_client)
test_queues(client)
test_server_organizations(client)
test_errors(client)
sys.stdout.flush()
if REGISTERED_TEST_FUNCTIONS != CALLED_TEST_FUNCTIONS:
print("Error! Some @openapi_test_function tests were never called:")
print(" ", REGISTERED_TEST_FUNCTIONS - CALLED_TEST_FUNCTIONS)
sys.exit(1)
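# Entry-point sketch (illustrative; the real script constructs its clients
# from command-line options elsewhere in this file):
#
#     client = zulip.Client(config_file="~/.zuliprc")
#     nonadmin_client = zulip.Client(config_file="~/.zuliprc-nonadmin")
#     test_the_api(client, nonadmin_client)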
|
showell/zulip
|
zerver/openapi/python_examples.py
|
Python
|
apache-2.0
| 43,251
|
[
"Octopus"
] |
333ac8ea191805846b1c83b4fe2ae788cd53d2a025f599bf79ea82ae9c202246
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import datetime
import bson
from tg import expose, flash, redirect, validate, config
from tg.decorators import with_trailing_slash
from pylons import tmpl_context as c, app_globals as g
from pylons import request, response
from webob import exc as wexc
import allura.tasks.repo_tasks
import allura.tasks.mail_tasks
from allura import model as M
from allura.lib import validators as V
from allura.lib.security import require_authenticated, has_access
from allura.lib import helpers as h
from allura.lib import plugin
from allura.lib.decorators import require_post
from allura.lib.repository import RepositoryApp
from allura.lib.widgets import (
SubscriptionForm,
OAuthApplicationForm,
OAuthRevocationForm,
LoginForm,
ForgottenPasswordForm)
from allura.lib.widgets import forms
from allura.controllers import BaseController
log = logging.getLogger(__name__)
class F(object):
login_form = LoginForm()
recover_password_change_form = forms.PasswordChangeBase()
forgotten_password_form = ForgottenPasswordForm()
subscription_form = SubscriptionForm()
registration_form = forms.RegistrationForm(action='/auth/save_new')
oauth_application_form = OAuthApplicationForm(action='register')
oauth_revocation_form = OAuthRevocationForm(
action='/auth/preferences/revoke_oauth')
change_personal_data_form = forms.PersonalDataForm()
add_socialnetwork_form = forms.AddSocialNetworkForm()
remove_socialnetwork_form = forms.RemoveSocialNetworkForm()
add_telnumber_form = forms.AddTelNumberForm()
add_website_form = forms.AddWebsiteForm()
skype_account_form = forms.SkypeAccountForm()
remove_textvalue_form = forms.RemoveTextValueForm()
add_timeslot_form = forms.AddTimeSlotForm()
remove_timeslot_form = forms.RemoveTimeSlotForm()
add_inactive_period_form = forms.AddInactivePeriodForm()
remove_inactive_period_form = forms.RemoveInactivePeriodForm()
save_skill_form = forms.AddUserSkillForm()
remove_skill_form = forms.RemoveSkillForm()
class AuthController(BaseController):
def __init__(self):
self.preferences = PreferencesController()
self.user_info = UserInfoController()
self.subscriptions = SubscriptionsController()
self.oauth = OAuthController()
def __getattr__(self, name):
urls = plugin.UserPreferencesProvider.get().additional_urls()
if name not in urls:
raise AttributeError("'%s' object has no attribute '%s'" % (type(self).__name__, name))
return urls[name]
@expose()
def prefs(self, *args, **kwargs):
'''
Redirect old /auth/prefs URL to /auth/subscriptions
(to handle old email links, etc).
'''
redirect('/auth/subscriptions/')
@expose('jinja:allura:templates/login.html')
@with_trailing_slash
def index(self, *args, **kwargs):
orig_request = request.environ.get('pylons.original_request', None)
if 'return_to' in kwargs:
return_to = kwargs.pop('return_to')
elif orig_request:
return_to = orig_request.url
else:
return_to = request.referer
c.form = F.login_form
return dict(return_to=return_to)
@expose('jinja:allura:templates/login_fragment.html')
def login_fragment(self, *args, **kwargs):
return self.index(*args, **kwargs)
@expose('jinja:allura:templates/create_account.html')
def create_account(self, **kw):
c.form = F.registration_form
return dict()
def _validate_hash(self, hash):
login_url = config.get('auth.login_url', '/auth/')
if not hash:
redirect(login_url)
user_record = M.User.query.find(
{'tool_data.AuthPasswordReset.hash': hash}).first()
if not user_record:
flash('Unable to process reset, please try again')
redirect(login_url)
hash_expiry = user_record.get_tool_data(
'AuthPasswordReset', 'hash_expiry')
if not hash_expiry or hash_expiry < datetime.datetime.utcnow():
flash('Unable to process reset, please try again')
redirect(login_url)
return user_record
@expose('jinja:allura:templates/forgotten_password.html')
def forgotten_password(self, hash=None, **kw):
provider = plugin.AuthenticationProvider.get(request)
if not provider.forgotten_password_process:
raise wexc.HTTPNotFound()
if not hash:
c.forgotten_password_form = F.forgotten_password_form
else:
self._validate_hash(hash)
c.recover_password_change_form = F.recover_password_change_form
return dict(hash=hash)
@expose()
@require_post()
@validate(F.recover_password_change_form, error_handler=forgotten_password)
def set_new_password(self, hash=None, pw=None, pw2=None):
provider = plugin.AuthenticationProvider.get(request)
if not provider.forgotten_password_process:
raise wexc.HTTPNotFound()
user = self._validate_hash(hash)
user.set_password(pw)
user.set_tool_data('AuthPasswordReset', hash='', hash_expiry='')
flash('Password changed')
redirect('/auth/')
@expose()
@require_post()
@validate(F.forgotten_password_form, error_handler=forgotten_password)
def password_recovery_hash(self, email=None, **kw):
provider = plugin.AuthenticationProvider.get(request)
if not provider.forgotten_password_process:
raise wexc.HTTPNotFound()
if not email:
redirect('/')
user_record = M.User.by_email_address(email)
hash = h.nonce(42)
user_record.set_tool_data('AuthPasswordReset',
hash=hash,
hash_expiry=datetime.datetime.utcnow() +
datetime.timedelta(seconds=int(config.get('auth.recovery_hash_expiry_period', 600))))
log.info('Sending password recovery link to %s', email)
text = '''
To reset your password on %s, please visit the following URL:
%s/auth/forgotten_password/%s
''' % (config['site_name'], config['base_url'], hash)
allura.tasks.mail_tasks.sendmail.post(
destinations=[email],
fromaddr=config['forgemail.return_path'],
reply_to=config['forgemail.return_path'],
subject='Password recovery',
message_id=h.gen_message_id(),
text=text)
flash('Email with instructions has been sent.')
redirect('/')
@expose()
@require_post()
@validate(F.registration_form, error_handler=create_account)
def save_new(self, display_name=None, username=None, pw=None, **kw):
user = M.User.register(
dict(username=username,
display_name=display_name,
password=pw))
plugin.AuthenticationProvider.get(request).login(user)
flash('User "%s" registered' % user.get_pref('display_name'))
redirect('/')
@expose()
def send_verification_link(self, a):
addr = M.EmailAddress.query.get(_id=a)
if addr:
addr.send_verification_link()
flash('Verification link sent')
else:
flash('No such address', 'error')
redirect(request.referer)
@expose()
def verify_addr(self, a):
addr = M.EmailAddress.query.get(nonce=a)
if addr:
addr.confirmed = True
flash('Email address confirmed')
else:
flash('Unknown verification link', 'error')
redirect('/')
@expose()
def logout(self):
plugin.AuthenticationProvider.get(request).logout()
redirect(config.get('auth.post_logout_url', '/'))
@expose()
@require_post()
@validate(F.login_form, error_handler=index)
def do_login(self, return_to=None, **kw):
if return_to and return_to != request.url:
redirect(return_to)
redirect('/')
@expose(content_type='text/plain')
def refresh_repo(self, *repo_path):
# post-commit hooks use this
if not repo_path:
return 'No repo specified'
repo_path = '/' + '/'.join(repo_path)
project, rest = h.find_project(repo_path)
if project is None:
return 'No project at %s' % repo_path
if not rest:
return '%s does not include a repo mount point' % repo_path
h.set_context(project.shortname,
rest[0], neighborhood=project.neighborhood)
if c.app is None or not getattr(c.app, 'repo'):
return 'Cannot find repo at %s' % repo_path
allura.tasks.repo_tasks.refresh.post()
return '%r refresh queued.\n' % c.app.repo
def _auth_repos(self, user):
def _unix_group_name(neighborhood, shortname):
path = neighborhood.url_prefix + \
shortname[len(neighborhood.shortname_prefix):]
parts = [p for p in path.split('/') if p]
if len(parts) == 2 and parts[0] == 'p':
parts = parts[1:]
return '.'.join(reversed(parts))
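        # Illustrative mapping: a project at /p/myproj yields the unix group
        # 'myproj' (the default 'p' neighborhood is dropped), while
        # /adobe/myproj yields 'myproj.adobe' (path parts reversed, dot-joined).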
repos = []
for p in user.my_projects():
for p in [p] + p.direct_subprojects:
for app in p.app_configs:
if not issubclass(g.entry_points["tool"][app.tool_name], RepositoryApp):
continue
if not has_access(app, 'write', user, p):
continue
repos.append('/%s/%s/%s' % (
app.tool_name.lower(),
_unix_group_name(p.neighborhood, p.shortname),
app.options['mount_point']))
repos.sort()
return repos
@expose('json:')
def repo_permissions(self, repo_path=None, username=None, **kw):
"""Expects repo_path to be a filesystem path like
<tool>/<project>.<neighborhood>/reponame[.git]
unless the <neighborhood> is 'p', in which case it is
<tool>/<project>/reponame[.git]
Returns JSON describing this user's permissions on that repo.
"""
disallow = dict(allow_read=False, allow_write=False,
allow_create=False)
# Find the user
user = M.User.by_username(username)
if not user:
response.status = 404
return dict(disallow, error='unknown user')
if not repo_path:
return dict(allow_write=self._auth_repos(user))
parts = [p for p in repo_path.split(os.path.sep) if p]
# strip the tool name
parts = parts[1:]
if '.' in parts[0]:
project, neighborhood = parts[0].split('.')
else:
project, neighborhood = parts[0], 'p'
parts = [neighborhood, project] + parts[1:]
project_path = '/' + '/'.join(parts)
project, rest = h.find_project(project_path)
if project is None:
log.info("Can't find project at %s from repo_path %s",
project_path, repo_path)
response.status = 404
return dict(disallow, error='unknown project')
c.project = project
c.app = project.app_instance(rest[0])
if not c.app:
c.app = project.app_instance(os.path.splitext(rest[0])[0])
if c.app is None:
log.info("Can't find repo at %s on repo_path %s",
rest[0], repo_path)
return disallow
return dict(allow_read=has_access(c.app, 'read')(user=user),
allow_write=has_access(c.app, 'write')(user=user),
allow_create=has_access(c.app, 'create')(user=user))
class PreferencesController(BaseController):
def _check_security(self):
require_authenticated()
@with_trailing_slash
@expose('jinja:allura:templates/user_prefs.html')
def index(self, **kw):
provider = plugin.AuthenticationProvider.get(request)
menu = provider.account_navigation()
api_token = M.ApiToken.query.get(user_id=c.user._id)
return dict(
menu=menu,
api_token=api_token,
)
@h.vardec
@expose()
@require_post()
def update(self,
addr=None,
new_addr=None,
primary_addr=None,
oid=None,
new_oid=None,
preferences=None,
**kw):
if config.get('auth.method', 'local') == 'local':
if not preferences.get('display_name'):
flash("Display Name cannot be empty.", 'error')
redirect('.')
c.user.set_pref('display_name', preferences['display_name'])
for i, (old_a, data) in enumerate(zip(c.user.email_addresses, addr or [])):
obj = c.user.address_object(old_a)
if data.get('delete') or not obj:
if primary_addr == c.user.email_addresses[i]:
c.user.set_pref('email_address', None)
primary_addr = None
del c.user.email_addresses[i]
if obj:
obj.delete()
if new_addr.get('claim'):
if M.EmailAddress.query.get(_id=new_addr['addr'], confirmed=True):
flash('Email address already claimed', 'error')
else:
c.user.email_addresses.append(new_addr['addr'])
em = M.EmailAddress.upsert(new_addr['addr'])
em.claimed_by_user_id = c.user._id
em.send_verification_link()
if not primary_addr and not c.user.get_pref('email_address') and c.user.email_addresses:
primary_addr = c.user.email_addresses[0]
if primary_addr:
c.user.set_pref('email_address', primary_addr)
for k, v in preferences.iteritems():
if k == 'results_per_page':
v = int(v)
c.user.set_pref(k, v)
redirect('.')
@expose()
@require_post()
def gen_api_token(self):
tok = M.ApiToken.query.get(user_id=c.user._id)
if tok is None:
tok = M.ApiToken(user_id=c.user._id)
else:
tok.secret_key = h.cryptographic_nonce()
redirect(request.referer)
@expose()
@require_post()
def del_api_token(self):
tok = M.ApiToken.query.get(user_id=c.user._id)
if tok is None:
return
tok.delete()
redirect(request.referer)
@expose()
@require_post()
@validate(V.NullValidator(), error_handler=index)
def change_password(self, **kw):
kw = g.theme.password_change_form.to_python(kw, None)
ap = plugin.AuthenticationProvider.get(request)
try:
ap.set_password(c.user, kw['oldpw'], kw['pw'])
except wexc.HTTPUnauthorized:
flash('Incorrect password', 'error')
redirect('.')
flash('Password changed')
redirect('.')
@expose()
@require_post()
def upload_sshkey(self, key=None):
ap = plugin.AuthenticationProvider.get(request)
try:
ap.upload_sshkey(c.user.username, key)
        except AssertionError as ae:
flash('Error uploading key: %s' % ae, 'error')
flash('Key uploaded')
redirect('.')
@expose()
@require_post()
def user_message(self, allow_user_messages=False):
c.user.set_pref('disable_user_messages', not allow_user_messages)
redirect(request.referer)
class UserInfoController(BaseController):
def __init__(self, *args, **kwargs):
self.skills = UserSkillsController()
self.contacts = UserContactsController()
self.availability = UserAvailabilityController()
def _check_security(self):
require_authenticated()
@with_trailing_slash
@expose('jinja:allura:templates/user_info.html')
def index(self, **kw):
provider = plugin.AuthenticationProvider.get(request)
menu = provider.account_navigation()
return dict(menu=menu)
@expose()
@require_post()
@validate(F.change_personal_data_form, error_handler=index)
def change_personal_data(self, **kw):
require_authenticated()
c.user.set_pref('sex', kw['sex'])
c.user.set_pref('birthdate', kw.get('birthdate'))
localization = {'country': kw.get('country'), 'city': kw.get('city')}
c.user.set_pref('localization', localization)
c.user.set_pref('timezone', kw['timezone'])
flash('Your personal data was successfully updated!')
redirect('.')
class UserSkillsController(BaseController):
def __init__(self, category=None):
self.category = category
super(UserSkillsController, self).__init__()
def _check_security(self):
require_authenticated()
@expose()
def _lookup(self, catshortname, *remainder):
cat = M.TroveCategory.query.get(shortname=catshortname)
return UserSkillsController(category=cat), remainder
@with_trailing_slash
@expose('jinja:allura:templates/user_skills.html')
def index(self, **kw):
l = []
parents = []
if kw.get('selected_category') is not None:
selected_skill = M.TroveCategory.query.get(
trove_cat_id=int(kw.get('selected_category')))
elif self.category:
selected_skill = self.category
else:
l = M.TroveCategory.query.find(
dict(trove_parent_id=0, show_as_skill=True)).all()
selected_skill = None
if selected_skill:
l = [scat for scat in selected_skill.subcategories
if scat.show_as_skill]
temp_cat = selected_skill.parent_category
while temp_cat:
parents = [temp_cat] + parents
temp_cat = temp_cat.parent_category
provider = plugin.AuthenticationProvider.get(request)
menu = provider.account_navigation()
return dict(
skills_list=l,
selected_skill=selected_skill,
parents=parents,
menu=menu,
add_details_fields=(len(l) == 0))
@expose()
@require_post()
@validate(F.save_skill_form, error_handler=index)
def save_skill(self, **kw):
trove_id = int(kw.get('selected_skill'))
category = M.TroveCategory.query.get(trove_cat_id=trove_id)
new_skill = dict(
category_id=category._id,
level=kw.get('level'),
comment=kw.get('comment'))
s = [skill for skill in c.user.skills
if str(skill.category_id) != str(new_skill['category_id'])]
s.append(new_skill)
c.user.set_pref('skills', s)
flash('Your skills list was successfully updated!')
redirect('.')
@expose()
@require_post()
@validate(F.remove_skill_form, error_handler=index)
def remove_skill(self, **kw):
trove_id = int(kw.get('categoryid'))
category = M.TroveCategory.query.get(trove_cat_id=trove_id)
s = [skill for skill in c.user.skills
if str(skill.category_id) != str(category._id)]
c.user.set_pref('skills', s)
flash('Your skills list was successfully updated!')
redirect('.')
class UserContactsController(BaseController):
def _check_security(self):
require_authenticated()
@with_trailing_slash
@expose('jinja:allura:templates/user_contacts.html')
def index(self, **kw):
provider = plugin.AuthenticationProvider.get(request)
menu = provider.account_navigation()
return dict(menu=menu)
@expose()
@require_post()
@validate(F.add_socialnetwork_form, error_handler=index)
def add_social_network(self, **kw):
require_authenticated()
c.user.add_socialnetwork(kw['socialnetwork'], kw['accounturl'])
flash('Your personal contacts were successfully updated!')
redirect('.')
@expose()
@require_post()
@validate(F.remove_socialnetwork_form, error_handler=index)
def remove_social_network(self, **kw):
require_authenticated()
c.user.remove_socialnetwork(kw['socialnetwork'], kw['account'])
flash('Your personal contacts were successfully updated!')
redirect('.')
@expose()
@require_post()
@validate(F.add_telnumber_form, error_handler=index)
def add_telnumber(self, **kw):
require_authenticated()
c.user.add_telephonenumber(kw['newnumber'])
flash('Your personal contacts were successfully updated!')
redirect('.')
@expose()
@require_post()
@validate(F.remove_textvalue_form, error_handler=index)
def remove_telnumber(self, **kw):
require_authenticated()
c.user.remove_telephonenumber(kw['oldvalue'])
flash('Your personal contacts were successfully updated!')
redirect('.')
@expose()
@require_post()
@validate(F.add_website_form, error_handler=index)
def add_webpage(self, **kw):
require_authenticated()
c.user.add_webpage(kw['newwebsite'])
flash('Your personal contacts were successfully updated!')
redirect('.')
@expose()
@require_post()
@validate(F.remove_textvalue_form, error_handler=index)
def remove_webpage(self, **kw):
require_authenticated()
c.user.remove_webpage(kw['oldvalue'])
flash('Your personal contacts were successfully updated!')
redirect('.')
@expose()
@require_post()
@validate(F.skype_account_form, error_handler=index)
def skype_account(self, **kw):
require_authenticated()
c.user.set_pref('skypeaccount', kw['skypeaccount'])
flash('Your personal contacts were successfully updated!')
redirect('.')
class UserAvailabilityController(BaseController):
def _check_security(self):
require_authenticated()
@with_trailing_slash
@expose('jinja:allura:templates/user_availability.html')
def index(self, **kw):
provider = plugin.AuthenticationProvider.get(request)
menu = provider.account_navigation()
return dict(menu=menu)
@expose()
@require_post()
@validate(F.add_timeslot_form, error_handler=index)
def add_timeslot(self, **kw):
require_authenticated()
c.user.add_timeslot(kw['weekday'], kw['starttime'], kw['endtime'])
flash('Your availability timeslots were successfully updated!')
redirect('.')
@expose()
@require_post()
@validate(F.remove_timeslot_form, error_handler=index)
def remove_timeslot(self, **kw):
require_authenticated()
c.user.remove_timeslot(kw['weekday'], kw['starttime'], kw['endtime'])
flash('Your availability timeslots were successfully updated!')
redirect('.')
@expose()
@require_post()
@validate(F.add_inactive_period_form, error_handler=index)
def add_inactive_period(self, **kw):
require_authenticated()
c.user.add_inactive_period(kw['startdate'], kw['enddate'])
flash('Your inactivity periods were successfully updated!')
redirect('.')
@expose()
@require_post()
@validate(F.remove_inactive_period_form, error_handler=index)
def remove_inactive_period(self, **kw):
require_authenticated()
c.user.remove_inactive_period(kw['startdate'], kw['enddate'])
        flash('Your inactivity periods were successfully updated!')
redirect('.')
class SubscriptionsController(BaseController):
def _check_security(self):
require_authenticated()
@with_trailing_slash
@expose('jinja:allura:templates/user_subs.html')
def index(self, **kw):
c.form = F.subscription_form
c.revoke_access = F.oauth_revocation_form
subscriptions = []
mailboxes = M.Mailbox.query.find(
dict(user_id=c.user._id, is_flash=False))
mailboxes = list(mailboxes.ming_cursor)
project_collection = M.Project.query.mapper.collection
app_collection = M.AppConfig.query.mapper.collection
projects = dict(
(p._id, p) for p in project_collection.m.find(dict(
_id={'$in': [mb.project_id for mb in mailboxes]})))
app_index = dict(
(ac._id, ac) for ac in app_collection.m.find(dict(
_id={'$in': [mb.app_config_id for mb in mailboxes]})))
for mb in mailboxes:
project = projects.get(mb.project_id, None)
app_config = app_index.get(mb.app_config_id, None)
if project is None:
mb.m.delete()
continue
if app_config is None:
continue
subscriptions.append(dict(
subscription_id=mb._id,
project_name=project.name,
mount_point=app_config.options['mount_point'],
artifact_title=dict(
text=mb.artifact_title, href=mb.artifact_url),
topic=mb.topic,
type=mb.type,
frequency=mb.frequency.unit,
artifact=mb.artifact_index_id,
subscribed=True))
my_projects = dict((p._id, p) for p in c.user.my_projects())
my_tools = app_collection.m.find(dict(
project_id={'$in': my_projects.keys()}))
for tool in my_tools:
p_id = tool.project_id
subscribed = M.Mailbox.subscribed(
project_id=p_id, app_config_id=tool._id)
if not subscribed:
subscriptions.append(dict(
tool_id=tool._id,
project_id=p_id,
project_name=my_projects[p_id].name,
mount_point=tool.options['mount_point'],
artifact_title='No subscription',
topic=None,
type=None,
frequency=None,
artifact=None))
subscriptions.sort(key=lambda d: (d['project_name'], d['mount_point']))
provider = plugin.AuthenticationProvider.get(request)
menu = provider.account_navigation()
return dict(
subscriptions=subscriptions,
menu=menu)
@h.vardec
@expose()
@require_post()
@validate(F.subscription_form, error_handler=index)
def update_subscriptions(self, subscriptions=None, email_format=None, **kw):
for s in subscriptions:
if s['subscribed']:
if s['tool_id'] and s['project_id']:
M.Mailbox.subscribe(
project_id=bson.ObjectId(s['project_id']),
app_config_id=bson.ObjectId(s['tool_id']))
else:
if s['subscription_id'] is not None:
s['subscription_id'].delete()
if email_format:
c.user.set_pref('email_format', email_format)
redirect(request.referer)
class OAuthController(BaseController):
def _check_security(self):
require_authenticated()
@with_trailing_slash
@expose('jinja:allura:templates/oauth_applications.html')
def index(self, **kw):
c.form = F.oauth_application_form
consumer_tokens = M.OAuthConsumerToken.for_user(c.user)
access_tokens = M.OAuthAccessToken.for_user(c.user)
provider = plugin.AuthenticationProvider.get(request)
return dict(
menu=provider.account_navigation(),
consumer_tokens=consumer_tokens,
access_tokens=access_tokens,
)
@expose()
@require_post()
@validate(F.oauth_application_form, error_handler=index)
def register(self, application_name=None, application_description=None, **kw):
M.OAuthConsumerToken(name=application_name,
description=application_description)
flash('OAuth Application registered')
redirect('.')
@expose()
@require_post()
def deregister(self, _id=None):
app = M.OAuthConsumerToken.query.get(_id=bson.ObjectId(_id))
if app is None:
flash('Invalid app ID', 'error')
redirect('.')
if app.user_id != c.user._id:
flash('Invalid app ID', 'error')
redirect('.')
M.OAuthRequestToken.query.remove({'consumer_token_id': app._id})
M.OAuthAccessToken.query.remove({'consumer_token_id': app._id})
app.delete()
flash('Application deleted')
redirect('.')
@expose()
@require_post()
def generate_access_token(self, _id):
"""
Manually generate an OAuth access token for the given consumer.
NB: Manually generated access tokens are bearer tokens, which are
less secure (since they rely only on the token, which is transmitted
with each request, unlike the access token secret).
"""
consumer_token = M.OAuthConsumerToken.query.get(_id=bson.ObjectId(_id))
if consumer_token is None:
flash('Invalid app ID', 'error')
redirect('.')
if consumer_token.user_id != c.user._id:
flash('Invalid app ID', 'error')
redirect('.')
request_token = M.OAuthRequestToken(
consumer_token_id=consumer_token._id,
user_id=c.user._id,
callback='manual',
validation_pin=h.nonce(20),
is_bearer=True,
)
access_token = M.OAuthAccessToken(
consumer_token_id=consumer_token._id,
            request_token_id=request_token._id,
user_id=request_token.user_id,
is_bearer=True,
)
redirect('.')
@expose()
@require_post()
def revoke_access_token(self, _id):
access_token = M.OAuthAccessToken.query.get(_id=bson.ObjectId(_id))
if access_token is None:
flash('Invalid token ID', 'error')
redirect('.')
if access_token.user_id != c.user._id:
flash('Invalid token ID', 'error')
redirect('.')
access_token.delete()
flash('Token revoked')
redirect('.')
|
apache/incubator-allura
|
Allura/allura/controllers/auth.py
|
Python
|
apache-2.0
| 31,193
|
[
"VisIt"
] |
abc3a493c4b600e4bfd8f77a2d1d71ce64f1c03c722469d05ea77f22b59df165
|
import json
import random
import sys
import numpy as np
import scipy as sp
import tensorflow as tf
class Cost(object):
@staticmethod
def fn(a, y):
return np.sum(np.nan_to_num(-y * np.log(a) - (1 - y) * np.log(1 - a)))
    #For multiple ys?
#If y = y_1, y_2, ... are desired output values, a^L = a^L_1, a^L_2, ... are actual output values (in layer L)
#There are x data points, and j neurons in the layer
#C = -1 / len(train) * sum((sum(y_j * ln(a^L_j) + (1 - y_j) * ln(1 - a^L_j)), j), x)
    @staticmethod
    def delta(z, a, y):
        return (a - y)
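# A worked check of the cost and its delta (illustrative, not part of the
# original file): with activations a and targets y,
#
#     a = np.array([0.9, 0.1]); y = np.array([1.0, 0.0])
#     Cost.fn(a, y)          # ~0.2107, i.e. -(ln 0.9 + ln 0.9)
#     Cost.delta(None, a, y) # array([-0.1,  0.1]); z is unused by this cost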
class NeuralNet(object):
    def __init__(self, sizes, cost = Cost):
        #Initialize everything
        self.nLayers = len(sizes)
        self.sizes = sizes
        self.cost = cost
        #self.weights = [np.random.randn(y, x) for x, y in zip(sizes[:-1], sizes[1:])]
        self.weights = [np.random.randn(y, x) / np.sqrt(x) for x, y in zip(self.sizes[:-1], self.sizes[1:])]
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
def sigmoid(self, z):
#Sigmoid neuron activation function
return 1 / (1 + np.exp(-z))
def sigmoidPrime(self, z):
#Derivative of sigmoid function
return self.sigmoid(z) * (1 - self.sigmoid(z))
'''WE DO NOT NEED TO VECTORIZE OUR RESULTS of course
def vectorResult(self, j):
        e = np.zeros((10, 1))
e[j] = 1.0
return e'''
def forward(self, a):
#Move values forward through network
for w, b in zip(self.weights, self.biases):
a = self.sigmoid(np.dot(w, a) + b)
return a
'''def cost()
#Cross-entropy cost function
#C = -1 / len(train) * sum(y * ln(a) + (1 - y) * ln(1 - a), x) - one output
#C = -1 / len(train) * sum(y * ln(y) + (1 - y) * ln(1 - y), x) - one thing
#If y = y_1, y_2, ... are desired output values, a^L = a^L_1, a^L_2, ... are actual output values (in layer L)
#There are x data points, and j neurons in the layer
#C = -1 / len(train) * sum((sum(y_j * ln(a^L_j) + (1 - y_j) * ln(1 - a^L_j)), j), x)
def costPrime(self, outActivations, y):
#Derivative of cost function
return (outActivations - y)
#def evaluate(self, test)
#return the average cost of all the test data'''
def backprop(self, x, y):
#Compute gradient of the cost function
delw = [np.zeros(w.shape) for w in self.weights]
delb = [np.zeros(b.shape) for b in self.biases]
activation = x
activations = [x]
zs = []
for w, b in zip(self.weights, self.biases):
z = np.dot(w, activation) + b
zs.append(z)
activation = self.sigmoid(z)
activations.append(activation)
'''delta = self.costPrime(activations[-1], y) * self.sigmoidPrime(zs[-1])'''
delta = (self.cost).delta(zs[-1], activations[-1], y)
delw[-1] = np.dot(delta, activations[-2].transpose())
delb[-1] = delta
for i in xrange(2, self.nLayers):
z = zs[-i]
sp = self.sigmoidPrime(z)
delta = np.dot(self.weights[-i + 1].transpose(), delta) * sp
delw[-i] = np.dot(delta, activations[-i - 1].transpose())
            delb[-i] = delta
return (delw, delb)
def updateMinBat(self, minBat, learnRate, lmda, n):
#Do gradient descent using backprop to a single mini-batch
delw = [np.zeros(w.shape) for w in self.weights]
delb = [np.zeros(b.shape) for b in self.biases]
for x, y in minBat:
deltadelw, deltadelb = self.backprop(x, y)
delw = [dw + ddw for dw, ddw in zip(delw, deltadelw)]
delb = [db + ddb for db, ddb in zip(delb, deltadelb)]
'''self.weights = [w - (learnRate / len(minBat)) * dw for w, dw in zip(self.weights, delw)]'''
        self.weights = [(1 - learnRate * (lmda / n)) * w - (learnRate / len(minBat)) * dw for w, dw in zip(self.weights, delw)]
        self.biases = [b - (learnRate / len(minBat)) * db for b, db in zip(self.biases, delb)]
'''def accuracy(self, data, convert = False):
if convert:
results = [(np.argmax(self.forward(x)), np.argmax(y)) for x, y in data]
else:
            results = [(np.argmax(self.forward(x)), y) for x, y in data]
return sum(int(x == y) for x, y in results)'''
def totalCost(self, data, lmda, convert = False):
cost = 0.0
for x, y in data:
a = self.forward(x)
'''if convert: y = self.vectorResult(y)'''
cost += self.cost.fn(a, y) / len(data)
cost += 0.5 * (lmda / len(data)) * sum(np.linalg.norm(w) ** 2 for w in self.weights)
return cost
'''def stochGradDescent(self, train, test = None, epochs, minBatSize, learnRate, lmda = 0.0, monitorTrainCost = False, monitorTrainAccuracy = False, monitorTestCost = False, monitorTestAccuracy = False):'''
    def stochGradDescent(self, train, epochs, minBatSize, learnRate, test = None, lmda = 0.0, monitorTrain = False, monitorTest = False):
#Apply gradient descent
n = len(train)
if test: nTest = len(test)
'''trainAccuracy, trainCost = [], []
testAccuracy, testCost = [], []'''
trainCost, testCost = [], []
'''#Notes
#We do not need accuracy!
#Finish making all this!
#Make sure that this works with our thing - check how the y-hat-values will come out!
#Generally make this a regression thing
#Figure out how it wants input and output data so we can format ours that way'''
for i in xrange(epochs):
random.shuffle(train)
minBats = [train[j:j + minBatSize] for j in xrange(0, n, minBatSize)]
for minBat in minBats:
self.updateMinBat(minBat, learnRate, lmda, len(train))
print("Epoch %s training complete" % i)
'''if monitorTrainAccuracy:
accuracy = self.accuracy(train, convert = True)
trainAccuracy.append(accuracy)
print("Accuracy with training data: {} / {}".format(accuracy, n))
if monitorTrainCost:
cost = self.totalCost(train, lmda)
trainCost.append(cost)
print("Cost with training data: {}".format(cost))
if monitorTestAccuracy:
accuracy = self.accuracy(test)
testAccuracy.append(accuracy)
print("Accuracy with testing data: {} / {}".format(accuracy, nTest))
if monitorTestCost:
cost = self.totalCost(test, lmda, convert = True)
testCost.append(cost)
print("Cost with testing data: {}".format(cost))'''
if monitorTrain:
cost = self.totalCost(train, lmda)
trainCost.append(cost)
print("Cost with training data: {}".format(cost))
if monitorTest:
cost = self.totalCost(test, lmda)
testCost.append(cost)
print("Cost with testing data: {}".format(cost))
print
        return trainCost, testCost
def save(self, filename):
data = {"sizes": self.sizes,
"weights": [w.tolist() for w in self.weights],
"biases": [b.tolist() for b in self.biases],
"cost": str(self.cost.__name__)}
f = open(filename, "w")
json.dump(data, f)
f.close()
def load(filename):
f = open(filename, "r")
data = json.load(f)
f.close()
cost = getattr(sys.modules[__name__], data["cost"])
net = Network(data["sizes"], cost = cost)
net.weights = [np.array(w) for w in data["weights"]]
net.biases = [np.array(b) for b in data["biases"]]
return net
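# Usage sketch (illustrative; the architecture, hyperparameters, and toy data
# below are assumptions, not taken from the original project):
if __name__ == "__main__":
    net = NeuralNet([2, 5, 1])
    # Toy regression data: a list of (input, target) column-vector pairs
    train = [(np.random.randn(2, 1), np.random.rand(1, 1)) for _ in xrange(200)]
    trainCost, testCost = net.stochGradDescent(train, epochs=10, minBatSize=10,
                                               learnRate=0.5, monitorTrain=True)
    net.save("net.json")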
|
FlowBoat/Flow-Tech-NeurAlgae
|
Versions/v0/NeurAlgae0.2.py
|
Python
|
gpl-3.0
| 7,825
|
[
"NEURON"
] |
675a6c1eb2808160cf35eac98160867162991eb6245e9a0b2376d85da751a789
|
''' Automated particle selection (AutoPicker)
This script (`ara-autopick`) was designed to locate particles on a micrograph using template matching
(like LFCPick), yet it incorporates several post-processing algorithms to reduce the number of noise
windows and contaminants.
It will not remove all contaminants, but experiments have demonstrated that in many cases it removes enough
of them to achieve a high-resolution reconstruction. To further reduce contamination, see ViCer (`ara-vicer`).
The AutoPicker script (`ara-autopick`) takes as input, at minimum, the micrograph and the size of the particle
in pixels, and writes out a coordinate file listing the selected particles for each micrograph.
Tips
====
#. Filenames: Must follow the SPIDER format with a number before the extension, e.g. mic_00001.spi. Output files just require the number of digits: `--output sndc_0000.spi`
#. CCD micrographs - Use the `--invert` parameter to invert CCD micrographs. In this way, you can work directly with the original TIFF
#. Decimation - Use the `--bin-factor` parameter to reduce the size of the micrograph for more efficient processing. Your coordinates will be on the full micrograph.
#. Aggregation - Use `--remove-aggregates` to remove aggregation. This will remove all overlapping windows based on the window size controlled by `--window-mult`
#. Parallel Processing - Several micrographs can be run in parallel (assuming you have the memory and cores available). `-p 8` will run 8 micrographs in parallel.
Examples
========
.. sourcecode :: sh
# Run with a disk as a template on a raw film micrograph
$ ara-autopick mic_*.spi -o sndc_00001.spi -r 110 -w 312
# Run with a disk as a template on a raw film micrograph on 16 cores (1 micrograph per core in memory)
$ ara-autopick mic_*.spi -o sndc_00001.spi -r 110 -w 312 -p16
# Run with a disk as a template on a raw CCD micrograph
$ ara-autopick mic_*.spi -o sndc_00001.spi -r 110 -w 312 --invert
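# Run on a subset of micrographs chosen by a selection file (illustrative;
# assumes mics_select.spi lists the IDs of the micrographs to keep)
$ ara-autopick mic_*.spi -o sndc_00001.spi -r 110 -w 312 --selection-file mics_select.spi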
Critical Options
================
.. program:: ara-autopick
.. option:: -i <FILENAME1,FILENAME2>, --input-files <FILENAME1,FILENAME2>, FILENAME1 FILENAME2
List of filenames for the input micrographs.
If you use the parameters `-i` or `--input-files`, they must be comma separated
(no spaces). If you do not use a flag, then separate by spaces. For a
very large number of files (>5000) use `-i "filename*"`
.. option:: -o <FILENAME>, --output <FILENAME>
Output filename for the coordinate file with correct number of digits (e.g. sndc_0000.spi)
.. option:: -p <FILENAME>, --param-file <FILENAME>
Filename for SPIDER parameter file describing a Cryo-EM experiment
Useful Options
===============
These options are useful for most runs and may need to be tuned for your dataset.
.. program:: ara-autopick
.. option:: -w <int>, --worker-count <int>
Set the number of micrographs to process in parallel (keep in mind memory and processor restrictions)
.. option:: --invert
Invert the contrast of CCD micrographs
.. option:: --bin-factor <FLOAT>
Decimation factor for the script: changes the size of images and coordinates, and scales parameters such as pixel_size or window, unless otherwise specified
.. option:: --template <FILENAME>
An input filename of a template to use in template-matching. If this is not specified, then a Gaussian smoothed disk of radius
`disk-mult*pixel-radius` is used.
Tunable Options
===============
Generally, these options do not need to be changed; their default values have proven successful on many datasets. However,
you may encounter a dataset that does not respond properly, and these options can be adjusted to get the best possible particle
selection.
.. program:: ara-autopick
.. option:: -d <float>, --dust-sigma <float>
Remove dark outlier pixels due to dust; suggested 3 for film, 5 for CCD
.. option:: -x <float>, --xray-sigma <float>
Remove light outlier pixels due to electrons; suggested 3 for film, 5 for CCD
.. option:: --disable-prune
Disable bad particle removal. This step is used to ensure the histogram has the proper bimodal distribution.
.. option:: --disable-threshold
Disable noise removal. This step is used to remove the large number of noise windows.
.. option:: --disk-mult <float>
This multiplier scales the radius of the Gaussian smoothed disk (which is used when no template is specified).
.. option:: --overlap-mult <float>
Multiplier for the amount of allowed overlap or inter-particle distance.
.. option:: --pca-mode <float>
Set the PCA mode for outlier removal: 0: auto, <1: energy, >=1: number of eigen vectors
.. option:: --selection-file <str>
Selection file for a subset of micrographs
Other Options
=============
This is not a complete list of options available to this script, for additional options see:
#. :ref:`Options shared by all scripts ... <shared-options>`
#. :ref:`Options shared by MPI-enabled scripts... <mpi-options>`
#. :ref:`Options shared by file processor scripts... <file-proc-options>`
#. :ref:`Options shared by SPIDER params scripts... <param-options>`
.. Created on Dec 21, 2011
.. codeauthor:: Robert Langlois <rl2528@columbia.edu>
'''
from ..core.app import program
from ..util import bench
from ..core.image import ndimage_utility, ndimage_filter
from ..core.learn import dimensionality_reduction
from ..core.learn import unary_classification
from ..core.metadata import format_utility, format, spider_utility, spider_params
from ..core.parallel import mpi_utility
from ..core.util import drawing
from ..core.image import ndimage_file
#import numpy # pylint: disable=W0611
import numpy.linalg
import scipy.spatial
import scipy.stats
import lfcpick
import logging
import os
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)
def process(filename, disk_mult_range, id_len=0, **extra):
    '''Locate particles on a single micrograph and write their coordinates to the output file
Args:
filename : str
Input filename
id_len : int, optional
Maximum length of the ID
extra : dict
Unused key word arguments
Returns:
filename : str
Current filename
peaks : str
Coordinates found
'''
try:
spider_utility.update_spider_files(extra, spider_utility.spider_id(filename, id_len), 'good_coords', 'output', 'good', 'box_image')
except:
_logger.info("Skipping: %s - invalid SPIDER ID"%filename)
return filename, []
_logger.debug("Read micrograph")
try:
mic = lfcpick.read_micrograph(filename, **extra)
except ndimage_file.InvalidHeaderException:
_logger.warn("Skipping: %s - invalid header"%filename)
return filename, []
except:
if _logger.getEffectiveLevel() == logging.DEBUG:
_logger.exception("Skipping: %s - unknown error"%filename)
else:
_logger.warn("Skipping: %s - unknown error"%filename)
return filename, []
_logger.debug("Search micrograph")
try:
if len(disk_mult_range) > 0:
peaks = search_range(mic, disk_mult_range, **extra)
else:
peaks = search(mic, **extra)
except numpy.linalg.LinAlgError:
_logger.info("Skipping: %s"%filename)
return filename, []
_logger.debug("Write coordinates")
if len(peaks) == 0:
_logger.warn("Skipping: %s - no particles found"%filename)
return filename, []
coords = format_utility.create_namedtuple_list(peaks, "Coord", "id,peak,x,y",numpy.arange(1, len(peaks)+1, dtype=numpy.int)) if peaks.shape[0] > 0 else []
write_example(mic, coords, filename, **extra)
format.write(extra['output'], coords, default_format=format.spiderdoc)
return filename, peaks
def search(img, disable_prune=False, limit_template=0, limit=0, experimental=False, **extra):
''' Search a micrograph for particles using a template
Args:
img : array
Micrograph image
disable_prune : bool
Disable the removal of bad particles
extra : dict
Unused key word arguments
Returns:
peaks : array
List of peaks: height and coordinates
'''
template = lfcpick.create_template(**extra)
peaks = template_match(img, template, **extra)
peaks=cull_boundary(peaks, img.shape, **extra)
if len(peaks.squeeze())==0: return []
index = numpy.argsort(peaks[:,0])[::-1]
if index.shape[0] > limit_template: index = index[:limit_template]
index = index[::-1]
try:
peaks = peaks[index].copy().squeeze()
except:
try:
_logger.error("%d > %d"%(numpy.max(index), peaks.shape[0]))
except: pass
raise
if not disable_prune:
_logger.debug("Classify peaks")
if experimental:
sel = classify_windows_experimental(img, peaks, **extra)
else:
sel = classify_windows(img, peaks, **extra)
peaks = peaks[sel].copy()
peaks[:, 1:3] *= extra['bin_factor']
if limit>0:
return peaks[::-1][:limit]
return peaks[::-1]
def search_range(img, disk_mult_range, **extra):
''' Search a micrograph for particles using a template
Args:
img : array
Micrograph image
disk_mult_range : list
List of disk multipliers
extra : dict
Unused key word arguments
Returns:
peaks : array
List of peaks: height and coordinates
'''
#del extra['mask_mult']
coords_last = None
disk_mult_range = numpy.asarray(disk_mult_range)
#max_mult = disk_mult_range.max()
for disk_mult in disk_mult_range:
try:
#coords = search(img, mask_mult=float(disk_mult)/max_mult, **extra)[::-1]
coords = search(img, **extra)[::-1]
except:
_logger.error("Error for disk_mult=%f"%(disk_mult))
raise
coords[:, 1:3] /= extra['bin_factor']
coords_last = merge_coords(coords_last, coords, **extra) if coords_last is not None else coords
coords_last[:, 1:3] *= extra['bin_factor']
return coords_last[::-1]
def template_match(img, template_image, pixel_diameter, **extra):
''' Find peaks using given template in the micrograph
Args:
img : array
Micrograph
template_image : array
Template image
pixel_diameter : int
Diameter of particle in pixels
extra : dict
Unused key word arguments
Returns:
peaks : array
List of peaks including peak size, x-coordinate, y-coordinate
'''
_logger.debug("Filter micrograph")
img = ndimage_filter.gaussian_highpass(img, 0.25/(pixel_diameter/2.0), 2)
_logger.debug("Template-matching")
cc_map = ndimage_utility.cross_correlate(img, template_image)
_logger.debug("Find peaks")
peaks = lfcpick.search_peaks(cc_map, pixel_diameter, **extra)
if peaks.ndim == 1: peaks = numpy.asarray(peaks).reshape((len(peaks)/3, 3))
return peaks
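# A minimal numpy sketch of the matched-filter idea behind template_match
# (illustrative; ndimage_utility.cross_correlate also handles padding and
# normalization that this bare FFT product does not):
#
#     import numpy.fft as fft
#     cc = fft.ifft2(fft.fft2(img) * numpy.conj(fft.fft2(template, img.shape))).real
#     y, x = numpy.unravel_index(numpy.argmax(cc), cc.shape)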
def merge_coords(coords1, coords2, pixel_diameter, **extra):
    ''' Merge two coordinate sets, keeping from `coords2` only the peaks that
    do not fall within one particle radius of any peak already in `coords1`.
    '''
pixel_radius = pixel_diameter/2
pixel_radius = pixel_radius*pixel_radius
selected = []
for i, f in enumerate(coords2):
dist = f[1:3]-coords1[:, 1:3]
numpy.square(dist, dist)
dist = numpy.sum(dist, axis=1)
if not (dist.min() < pixel_radius):
selected.append(i)
coords3 = numpy.zeros((coords1.shape[0]+len(selected), coords1.shape[1]))
coords3[:coords1.shape[0]]=coords1
coords3[coords1.shape[0]:]=coords2[selected]
return coords3
def cull_boundary(peaks, shape, boundary=[], bin_factor=1.0, **extra):
''' Remove peaks where the window goes outside the boundary of the
micrograph image.
Args:
peaks : array
List of peaks including peak size, x-coordinate, y-coordinate
shape : tuple
Number of rows, columns in micrograph
boundary : list
Margin for particle selection top, bottom, left, right
bin_factor : float
Image downsampling factor
extra : dict
Unused key word arguments
Returns:
peaks : array
List of peaks within the boundary including peak size, x-coordinate, y-coordinate
'''
if len(boundary) == 0: return peaks
boundary = numpy.asarray(boundary)/bin_factor
if len(boundary) > 1: boundary[1] = shape[1]-boundary[1]
    if len(boundary) > 3: boundary[3] = shape[0]-boundary[3]
_logger.debug("Boundary: %s"%(str(boundary)))
j=0
for i in xrange(len(peaks)):
if peaks[i, 2] < boundary[0]: continue
elif len(boundary) > 1 and peaks[i, 2] > boundary[1]: continue
        elif len(boundary) > 2 and peaks[i, 1] < boundary[2]: continue
elif len(boundary) > 3 and peaks[i, 1] > boundary[3]: continue
if i != j:
peaks[j, :] = peaks[i]
j+=1
_logger.debug("Kept: %d of %d"%(j, len(peaks)))
return peaks[:j]
def classify_windows(mic, scoords, dust_sigma=4.0, xray_sigma=4.0, disable_threshold=False, remove_aggregates=False, pca_mode=0, iter_threshold=1, real_space_nstd=2.5, nstd_pw=4.0, mask_mult=1.0, window=None, pixel_diameter=None, threshold_minimum=25, **extra):
''' Classify particle windows from non-particle windows
Args:
mic : array
Micrograph
scoords : list
List of potential particle coordinates
dust_sigma : float
Number of standard deviations for removal of outlier dark pixels
xray_sigma : float
Number of standard deviations for removal of outlier light pixels
disable_threshold : bool
Disable noise removal with threshold selection
remove_aggregates : bool
Set True to remove aggregates
pca_mode : float
Set the PCA mode for outlier removal: 0: auto, <1: energy, >=1: number of eigen vectors
iter_threshold : int
Number of times to repeat thresholding
real_space_nstd : float
Number of standard deviations for real-space PCA rejection
window : int
Size of the window in pixels
pixel_diameter : int
Diameter of particle in pixels
    threshold_minimum : int
        Minimum number of particles required to consider thresholding successful
extra : dict
Unused key word arguments
Returns:
sel : numpy.ndarray
Bool array of selected good windows
'''
_logger.debug("Total particles: %d"%len(scoords))
radius = pixel_diameter/2*mask_mult
win_shape = (window, window)
dgmask = ndimage_utility.model_disk(int(radius)/2, win_shape)
masksm = dgmask
maskap = ndimage_utility.model_disk(1, win_shape)*-1+1
vfeat = numpy.zeros((len(scoords)))
data = numpy.zeros((len(scoords), numpy.sum(masksm>0.5)))
mask = ndimage_utility.model_disk(int(radius*1.2+1), (window, window)) * (ndimage_utility.model_disk(int(radius*0.9), win_shape)*-1+1)
datar=None
imgs=[]
_logger.debug("Windowing %d particles"%len(scoords))
for i, win in enumerate(ndimage_utility.for_each_window(mic, scoords, window, 1.0)):
if (i%10)==0: _logger.debug("Windowing particle: %d"%i)
#win=ndimage_filter.ramp(win)
imgs.append(win.copy())
ndimage_utility.replace_outlier(win, dust_sigma, xray_sigma, None, win)
ar = ndimage_utility.compress_image(ndimage_utility.normalize_standard(win, mask, False), mask)
if datar is None: datar=numpy.zeros((len(scoords), ar.shape[0]))
datar[i, :] = ar
if vfeat is not None:
vfeat[i] = numpy.sum(ndimage_utility.segment(ndimage_utility.dog(win, radius), 1024)*dgmask)
amp=ndimage_utility.fftamp(win)*maskap
ndimage_utility.vst(amp, amp)
ndimage_utility.normalize_standard(amp, masksm, out=amp)
ndimage_utility.compress_image(amp, masksm, data[i])
_logger.debug("Performing PCA")
feat, idx = dimensionality_reduction.pca(data, data, 1)[:2]
if feat.ndim != 2:
_logger.error("PCA bug: %s -- %s"%(str(feat.shape), str(data.shape)))
assert(idx > 0)
assert(feat.shape[0]>1)
_logger.debug("Eigen: %d"%idx)
try:
dsel = unary_classification.one_class_classification_old(feat, nstd=nstd_pw)
except:
dsel = numpy.ones(feat.shape[0], dtype=numpy.bool)
if _logger.getEffectiveLevel() == logging.DEBUG:
_logger.exception("Skipping contaminant removal - unknown error")
else:
_logger.warn("Skipping contaminant removal - unknown error")
feat, idx = dimensionality_reduction.pca(datar, datar, pca_mode)[:2]
if feat.ndim != 2:
_logger.error("PCA bug: %s -- %s"%(str(feat.shape), str(data.shape)))
assert(idx > 0)
assert(feat.shape[0]>1)
_logger.debug("Eigen: %d"%idx)
dsel = numpy.logical_and(dsel, unary_classification.robust_euclidean(feat, real_space_nstd))
_logger.debug("Removed by PCA: %d of %d -- %d"%(numpy.sum(dsel), len(scoords), idx))
if vfeat is not None:
sel = numpy.logical_and(dsel, vfeat == numpy.max(vfeat))
_logger.debug("Removed by Dog: %d of %d"%(numpy.sum(vfeat == numpy.max(vfeat)), len(scoords)))
else: sel = dsel
if not disable_threshold:
for i in xrange(1, iter_threshold):
tsel = classify_noise(scoords, dsel, sel, threshold_minimum)
dsel = numpy.logical_and(dsel, numpy.logical_not(tsel))
sel = numpy.logical_and(sel, numpy.logical_not(tsel))
tsel = classify_noise(scoords, dsel, sel, threshold_minimum)
_logger.debug("Removed by threshold %d of %d"%(numpy.sum(tsel), len(scoords)))
sel = numpy.logical_and(tsel, sel)
_logger.debug("Removed by all %d of %d"%(numpy.sum(sel), len(scoords)))
sel = numpy.logical_and(dsel, sel)
if remove_aggregates: classify_aggregates(scoords, window/2, sel)
#else: remove_overlap(scoords, radius, sel)
return sel
def classify_windows_experimental(mic, scoords, dust_sigma=4.0, xray_sigma=4.0, disable_threshold=False, remove_aggregates=False, pca_mode=0, iter_threshold=1, real_space_nstd=2.5, window=None, pixel_diameter=None, threshold_minimum=25, **extra):
''' Classify particle windows from non-particle windows
Args:
mic : array
Micrograph
scoords : list
List of potential particle coordinates
dust_sigma : float
Number of standard deviations for removal of outlier dark pixels
xray_sigma : float
Number of standard deviations for removal of outlier light pixels
disable_threshold : bool
Disable noise removal with threshold selection
remove_aggregates : bool
Set True to remove aggregates
pca_mode : float
Set the PCA mode for outlier removal: 0: auto, <1: energy, >=1: number of eigen vectors
iter_threshold : int
Number of times to repeat thresholding
real_space_nstd : float
Number of standard deviations for real-space PCA rejection
window : int
Size of the window in pixels
pixel_diameter : int
Diameter of particle in pixels
    threshold_minimum : int
        Minimum number of particles required to consider thresholding successful
extra : dict
Unused key word arguments
Returns:
sel : numpy.ndarray
Bool array of selected good windows
'''
_logger.debug("Total particles: %d"%len(scoords))
radius = pixel_diameter/2
win_shape = (window, window)
dgmask = ndimage_utility.model_disk(radius/2, win_shape)
masksm = dgmask
maskap = ndimage_utility.model_disk(1, win_shape)*-1+1
vfeat = numpy.zeros((len(scoords)))
data = numpy.zeros((len(scoords), numpy.sum(masksm>0.5)))
mask = ndimage_utility.model_disk(int(radius*1.2+1), (window, window)) * (ndimage_utility.model_disk(int(radius*0.9), win_shape)*-1+1)
datar=None
imgs=[]
_logger.debug("Windowing %d particles"%len(scoords))
for i, win in enumerate(ndimage_utility.for_each_window(mic, scoords, window, 1.0)):
#if data is None:
# data = numpy.zeros((len(scoords), win.shape[0]/2-1))
if (i%10)==0: _logger.debug("Windowing particle: %d"%i)
#win=ndimage_filter.ramp(win)
imgs.append(win.copy())
ndimage_utility.replace_outlier(win, dust_sigma, xray_sigma, None, win)
#ar = ndimage_utility.compress_image(ndimage_utility.normalize_standard(win, normmask, True), mask)
ar = ndimage_utility.compress_image(ndimage_utility.normalize_standard(win, mask, False), mask)
if datar is None: datar=numpy.zeros((len(scoords), ar.shape[0]))
datar[i, :] = ar
if vfeat is not None:
vfeat[i] = numpy.sum(ndimage_utility.segment(ndimage_utility.dog(win, radius), 1024)*dgmask)
#amp = ndimage_utility.fourier_mellin(win)*maskap
amp=ndimage_utility.fftamp(win)*maskap
#amp = ndimage_utility.powerspec1d(win)
ndimage_utility.vst(amp, amp)
ndimage_utility.normalize_standard(amp, masksm, out=amp)
        ndimage_utility.compress_image(amp, masksm, data[i])
_logger.debug("Performing PCA")
feat, idx = dimensionality_reduction.pca(data, data, 1)[:2]
if feat.ndim != 2:
_logger.error("PCA bug: %s -- %s"%(str(feat.shape), str(data.shape)))
assert(idx > 0)
assert(feat.shape[0]>1)
_logger.debug("Eigen: %d"%idx)
dsel = unary_classification.one_class_classification_old(feat)
feat, idx = dimensionality_reduction.pca(datar, datar, pca_mode)[:2]
if feat.ndim != 2:
_logger.error("PCA bug: %s -- %s"%(str(feat.shape), str(data.shape)))
assert(idx > 0)
assert(feat.shape[0]>1)
_logger.debug("Eigen: %d"%idx)
dsel = numpy.logical_and(dsel, unary_classification.robust_euclidean(feat, real_space_nstd))
_logger.debug("Removed by PCA: %d of %d -- %d"%(numpy.sum(dsel), len(scoords), idx))
if vfeat is not None:
sel = numpy.logical_and(dsel, vfeat == numpy.max(vfeat))
_logger.debug("Removed by Dog: %d of %d"%(numpy.sum(vfeat == numpy.max(vfeat)), len(scoords)))
else: sel = dsel
if not disable_threshold:
for i in xrange(1, iter_threshold):
tsel = classify_noise(scoords, dsel, sel, threshold_minimum)
dsel = numpy.logical_and(dsel, numpy.logical_not(tsel))
sel = numpy.logical_and(sel, numpy.logical_not(tsel))
tsel = classify_noise(scoords, dsel, sel, threshold_minimum)
_logger.debug("Removed by threshold %d of %d"%(numpy.sum(tsel), len(scoords)))
sel = numpy.logical_and(tsel, sel)
_logger.debug("Removed by all %d of %d"%(numpy.sum(sel), len(scoords)))
sel = numpy.logical_and(dsel, sel)
if remove_aggregates: classify_aggregates(scoords, window/2, sel)
#else: remove_overlap(scoords, radius, sel)
return sel
def outlier_rejection(feat, prob):
    ''' Reject outlier samples using the Mahalanobis distance under an
    empirical covariance model with a chi-squared cutoff.
    Args:
        feat : array
            Feature matrix with one row per sample
        prob : float
            Probability cutoff for the chi-squared distribution
    Returns:
        sel : numpy.ndarray
            Bool array of samples that fall within the cutoff
    '''
from sklearn.covariance import EmpiricalCovariance #MinCovDet
#real_cov
#linalg.inv(real_cov)
#robust_cov = MinCovDet().fit(feat)
robust_cov = EmpiricalCovariance().fit(feat)
dist = robust_cov.mahalanobis(feat - numpy.median(feat, 0))
cut = scipy.stats.chi2.ppf(prob, feat.shape[1])
return dist < cut
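# Illustrative sketch (not part of the original module): outlier_rejection on
# Gaussian features keeps roughly the requested fraction of samples, since the
# squared Mahalanobis distance of Gaussian data follows a chi-squared
# distribution with one degree of freedom per feature.
def _example_outlier_rejection():
    feat = numpy.random.standard_normal((1000, 3))
    sel = outlier_rejection(feat, 0.975)
    return numpy.mean(sel)  # close to 0.975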
def classify_noise(scoords, dsel, sel=None, threshold_minimum=25):
''' Classify out the noise windows
Args:
scoords : list
List of peak and coordinates
dsel : numpy.ndarray
Good values selected by PCA
sel : numpy.ndarray
Total good values selected by PCA and DoG
    threshold_minimum : int
        Minimum number of particles required to consider thresholding successful
Returns:
tsel : numpy.ndarray
Good values selected by Otsu
'''
if sel is None: sel = dsel
bcnt = 0
tsel=None
i=0
while bcnt < threshold_minimum and i < 10:
if tsel is not None: dsel = numpy.logical_and(numpy.logical_not(tsel), dsel)
th = unary_classification.otsu(scoords[dsel, 0], numpy.sum(dsel)/16)
tsel = scoords[:, 0] > th
bcnt = numpy.sum(numpy.logical_and(dsel, numpy.logical_and(tsel, sel)))
i+=1
return tsel
def classify_aggregates(scoords, offset, sel):
''' Remove all aggregated windows
Args:
scoords : list
List of peak and coordinates
offset : int
Window half-width
    sel : numpy.ndarray
        Current selection of good windows, updated in place by aggregate removal
Returns:
sel : numpy.ndarray
Good values selected by aggregate removal
'''
cutoff = offset*2
coords = scoords[sel, 1:3]
off = numpy.argwhere(sel).squeeze()
dist = scipy.spatial.distance.squareform(scipy.spatial.distance.pdist(coords, 'euclidean'))
dist = numpy.unique(numpy.argwhere(numpy.logical_and(dist > 0, dist <= cutoff)).ravel())
sel[off[dist]] = 0
return sel
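# Illustrative sketch (not part of the original module): classify_aggregates
# deselects every pair of windows whose centers are closer than the window
# width. Here the two peaks 5 pixels apart are both dropped and the isolated
# one survives.
def _example_classify_aggregates():
    scoords = numpy.array([[1.0, 10.0, 10.0], [1.0, 15.0, 10.0], [1.0, 80.0, 80.0]])
    sel = numpy.ones(3, dtype=numpy.bool)
    classify_aggregates(scoords, 5, sel)  # offset=5 -> cutoff=10
    return sel  # [False, False, True]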
def remove_overlap(scoords, radius, sel):
''' Remove coordinates where the windows overlap by updating
the selection array (`sel`)
Args:
scoords : array
Selected coordinates
radius : int
Radius of the particle in pixels
sel : array
Output selection array that is modified in place
'''
coords = scoords[:, 1:3]
i=0
radius *= 1.1
idx = numpy.argwhere(sel).squeeze()
while i < len(idx):
dist = scipy.spatial.distance.cdist(coords[idx[i+1:]], coords[idx[i]].reshape((1, len(coords[idx[i]]))), metric='euclidean').ravel()
osel = dist < radius
if numpy.sum(osel) > 0:
if numpy.alltrue(scoords[idx[i], 0] > scoords[idx[i+1:], 0]):
sel[idx[i+1:][osel]]=0
idx = numpy.argwhere(sel).squeeze()
else:
sel[idx[i]]=0
else:
i+=1
def write_example(mic, coords, filename, box_image="", bin_factor=1.0, pixel_diameter=None, window=None, **extra):
''' Write out an image with the particles boxed
Args:
mic : array
Micrograph image
coords : list
List of particle coordinates
filename : str
Current micrograph filename to load benchmark if available
box_image : str
Output filename
bin_factor : float
Image downsampling factor
pixel_diameter : int
Diameter of particle in pixels
window : int
Size of window in pixels
extra : dict
Unused key word arguments
'''
if box_image == "" or not drawing.is_available(): return
radius = pixel_diameter/2.0
mic = ndimage_filter.filter_gaussian_highpass(mic, 0.25/radius, 2)
ndimage_utility.replace_outlier(mic, 4.0, 4.0, None, mic)
benchmark = bench.read_bench_coordinates(filename, **extra)
if benchmark is not None:
mic = drawing.draw_particle_boxes(mic, coords, window, bin_factor, ret_draw=True)
drawing.draw_particle_boxes_to_file(mic, benchmark, window, bin_factor, box_image, outline="#40ff40")
else:
drawing.draw_particle_boxes_to_file(mic, coords, window, bin_factor, box_image)
def initialize(files, param):
# Initialize global parameters for the script
if mpi_utility.is_root(**param):
_logger.info("Mask Multiplier: %f"%param['mask_mult'])
_logger.info("Peak limit: %f"%param['limit'])
if param['disable_prune']: _logger.info("Bad particle removal - disabled")
if param['disable_threshold']: _logger.info("Noise removal - disabled")
if param['remove_aggregates']: _logger.info("Aggregate removal - enabled")
if param['experimental']: _logger.info("Experimental contaminant removal - enabled")
if len(param['boundary']) > 0: _logger.info("Selection boundary: %s"%",".join([str(v) for v in param['boundary']]))
if param['iter_threshold']>1: _logger.info("Multiple-thresholds: %d"%param['iter_threshold'])
if param['box_image']!="":
try:os.makedirs(os.path.dirname(param['box_image']))
except: pass
return sorted(lfcpick.initialize(files, param))
def reduce_all(val, **extra):
# Process each input file in the main thread (for multi-threaded code)
return lfcpick.reduce_all(val, **extra)
def finalize(files, **extra):
# Finalize global parameters for the script
return lfcpick.finalize(files, **extra)
def supports(files, **extra):
''' Test if this module is required in the project workflow
Args:
files : list
List of filenames to test
extra : dict
Unused keyword arguments
Returns:
flag : bool
True if this module should be added to the workflow
'''
return True
def setup_options(parser, pgroup=None, main_option=False):
# Collection of options necessary to use functions in this script
from ..core.app.settings import OptionGroup
group = OptionGroup(parser, "AutoPick", "Options to control reference-free particle selection", id=__name__)
group.add_option("-d", dust_sigma=4.0, help="Remove dark outlier pixels due to dust, suggested 3 for film 5 for CCD", gui=dict(maximum=100, minimum=0, singleStep=1))
group.add_option("-x", xray_sigma=4.0, help="Remove light outlier pixels due to electrons, suggested 3 for film 5 for CCD", gui=dict(maximum=100, minimum=1, singleStep=1))
group.add_option("", disable_prune=False, help="Disable bad particle removal")
group.add_option("", disable_threshold=False, help="Disable noise thresholding")
group.add_option("", remove_aggregates=False, help="Use difference of Gaussian to remove possible aggergates (only use this option if there are many)")
group.add_option("", pca_mode=1.0, help="Set the PCA mode for outlier removal: 0: auto, <1: energy, >=1: number of eigen vectors", gui=dict(minimum=0.0))
group.add_option("", iter_threshold=1, help="Number of times to iterate thresholding")
group.add_option("", limit=2000, help="Limit on number of particles, 0 means give all", gui=dict(minimum=0, singleStep=1))
group.add_option("", limit_template=2000, help="Limit on number of particles after template-matching but before pruning, 0 means give all", gui=dict(minimum=0, singleStep=1))
group.add_option("", experimental=False, help="Use the latest experimental features!")
group.add_option("", real_space_nstd=2.5, help="Cutoff for real space PCA")
group.add_option("", boundary=[], help="Margin for particle selection top, bottom, left, right")
group.add_option("", threshold_minimum=25, help="Minimum number of particles for threshold selection")
group.add_option("", disk_mult_range=[], help="Experimental parameter to search range of template sizes")
group.add_option("", nstd_pw=4.0, help="Cutoff for Fourier space PCA")
group.add_option("", mask_mult=1.0, help="Change the size of the real space PCA mask")
pgroup.add_option_group(group)
if main_option:
pgroup.add_option("-i", "--micrograph-files", input_files=[], help="List of filenames for the input micrographs, e.g. mic_*.mrc", required_file=True, gui=dict(filetype="open"), regexp=spider_utility.spider_searchpath)
pgroup.add_option("-o", "--coordinate-file", output="", help="Output filename for the coordinate file with correct number of digits (e.g. sndc_0000.spi)", gui=dict(filetype="save"), required_file=True)
pgroup.add_option("", ctf_file="-", help="Input defocus file - currently ignored", required=True, gui=dict(filetype="open"))
pgroup.add_option("-s", selection_file="", help="Selection file for a subset of good micrographs", gui=dict(filetype="open"), required_file=False)
spider_params.setup_options(parser, pgroup, True)
# move next three options to benchmark
group = OptionGroup(parser, "Benchmarking", "Options to control benchmark particle selection", id=__name__)
group.add_option("-g", good="", help="Good particles for performance benchmark", gui=dict(filetype="open"))
group.add_option("", good_coords="", help="Coordindates for the good particles for performance benchmark", gui=dict(filetype="open"))
group.add_option("", good_output="", help="Output coordindates for the good particles for performance benchmark", gui=dict(filetype="save"))
group.add_option("", box_image="", help="Output filename for micrograph image with boxed particles - use `.png` as the extension", gui=dict(filetype="save"))
pgroup.add_option_group(group)
parser.change_default(log_level=3)
def change_option_defaults(parser):
''' Change the values to options specific to the script
'''
parser.change_default(bin_factor=4, window=1.35)
def check_options(options, main_option=False):
#Check if the option values are valid
from ..core.app.settings import OptionValueError
if len(options.boundary) > 0:
try: options.boundary = [int(v) for v in options.boundary]
except: raise OptionValueError, "Unable to convert boundary margin to list of integers"
if len(options.disk_mult_range) > 0:
try: options.disk_mult_range = [float(v) for v in options.disk_mult_range]
except: raise OptionValueError, "Unable to convert --disk-mult-range to list of floats"
def flags():
    ''' Get flags that define the supported features
Returns:
flags : dict
Supported features
'''
return dict(description = '''Automated particle selection (AutoPicker)
$ ls input-stack_*.spi
input-stack_0001.spi input-stack_0002.spi input-stack_0003.spi
Example: Unprocessed film micrograph
$ ara-autopick input-stack_*.spi -o coords_00001.dat -r 110
Example: Unprocessed CCD micrograph
$ ara-autopick input-stack_*.spi -o coords_00001.dat -r 110 --invert
''',
supports_MPI=True,
supports_OMP=True,
use_version=True)
def main():
'''Main entry point for this script
.. seealso::
arachnid.core.app.program.run_hybrid_program
'''
program.run_hybrid_program(__name__)
def dependents():
    ''' List of dependent modules
    The autopick script depends on lfcpick for the template-matching
    operations and uses many of the same parameters.
.. seealso::
arachnid.app.lfcpick
Returns:
modules : list
List of modules
'''
return [lfcpick]
if __name__ == "__main__": main()
|
ezralanglois/arachnid
|
arachnid/app/autopick.py
|
Python
|
gpl-2.0
| 36,461
|
[
"Gaussian"
] |
a6c9e173124dff1216a84b6c793e3fa78d020abaf8fc788720dce4b8c9eef70a
|
#############################################################################################
#
# Python script to demonstrate interaction with CASDA's SIAP v2 service.
#
# This script does a SIA 2 query to get the image cubes for a given sky location, and creates an
# async job to download all matched image cube files.
#
# Author: James Dempsey on 30 Mar 2016
#
# Written for python 2.7
# Note: astropy is available on galaxy via 'module load astropy'
# On other machines, try Anaconda https://www.continuum.io/downloads
#
#############################################################################################
from __future__ import print_function, division, unicode_literals
import argparse
import os
from astropy.coordinates import SkyCoord
from astropy import units
import casda
search_radius_degrees = 0.1
def parseargs():
"""
Parse the command line arguments
:return: An args map with the parsed arguments
"""
parser = argparse.ArgumentParser(description="Download all image cube files for a given sky location")
parser.add_argument("opal_username",
help="Your user name on the ATNF's online proposal system (normally an email address)")
parser.add_argument("-p", "--opal_password", help="Your password on the ATNF's online proposal system")
parser.add_argument("--password_file", help="The file holding your password for the ATNF's online proposal system")
parser.add_argument("ra", help="The right ascension of the sky region")
parser.add_argument("dec", help="The declination of the sky region")
parser.add_argument("destination_directory", help="The directory where the resulting files will be stored")
args = parser.parse_args()
return args
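# Illustrative sketch (not part of the original script): the ra argument may be
# given either in sexagesimal hours (e.g. "12:30:49.4") or in decimal degrees
# (e.g. "187.706"); download_images below picks the unit by looking for ':' or
# 'h' in the string, as in these two equivalent positions for M87:
def _example_parse_position():
    a = SkyCoord('12:30:49.4', '+12:23:28', frame='icrs', unit=(units.hourangle, units.deg))
    b = SkyCoord('187.706', '12.391', frame='icrs', unit=(units.deg, units.deg))
    return a, b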
def download_images(ra, dec, username, password, destination_dir, max_images=3):
if ra.find(':') > -1 or ra.find('h') > -1:
sky_loc = SkyCoord(ra, dec, frame='icrs', unit=(units.hourangle, units.deg))
else:
sky_loc = SkyCoord(ra, dec, frame='icrs', unit=(units.deg, units.deg))
sky_region_query = 'CIRCLE %f %f %f' % (sky_loc.ra.degree, sky_loc.dec.degree, search_radius_degrees)
# 2) Use CASDA SIA2 (secure) to query for the images associated with the given sky location
print ("\n\n** Finding images and image cubes ... \n\n")
image_cube_votable = casda.find_images([sky_region_query, ], username, password)
results_array = image_cube_votable.get_table_by_id('results').array
# print results_array
# 3) For each of the image cubes, query datalink to get the secure datalink details
print ("\n\n** Retrieving datalink for each image and image cube...\n\n")
authenticated_id_tokens = []
for image_cube_result in results_array:
image_cube_id = image_cube_result['obs_publisher_did'].decode('utf-8')
        # A test like the following can be used to further filter the images retrieved.
        # if image_cube_result['dataproduct_subtype'].decode() == 'cont.restored.t0':
sync_url, authenticated_id_token = casda.get_service_link_and_id(image_cube_id, username,
password,
service='spectrum_generation_service',
destination_dir=destination_dir)
if authenticated_id_token is not None and (max_images <= 0 or len(authenticated_id_tokens) < max_images):
authenticated_id_tokens.append(authenticated_id_token)
if len(authenticated_id_tokens) == 0:
print ("\n\nNo image cubes available in sky location %f %f" % (sky_loc.ra.degree, sky_loc.dec.degree))
return 1
# 4) Create the async job
job_location = casda.create_async_soda_job(authenticated_id_tokens)
# 5) Run the async job
print ("\n\n** Starting the retrieval job...\n\n")
job_status = casda.run_async_job(job_location)
print ('\nJob finished with status %s address is %s\n\n' % (job_status, job_location))
if job_status != 'ERROR':
casda.download_all(job_location, destination_dir)
return 0
def main():
args = parseargs()
password = casda.get_opal_password(args.opal_password, args.password_file)
# 1) Create the destination directory
destination_dir = args.destination_directory + "/"
if not os.path.exists(destination_dir):
os.makedirs(destination_dir)
# Change this to choose which environment to use, prod is the default
# casda.use_dev()
return download_images(args.ra, args.dec, args.opal_username, password, destination_dir)
if __name__ == '__main__':
exit(main())
|
csiro-rds/casda-samples
|
siap.py
|
Python
|
apache-2.0
| 4,692
|
[
"Galaxy"
] |
0bebf2e09b2516fdffd8f5aee4c75f538739400ff2abe5cf375a9c062b648a75
|
#!/usr/bin/env python
##############################################################################################
#
#
# regrid_emissions_N96e.py
#
#
# Requirements:
# Iris 1.10, cf_units, numpy
#
#
# This Python script has been written by N.L. Abraham as part of the UKCA Tutorials:
# http://www.ukca.ac.uk/wiki/index.php/UKCA_Chemistry_and_Aerosol_Tutorials_at_vn10.4
#
# Copyright (C) 2015 University of Cambridge
#
# This is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# It is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You find a copy of the GNU Lesser General Public License at <http://www.gnu.org/licenses/>.
#
# Written by N. Luke Abraham 2016-10-20 <nla27@cam.ac.uk>
#
#
##############################################################################################
# preamble
import os
import time
import iris
import cf_units
import numpy
# --- CHANGE THINGS BELOW THIS LINE TO WORK WITH YOUR FILES ETC. ---
# name of file containing an ENDGame grid, e.g. your model output
# NOTE: all the fields in the file should be on the same horizontal
# grid, as the field used MAY NOT be the first in order of STASH
grid_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/um/archer/ag542/apm.pp/ag542a.pm1988dec'
#
# name of emissions file
emissions_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/emissions/combined_1960/0.5x0.5/combined_sources_SO2_low_1960_360d.nc'
#
# STASH code emissions are associated with
# 301-320: surface
# m01s00i058: SO2 low level surface emissions
#
# 321-340: full atmosphere
#
stash='m01s00i058'
# --- BELOW THIS LINE, NOTHING SHOULD NEED TO BE CHANGED ---
species_name='SO2_low'
# this is the grid we want to regrid to, e.g. N96 ENDGame
grd=iris.load(grid_file)[0]
grd.coord(axis='x').guess_bounds()
grd.coord(axis='y').guess_bounds()
# This is the original data
ems=iris.load_cube(emissions_file)
# make intersection between 0 and 360 longitude to ensure that
# the data is regridded correctly
nems = ems.intersection(longitude=(0, 360))
# make sure that we use the same coordinate system, otherwise regrid won't work
nems.coord(axis='x').coord_system=grd.coord_system()
nems.coord(axis='y').coord_system=grd.coord_system()
# now guess the bounds of the new grid prior to regridding
nems.coord(axis='x').guess_bounds()
nems.coord(axis='y').guess_bounds()
# now regrid
ocube=nems.regrid(grd,iris.analysis.AreaWeighted())
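# Illustrative sketch (not part of the original script): AreaWeighted regridding
# needs contiguous bounds and a common coordinate system on both cubes, which is
# why the guess_bounds() calls and coord_system assignments above are required.
# A minimal self-contained analogue with synthetic cubes (names hypothetical):
def _example_area_weighted_regrid():
    from iris.cube import Cube
    from iris.coords import DimCoord
    from iris.coord_systems import GeogCS
    cs = GeogCS(6371229.0)
    def latlon_cube(n):
        lat = DimCoord(numpy.linspace(-89.0, 89.0, n), standard_name='latitude',
                       units='degrees', coord_system=cs)
        lon = DimCoord(numpy.linspace(1.0, 359.0, n), standard_name='longitude',
                       units='degrees', coord_system=cs)
        lat.guess_bounds()
        lon.guess_bounds()
        return Cube(numpy.ones((n, n)), dim_coords_and_dims=[(lat, 0), (lon, 1)])
    return latlon_cube(36).regrid(latlon_cube(18), iris.analysis.AreaWeighted())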
# now add correct attributes and names to netCDF file
ocube.var_name='emissions_'+str.strip(species_name)
ocube.long_name='SO2 low level emissions'
ocube.units=cf_units.Unit('kg m-2 s-1')
ocube.attributes['vertical_scaling']='surface'
ocube.attributes['um_stash_source']=stash
ocube.attributes['tracer_name']=str.strip(species_name)
# global attributes, so don't set in local_keys
# NOTE: all these should be strings, including the numbers!
# basic emissions type
ocube.attributes['emission_type']='2' # periodic time series
ocube.attributes['update_type']='2' # same as above
ocube.attributes['update_freq_in_hours']='120' # i.e. 5 days
ocube.attributes['um_version']='10.6' # UM version
ocube.attributes['source']='combined_sources_SO2_low_1960_360d.nc'
ocube.attributes['title']='Monthly surface emissions of sulfur dioxide for 1960'
ocube.attributes['File_version']='v1'
ocube.attributes['File_creation_date']=time.ctime(time.time())
ocube.attributes['grid']='regular 1.875 x 1.25 degree longitude-latitude grid (N96e)'
ocube.attributes['history']=time.ctime(time.time())+': '+__file__+' \n'+ocube.attributes['history']
ocube.attributes['institution']='Centre for Atmospheric Science, Department of Chemistry, University of Cambridge, U.K.'
ocube.attributes['reference']='Granier et al., Clim. Change, 2011; Lamarque et al., Atmos. Chem. Phys., 2010'
del ocube.attributes['NCO']
del ocube.attributes['file_creation_date']
del ocube.attributes['description']
# rename and set time coord - mid-month points for the 12 months of 1960
# this bit is annoyingly fiddly
ocube.coord(axis='t').var_name='time'
ocube.coord(axis='t').standard_name='time'
ocube.coords(axis='t')[0].units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='360_day')
ocube.coord(axis='t').points=numpy.array([15, 45, 75, 105, 135, 165, 195, 225, 255, 285, 315, 345])
# make z-direction.
zdims=iris.coords.DimCoord(numpy.array([0]),standard_name = 'model_level_number',
units='1',attributes={'positive':'up'})
ocube.add_aux_coord(zdims)
ocube=iris.util.new_axis(ocube, zdims)
# now transpose cube to put Z 2nd
ocube.transpose([1,0,2,3])
# make coordinates 64-bit
ocube.coord(axis='x').points=ocube.coord(axis='x').points.astype(dtype='float64')
ocube.coord(axis='y').points=ocube.coord(axis='y').points.astype(dtype='float64')
#ocube.coord(axis='z').points=ocube.coord(axis='z').points.astype(dtype='float64') # integer
ocube.coord(axis='t').points=ocube.coord(axis='t').points.astype(dtype='float64')
# for some reason, longitude_bounds are double, but latitude_bounds are float
ocube.coord('latitude').bounds=ocube.coord('latitude').bounds.astype(dtype='float64')
# add forecast_period & forecast_reference_time
# forecast_reference_time
frt=numpy.array([15, 45, 75, 105, 135, 165, 195, 225, 255, 285, 315, 345], dtype='float64')
frt_dims=iris.coords.AuxCoord(frt,standard_name = 'forecast_reference_time',
units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='360_day'))
ocube.add_aux_coord(frt_dims,data_dims=0)
ocube.coord('forecast_reference_time').guess_bounds()
# forecast_period
fp=numpy.array([-360],dtype='float64')
fp_dims=iris.coords.AuxCoord(fp,standard_name = 'forecast_period',
units=cf_units.Unit('hours'),bounds=numpy.array([-720,0],dtype='float64'))
ocube.add_aux_coord(fp_dims,data_dims=None)
# add-in cell_methods
ocube.cell_methods = [iris.coords.CellMethod('mean', 'time')]
# set _FillValue
fillval=1e+20
ocube.data = numpy.ma.array(data=ocube.data, fill_value=fillval, dtype='float32')
# output file name, based on species
outpath='ukca_emiss_'+species_name+'.nc'
# don't want time to be cattable, as this is a periodic emissions file
iris.FUTURE.netcdf_no_unlimited=True
# annoying hack to set a missing_value attribute as well as a _FillValue attribute
dict.__setitem__(ocube.attributes, 'missing_value', fillval)
# now write-out to netCDF
saver = iris.fileformats.netcdf.Saver(filename=outpath, netcdf_format='NETCDF3_CLASSIC')
saver.update_global_attributes(Conventions=iris.fileformats.netcdf.CF_CONVENTIONS_VERSION)
saver.write(ocube, local_keys=['vertical_scaling', 'missing_value','um_stash_source','tracer_name'])
# end of script
|
acsis-project/emissions
|
emissions/python/periodic_1960/regrid_SO2_low_emissions_n96e_360d_1960.py
|
Python
|
gpl-3.0
| 7,020
|
[
"NetCDF"
] |
f872a0ecf59e030d3b1040d78a4941aef28d2d62483097c3bf78863965b44b6e
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('visit', '0025_auto_20150614_1727'),
]
operations = [
migrations.AlterModelOptions(
name='caregiver',
options={'ordering': ('last_name', 'first_name')},
),
migrations.RemoveField(
model_name='caregiver',
name='name',
),
]
|
koebbe/homeworks
|
visit/migrations/0026_auto_20150614_1731.py
|
Python
|
mit
| 493
|
[
"VisIt"
] |
f1ed2ea841329c666504308129d3b8d4fd5cfffb09fd2320f33d830cb4854634
|
#!/usr/bin/env python
import logging
logging.basicConfig()
log = logging.getLogger("gui")
import wx
import os, sys
from util.trayErrors import NoUndoError
from xtal_panel import XtalPanel
from screen_panel import ScreenPanel
from score_panel import ScorePanel
from stock_panel import StockPanel
#from dataStructures.reporting import Report
wildcard = "Experiment Files (*.exp)|*.exp|" \
"Screen Files (*.screen)|*.screen|" \
"All files (*.*)|*.*"
txtwildcard = "cvs file (*.txt)|*.txt|"\
"All files (*.*)|*.*"
pdf_wildcard = "PDF Files (*.pdf)|*.pdf|" \
"All files (*.*)|*.*"
class MainFrame(wx.Frame):
"""
Main frame of the GUI holding the crystallization experiment.
"""
def __init__(self, data, controller, *args, **kwds):
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self ,*args, **kwds)
self.controller = controller
self.data = data
self.data.AddEventListener("frame",self)
# set program icon
ib=wx.IconBundle()
ib.AddIconFromFile(self.controller.path + "/files/images/icon.ico",wx.BITMAP_TYPE_ANY)
self.SetIcons(ib)
# Menu Bar
MenuBar = wx.MenuBar()
file_menu = wx.Menu()
menuItem = file_menu.Append(-1, "New","New file")
self.Bind(wx.EVT_MENU, self.OnNew, menuItem)
menuItem = file_menu.Append(-1, "Open","Open file")
self.Bind(wx.EVT_MENU, self.OnOpen, menuItem)
menuItem = file_menu.Append(-1, "Save","Save file")
self.Bind(wx.EVT_MENU, self.OnSave, menuItem)
menuItem = file_menu.Append(-1, "Save as","Save as different file")
self.Bind(wx.EVT_MENU, self.OnSaveAs, menuItem)
menuItem = file_menu.Append(-1, "PDF Report","Generate PDF Reports")
self.Bind(wx.EVT_MENU, self.OnReport, menuItem)
menuItem = file_menu.Append(-1, "E&xit","Terminate the program")
self.Bind(wx.EVT_MENU, self.OnQuit, menuItem)
MenuBar.Append(file_menu, "&File")
edit_menu = wx.Menu()
menuItem = edit_menu.Append(-1, "Undo", "Undo")
self.Bind(wx.EVT_MENU, self.OnUndo, menuItem)
menuItem = edit_menu.Append(-1, "Delete", "Delete Screen Solution")
self.Bind(wx.EVT_MENU, self.OnDelete, menuItem)
menuItem = edit_menu.Append(-1, "Change Screen Name", "Change Screen Name")
self.Bind(wx.EVT_MENU, self.OnNameChange, menuItem)
MenuBar.Append(edit_menu, "&Edit")
op_menu = wx.Menu()
menuItem = op_menu.Append(-1, "Initialize Reservoirs",\
"Initialize Reservoirs from Screen Solutions")
self.Bind(wx.EVT_MENU, self.OnInitReservoirs, menuItem)
menuItem = op_menu.Append(-1, "Remove unused reagents",\
"Remove unused stock reagents")
self.Bind(wx.EVT_MENU, self.OnRemoveUnusedStocks, menuItem)
menuItem = op_menu.Append(-1, "Import Stock Solutions",\
"Import Stock Solutions")
self.Bind(wx.EVT_MENU, self.OnImportStockSolutions, menuItem)
menuItem = op_menu.Append(-1, "Import Screen Solutions",\
"Import Screen Solutions")
self.Bind(wx.EVT_MENU, self.OnImportScreenSolutions, menuItem)
menuItem = op_menu.Append(-1, "Import Formulation from cvs file",\
"Import Formulation from cvs file")
self.Bind(wx.EVT_MENU, self.OnImportFromCvs, menuItem)
menuItem = op_menu.Append(-1, "Import stock reagents from cvs file",\
"Import stock reagents from cvs file")
self.Bind(wx.EVT_MENU, self.OnImportStocksFromCvs, menuItem)
menuItem = op_menu.Append(-1, "Import simplex screen from cvs file",\
"Import simplex screen from cvs file")
self.Bind(wx.EVT_MENU, self.OnImportScreenFromSimplexCvs, menuItem)
menuItem = op_menu.Append(-1, "Export Data",\
"Export Data for Analysis")
self.Bind(wx.EVT_MENU, self.OnGetAnalysisData, menuItem)
menuItem = op_menu.Append(-1, "Export Simplex Data",\
"Export Data for Simplex Analysis")
self.Bind(wx.EVT_MENU, self.OnGetSimplexData, menuItem)
MenuBar.Append(op_menu, "&Operations")
scr_menu = wx.Menu()
menuItem = scr_menu.Append(-1, "Random Generator",\
"Distribute reagents randomly based on frequency")
self.Bind(wx.EVT_MENU, self.OnRandomGeneration, menuItem)
MenuBar.Append(scr_menu, "&Screen")
self.SetMenuBar(MenuBar)
# end Menu Bar
# Event handling
self.Bind(wx.EVT_CLOSE, self.OnQuit)
if self.data:
# organize panels in notebook
self.notebook = wx.Notebook(self, -1, style=0)
# panel holding crystallization experiment and observations
self.xtal_panel = XtalPanel(self.notebook, self.data)
# panel for screen information
self.screen_panel = ScreenPanel(self.notebook, self.data)
# panel for stock solutions
self.stock_panel = StockPanel(self.notebook, self.data)
# panel for scoring system
self.score_panel = ScorePanel(self.notebook, self.data, self.controller)
self.__do_layout()
self.__set_properties()
else:
self.OpenTray()
def __set_properties(self):
self.SetTitle("pyTray - %s - %s" % (self.data.GetScreenName(), self.data.GetFilename()))
#get screen resolution
(x,y) = wx.DisplaySize()
#resize frame
self.SetSize((int(x*0.6),int(y*0.8)))
#center frame on screen
self.Centre(wx.BOTH)
self.SetSize((560, 830))
def __do_layout(self):
sizer_1 = wx.BoxSizer(wx.VERTICAL)
self.notebook.AddPage(self.xtal_panel, "Crystal Tray")
self.notebook.AddPage(self.screen_panel, "Screen Solutions")
self.notebook.AddPage(self.stock_panel, "Stock Solutions")
self.notebook.AddPage(self.score_panel, "Scoring System")
sizer_1.Add(self.notebook, 1, wx.EXPAND, 0)
self.SetAutoLayout(1)
self.SetSizer(sizer_1)
sizer_1.Fit(self)
sizer_1.SetSizeHints(self)
self.Fit()
self.Layout()
def OnDataChange(self):
self.SetTitle("pyTray - %s - %s" % (self.data.GetScreenName(), self.data.GetFilename()))
def OnDelete(self,event):
self.data.DeleteScreenSolutions()
def OnNameChange(self, event):
name = self.data.GetScreenName()
d = wx.GetTextFromUser("Please change screen name", "Screen name:", name, self)
if d:
self.data.SetScreenName(d)
self.data.UpdateEventListeners(["frame"],self)
def OnQuit(self,event):
self.controller.userData.SetValue("LastDir", os.getcwd())
if self.data:
if self.data.HasChanged():
answer = wx.MessageBox("There are unsaved changes. Are you sure you want to quit?", \
"Confirmation", wx.YES_NO)
if answer == wx.YES:
self.Destroy()
else:
self.Destroy()
else:
self.Destroy()
def OnOpen(self,event):
self.controller.OpenTray(self)
def OnInitReservoirs(self, event):
d = wx.MessageBox("Reservoir settings will be overwritten!\n Do you want to proceed?",\
"Confirm", wx.YES_NO, self)
if d == wx.YES:
self.data.InitReservoirsFromScreen()
self.xtal_panel.tray.RefreshWells()
self.screen_panel.tray.RefreshWells()
self.data.UpdateEventListeners(["front"],self)
def OnImportStockSolutions(self,event):
d = wx.MessageBox("Screen solutions and reservoir information will be deleted!",\
"Confirm", wx.YES_NO, self)
if d == wx.YES:
source = self.controller.GetTrayData(self)
self.data.ImportStockSolutions(source)
self.data.UpdateEventListeners(["reagents","screen"],self)
def OnImportScreenSolutions(self,event):
d = wx.MessageBox("Stock soutions, screen solutions and reservoir information will be deleted!",\
"Confirm", wx.YES_NO, self)
if d == wx.YES:
source = self.controller.GetTrayData(self)
self.data.ImportStockSolutions(source)
self.data.ImportScreenSolutions(source)
self.data.InitReservoirsFromScreen()
self.xtal_panel.tray.RefreshWells()
self.screen_panel.tray.RefreshWells()
self.data.UpdateEventListeners(["front","reagents","screen"],self)
def OnImportFromCvs(self,event):
self.ImportFromCvs(event,"formulations")
def OnImportStocksFromCvs(self,event):
self.ImportFromCvs(event,"stocks")
def OnImportScreenFromSimplexCvs(self,event):
self.ImportFromCvs(event,"screen")
def ImportFromCvs(self,event,type):
msg = "Screen solutions and reservoir information will be deleted!\n\n"\
"Importing from cvs (comma separated) text file.\n"\
"Lines starting with '#' will be ignored.\n"\
"Lines starting with '>' will be treated as headings.\n"
if type == "formulations":
msg += "The field 'SolutionNr' references the number of the solution in the original screen.\n"\
"The 'Position' field specifies the well position in the new screen."
d = wx.MessageBox(msg, "Confirm", wx.YES_NO, self)
if d == wx.YES:
dir = self.controller.userData.GetValue("LastDir")
dlg = wx.FileDialog(
self, message="Choose cvs file", defaultDir=dir,
defaultFile="", style=wx.OPEN | wx.CHANGE_DIR
)
# Show the dialog and retrieve the user response. If it is the OK response,
# process the data.
if dlg.ShowModal() == wx.ID_OK:
self.controller.userData.SetValue("LastDir", os.getcwd())
            # This returns the path of the selected file.
path = dlg.GetPath()
if os.access(path, os.F_OK):
from util.converter import Converter
converter = Converter(path, self.data)
if type == "formulations":
converter.convert()
elif type == "stocks":
converter.convertStocks()
elif type == "screen":
converter.convertSimplexScreen()
self.data.UpdateEventListeners(["reagents","screen"],self)
self.data.InitReservoirsFromScreen()
self.xtal_panel.tray.RefreshWells()
self.screen_panel.tray.RefreshWells()
self.data.UpdateEventListeners(["front"],self)
def OnGetAnalysisData(self, event):
self.SaveAs(self.data.SaveAnalysisData,txtwildcard)
wx.MessageBox("Data has been exported", "Message", wx.OK)
def OnGetSimplexData(self, event):
self.SaveAs(self.data.SaveSimplexData,txtwildcard)
wx.MessageBox("Data has been exported", "Message", wx.OK)
def OnNew(self, event):
self.controller.NewTray(self)
def OnRandomGeneration(self, event):
from util.screen_generator import ScreenGenerator
generator = ScreenGenerator(self.data)
generator.CreateRandomScreen()
def OnRemoveUnusedStocks(self,event):
self.data.RemoveUnusedReagents()
self.data.UpdateEventListeners(["reagents"],self)
    def OnSave(self,event):
        if self.data.GetFilename() != "Untitled":
            self.data.Save()
        else:
            self.OnSaveAs(event)
def OnSaveAs(self,event):
self.SaveAs(self.data.Save, wildcard)
def OnReport(self,event):
from dataStructures.reporting import Report
dir = self.controller.userData.GetValue("LastDir")
choiceList = ["Result Sheet", "Scoring Sheet", "Scoring Graphics", "Screen Solutions", "Screen Pipetting Scheme", "Stock Solutions"]
partList = ["scoreList", "emptyScoringSheet","scoreGraphics","screenSols", "screenVolumes","stockSols"]
parts = {}
choiceBox = wx.MultiChoiceDialog(self, "Choose for printing:", "Choicebox",choiceList)
if choiceBox.ShowModal() == wx.ID_OK:
choices = choiceBox.GetSelections()
for i in range(len(partList)):
if choices.count(i) > 0:
if partList[i] == "screenVolumes":
d = wx.GetTextFromUser ("Please enter the volume with unit.\nFor example: \"10 ml\"", "Input Volume",\
"10 ml", self,)
if d:
screenVolume = d.split(' ')
try:
screenVolume[0] = float(screenVolume[0])
if len(screenVolume) == 2:
parts[partList[i]] = screenVolume
else:
parts[partList[i]] = 0
except:
parts[partList[i]] = 0
print "Screen Volume Input Invalid"
else:
parts[partList[i]] = 0
else:
parts[partList[i]] = 1
else:
parts[partList[i]] = 0
            dlg = wx.FileDialog(
                self, message="Save Report to", defaultDir=dir,
                defaultFile=self.data.GetFilename().split('.')[0], wildcard=pdf_wildcard, style=wx.SAVE | wx.CHANGE_DIR
            )
            # Show the dialog and retrieve the user response. If it is the OK response,
            # process the data.
            if dlg.ShowModal() == wx.ID_OK:
                self.controller.userData.SetValue("LastDir", os.getcwd())
                # This returns the path of the selected file.
                path = dlg.GetPath()
                if os.access(path, os.F_OK):
                    d = wx.MessageBox("Overwrite existing file?", "Confirm", wx.YES_NO, self)
                    if d != wx.YES:
                        dlg.Destroy()
                        return
                report = Report(self.data, parts, self.xtal_panel.tray, path)
                gen = report.compile()
                dlg.Destroy()
                if gen[0]:
                    #wx.MessageBox(gen[1], "Message", wx.OK)
                    os.startfile(path)
                else:
                    wx.MessageBox(gen[1], "Message", wx.OK)
def OnUndo(self, event):
try:
self.xtal_panel.tray.ClearWells()
self.screen_panel.tray.ClearWells()
self.data.Undo()
self.screen_panel.tray.RefreshWells()
self.xtal_panel.tray.RefreshWells()
except NoUndoError, e:
d = wx.MessageBox("No more undo actions, sorry.", "Warning", wx.OK, self)
def SaveAs(self, saveFunction, wildcard):
dir = self.controller.userData.GetValue("LastDir")
dlg = wx.FileDialog(
self, message="Save as", defaultDir=dir,
defaultFile="", wildcard=wildcard, style=wx.SAVE | wx.CHANGE_DIR
)
# Show the dialog and retrieve the user response. If it is the OK response,
# process the data.
if dlg.ShowModal() == wx.ID_OK:
self.controller.userData.SetValue("LastDir", os.getcwd())
            # This returns the path of the selected file.
path = dlg.GetPath()
if os.access(path, os.F_OK):
d = wx.MessageBox("Overwrite existing file?", "Confirm", wx.YES_NO, self)
if d == wx.YES:
saveFunction(path)
else:
saveFunction(path)
dlg.Destroy()
self.data.UpdateEventListeners(["frame"],self)
"""
Testing code
****************************************
"""
import controller
class GuiApp(wx.App):
def OnInit(self):
self.controller = controller.Controller(["U:/Personal/Programming/pyTray/src/","//xtend/biopshare/Thomas/Screens/pyTray_Files/test.exp"], self)
self.controller.Start()
return True
if __name__ == "__main__":
#if True:
app = GuiApp(0)
profiling = False
if profiling:
import profile
profile.run('app.MainLoop()', 'gui_profile')
else:
app.MainLoop()
|
tschalch/pyTray
|
src/gui/main_frame.py
|
Python
|
bsd-3-clause
| 17,121
|
[
"CRYSTAL"
] |
12f2910ff4558ad71a6052cb175564c818bccc3479566bea665c232c443be79b
|
#!/usr/bin/python3
# Author: Sari Sabban
# Email: sari.sabban@gmail.com
# URL: https://github.com/sarisabban
#
# Created By: Sari Sabban
# Created Date: 13 January 2017
#
# Modified By: Sari Sabban
# Modified Date: 3 February 2017
#
# Modified By: Sari Sabban
# Modified Date: 10 February 2017
import re
import itertools
import numpy
import sys
from Bio.PDB import *
print('File must be in the same directory as this script.')
filename=sys.argv[1]
#This script calculates and prints out the SASA of each amino acid using DSSP from within biopython.
y=None
w=None
sasalist=list()
p=PDBParser()
structure=p.get_structure('X',filename)
model=structure[0]
dssp=DSSP(model,filename,acc_array='Wilke')
# Maximum accessible surface area (Wilke) for each amino acid, used to
# convert DSSP relative accessibility values into absolute SASA values.
max_acc={	'A':('ALA',129),'V':('VAL',174),'I':('ILE',197),'L':('LEU',201),
		'M':('MET',224),'P':('PRO',159),'Y':('TYR',263),'F':('PHE',240),
		'W':('TRP',285),'R':('ARG',274),'N':('ASN',195),'C':('CYS',167),
		'Q':('GLN',225),'E':('GLU',223),'G':('GLY',104),'H':('HIS',224),
		'K':('LYS',236),'S':('SER',155),'T':('THR',172),'D':('ASP',193)}
for x in dssp:
	if x[1] in max_acc:
		w,maximum=max_acc[x[1]]
		y=maximum*(x[3])
	z=str(x[0])+' '+str(w)+' '+str(y)
	sasalist.append(z)
sasainfo='\n'.join(sasalist)
#This script prints out the secondary structure each amino acid belongs to using DSSP from within biopython.
sslist=list()
p = PDBParser()
structure = p.get_structure('X', filename)
model = structure[0]
dssp = DSSP(model, filename)
for x in dssp:
if x[2]=='G' or x[2]=='H' or x[2]=='I':
y='H'
elif x[2]=='B' or x[2]=='E':
y='S'
else:
y='L'
sslist.append(y)
aa_ss=''.join(sslist)
sasainf=sasainfo.splitlines()
somelist=list()
for x,z in zip(sasainf,aa_ss):
y=x.strip().split()
somelist.append(y[0]+' '+z+' '+y[1]+' '+y[2])
data='\n'.join(somelist)
#Make a list of tuples, each containing an amino acid's secondary structure as key, and its corresponding SASA result as value. The amino acid sequence order is maintained.
lis=list()
value=None
key=None
for line in data.splitlines():
	line=line.strip()
	find_value=re.findall(r'\d+\.\d+$',line)
	for x in find_value:
		value=float(x)
	find_key=re.findall(r'\s[HSL]\s',line)
	for y in find_key:
		key=y.strip()
	lis.append((key,value))
#Label each amino acid depending on its SASA value according to the parameters highlighted in Koga et al. (2012, PMID: 23135467). The parameters are as follows:
#Surface:
# Helix or Sheet: SASA=>60
# Loop: SASA=>40
#
#Boundry:
# Helix or Sheet: 15<SASA<60
# Loop: 25<SASA<40
#
#Core:
# Helix or Sheet: SASA=<15
# Loop: SASA=<25
core=list()
boundary=list()
surface=list()
count=0
SASA=list()
for x,y in lis:
count=count+1
if y<=25 and x=='L':
core.append(count)
SASA.append('C')
elif 25<y<40 and x=='L':
boundary.append(count)
SASA.append('B')
elif y>=40 and x=='L':
surface.append(count)
SASA.append('S')
elif y<=15 and x=='H':
core.append(count)
SASA.append('C')
elif 15<y<60 and x=='H':
boundary.append(count)
SASA.append('B')
elif y>=60 and x=='H':
surface.append(count)
SASA.append('S')
elif y<=15 and x=='S':
core.append(count)
SASA.append('C')
elif 15<y<60 and x=='S':
boundary.append(count)
SASA.append('B')
elif y>=60 and x=='S':
surface.append(count)
SASA.append('S')
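# Illustrative sketch (not part of the original script): the thresholds above
# collapse to two cutoffs per secondary structure class, e.g. a helix residue
# with a SASA of 20 falls in the boundary layer (15 < 20 < 60), while a loop
# residue with the same SASA is in the core (20 <= 25).
def _example_layer(ss, sasa):
	lo, hi = (25, 40) if ss == 'L' else (15, 60)
	if sasa <= lo:
		return 'C'
	if sasa >= hi:
		return 'S'
	return 'B'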
#Convert the amino acid short names in the variable sasa into an amino acid sequence.
aminos1=list()
for line in data.splitlines():
	line=line.strip()
	AA=re.findall(r'\S[A-Z]\S',line)
	aminos1.append(AA)
# Map three-letter amino acid codes back to one-letter codes.
three_to_one={	'ALA':'A','VAL':'V','ILE':'I','LEU':'L','MET':'M','PRO':'P',
		'TYR':'Y','PHE':'F','TRP':'W','GLY':'G','CYS':'C','GLN':'Q',
		'ASN':'N','THR':'T','SER':'S','ARG':'R','HIS':'H','LYS':'K',
		'ASP':'D','GLU':'E'}
aminos2=list()
for x in aminos1:
	for y in x:
		if y in three_to_one:
			aminos2.append(three_to_one[y])
#Print the custom PYMOL commands to select the different protein layers according to each amino acid's SASA.
print('Surface Amino Acids Command:')
print('select Surf, resi','+'.join(str(z) for z in surface),'\n')
print('Boundary Amino Acids Command:')
print('select Bound, resi','+'.join(str(z) for z in boundary),'\n')
print('Core Amino Acids Command:')
print('select Core, resi','+'.join(str(z) for z in core),'\n')
#Calculate which amino acids are in the wrong layer. The rules used by this script come from the Rosetta LayerDesign protocol (source: goo.gl/NsQubf) and are as follows:
#Surface
# Loop: PGNQSTDERKH
# Helix: QEKH
# Strand: QTY
#
#Boundary
# Loop: AVILFYWGNQSTPDEHR
# Helix: AVILWQEKFM
# Strand: AVILFYWQTM
#
#Core:
# Loop: AVILPFWM
# Helix: AVILFW
# Strand: AVILFWM
def pairwise(iterable):
	a, b = itertools.tee(iterable)
	next(b, None)
	return zip(a, b)
mutate=list()
for a,(b1,b2),c in zip(SASA,(pairwise(lis)),aminos2):
	if a=='S' and b2[0]=='L' and c in 'PGNQSTDERKH':
		mutate.append(' ')
	elif a=='B' and b2[0]=='L' and c in 'AVILFYWGNQSTPDEHR':
		mutate.append(' ')
	elif a=='C' and b2[0]=='L' and c in 'AVILPFWM':
		mutate.append(' ')
	elif a=='S' and b2[0]=='H' and c in 'QEKH':
		mutate.append(' ')
	elif a=='B' and b2[0]=='H' and c in 'AVILWQEKFM':
		mutate.append(' ')
	elif a=='C' and b2[0]=='H' and c in 'AVILFW':
		mutate.append(' ')
	elif a=='S' and b2[0]=='S' and c in 'QTY':
		mutate.append(' ')
	elif a=='B' and b2[0]=='S' and c in 'AVILFYWQTM':
		mutate.append(' ')
	elif a=='C' and b2[0]=='S' and c in 'AVILFWM':
		mutate.append(' ')
	else:
		mutate.append('*')
#Print alignment of SASA sequence, secondary structure sequence, amino acids sequence, and indicate with * which amino acids are in the wrong layer and should be mutated.
print('---------------\n')
print('SASA:\t',''.join(str(x) for x in SASA))
print('SS:\t',''.join(str(x) for x,y in lis))
print('AA:\t',''.join(str(x) for x in aminos2))
print('Mutate:\t',''.join(str(x) for x in mutate))
|
sarisabban/RefineAssist
|
RefineAssist3.py
|
Python
|
mit
| 7,363
|
[
"Biopython",
"PyMOL"
] |
2a12b95b0f05ef3125ad3c2d1dcccb9495b8d4fc489febb90db1605c50bc6bbf
|
#
# This source file is part of appleseed.
# Visit https://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2016-2019 Esteban Tovagliari, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Maya imports.
import maya.OpenMaya as om
import maya.cmds as mc
import maya.mel as mel
import pymel.core as pm
# appleseedMaya imports.
from logger import logger
def createGlobalNodes():
if mc.objExists("appleseedRenderGlobals"):
return
sel = mc.ls(sl=True)
mc.createNode(
"appleseedRenderGlobals",
name="appleseedRenderGlobals",
shared=True,
skipSelect=True)
mc.lockNode("appleseedRenderGlobals")
mc.select(sel, replace=True)
logger.debug("Created appleseed render global node")
def createRenderTabsMelProcedures():
pm.mel.source("createMayaSoftwareCommonGlobalsTab.mel")
mel.eval('''
global proc appleseedUpdateCommonTabProcedure()
{
updateMayaSoftwareCommonGlobalsTab();
python("import appleseedMaya.renderGlobals");
python("appleseedMaya.renderGlobals.postUpdateCommonTab()");
}
'''
)
mel.eval('''
global proc appleseedCreateAppleseedMainTabProcedure()
{
python("import appleseedMaya.renderGlobals");
python("appleseedMaya.renderGlobals.g_appleseedMainTab.create()");
}
'''
)
mel.eval('''
global proc appleseedUpdateAppleseedMainTabProcedure()
{
python("import appleseedMaya.renderGlobals");
python("appleseedMaya.renderGlobals.g_appleseedMainTab.update()");
}
'''
)
mel.eval('''
global proc appleseedCreateAppleseedLightingTabProcedure()
{
python("import appleseedMaya.renderGlobals");
python("appleseedMaya.renderGlobals.g_appleseedLightingTab.create()");
}
'''
)
mel.eval('''
global proc appleseedUpdateAppleseedLightingTabProcedure()
{
python("import appleseedMaya.renderGlobals");
python("appleseedMaya.renderGlobals.g_appleseedLightingTab.update()");
}
'''
)
mel.eval('''
global proc appleseedCreateAppleseedOutputTabProcedure()
{
python("import appleseedMaya.renderGlobals");
python("appleseedMaya.renderGlobals.g_appleseedOutputTab.create()");
}
'''
)
mel.eval('''
global proc appleseedUpdateAppleseedOutputTabProcedure()
{
python("import appleseedMaya.renderGlobals");
python("appleseedMaya.renderGlobals.g_appleseedOutputTab.update()");
}
'''
)
mel.eval('''
global proc appleseedCreateAppleseedSystemTabProcedure()
{
python("import appleseedMaya.renderGlobals");
python("appleseedMaya.renderGlobals.g_appleseedSystemTab.create()");
}
'''
)
mel.eval('''
global proc appleseedUpdateAppleseedSystemTabProcedure()
{
python("import appleseedMaya.renderGlobals");
python("appleseedMaya.renderGlobals.g_appleseedSystemTab.update()");
}
'''
)
def renderSettingsBuiltCallback(renderer):
logger.debug("appleseedRenderSettingsBuilt called!")
pm.renderer(
"appleseed",
edit=True,
addGlobalsTab=(
"Common",
"createMayaSoftwareCommonGlobalsTab",
"appleseedUpdateCommonTabProcedure"
)
)
pm.renderer(
"appleseed",
edit=True,
addGlobalsTab=(
"appleseed",
"appleseedCreateAppleseedMainTabProcedure",
"appleseedUpdateAppleseedMainTabProcedure"
)
)
pm.renderer(
"appleseed",
edit=True,
addGlobalsTab=(
"Lighting",
"appleseedCreateAppleseedLightingTabProcedure",
"appleseedUpdateAppleseedLightingTabProcedure"
)
)
pm.renderer(
"appleseed",
edit=True,
addGlobalsTab=(
"Output",
"appleseedCreateAppleseedOutputTabProcedure",
"appleseedUpdateAppleseedOutputTabProcedure"
)
)
pm.renderer(
"appleseed",
edit=True,
addGlobalsTab=(
"System",
"appleseedCreateAppleseedSystemTabProcedure",
"appleseedUpdateAppleseedSystemTabProcedure"
)
)
g_nodeAddedCallbackID = None
g_nodeRemovedCallbackID = None
g_environmentLightsList = []
APPLESEED_ENVIRONMENT_LIGHTS = [
"appleseedSkyDomeLight",
"appleseedPhysicalSkyLight"
]
g_columnWidth = 400
g_subColumnWidth = g_columnWidth - 20
g_margin = 2
def __nodeAdded(node, data):
depNodeFn = om.MFnDependencyNode(node)
nodeType = depNodeFn.typeName()
if nodeType in APPLESEED_ENVIRONMENT_LIGHTS:
logger.debug("Added or removed appleseed environment light")
global g_environmentLightsList
g_environmentLightsList.append(depNodeFn.name())
g_appleseedMainTab.updateEnvLightControl()
def __nodeRemoved(node, data):
depNodeFn = om.MFnDependencyNode(node)
nodeType = depNodeFn.typeName()
if nodeType in APPLESEED_ENVIRONMENT_LIGHTS:
logger.debug("Removed appleseed environment light")
global g_environmentLightsList
g_environmentLightsList.remove(depNodeFn.name())
g_appleseedMainTab.updateEnvLightControl()
def addRenderGlobalsScriptJobs():
logger.debug("Adding render globals script jobs")
global g_nodeAddedCallbackID
assert g_nodeAddedCallbackID is None
g_nodeAddedCallbackID = om.MDGMessage.addNodeAddedCallback(__nodeAdded)
global g_nodeRemovedCallbackID
assert g_nodeRemovedCallbackID is None
g_nodeRemovedCallbackID = om.MDGMessage.addNodeRemovedCallback(
__nodeRemoved)
# This is evalDeferred so it doesn't get
# called before createMayaSoftwareCommonGlobalsTab
python_script = "import appleseedMaya.renderGlobals; appleseedMaya.renderGlobals.currentRendererChanged()"
mc.scriptJob(
attributeChange=[
"defaultRenderGlobals.currentRenderer",
lambda: mc.evalDeferred(python_script, lowestPriority=True),
]
)
# For fixing the render globals common tab when opening new scene
# and the default renderer is appleseed
mc.scriptJob(
event=[
'NewSceneOpened',
lambda: mc.evalDeferred(python_script, lowestPriority=True),
]
)
# For fixing the render globals common tab on initial startup of maya
# when the default renderer is appleseed
mc.evalDeferred(python_script, lowestPriority=True)
def removeRenderGlobalsScriptJobs():
global g_nodeAddedCallbackID
assert g_nodeAddedCallbackID is not None
om.MMessage.removeCallback(g_nodeAddedCallbackID)
g_nodeAddedCallbackID = None
global g_nodeRemovedCallbackID
assert g_nodeRemovedCallbackID is not None
om.MMessage.removeCallback(g_nodeRemovedCallbackID)
g_nodeRemovedCallbackID = None
logger.debug("Removed render globals script jobs")
def imageFormatChanged():
logger.debug("imageFormatChanged called")
    # Since we only support two file formats at the moment, we can hardcode things.
    # 32 is Maya's format code for PNG; 51 means a custom image format.
    # We also update the extension attribute (used in the file name preview).
newFormat = mc.getAttr("appleseedRenderGlobals.imageFormat")
if newFormat == 0: # EXR
mc.setAttr("defaultRenderGlobals.imageFormat", 51)
mc.setAttr("defaultRenderGlobals.imfkey", "exr", type="string")
mc.optionMenuGrp("imageMenuMayaSW", edit=True, select=newFormat + 1)
elif newFormat == 1: # PNG
mc.setAttr("defaultRenderGlobals.imageFormat", 32)
mc.setAttr("defaultRenderGlobals.imfkey", "png", type="string")
mc.optionMenuGrp("imageMenuMayaSW", edit=True, select=newFormat + 1)
else:
raise RuntimeError("Unknown render global image file format")
def currentRendererChanged():
newRenderer = mel.eval("currentRenderer()")
logger.debug("currentRendererChanged called, new renderer = %s", newRenderer)
if newRenderer != "appleseed":
return
# Make sure our render globals node exists.
createGlobalNodes()
# If the render globals window does not exist, create it.
if not mc.window("unifiedRenderGlobalsWindow", exists=True):
mel.eval("unifiedRenderGlobalsWindow")
if pm.versions.current() >= 2017000:
mc.workspaceControl("unifiedRenderGlobalsWindow", edit=True, visible=False)
else:
mc.window("unifiedRenderGlobalsWindow", edit=True, visible=False)
    # This can happen if currentRendererChanged is called too soon during startup,
    # before unifiedRenderGlobalsWindow is fully built. It is known to happen when
    # the default renderer is appleseed and a scene is opened as a command-line
    # argument. In that case the NewSceneOpened scriptJob will call
    # currentRendererChanged again later.
if not mc.optionMenuGrp('imageMenuMayaSW', q=True, ex=True):
logger.warn("imageMenuMayaSW does not exists yet")
return
# "Customize" the image formats menu.
mc.setParent("unifiedRenderGlobalsWindow")
mel.eval("setParentToCommonTab;")
mc.setParent("imageFileOutputSW")
mc.setParent("imageMenuMayaSW")
mc.setParent("..")
parent = mc.setParent(q=True)
# Remove the menu callback and the menu items.
mel.eval('optionMenuGrp -e -changeCommand "" imageMenuMayaSW;')
items = mc.optionMenuGrp("imageMenuMayaSW", q=True, itemListLong=True)
for item in items:
mc.deleteUI(item)
# Add the formats we support.
menu = parent + "|imageMenuMayaSW|OptionMenu"
mc.menuItem(parent=menu, label="OpenEXR (.exr)", data=0)
mc.menuItem(parent=menu, label="PNG (.png)", data=1)
# Connect the control to one internal attribute in our globals node
# so that we can add a changed callback to it.
mc.connectControl(
"imageMenuMayaSW", "appleseedRenderGlobals.imageFormat", index=1)
mc.connectControl(
"imageMenuMayaSW", "appleseedRenderGlobals.imageFormat", index=2)
    # Add a callback for when our internal attribute changes.
    # The callback reads the current value of our internal attribute and
    # uses it to update the original image format attribute (closing the loop).
mc.scriptJob(
parent=parent,
replacePrevious=True,
attributeChange=[
"appleseedRenderGlobals.imageFormat",
"import appleseedMaya.renderGlobals; appleseedMaya.renderGlobals.imageFormatChanged()"]
)
# Update the image format controls now.
imageFormatChanged()
def postUpdateCommonTab():
imageFormatChanged()
class AppleseedRenderGlobalsTab(object):
def __init__(self):
self._uis = {}
def _addControl(self, ui, attrName, connectIndex=2):
self._uis[attrName] = ui
attr = pm.Attribute("appleseedRenderGlobals." + attrName)
pm.connectControl(ui, attr, index=connectIndex)
def _addFieldSliderControl(self, attrName, **kwargs):
attr = pm.Attribute("appleseedRenderGlobals." + attrName)
self._uis[attrName] = pm.attrFieldSliderGrp(
attribute=attr,
**kwargs)
def _getAttributeMenuItems(self, attrName):
attr = pm.Attribute("appleseedRenderGlobals." + attrName)
menuItems = [
(i, v) for i, v in enumerate(attr.getEnums().keys())
]
return menuItems
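# Minimal usage sketch for the base-class helpers above (illustrative names;
# assumes an "appleseedRenderGlobals.someAttr" attribute exists):
#
#     tab = AppleseedRenderGlobalsTab()
#     tab._addControl(
#         ui=pm.checkBoxGrp(label="Some Toggle"),
#         attrName="someAttr")
#     tab._addFieldSliderControl(
#         label="Some Value",
#         minValue=0,
#         maxValue=10,
#         attrName="someAttr")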
class AppleseedRenderGlobalsMainTab(AppleseedRenderGlobalsTab):
def __adaptiveSamplerChanged(self, value):
self._uis["minPixelSamples"].setEnable(value)
self._uis["batchSampleSize"].setEnable(value)
self._uis["sampleNoiseThreshold"].setEnable(value)
if value:
mc.setAttr("appleseedRenderGlobals.samples", 256)
else:
mc.setAttr("appleseedRenderGlobals.samples", 32)
def __motionBlurChanged(self, value):
self._uis["mbCameraSamples"].setEnable(value)
self._uis["mbTransformSamples"].setEnable(value)
self._uis["mbDeformSamples"].setEnable(value)
self._uis["shutterOpen"].setEnable(value)
self._uis["shutterClose"].setEnable(value)
def __environmentLightSelected(self, envLight):
logger.debug("Environment light selected: %s" % envLight)
connections = mc.listConnections(
"appleseedRenderGlobals.envLight",
plugs=True)
if connections:
mc.disconnectAttr(
connections[0], "appleseedRenderGlobals.envLight")
if envLight != "<none>":
mc.connectAttr(
envLight + ".globalsMessage",
"appleseedRenderGlobals.envLight")
def updateEnvLightControl(self):
if "envLight" in self._uis:
logger.debug("Updating env lights menu")
uiName = self._uis["envLight"]
# Return if the menu does not exist yet.
if not pm.optionMenu(uiName, exists=True):
return
# Remove the callback.
pm.optionMenu(uiName, edit=True, changeCommand="")
# Delete the menu items.
items = pm.optionMenu(uiName, query=True, itemListLong=True)
for item in items:
pm.deleteUI(item)
connections = mc.listConnections("appleseedRenderGlobals.envLight")
# Rebuild the menu.
pm.menuItem(parent=uiName, label="<none>")
for envLight in g_environmentLightsList:
pm.menuItem(parent=uiName, label=envLight)
# Update the currently selected item.
if connections:
node = connections[0]
if mc.nodeType(node) == "transform":
shapes = mc.listRelatives(node, shapes=True)
assert shapes
node = shapes[0]
pm.optionMenu(uiName, edit=True, value=node)
else:
pm.optionMenu(uiName, edit=True, value="<none>")
# Restore the callback.
pm.optionMenu(
uiName, edit=True, changeCommand=self.__environmentLightSelected)
def __lockSamplingPatternChanged(self, value):
self._uis["noiseSeed"].setEnable(value)
def create(self):
# Create default render globals node if needed.
createGlobalNodes()
parentForm = pm.setParent(query=True)
pm.setUITemplate("renderGlobalsTemplate", pushTemplate=True)
pm.setUITemplate("attributeEditorTemplate", pushTemplate=True)
with pm.scrollLayout("appleseedScrollLayout", horizontalScrollBarThickness=0):
with pm.columnLayout("appleseedColumnLayout", adjustableColumn=True, width=g_columnWidth):
with pm.frameLayout("samplingFrameLayout", label="Sampling", collapsable=True, collapse=False):
with pm.columnLayout("samplingColumnLayout", adjustableColumn=True, width=g_subColumnWidth,
rowSpacing=2):
pm.separator(height=2)
self._addFieldSliderControl(
label="Render Passes",
columnWidth=(3, 160),
columnAttach=(1, "right", 4),
minValue=1,
fieldMinValue=1,
maxValue=100,
fieldMaxValue=1000000,
attrName="passes")
pm.separator(height=2)
self._addControl(
ui=pm.checkBoxGrp(
label="Adaptive Sampling",
height=18,
columnAttach=(1, "right", 4),
changeCommand=self.__adaptiveSamplerChanged),
attrName="adaptiveSampling")
pm.separator(height=2)
adaptiveSampling = mc.getAttr("appleseedRenderGlobals.adaptiveSampling")
self._addFieldSliderControl(
label="Min Samples",
columnWidth=(3, 160),
columnAttach=(1, "right", 4),
minValue=0,
fieldMinValue=0,
maxValue=256,
fieldMaxValue=1000000,
enable=adaptiveSampling,
attrName="minPixelSamples")
self._addFieldSliderControl(
label="Max Samples",
columnWidth=(3, 160),
columnAttach=(1, "right", 4),
minValue=1,
fieldMinValue=0,
maxValue=1024,
fieldMaxValue=1000000,
attrName="samples")
self._addFieldSliderControl(
label="Batch Sample Size",
columnWidth=(3, 160),
columnAttach=(1, "right", 4),
minValue=1,
fieldMinValue=1,
maxValue=128,
fieldMaxValue=1000000,
enable=adaptiveSampling,
attrName="batchSampleSize")
self._addFieldSliderControl(
label="Noise Threshold",
step=0.02,
precision=4,
columnWidth=(3, 160),
columnAttach=(1, "right", 4),
minValue=0.0001,
fieldMinValue=0.0,
maxValue=2.0,
fieldMaxValue=25.0,
enable=adaptiveSampling,
attrName="sampleNoiseThreshold")
pm.separator(height=2)
self._addControl(
ui=pm.attrEnumOptionMenuGrp(
label="Pixel Filter",
columnAttach=(1, "right", 4),
enumeratedItem=self._getAttributeMenuItems("pixelFilter")),
attrName="pixelFilter")
self._addFieldSliderControl(
label="Pixel Filter Size",
sliderStep=0.5,
precision=1,
columnWidth=(3, 160),
columnAttach=(1, "right", 4),
minValue=0.5,
fieldMinValue=0.5,
maxValue=4.0,
fieldMaxValue=20.0,
attrName="pixelFilterSize")
self._addFieldSliderControl(
label="Tile Size",
columnWidth=(3, 160),
columnAttach=(1, "right", 4),
minValue=8,
fieldMinValue=1,
maxValue=1024,
fieldMaxValue=65536,
attrName="tileSize")
pm.separator(height=2)
self._addControl(
ui=pm.checkBoxGrp(
label="Lock Sampling Pattern",
height=18,
columnAttach=(1, "right", 4),
changeCommand=self.__lockSamplingPatternChanged),
attrName="lockSamplingPattern")
lockSamplingPattern = mc.getAttr("appleseedRenderGlobals.lockSamplingPattern")
pm.separator(height=2)
self._addFieldSliderControl(
label="Sampling Pattern Seed",
columnWidth=(3, 160),
minValue=-65536,
fieldMinValue=-2147483648,
maxValue=65535,
enable=lockSamplingPattern,
fieldMaxValue=2147483647,
attrName="noiseSeed")
pm.separator(height=2)
with pm.frameLayout("motionBlurFrameLayout", label="Motion Blur", collapsable=True, collapse=True):
with pm.columnLayout("motionBlurColumnLayout", adjustableColumn=True, width=g_subColumnWidth,
rowSpacing=2):
pm.separator(height=2)
self._addControl(
ui=pm.checkBoxGrp(
label="Motion Blur",
height=18,
columnAttach=(1, "right", 4),
changeCommand=self.__motionBlurChanged),
attrName="motionBlur")
pm.separator(height=2)
enableMotionBlur = mc.getAttr(
"appleseedRenderGlobals.motionBlur")
self._addFieldSliderControl(
label="Camera Samples",
columnWidth=(3, 160),
columnAttach=(1, "right", 4),
minValue=2,
fieldMinValue=2,
maxValue=30,
fieldMaxValue=1000,
enable=enableMotionBlur,
attrName="mbCameraSamples")
self._addFieldSliderControl(
label="Transformation Samples",
columnWidth=(3, 160),
columnAttach=(1, "right", 4),
minValue=2,
fieldMinValue=2,
maxValue=30,
fieldMaxValue=1000,
enable=enableMotionBlur,
attrName="mbTransformSamples")
self._addFieldSliderControl(
label="Deformation Samples",
columnWidth=(3, 160),
columnAttach=(1, "right", 4),
minValue=2,
fieldMinValue=2,
maxValue=30,
fieldMaxValue=1000,
enable=enableMotionBlur,
attrName="mbDeformSamples")
pm.separator(height=2)
self._addFieldSliderControl(
label="Shutter Open",
sliderStep=0.05,
precision=2,
columnWidth=(3, 160),
columnAttach=(1, "right", 4),
minValue=-1.0,
fieldMinValue=-1.0,
maxValue=0.0,
fieldMaxValue=0.0,
enable=enableMotionBlur,
attrName="shutterOpen")
self._addFieldSliderControl(
label="Shutter Close",
sliderStep=0.05,
precision=2,
columnWidth=(3, 160),
columnAttach=(1, "right", 4),
minValue=0.0,
fieldMinValue=0.0,
maxValue=1.0,
fieldMaxValue=1.0,
enable=enableMotionBlur,
attrName="shutterClose")
pm.separator(height=2)
with pm.frameLayout("sceneFrameLayout", label="Scene", collapsable=True, collapse=False):
with pm.columnLayout("sceneColumnLayout", adjustableColumn=True, width=g_subColumnWidth,
rowSpacing=2):
pm.separator(height=2)
self._addFieldSliderControl(
label="Scene Scale",
sliderStep=0.1,
precision=2,
columnWidth=(3, 160),
columnAttach=(1, "right", 4),
minValue=0.01,
fieldMinValue=1.0e-6,
maxValue=100,
fieldMaxValue=1.0e+6,
attrName="sceneScale")
pm.separator(height=2)
with pm.rowLayout("appleseedRowLayout", numberOfColumns=3, columnAttach=(1, "right", 4)):
pm.text("Environment Light")
ui = pm.optionMenu(
changeCommand=self.__environmentLightSelected)
pm.menuItem(label="<none>")
for envLight in g_environmentLightsList:
pm.menuItem(parent=ui, label=envLight)
# Set the currently selected environment light in the menu.
connections = mc.listConnections(
"appleseedRenderGlobals.envLight")
if connections:
node = connections[0]
if mc.nodeType(node) == "transform":
shapes = mc.listRelatives(node, shapes=True)
assert shapes
node = shapes[0]
pm.optionMenu(ui, edit=True, value=node)
else:
pm.optionMenu(ui, edit=True, value="<none>")
self._uis["envLight"] = ui
logger.debug(
"Created globals env light menu, name = %s" % ui)
pm.separator(height=2)
self._addControl(
ui=pm.checkBoxGrp(
label="Environment Visible",
columnAttach=(1, "right", 4),
height=18),
attrName="bgLight")
pm.separator(height=2)
pm.setUITemplate("renderGlobalsTemplate", popTemplate=True)
pm.setUITemplate("attributeEditorTemplate", popTemplate=True)
pm.formLayout(
parentForm,
edit=True,
attachForm=[
("appleseedScrollLayout", "top", 0),
("appleseedScrollLayout", "bottom", 0),
("appleseedScrollLayout", "left", 0),
("appleseedScrollLayout", "right", 0)])
logger.debug("Created appleseed render global main tab")
# Update the newly created tab.
self.update()
def update(self):
assert mc.objExists("appleseedRenderGlobals")
# self.updateEnvLightControl()
g_appleseedMainTab = AppleseedRenderGlobalsMainTab()
class AppleseedRenderGlobalsLightingTab(AppleseedRenderGlobalsTab):
def __limitBouncesChanged(self, value):
self._uis["bounces"].setEnable(value)
self._uis["specularBounces"].setEnable(value)
self._uis["glossyBounces"].setEnable(value)
self._uis["diffuseBounces"].setEnable(value)
def __enableMaxRayIntensityChanged(self, value):
self._uis["maxRayIntensity"].setEnable(value)
def __enableIBLChanged(self, value):
self._uis["envSamples"].setEnable(value)
def __enableDirectLightingChanged(self, value):
self._uis["lightSamples"].setEnable(value)
def __limitPhotonTracingBouncesChanged(self, value):
self._uis["photonTracingBounces"].setEnable(value)
def __limitRadianceEstimationBouncesChanged(self, value):
self._uis["radianceEstimationBounces"].setEnable(value)
def __enableMaxRayIntensitySPPMChanged(self, value):
self._uis["maxRayIntensitySPPM"].setEnable(value)
def __enablePhotonTracingEnvPhotonsChanged(self, value):
self._uis["photonTracingEnvPhotons"].setEnable(value)
def create(self):
# Create default render globals node if needed.
createGlobalNodes()
parentForm = pm.setParent(query=True)
pm.setUITemplate("renderGlobalsTemplate", pushTemplate=True)
pm.setUITemplate("attributeEditorTemplate", pushTemplate=True)
with pm.scrollLayout("lightingScrollLayout", horizontalScrollBarThickness=0):
with pm.columnLayout("lightingColumnLayout", adjustableColumn=True, width=g_columnWidth):
with pm.frameLayout("lightingFrameLayout", label="Lighting", collapsable=True, collapse=False):
with pm.columnLayout("lightingColumnLayout", adjustableColumn=True, width=g_subColumnWidth,
rowSpacing=2):
pm.separator(height=2)
self._addControl(
ui=pm.attrEnumOptionMenuGrp(
label="Lighting Engine",
enumeratedItem=self._getAttributeMenuItems("lightingEngine"),
columnAttach=[(1, "right", 4), (2, "right", 0)],
columnWidth=[(1, 120), (2, 240)],
height=22),
attrName="lightingEngine")
pm.separator(height=2)
with pm.frameLayout("pathTracingFrameLayout", label="Unidirectional Path Tracing",
collapsable=True, collapse=True):
with pm.columnLayout("pathTracingColumnLayout", adjustableColumn=True,
width=g_subColumnWidth - g_margin, rowSpacing=2):
pm.separator(height=2)
with pm.rowColumnLayout("pathTracingRowColumnLayout", numberOfRows=2,
rowOffset=[(1, "top", 2), (2, "both", 2), (3, "both", 2)],
rowSpacing=(2, 2)):
self._addControl(
ui=pm.checkBoxGrp(
label="Caustics",
columnAttach=(1, "right", 4)),
attrName="caustics")
self._addControl(
ui=pm.checkBoxGrp(
label="Direct Lighting",
columnAttach=(1, "right", 4),
changeCommand=self.__enableDirectLightingChanged),
attrName="enableDirectLighting")
self._addControl(
ui=pm.checkBoxGrp(
label="Image-Based Lighting",
columnAttach=(1, "right", 4),
changeCommand=self.__enableIBLChanged),
attrName="enableIBL")
self._addControl(
ui=pm.checkBoxGrp(
label="Limit Bounces",
columnAttach=(1, "right", 4),
changeCommand=self.__limitBouncesChanged),
attrName="limitBounces")
pm.separator(height=2)
limitBounces = mc.getAttr(
"appleseedRenderGlobals.limitBounces")
enableIBL = mc.getAttr(
"appleseedRenderGlobals.enableIBL")
enableDirectLighting = mc.getAttr(
"appleseedRenderGlobals.enableDirectLighting")
self._addFieldSliderControl(
label="Global Bounces",
columnWidth=[(1, 115), (2, 40), (3, 200)],
columnAttach=[(1, "right", 4), (2, "right", 2), (3, "right", 8)],
minValue=0,
maxValue=30,
fieldMinValue=0,
fieldMaxValue=100,
attrName="bounces")
self._addFieldSliderControl(
label="Diffuse Bounces",
columnWidth=[(1, 115), (2, 40), (3, 200)],
columnAttach=[(1, "right", 4), (2, "right", 2), (3, "right", 8)],
rowAttach=[(1, "both", 4)],
enable=limitBounces,
minValue=0,
maxValue=30,
fieldMinValue=0,
fieldMaxValue=100,
attrName="diffuseBounces")
self._addFieldSliderControl(
label="Glossy Bounces",
columnWidth=[(1, 115), (2, 40), (3, 200)],
columnAttach=[(1, "right", 4), (2, "right", 2), (3, "right", 8)],
enable=limitBounces,
minValue=0,
maxValue=30,
fieldMinValue=0,
fieldMaxValue=100,
attrName="glossyBounces")
self._addFieldSliderControl(
label="Specular Bounces",
columnWidth=[(1, 115), (2, 40), (3, 200)],
columnAttach=[(1, "right", 4), (2, "right", 2), (3, "right", 8)],
enable=limitBounces,
minValue=0,
maxValue=30,
fieldMinValue=0,
fieldMaxValue=100,
attrName="specularBounces")
pm.separator(height=2)
"""
self._addControl(
ui=pm.attrEnumOptionMenuGrp(
label="Light Sampler",
enumeratedItem=self._getAttributeMenuItems("lightSamplingAlgorithm"),
columnAttach=[(1, "right", 2), (2, "right", 0)],
columnWidth=[(1, 120), (2, 240)]),
attrName="lightSamplingAlgorithm")
pm.separator(height=2)
"""
with pm.rowColumnLayout("pathTracingLightISRowColumnLayout", numberOfRows=1):
self._addControl(
ui=pm.checkBoxGrp(
label1="Lights Importance Sampling",
height=18,
columnAttach=[(1, "right", 4), (2, "right", 4)],
columnWidth=[(1, 86), (2, 200)]),
attrName="lightImportanceSampling")
pm.separator(height=2)
self._addFieldSliderControl(
label="Light Samples",
sliderStep=1.0,
fieldStep=0.1,
precision=1,
columnWidth=[(1, 115), (2, 40), (3, 200)],
columnAttach=[(1, "right", 4), (2, "right", 2), (3, "right", 0)],
rowAttach=[(1, "both", 4)],
enable=enableDirectLighting,
minValue=0.0,
maxValue=20.0,
fieldMinValue=0.0,
fieldMaxValue=1000000.0,
annotation="Number of light samples used to estimate direct lighting.",
attrName="lightSamples")
self._addFieldSliderControl(
label="IBL Samples",
sliderStep=1.0,
fieldStep=0.1,
precision=1,
columnWidth=[(1, 115), (2, 40), (3, 240)],
columnAttach=[(1, "right", 4), (2, "right", 2), (3, "right", 0)],
enable=enableIBL,
minValue=0.0,
maxValue=20.0,
fieldMinValue=0.0,
fieldMaxValue=1000000.0,
annotation="Number of samples used to estimate environment or image-based lighting.",
attrName="envSamples")
self._addFieldSliderControl(
label="Low Light Threshold",
precision=6,
columnWidth=[(1, 115), (2, 60), (3, 190)],
columnAttach=[(1, "right", 4), (2, "right", 2), (3, "right", 0)],
minValue=0.0,
maxValue=1.0,
fieldMinValue=0.0,
fieldMaxValue=1000.0,
annotation="Threshold at which shadow rays are terminated.",
attrName="lowLightThreshold")
pm.separator(height=2)
enableMaxRayIntensity = True
with pm.rowColumnLayout("pathTracingClampingRowColumnLayout", numberOfRows=1):
self._addControl(
ui=pm.checkBoxGrp(
label="Clamp Roughness",
height=18,
columnAttach=(1, "right", 4),
annotation="Clamp roughness on secondary and subsequent rays."),
attrName="clampRoughness")
self._addControl(
ui=pm.checkBoxGrp(
label="Clamp Ray Intensity",
height=18,
columnAttach=(1, "right", 4),
annotation="Clamp secondary and subsequent rays intensity in order to reduce fireflies.",
changeCommand=self.__enableMaxRayIntensityChanged),
attrName="enableMaxRayIntensity")
enableMaxRayIntensity = mc.getAttr("appleseedRenderGlobals.enableMaxRayIntensity")
pm.separator(height=2)
self._addFieldSliderControl(
label="Max Ray Intensity",
sliderStep=1.0,
fieldStep=0.1,
precision=1,
columnWidth=[(1, 100), (2, 40), (3, 200)],
columnAttach=[(1, "right", 4), (2, "right", 2), (3, "right", 0)],
minValue=0.0,
maxValue=2.0,
fieldMinValue=0.0,
fieldMaxValue=10000.0,
annotation="Maximum ray intensity allowed on secondary and subsequent rays.",
enable=enableMaxRayIntensity,
attrName="maxRayIntensity")
pm.separator(height=2)
with pm.frameLayout("sppmFrameLayout", label="Stochastic Progressive Photon Mapping",
collapsable=True, collapse=True):
with pm.columnLayout("sppmColumnLayout", adjustableColumn=True,
width=g_subColumnWidth - g_margin,
rowSpacing=2):
pm.separator(height=2)
self._addControl(
ui=pm.attrEnumOptionMenuGrp(
label="Photon Type",
enumeratedItem=self._getAttributeMenuItems("photonType"),
columnAttach=(1, "right", 4),
annotation="Type of photons for the SPPM engine: single-wavelength or wavelength-dependent"),
attrName="photonType")
self._addControl(
ui=pm.attrEnumOptionMenuGrp(
label="Direct Lighting",
enumeratedItem=self._getAttributeMenuItems("SPPMLightingMode"),
columnAttach=(1, "right", 4),
annotation="Type of direct lighting engine to use: raytraced, photon or none."),
attrName="SPPMLightingMode")
pm.separator(height=2)
with pm.rowColumnLayout("sppmCheckBoxesRowColumnLayout", numberOfRows=1):
self._addControl(
ui=pm.checkBoxGrp(
label="Caustics",
columnAttach=(1, "right", 4)),
attrName="SPPMCaustics")
self._addControl(
ui=pm.checkBoxGrp(
label="Image Based Lighting",
columnAttach=(1, "right", 5),
changeCommand=self.__enablePhotonTracingEnvPhotonsChanged),
attrName="SPPMEnableIBL")
pm.separator(height=2)
with pm.frameLayout("sppmPhotonTracingFrameLayout", font="smallBoldLabelFont",
label="Photon Tracing", collapsable=False, collapse=True):
with pm.rowColumnLayout("sppmCheckBoxesRowColumnLayout", numberOfRows=1):
pm.separator(height=2)
self._addControl(
ui=pm.checkBoxGrp(
label="Limit Bounces",
columnAttach=(1, "right", 4),
annotation="Restrict the number of photon bounces.",
changeCommand=self.__limitPhotonTracingBouncesChanged),
attrName="limitPhotonTracingBounces")
limitPhotonTracingBounces = mc.getAttr(
"appleseedRenderGlobals.limitPhotonTracingBounces")
self._addControl(
ui=pm.checkBoxGrp(
label="Importon Tracing",
columnAttach=(1, "right", 5),
annotation="Importons are traced to determine important parts of the scene and later photons are only stored in these important parts"),
attrName="SPPMEnableImportons")
with pm.columnLayout("sppmPhotonTracingColumnLayout", adjustableColumn=True,
width=g_subColumnWidth - g_margin, rowSpacing=2):
pm.separator(height=2)
self._addFieldSliderControl(
label="Max Bounces",
columnWidth=[(1, 125), (2, 40), (3, 100)],
columnAttach=[(1, "right", 4), (2, "right", 2), (3, "right", 8)],
minValue=0,
maxValue=30,
fieldMinValue=0,
fieldMaxValue=100,
annotation="Maximum number of photon bounces.",
enable=limitPhotonTracingBounces,
attrName="photonTracingBounces")
self._addFieldSliderControl(
label="RR Starting Bounce",
columnWidth=[(1, 125), (2, 40), (3, 100)],
columnAttach=[(1, "right", 4), (2, "right", 2), (3, "right", 8)],
minValue=0,
maxValue=30,
fieldMinValue=0,
fieldMaxValue=100,
annotation="Discard low contribution paths starting with this bounce.",
attrName="photonTracingRRMinPathLength")
pm.separator(height=2)
self._addFieldSliderControl(
label="Light Photons",
minValue=100000,
maxValue=10000000,
fieldMinValue=0,
fieldMaxValue=100000000,
columnWidth=[(1, 125), (2, 60), (3, 100)],
columnAttach=[(1, "right", 4), (2, "right", 2), (3, "right", 8)],
annotation="Number of light photons per render pass.",
attrName="photonTracingLightPhotons")
SPPMEnableIBL = mc.getAttr(
"appleseedRenderGlobals.SPPMEnableIBL")
self._addFieldSliderControl(
label="IBL Photons",
minValue=100000,
maxValue=10000000,
fieldMinValue=0,
fieldMaxValue=100000000,
columnWidth=[(1, 125), (2, 60), (3, 100)],
columnAttach=[(1, "right", 4), (2, "right", 2), (3, "right", 8)],
enable=SPPMEnableIBL,
annotation="Number of environment photons per render pass.",
attrName="photonTracingEnvPhotons")
with pm.frameLayout("sppmRadianceEstimationFrameLayout", font="smallBoldLabelFont",
label="Radiance Estimation", collapsable=False, collapse=True):
with pm.columnLayout("sppmRadianceEstimationColumnLayout", adjustableColumn=True,
width=g_subColumnWidth - g_margin, rowSpacing=2):
pm.separator(height=2)
self._addControl(
ui=pm.checkBoxGrp(
label="Limit Bounces",
columnAttach=(1, "right", 4),
annotation="Restrict the number of path bounces.",
changeCommand=self.__limitRadianceEstimationBouncesChanged),
attrName="limitRadianceEstimationBounces")
limitRadianceEstimationBounces = mc.getAttr(
"appleseedRenderGlobals.limitRadianceEstimationBounces"
)
pm.separator(height=2)
self._addFieldSliderControl(
label="Maximum Bounces",
columnWidth=[(1, 125), (2, 40), (3, 100)],
columnAttach=[(1, "right", 4), (2, "right", 2), (3, "right", 8)],
minValue=0,
maxValue=30,
fieldMinValue=0,
fieldMaxValue=100,
enable=limitRadianceEstimationBounces,
annotation="Maximum number of radiance estimation path bounces.",
attrName="radianceEstimationBounces")
self._addFieldSliderControl(
label="RR Starting Bounces",
columnWidth=[(1, 125), (2, 40), (3, 100)],
columnAttach=[(1, "right", 4), (2, "right", 2), (3, "right", 8)],
minValue=0,
maxValue=30,
fieldMinValue=0,
fieldMaxValue=100,
annotation="Discard low contribution paths starting with this bounce.",
attrName="radianceEstimationRRMinPathLength")
pm.separator(height=2)
self._addFieldSliderControl(
label="Initial Search Radius",
sliderStep=1.0,
fieldStep=0.1,
precision=3,
columnWidth=[(1, 125), (2, 60), (3, 100)],
columnAttach=[(1, "right", 4), (2, "right", 2), (3, "right", 0)],
minValue=0.01,
maxValue=0.5,
fieldMinValue=0.001,
fieldMaxValue=100.0,
annotation="Initial photon gathering radius in percent of scene diameter.",
attrName="radianceEstimationInitialRadius")
self._addFieldSliderControl(
label="Maximum Photons",
columnWidth=[(1, 125), (2, 60), (3, 100)],
columnAttach=[(1, "right", 4), (2, "right", 2), (3, "right", 8)],
minValue=8,
maxValue=500,
fieldMinValue=8,
fieldMaxValue=10000,
annotation="Maximum number of photons used for radiance estimation.",
attrName="radianceEstimationMaxPhotons")
self._addFieldSliderControl(
label="Alpha",
sliderStep=1.0,
fieldStep=0.05,
precision=3,
columnWidth=[(1, 125), (2, 60), (3, 100)],
columnAttach=[(1, "right", 4), (2, "right", 2), (3, "right", 0)],
minValue=0.0,
maxValue=1.0,
fieldMinValue=0.0,
fieldMaxValue=1.0,
annotation="Evolution rate of photon gathering radius.",
attrName="radianceEstimationAlpha")
pm.separator(height=2)
self._addControl(
ui=pm.checkBoxGrp(
label="Clamp Ray Intensity",
columnAttach=(1, "right", 4),
annotation="Clamp secondary and subsequent rays intensity in order to reduce fireflies.",
changeCommand=self.__enableMaxRayIntensitySPPMChanged),
attrName="enableMaxRayIntensitySPPM")
enableMaxRayIntensitySPPM = mc.getAttr(
"appleseedRenderGlobals.enableMaxRayIntensitySPPM"
)
pm.separator(height=2)
self._addFieldSliderControl(
label="Maximum Ray Intensity",
sliderStep=1.0,
fieldStep=0.1,
precision=1,
columnWidth=[(1, 125), (2, 40), (3, 100)],
columnAttach=[(1, "right", 4), (2, "right", 2), (3, "right", 0)],
minValue=0.0,
maxValue=2.0,
fieldMinValue=0.0,
fieldMaxValue=10000.0,
enable=enableMaxRayIntensitySPPM,
annotation="Maximum Ray Intensity valued allowed on secondary and subsequent rays.",
attrName="maxRayIntensitySPPM")
pm.setUITemplate("renderGlobalsTemplate", popTemplate=True)
pm.setUITemplate("attributeEditorTemplate", popTemplate=True)
pm.formLayout(
parentForm,
edit=True,
attachForm=[
("lightingScrollLayout", "top", 0),
("lightingScrollLayout", "bottom", 0),
("lightingScrollLayout", "left", 0),
("lightingScrollLayout", "right", 0)])
logger.debug("Created appleseed lighting engines render globals main tab.")
# Update the newly created tab.
self.update()
def update(self):
assert mc.objExists("appleseedRenderGlobals")
g_appleseedLightingTab = AppleseedRenderGlobalsLightingTab()
class AppleseedRenderGlobalsOutputTab(AppleseedRenderGlobalsTab):
def __prefilterChanged(self, value):
self._uis["spikeThreshold"].setEnable(value)
def __renderStampChanged(self, value):
self._uis["renderStampString"].setEnable(value)
self._uis["renderStampScaleFactor"].setEnable(value)
def create(self):
# Create default render globals node if needed.
createGlobalNodes()
parentForm = pm.setParent(query=True)
pm.setUITemplate("renderGlobalsTemplate", pushTemplate=True)
pm.setUITemplate("attributeEditorTemplate", pushTemplate=True)
with pm.scrollLayout("outputScrollLayout", horizontalScrollBarThickness=0):
with pm.columnLayout("outputColumnLayout", adjustableColumn=True, width=g_columnWidth):
with pm.frameLayout("outputAOVSframeLayout", label="AOVs", collapsable=True, collapse=False):
with pm.rowColumnLayout("outputAOVsColumnLayout", adjustableColumn=True, width=g_columnWidth,
numberOfColumns=2, rowSpacing=(2, 2)):
pm.separator(height=2)
pm.separator(height=2)
self._addControl(ui=pm.checkBoxGrp(label="Diffuse",
columnAttach=[(1, "right", 1), (2, "both", 8)]),
attrName="diffuseAOV")
self._addControl(ui=pm.checkBoxGrp(label="Glossy",
columnAttach=[(1, "right", 1), (2, "both", 8)]),
attrName="glossyAOV")
self._addControl(ui=pm.checkBoxGrp(label="Emission",
columnAttach=[(1, "right", 1), (2, "both", 8)]),
attrName="emissionAOV")
self._addControl(ui=pm.checkBoxGrp(label="Direct Diffuse",
columnAttach=[(1, "right", 1), (2, "both", 8)]),
attrName="directDiffuseAOV")
self._addControl(ui=pm.checkBoxGrp(label="Indirect Diffuse",
columnAttach=[(1, "right", 1), (2, "both", 8)]),
attrName="indirectDiffuseAOV")
self._addControl(ui=pm.checkBoxGrp(label="Direct Glossy",
columnAttach=[(1, "right", 1), (2, "both", 8)]),
attrName="directGlossyAOV")
self._addControl(ui=pm.checkBoxGrp(label="Indirect Glossy",
columnAttach=[(1, "right", 1), (2, "both", 8)]),
attrName="indirectGlossyAOV")
self._addControl(ui=pm.checkBoxGrp(label="Albedo",
columnAttach=[(1, "right", 1), (2, "both", 8)]),
attrName="albedoAOV")
self._addControl(ui=pm.checkBoxGrp(label="Normal",
columnAttach=[(1, "right", 1), (2, "both", 8)]),
attrName="normalAOV")
self._addControl(ui=pm.checkBoxGrp(label="Invalid Samples",
columnAttach=[(1, "right", 1), (2, "both", 8)]),
attrName="invalidSamplesAOV")
self._addControl(ui=pm.checkBoxGrp(label="Pixel Error",
columnAttach=[(1, "right", 1), (2, "both", 8)]),
attrName="pixelErrorAOV")
self._addControl(ui=pm.checkBoxGrp(label="Pixel Sample Count",
columnAttach=[(1, "right", 1), (2, "both", 8)]),
attrName="pixelSampleCountAOV")
self._addControl(ui=pm.checkBoxGrp(label="Pixel Time",
columnAttach=[(1, "right", 1), (2, "both", 8)]),
attrName="pixelTimeAOV")
self._addControl(ui=pm.checkBoxGrp(label="Pixel Variation",
columnAttach=[(1, "right", 1), (2, "both", 8)]),
attrName="pixelVariationAOV")
self._addControl(ui=pm.checkBoxGrp(label="UV",
columnAttach=[(1, "right", 1), (2, "both", 8)]),
attrName="uvAOV")
self._addControl(ui=pm.checkBoxGrp(label="Depth",
columnAttach=[(1, "right", 1), (2, "both", 8)]),
attrName="depthAOV")
self._addControl(ui=pm.checkBoxGrp(label="Position",
columnAttach=[(1, "right", 1), (2, "both", 8)]),
attrName="positionAOV")
self._addControl(ui=pm.checkBoxGrp(label="Screen Space Velocity",
columnAttach=[(1, "right", 1), (2, "both", 8)]),
attrName="velocityAOV")
self._addControl(ui=pm.checkBoxGrp(label="Cryptomatte Material",
columnAttach=[(1, "right", 1), (2, "both", 8)]),
attrName="cryptomatteMaterialAOV")
self._addControl(ui=pm.checkBoxGrp(label="Cryptomatte Object",
columnAttach=[(1, "right", 1), (2, "both", 8)]),
attrName="cryptomatteObjectAOV")
self._addControl(ui=pm.checkBoxGrp(label="NPR Shading",
columnAttach=[(1, "right", 1), (2, "both", 8)]),
attrName="nprShadingAOV")
self._addControl(ui=pm.checkBoxGrp(label="NPR Contours",
columnAttach=[(1, "right", 1), (2, "both", 8)]),
attrName="nprContourAOV")
pm.separator(height=2)
pm.separator(height=2)
with pm.frameLayout("outputDenoiserFrameLayout", label="Denoiser", collapsable=True, collapse=True):
with pm.columnLayout("outputDenoiserColumnLayout", adjustableColumn=True, width=g_columnWidth,
rowSpacing=2):
pm.separator(height=2)
self._addControl(
ui=pm.attrEnumOptionMenuGrp(
label="Denoiser",
columnAttach=(1, "right", 4),
enumeratedItem=self._getAttributeMenuItems("denoiser")),
attrName="denoiser")
pm.separator(height=2)
with pm.columnLayout("denoiserCheckboxesRowColumnLayout",
adjustableColumn=True,
width=g_columnWidth,
rowSpacing=2):
self._addControl(
ui=pm.checkBoxGrp(
label="Skip Already Denoised",
columnAttach=(1, "right", 4)),
attrName="skipDenoised")
self._addControl(
ui=pm.checkBoxGrp(
label="Random Pixel Order",
columnAttach=(1, "right", 4)),
attrName="randomPixelOrder")
enablePrefilter = mc.getAttr(
"appleseedRenderGlobals.prefilterSpikes")
self._addControl(
ui=pm.checkBoxGrp(
label="Prefilter Spikes",
columnAttach=(1, "right", 4),
changeCommand=self.__prefilterChanged),
attrName="prefilterSpikes")
pm.separator(height=2)
self._addControl(
ui=pm.floatFieldGrp(
label="Spike Threshold",
numberOfFields=1,
columnAttach=(1, "right", 4),
enable=enablePrefilter),
attrName="spikeThreshold")
self._addControl(
ui=pm.floatFieldGrp(
label="Patch Distance",
columnAttach=(1, "right", 4),
numberOfFields=1),
attrName="patchDistance")
self._addControl(
ui=pm.intFieldGrp(
label="Denoise Scales",
columnAttach=(1, "right", 4),
numberOfFields=1),
attrName="denoiseScales")
with pm.frameLayout("outputRenderStampFrameLayout", label="Render Stamp", collapsable=True,
collapse=True):
with pm.columnLayout("outputRenderStampColumnLayout", adjustableColumn=True, width=g_columnWidth,
rowSpacing=2):
pm.separator(height=2)
enableRenderStamp = mc.getAttr(
"appleseedRenderGlobals.renderStamp")
self._addControl(
ui=pm.checkBoxGrp(
label="Enable",
height=18,
columnAttach=(1, "right", 4),
changeCommand=self.__renderStampChanged),
attrName="renderStamp")
pm.separator(height=2)
self._addControl(
ui=pm.textFieldGrp(
label="Render Stamp",
height=22,
columnAttach=(1, "right", 4),
enable=enableRenderStamp,
annotation="Render stamp allows {lib-name|version|cpu-features|config|build-date|build-time}\n{render-time} and {peak-memory}."),
attrName="renderStampString")
pm.separator(height=2)
self._addFieldSliderControl(
label="Scale Factor",
sliderStep=0.1,
precision=2,
columnWidth=(3, 160),
columnAttach=(1, "right", 4),
enable=enableRenderStamp,
minValue=0.1,
maxValue=20.0,
fieldMinValue=0.1,
fieldMaxValue=30.0,
annotation="Render stamp scale factor.",
attrName="renderStampScaleFactor")
pm.separator(height=2)
pm.setUITemplate("renderGlobalsTemplate", popTemplate=True)
pm.setUITemplate("attributeEditorTemplate", popTemplate=True)
pm.formLayout(
parentForm,
edit=True,
attachForm=[
("outputScrollLayout", "top", 0),
("outputScrollLayout", "bottom", 0),
("outputScrollLayout", "left", 0),
("outputScrollLayout", "right", 0)])
logger.debug("Created appleseed render global output tab")
# Update the newly created tab.
self.update()
def update(self):
assert mc.objExists("appleseedRenderGlobals")
g_appleseedOutputTab = AppleseedRenderGlobalsOutputTab()
class AppleseedRenderGlobalsSystemTab(AppleseedRenderGlobalsTab):
def __chooseLogFilename(self):
logger.debug("Choose log filename called!")
        # fileDialog2 returns a list of selected paths (or None if cancelled).
        path = pm.fileDialog2(fileMode=0)
        if path:
            mc.setAttr("appleseedRenderGlobals.logFilename", path[0], type="string")
def create(self):
        # Create default render globals node if needed.
createGlobalNodes()
parentForm = pm.setParent(query=True)
pm.setUITemplate("renderGlobalsTemplate", pushTemplate=True)
pm.setUITemplate("attributeEditorTemplate", pushTemplate=True)
with pm.scrollLayout("diagnosticsScrollLayout", horizontalScrollBarThickness=0):
with pm.columnLayout("diagnosticsColumnLayout", adjustableColumn=True, width=g_columnWidth):
with pm.frameLayout("overrideShadersFrameLayout", label="Override Shaders", collapsable=True,
collapse=True):
with pm.columnLayout("overrideShadersColumnLayout", adjustableColumn=False, width=g_columnWidth):
self._addControl(
ui=pm.attrEnumOptionMenuGrp(
label="Override Shaders",
columnAttach=(1, "right", 4),
enumeratedItem=self._getAttributeMenuItems("diagnostics")),
attrName="diagnostics")
with pm.frameLayout("loggingFrameLayout", label="Logging", collapsable=True, collapse=True):
with pm.columnLayout("LoggingColumnLayout", adjustableColumn=True, width=g_columnWidth):
pm.separator(height=2)
self._addControl(
ui=pm.attrEnumOptionMenuGrp(
label="Log Level",
height=24,
columnAttach=(1, "right", 4),
enumeratedItem=self._getAttributeMenuItems("logLevel"),
width=200),
attrName="logLevel")
pm.separator(height=2)
self._addControl(
ui=pm.textFieldButtonGrp(
label="Log Filename",
buttonLabel="...",
height=22,
columnAttach=(1, "right", 4),
buttonCommand=self.__chooseLogFilename),
attrName="logFilename")
pm.separator(height=2)
with pm.frameLayout("systemFrameLayout", label="System", collapsable=True, collapse=False):
with pm.columnLayout("systemColumnLayout", adjustableColumn=True, width=g_columnWidth):
pm.separator(height=2)
self._addControl(
ui=pm.intFieldGrp(
label="Threads",
columnAttach=(1, "right", 4),
numberOfFields=1),
attrName="threads")
self._addControl(
ui=pm.intFieldGrp(
label="Texture Cache Size (MB)",
columnAttach=(1, "right", 4),
numberOfFields=1),
attrName="maxTexCacheSize")
pm.separator(height=2)
with pm.frameLayout("experimentalFrameLayout", label="Experimental", collapsable=True, collapse=False):
with pm.columnLayout("experimentalColumnLayout", adjustableColumn=True, width=g_columnWidth):
self._addControl(
ui=pm.checkBoxGrp(
label="Use Embree",
columnAttach=(1, "right", 4),
height=24),
attrName="useEmbree")
logger.debug("Created appleseed render global diagnostics tab.")
pm.setUITemplate("renderGlobalsTemplate", popTemplate=True)
pm.setUITemplate("attributeEditorTemplate", popTemplate=True)
pm.formLayout(
parentForm,
edit=True,
attachForm=[
("diagnosticsScrollLayout", "top", 0),
("diagnosticsScrollLayout", "bottom", 0),
("diagnosticsScrollLayout", "left", 0),
("diagnosticsScrollLayout", "right", 0)])
# Update the newly created tab.
self.update()
def update(self):
assert mc.objExists("appleseedRenderGlobals")
g_appleseedSystemTab = AppleseedRenderGlobalsSystemTab()
|
luisbarrancos/appleseed-maya2
|
scripts/appleseedMaya/renderGlobals.py
|
Python
|
mit
| 77,738
|
[
"VisIt"
] |
aacca05387d4cf617bd4f8c34b57d6cc1aee605bad7f69023705d5625bb58385
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database (Sherrill) of interaction energies for dissociation curves of dispersion-bound bimolecular complexes.
| Geometries and reference interaction energies from the following articles:
| Benzene Dimers from Sherrill et al. JPCA 113 10146 (2009).
| Benzene-Hydrogen Sulfide from Sherrill et al. JPCA 113 10146 (2009).
| Benzene-Methane from Sherrill et al. JPCA 113 10146 (2009).
| Methane Dimer from Takatani et al. PCCP 9 6106 (2007).
| Pyridine Dimers from Hohenstein et al. JPCA 113 878 (2009).
| Collection into NBC10 from Burns et al. JCP 134 084107 (2011).
| Revised reference interaction energies (NBC10A) from Marshall et al. JCP 135 194102 (2011).
- **cp** ``'off'`` || ``'on'``
- **rlxd** ``'off'``
- **benchmark**
- ``'NBC100'`` Burns et al. JCP 134 084107 (2011).
- |dl| ``'NBC10A'`` |dr| Marshall et al. JCP 135 194102 (2011).
- **subset**
- ``'small'``
- ``'large'``
- ``'equilibrium'``
- ``'BzBz_S'`` dissociation curve for benzene dimer, sandwich
- ``'BzBz_T'`` dissociation curve for benzene dimer, t-shaped
- ``'BzBz_PD34'`` dissociation curve for benzene dimer, parallel displaced by 3.4A
- ``'BzH2S'`` dissociation curve for benzene-H2S
- ``'BzMe'`` dissociation curve for benzene-methane
- ``'MeMe'`` dissociation curve for methane dimer
- ``'PyPy_S2'`` dissociation curve for pyridine dimer, sandwich
- ``'PyPy_T3'`` dissociation curve for pyridine dimer, t-shaped
- ``'BzBz_PD32'`` dissociation curve for benzene dimer, parallel displaced by 3.2A
- ``'BzBz_PD36'`` dissociation curve for benzene dimer, parallel displaced by 3.6A
"""
import re
import qcdb
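# Usage sketch (hedged; call form is illustrative): within Psi4 this module is
# normally consumed through the database driver rather than imported directly,
# along the lines of
#
#     database('scf', 'NBC10', cp='on', benchmark='NBC10A', subset='equilibrium')
#
# which would run the counterpoise-corrected equilibrium points against the
# revised reference values listed below.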
# <<< NBC10 Database Module >>>
dbse = 'NBC1'
# <<< Database Members >>>
BzBz_S = []
dist = [3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0, 4.1, 4.2, 4.5, 5.0, 5.5, 6.0, 6.5, 10.0]
for d in dist:
BzBz_S.append('BzBz_S-' + str(d))
BzBz_T = []
dist = [4.4, 4.5, 4.6, 4.7, 4.8, 4.9, 5.0, 5.1, 5.2, 5.3, 5.4, 5.5, 5.6, 6.0, 6.5, 7.0, 7.5, 8.0]
for d in dist:
BzBz_T.append('BzBz_T-' + str(d))
BzBz_PD34 = []
dist = [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0]
for d in dist:
BzBz_PD34.append('BzBz_PD34-' + str(d))
BzH2S = []
dist = [3.2, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0, 4.1, 4.2, 4.5, 4.75, 5.0, 5.25, 5.5, 6.0, 6.5, 7.0, 7.5]
for d in dist:
BzH2S.append('BzH2S-' + str(d))
BzMe = []
dist = [3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0, 4.1, 4.2, 4.4, 4.6, 4.8, 5.0, 5.2, 5.4, 5.6, 6.0]
for d in dist:
BzMe.append('BzMe-' + str(d))
MeMe = []
dist = [3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0, 4.1, 4.2, 4.3, 4.4, 4.6, 4.8, 5.0, 5.4, 5.8]
for d in dist:
MeMe.append('MeMe-' + str(d))
PyPy_S2 = []
dist = [3.1, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0, 4.1, 4.2, 4.3, 4.4, 4.5, 4.7, 5.0, 5.5, 6.0, 6.5, 7.0]
for d in dist:
PyPy_S2.append('PyPy_S2-' + str(d))
PyPy_T3 = []
dist = [4.1, 4.3, 4.5, 4.6, 4.7, 4.8, 4.9, 5.0, 5.1, 5.2, 5.3, 5.4, 5.5, 5.7, 6.0, 6.5, 7.0, 8.0, 9.0]
for d in dist:
PyPy_T3.append('PyPy_T3-' + str(d))
BzBz_PD32 = []
dist = [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0]
for d in dist:
BzBz_PD32.append('BzBz_PD32-' + str(d))
BzBz_PD36 = []
dist = [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0]
for d in dist:
BzBz_PD36.append('BzBz_PD36-' + str(d))
temp = [BzBz_S, BzBz_T, BzBz_PD34, BzH2S, BzMe, MeMe, PyPy_S2, PyPy_T3, BzBz_PD32, BzBz_PD36]
HRXN = sum(temp, [])
HRXN_SM = ['BzMe-6.0', 'MeMe-5.0']
HRXN_LG = ['BzBz_T-5.0']
HRXN_EQ = ['BzBz_S-3.9', 'BzBz_T-5.0', 'BzBz_PD34-1.8', 'BzH2S-3.8', 'BzMe-3.8',
'MeMe-3.6', 'PyPy_S2-3.7', 'PyPy_T3-4.9', 'BzBz_PD32-1.9', 'BzBz_PD36-1.7']
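# Each member name encodes "<curve>-<separation in Angstrom>"; e.g. 'BzBz_S-3.9'
# is the sandwich benzene dimer at 3.9 A, the minimum of that curve (see the
# BIND_* reference values below).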
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
ACTV_CP = {} # order of active reagents per counterpoise-corrected reaction
ACTV_SA = {} # order of active reagents for non-supramolecular calculations
for rxn in HRXN:
if (rxn in BzBz_S) or (rxn in BzBz_PD34) or (rxn in BzBz_PD32) or (rxn in BzBz_PD36):
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -2,
'%s-Bz-mono-unCP' % (dbse) : -2 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-Bz-mono-unCP' % (dbse) ]
elif rxn in BzBz_T:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-Bz-mono-unCP' % (dbse) : -2 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-Bz-mono-unCP' % (dbse) ]
elif rxn in BzH2S:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-Bz-mono-unCP' % (dbse) : -1,
'%s-H2S-mono-unCP' % (dbse) : -1 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-Bz-mono-unCP' % (dbse),
'%s-H2S-mono-unCP' % (dbse) ]
elif rxn in BzMe:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-Bz2-mono-unCP' % (dbse) : -1,
'%s-Me-mono-unCP' % (dbse) : -1 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-Bz2-mono-unCP' % (dbse),
'%s-Me-mono-unCP' % (dbse) ]
elif rxn in MeMe:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -2,
'%s-Me-mono-unCP' % (dbse) : -2 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-Me-mono-unCP' % (dbse) ]
elif rxn in PyPy_S2:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -2,
'%s-Py-mono-unCP' % (dbse) : -2 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-Py-mono-unCP' % (dbse) ]
elif rxn in PyPy_T3:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-Py-mono-unCP' % (dbse) : -2 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-Py-mono-unCP' % (dbse) ]
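# Worked example (illustrative): for a sandwich benzene dimer point, the RXNM
# weights above assemble the interaction energy as
#
#     E_int(CP)   = E(dimer) - 2 * E(monoA in dimer basis)   # dimer: +1, monoA-CP: -2
#     E_int(unCP) = E(dimer) - 2 * E(isolated Bz monomer)    # dimer: +1, Bz-mono-unCP: -2
#
# the factor of two reflecting the two symmetry-equivalent monomers; ACTV,
# ACTV_CP and ACTV_SA select which reagents are active for each scheme.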
# <<< Reference Values >>>
BIND = {}
# Original publication
BIND_NBC100 = {}
BIND_NBC100['%s-BzBz_S-3.2' % (dbse)] = 3.522
BIND_NBC100['%s-BzBz_S-3.3' % (dbse)] = 1.535
BIND_NBC100['%s-BzBz_S-3.4' % (dbse)] = 0.189
BIND_NBC100['%s-BzBz_S-3.5' % (dbse)] = -0.689
BIND_NBC100['%s-BzBz_S-3.6' % (dbse)] = -1.231
BIND_NBC100['%s-BzBz_S-3.7' % (dbse)] = -1.535
BIND_NBC100['%s-BzBz_S-3.8' % (dbse)] = -1.674
BIND_NBC100['%s-BzBz_S-3.9' % (dbse)] = -1.701 # BzBz_S minimum
BIND_NBC100['%s-BzBz_S-4.0' % (dbse)] = -1.655
BIND_NBC100['%s-BzBz_S-4.1' % (dbse)] = -1.565
BIND_NBC100['%s-BzBz_S-4.2' % (dbse)] = -1.448
BIND_NBC100['%s-BzBz_S-4.5' % (dbse)] = -1.058
BIND_NBC100['%s-BzBz_S-5.0' % (dbse)] = -0.542
BIND_NBC100['%s-BzBz_S-5.5' % (dbse)] = -0.248
BIND_NBC100['%s-BzBz_S-6.0' % (dbse)] = -0.099
BIND_NBC100['%s-BzBz_S-6.5' % (dbse)] = -0.028
BIND_NBC100['%s-BzBz_S-10.0' % (dbse)] = 0.018
BIND_NBC100['%s-BzBz_T-4.4' % (dbse)] = 0.626
BIND_NBC100['%s-BzBz_T-4.5' % (dbse)] = -0.760
BIND_NBC100['%s-BzBz_T-4.6' % (dbse)] = -1.673
BIND_NBC100['%s-BzBz_T-4.7' % (dbse)] = -2.239
BIND_NBC100['%s-BzBz_T-4.8' % (dbse)] = -2.552
BIND_NBC100['%s-BzBz_T-4.9' % (dbse)] = -2.687
BIND_NBC100['%s-BzBz_T-5.0' % (dbse)] = -2.698 # BzBz_T minimum
BIND_NBC100['%s-BzBz_T-5.1' % (dbse)] = -2.627
BIND_NBC100['%s-BzBz_T-5.2' % (dbse)] = -2.503
BIND_NBC100['%s-BzBz_T-5.3' % (dbse)] = -2.349
BIND_NBC100['%s-BzBz_T-5.4' % (dbse)] = -2.179
BIND_NBC100['%s-BzBz_T-5.5' % (dbse)] = -2.005
BIND_NBC100['%s-BzBz_T-5.6' % (dbse)] = -1.833
BIND_NBC100['%s-BzBz_T-6.0' % (dbse)] = -1.242
BIND_NBC100['%s-BzBz_T-6.5' % (dbse)] = -0.752
BIND_NBC100['%s-BzBz_T-7.0' % (dbse)] = -0.468
BIND_NBC100['%s-BzBz_T-7.5' % (dbse)] = -0.302
BIND_NBC100['%s-BzBz_T-8.0' % (dbse)] = -0.203
BIND_NBC100['%s-BzBz_PD34-0.2' % (dbse)] = 0.070
BIND_NBC100['%s-BzBz_PD34-0.4' % (dbse)] = -0.257
BIND_NBC100['%s-BzBz_PD34-0.6' % (dbse)] = -0.728
BIND_NBC100['%s-BzBz_PD34-0.8' % (dbse)] = -1.260
BIND_NBC100['%s-BzBz_PD34-1.0' % (dbse)] = -1.766
BIND_NBC100['%s-BzBz_PD34-1.2' % (dbse)] = -2.179
BIND_NBC100['%s-BzBz_PD34-1.4' % (dbse)] = -2.466
BIND_NBC100['%s-BzBz_PD34-1.5' % (dbse)] = -2.557
BIND_NBC100['%s-BzBz_PD34-1.6' % (dbse)] = -2.614
BIND_NBC100['%s-BzBz_PD34-1.7' % (dbse)] = -2.640
BIND_NBC100['%s-BzBz_PD34-1.8' % (dbse)] = -2.643 # BzBz_PD34 minimum
BIND_NBC100['%s-BzBz_PD34-1.9' % (dbse)] = -2.624
BIND_NBC100['%s-BzBz_PD34-2.0' % (dbse)] = -2.587
BIND_NBC100['%s-BzBz_PD34-2.2' % (dbse)] = -2.479
BIND_NBC100['%s-BzBz_PD34-2.4' % (dbse)] = -2.356
BIND_NBC100['%s-BzBz_PD34-2.6' % (dbse)] = -2.242
BIND_NBC100['%s-BzBz_PD34-2.8' % (dbse)] = -2.147
BIND_NBC100['%s-BzBz_PD34-3.0' % (dbse)] = -2.079
BIND_NBC100['%s-BzH2S-3.2' % (dbse)] = 1.250
BIND_NBC100['%s-BzH2S-3.4' % (dbse)] = -1.570
BIND_NBC100['%s-BzH2S-3.5' % (dbse)] = -2.256
BIND_NBC100['%s-BzH2S-3.6' % (dbse)] = -2.638
BIND_NBC100['%s-BzH2S-3.7' % (dbse)] = -2.808
BIND_NBC100['%s-BzH2S-3.8' % (dbse)] = -2.834 # BzH2S minimum
BIND_NBC100['%s-BzH2S-3.9' % (dbse)] = -2.766
BIND_NBC100['%s-BzH2S-4.0' % (dbse)] = -2.639
BIND_NBC100['%s-BzH2S-4.1' % (dbse)] = -2.478
BIND_NBC100['%s-BzH2S-4.2' % (dbse)] = -2.301
BIND_NBC100['%s-BzH2S-4.5' % (dbse)] = -1.770
BIND_NBC100['%s-BzH2S-4.75' % (dbse)] = -1.393
BIND_NBC100['%s-BzH2S-5.0' % (dbse)] = -1.093
BIND_NBC100['%s-BzH2S-5.25' % (dbse)] = -0.861
BIND_NBC100['%s-BzH2S-5.5' % (dbse)] = -0.684
BIND_NBC100['%s-BzH2S-6.0' % (dbse)] = -0.446
BIND_NBC100['%s-BzH2S-6.5' % (dbse)] = -0.302
BIND_NBC100['%s-BzH2S-7.0' % (dbse)] = -0.214
BIND_NBC100['%s-BzH2S-7.5' % (dbse)] = -0.155
BIND_NBC100['%s-BzMe-3.2' % (dbse)] = 0.717
BIND_NBC100['%s-BzMe-3.3' % (dbse)] = -0.183
BIND_NBC100['%s-BzMe-3.4' % (dbse)] = -0.774
BIND_NBC100['%s-BzMe-3.5' % (dbse)] = -1.135
BIND_NBC100['%s-BzMe-3.6' % (dbse)] = -1.337
BIND_NBC100['%s-BzMe-3.7' % (dbse)] = -1.432
BIND_NBC100['%s-BzMe-3.8' % (dbse)] = -1.439 # BzMe minimum
BIND_NBC100['%s-BzMe-3.9' % (dbse)] = -1.414
BIND_NBC100['%s-BzMe-4.0' % (dbse)] = -1.327
BIND_NBC100['%s-BzMe-4.1' % (dbse)] = -1.232
BIND_NBC100['%s-BzMe-4.2' % (dbse)] = -1.138
BIND_NBC100['%s-BzMe-4.4' % (dbse)] = -0.950
BIND_NBC100['%s-BzMe-4.6' % (dbse)] = -0.760
BIND_NBC100['%s-BzMe-4.8' % (dbse)] = -0.606
BIND_NBC100['%s-BzMe-5.0' % (dbse)] = -0.475
BIND_NBC100['%s-BzMe-5.2' % (dbse)] = -0.370
BIND_NBC100['%s-BzMe-5.4' % (dbse)] = -0.286
BIND_NBC100['%s-BzMe-5.6' % (dbse)] = -0.230
BIND_NBC100['%s-BzMe-6.0' % (dbse)] = -0.141
BIND_NBC100['%s-MeMe-3.2' % (dbse)] = 0.069
BIND_NBC100['%s-MeMe-3.3' % (dbse)] = -0.239
BIND_NBC100['%s-MeMe-3.4' % (dbse)] = -0.417
BIND_NBC100['%s-MeMe-3.5' % (dbse)] = -0.508
BIND_NBC100['%s-MeMe-3.6' % (dbse)] = -0.541 # MeMe minimum
BIND_NBC100['%s-MeMe-3.7' % (dbse)] = -0.539
BIND_NBC100['%s-MeMe-3.8' % (dbse)] = -0.515
BIND_NBC100['%s-MeMe-3.9' % (dbse)] = -0.480
BIND_NBC100['%s-MeMe-4.0' % (dbse)] = -0.439
BIND_NBC100['%s-MeMe-4.1' % (dbse)] = -0.396
BIND_NBC100['%s-MeMe-4.2' % (dbse)] = -0.354
BIND_NBC100['%s-MeMe-4.3' % (dbse)] = -0.315
BIND_NBC100['%s-MeMe-4.4' % (dbse)] = -0.279
BIND_NBC100['%s-MeMe-4.6' % (dbse)] = -0.217
BIND_NBC100['%s-MeMe-4.8' % (dbse)] = -0.168
BIND_NBC100['%s-MeMe-5.0' % (dbse)] = -0.130
BIND_NBC100['%s-MeMe-5.4' % (dbse)] = -0.080
BIND_NBC100['%s-MeMe-5.8' % (dbse)] = -0.050
BIND_NBC100['%s-PyPy_S2-3.1' % (dbse)] = 2.442
BIND_NBC100['%s-PyPy_S2-3.3' % (dbse)] = -1.125
BIND_NBC100['%s-PyPy_S2-3.4' % (dbse)] = -2.016
BIND_NBC100['%s-PyPy_S2-3.5' % (dbse)] = -2.534
BIND_NBC100['%s-PyPy_S2-3.6' % (dbse)] = -2.791
BIND_NBC100['%s-PyPy_S2-3.7' % (dbse)] = -2.870 # PyPy_S2 minimum
BIND_NBC100['%s-PyPy_S2-3.8' % (dbse)] = -2.832
BIND_NBC100['%s-PyPy_S2-3.9' % (dbse)] = -2.719
BIND_NBC100['%s-PyPy_S2-4.0' % (dbse)] = -2.561
BIND_NBC100['%s-PyPy_S2-4.1' % (dbse)] = -2.381
BIND_NBC100['%s-PyPy_S2-4.2' % (dbse)] = -2.192
BIND_NBC100['%s-PyPy_S2-4.3' % (dbse)] = -2.005
BIND_NBC100['%s-PyPy_S2-4.4' % (dbse)] = -1.824
BIND_NBC100['%s-PyPy_S2-4.5' % (dbse)] = -1.655
BIND_NBC100['%s-PyPy_S2-4.7' % (dbse)] = -1.354
BIND_NBC100['%s-PyPy_S2-5.0' % (dbse)] = -0.999
BIND_NBC100['%s-PyPy_S2-5.5' % (dbse)] = -0.618
BIND_NBC100['%s-PyPy_S2-6.0' % (dbse)] = -0.402
BIND_NBC100['%s-PyPy_S2-6.5' % (dbse)] = -0.277
BIND_NBC100['%s-PyPy_S2-7.0' % (dbse)] = -0.200
BIND_NBC100['%s-PyPy_T3-4.1' % (dbse)] = 9.340
BIND_NBC100['%s-PyPy_T3-4.3' % (dbse)] = 1.991
BIND_NBC100['%s-PyPy_T3-4.5' % (dbse)] = -1.377
BIND_NBC100['%s-PyPy_T3-4.6' % (dbse)] = -2.203
BIND_NBC100['%s-PyPy_T3-4.7' % (dbse)] = -2.673
BIND_NBC100['%s-PyPy_T3-4.8' % (dbse)] = -2.897
BIND_NBC100['%s-PyPy_T3-4.9' % (dbse)] = -2.954 # PyPy_T3 minimum
BIND_NBC100['%s-PyPy_T3-5.0' % (dbse)] = -2.903
BIND_NBC100['%s-PyPy_T3-5.1' % (dbse)] = -2.784
BIND_NBC100['%s-PyPy_T3-5.2' % (dbse)] = -2.625
BIND_NBC100['%s-PyPy_T3-5.3' % (dbse)] = -2.447
BIND_NBC100['%s-PyPy_T3-5.4' % (dbse)] = -2.263
BIND_NBC100['%s-PyPy_T3-5.5' % (dbse)] = -2.080
BIND_NBC100['%s-PyPy_T3-5.7' % (dbse)] = -1.742
BIND_NBC100['%s-PyPy_T3-6.0' % (dbse)] = -1.324
BIND_NBC100['%s-PyPy_T3-6.5' % (dbse)] = -0.853
BIND_NBC100['%s-PyPy_T3-7.0' % (dbse)] = -0.574
BIND_NBC100['%s-PyPy_T3-8.0' % (dbse)] = -0.296
BIND_NBC100['%s-PyPy_T3-9.0' % (dbse)] = -0.175
BIND_NBC100['%s-BzBz_PD32-0.2' % (dbse)] = 3.301
BIND_NBC100['%s-BzBz_PD32-0.4' % (dbse)] = 2.678
BIND_NBC100['%s-BzBz_PD32-0.6' % (dbse)] = 1.783
BIND_NBC100['%s-BzBz_PD32-0.8' % (dbse)] = 0.781
BIND_NBC100['%s-BzBz_PD32-1.0' % (dbse)] = -0.171
BIND_NBC100['%s-BzBz_PD32-1.2' % (dbse)] = -0.954
BIND_NBC100['%s-BzBz_PD32-1.4' % (dbse)] = -1.508
BIND_NBC100['%s-BzBz_PD32-1.5' % (dbse)] = -1.695
BIND_NBC100['%s-BzBz_PD32-1.6' % (dbse)] = -1.827
BIND_NBC100['%s-BzBz_PD32-1.7' % (dbse)] = -1.911
BIND_NBC100['%s-BzBz_PD32-1.8' % (dbse)] = -1.950
BIND_NBC100['%s-BzBz_PD32-1.9' % (dbse)] = -1.957 # BzBz_PD32 minimum
BIND_NBC100['%s-BzBz_PD32-2.0' % (dbse)] = -1.937
BIND_NBC100['%s-BzBz_PD32-2.2' % (dbse)] = -1.860
BIND_NBC100['%s-BzBz_PD32-2.4' % (dbse)] = -1.767
BIND_NBC100['%s-BzBz_PD32-2.6' % (dbse)] = -1.702
BIND_NBC100['%s-BzBz_PD32-2.8' % (dbse)] = -1.680
BIND_NBC100['%s-BzBz_PD32-3.0' % (dbse)] = -1.705
BIND_NBC100['%s-BzBz_PD36-0.2' % (dbse)] = -1.293
BIND_NBC100['%s-BzBz_PD36-0.4' % (dbse)] = -1.462
BIND_NBC100['%s-BzBz_PD36-0.6' % (dbse)] = -1.708
BIND_NBC100['%s-BzBz_PD36-0.8' % (dbse)] = -1.984
BIND_NBC100['%s-BzBz_PD36-1.0' % (dbse)] = -2.248
BIND_NBC100['%s-BzBz_PD36-1.2' % (dbse)] = -2.458
BIND_NBC100['%s-BzBz_PD36-1.4' % (dbse)] = -2.597
BIND_NBC100['%s-BzBz_PD36-1.5' % (dbse)] = -2.635
BIND_NBC100['%s-BzBz_PD36-1.6' % (dbse)] = -2.652
BIND_NBC100['%s-BzBz_PD36-1.7' % (dbse)] = -2.654 # BzBz_PD36 minimum
BIND_NBC100['%s-BzBz_PD36-1.8' % (dbse)] = -2.642
BIND_NBC100['%s-BzBz_PD36-1.9' % (dbse)] = -2.615
BIND_NBC100['%s-BzBz_PD36-2.0' % (dbse)] = -2.575
BIND_NBC100['%s-BzBz_PD36-2.2' % (dbse)] = -2.473
BIND_NBC100['%s-BzBz_PD36-2.4' % (dbse)] = -2.356
BIND_NBC100['%s-BzBz_PD36-2.6' % (dbse)] = -2.240
BIND_NBC100['%s-BzBz_PD36-2.8' % (dbse)] = -2.130
BIND_NBC100['%s-BzBz_PD36-3.0' % (dbse)] = -2.035
# Current revision
BIND_NBC10A = {}
BIND_NBC10A['%s-BzBz_S-3.2' % (dbse)] = 3.462
BIND_NBC10A['%s-BzBz_S-3.3' % (dbse)] = 1.484
BIND_NBC10A['%s-BzBz_S-3.4' % (dbse)] = 0.147
BIND_NBC10A['%s-BzBz_S-3.5' % (dbse)] = -0.724
BIND_NBC10A['%s-BzBz_S-3.6' % (dbse)] = -1.259
BIND_NBC10A['%s-BzBz_S-3.7' % (dbse)] = -1.558
BIND_NBC10A['%s-BzBz_S-3.8' % (dbse)] = -1.693
BIND_NBC10A['%s-BzBz_S-3.9' % (dbse)] = -1.717 # BzBz_S minimum
BIND_NBC10A['%s-BzBz_S-4.0' % (dbse)] = -1.669
BIND_NBC10A['%s-BzBz_S-4.1' % (dbse)] = -1.577
BIND_NBC10A['%s-BzBz_S-4.2' % (dbse)] = -1.459
BIND_NBC10A['%s-BzBz_S-4.5' % (dbse)] = -1.066
BIND_NBC10A['%s-BzBz_S-5.0' % (dbse)] = -0.546
BIND_NBC10A['%s-BzBz_S-5.5' % (dbse)] = -0.251
BIND_NBC10A['%s-BzBz_S-6.0' % (dbse)] = -0.101
BIND_NBC10A['%s-BzBz_S-6.5' % (dbse)] = -0.029
BIND_NBC10A['%s-BzBz_S-10.0' % (dbse)] = 0.018
BIND_NBC10A['%s-BzBz_T-4.4' % (dbse)] = 0.617
BIND_NBC10A['%s-BzBz_T-4.5' % (dbse)] = -0.769
BIND_NBC10A['%s-BzBz_T-4.6' % (dbse)] = -1.682
BIND_NBC10A['%s-BzBz_T-4.7' % (dbse)] = -2.246
BIND_NBC10A['%s-BzBz_T-4.8' % (dbse)] = -2.559
BIND_NBC10A['%s-BzBz_T-4.9' % (dbse)] = -2.693
BIND_NBC10A['%s-BzBz_T-5.0' % (dbse)] = -2.703 # BzBz_T minimum
BIND_NBC10A['%s-BzBz_T-5.1' % (dbse)] = -2.630
BIND_NBC10A['%s-BzBz_T-5.2' % (dbse)] = -2.506
BIND_NBC10A['%s-BzBz_T-5.3' % (dbse)] = -2.351
BIND_NBC10A['%s-BzBz_T-5.4' % (dbse)] = -2.181
BIND_NBC10A['%s-BzBz_T-5.5' % (dbse)] = -2.006
BIND_NBC10A['%s-BzBz_T-5.6' % (dbse)] = -1.834
BIND_NBC10A['%s-BzBz_T-6.0' % (dbse)] = -1.242
BIND_NBC10A['%s-BzBz_T-6.5' % (dbse)] = -0.752
BIND_NBC10A['%s-BzBz_T-7.0' % (dbse)] = -0.468
BIND_NBC10A['%s-BzBz_T-7.5' % (dbse)] = -0.302
BIND_NBC10A['%s-BzBz_T-8.0' % (dbse)] = -0.203
BIND_NBC10A['%s-BzBz_PD34-0.2' % (dbse)] = 0.029
BIND_NBC10A['%s-BzBz_PD34-0.4' % (dbse)] = -0.298
BIND_NBC10A['%s-BzBz_PD34-0.6' % (dbse)] = -0.768
BIND_NBC10A['%s-BzBz_PD34-0.8' % (dbse)] = -1.298
BIND_NBC10A['%s-BzBz_PD34-1.0' % (dbse)] = -1.802
BIND_NBC10A['%s-BzBz_PD34-1.2' % (dbse)] = -2.213
BIND_NBC10A['%s-BzBz_PD34-1.4' % (dbse)] = -2.497
BIND_NBC10A['%s-BzBz_PD34-1.5' % (dbse)] = -2.586
BIND_NBC10A['%s-BzBz_PD34-1.6' % (dbse)] = -2.643
BIND_NBC10A['%s-BzBz_PD34-1.7' % (dbse)] = -2.668
BIND_NBC10A['%s-BzBz_PD34-1.8' % (dbse)] = -2.670 # BzBz_PD34 minimum
BIND_NBC10A['%s-BzBz_PD34-1.9' % (dbse)] = -2.649
BIND_NBC10A['%s-BzBz_PD34-2.0' % (dbse)] = -2.611
BIND_NBC10A['%s-BzBz_PD34-2.2' % (dbse)] = -2.501
BIND_NBC10A['%s-BzBz_PD34-2.4' % (dbse)] = -2.377
BIND_NBC10A['%s-BzBz_PD34-2.6' % (dbse)] = -2.260
BIND_NBC10A['%s-BzBz_PD34-2.8' % (dbse)] = -2.163
BIND_NBC10A['%s-BzBz_PD34-3.0' % (dbse)] = -2.093
BIND_NBC10A['%s-BzH2S-3.2' % (dbse)] = 1.236
BIND_NBC10A['%s-BzH2S-3.4' % (dbse)] = -1.584
BIND_NBC10A['%s-BzH2S-3.5' % (dbse)] = -2.269
BIND_NBC10A['%s-BzH2S-3.6' % (dbse)] = -2.649
BIND_NBC10A['%s-BzH2S-3.7' % (dbse)] = -2.818
BIND_NBC10A['%s-BzH2S-3.8' % (dbse)] = -2.843 # BzH2S minimum
BIND_NBC10A['%s-BzH2S-3.9' % (dbse)] = -2.773
BIND_NBC10A['%s-BzH2S-4.0' % (dbse)] = -2.645
BIND_NBC10A['%s-BzH2S-4.1' % (dbse)] = -2.483
BIND_NBC10A['%s-BzH2S-4.2' % (dbse)] = -2.305
BIND_NBC10A['%s-BzH2S-4.5' % (dbse)] = -1.771
BIND_NBC10A['%s-BzH2S-4.75' % (dbse)] = -1.393
BIND_NBC10A['%s-BzH2S-5.0' % (dbse)] = -1.092
BIND_NBC10A['%s-BzH2S-5.25' % (dbse)] = -0.859
BIND_NBC10A['%s-BzH2S-5.5' % (dbse)] = -0.682
BIND_NBC10A['%s-BzH2S-6.0' % (dbse)] = -0.444
BIND_NBC10A['%s-BzH2S-6.5' % (dbse)] = -0.301
BIND_NBC10A['%s-BzH2S-7.0' % (dbse)] = -0.212
BIND_NBC10A['%s-BzH2S-7.5' % (dbse)] = -0.154
BIND_NBC10A['%s-BzMe-3.2' % (dbse)] = 0.686
BIND_NBC10A['%s-BzMe-3.3' % (dbse)] = -0.213
BIND_NBC10A['%s-BzMe-3.4' % (dbse)] = -0.805
BIND_NBC10A['%s-BzMe-3.5' % (dbse)] = -1.173
BIND_NBC10A['%s-BzMe-3.6' % (dbse)] = -1.378
BIND_NBC10A['%s-BzMe-3.7' % (dbse)] = -1.470
BIND_NBC10A['%s-BzMe-3.8' % (dbse)] = -1.484 # BzMe minimum
BIND_NBC10A['%s-BzMe-3.9' % (dbse)] = -1.445
BIND_NBC10A['%s-BzMe-4.0' % (dbse)] = -1.374
BIND_NBC10A['%s-BzMe-4.1' % (dbse)] = -1.284
BIND_NBC10A['%s-BzMe-4.2' % (dbse)] = -1.185
BIND_NBC10A['%s-BzMe-4.4' % (dbse)] = -0.984
BIND_NBC10A['%s-BzMe-4.6' % (dbse)] = -0.800
BIND_NBC10A['%s-BzMe-4.8' % (dbse)] = -0.643
BIND_NBC10A['%s-BzMe-5.0' % (dbse)] = -0.515
BIND_NBC10A['%s-BzMe-5.2' % (dbse)] = -0.413
BIND_NBC10A['%s-BzMe-5.4' % (dbse)] = -0.332
BIND_NBC10A['%s-BzMe-5.6' % (dbse)] = -0.268
BIND_NBC10A['%s-BzMe-6.0' % (dbse)] = -0.177
BIND_NBC10A['%s-MeMe-3.2' % (dbse)] = 0.069
BIND_NBC10A['%s-MeMe-3.3' % (dbse)] = -0.239
BIND_NBC10A['%s-MeMe-3.4' % (dbse)] = -0.417
BIND_NBC10A['%s-MeMe-3.5' % (dbse)] = -0.508
BIND_NBC10A['%s-MeMe-3.6' % (dbse)] = -0.541 # MeMe minimum
BIND_NBC10A['%s-MeMe-3.7' % (dbse)] = -0.539
BIND_NBC10A['%s-MeMe-3.8' % (dbse)] = -0.515
BIND_NBC10A['%s-MeMe-3.9' % (dbse)] = -0.480
BIND_NBC10A['%s-MeMe-4.0' % (dbse)] = -0.439
BIND_NBC10A['%s-MeMe-4.1' % (dbse)] = -0.396
BIND_NBC10A['%s-MeMe-4.2' % (dbse)] = -0.354
BIND_NBC10A['%s-MeMe-4.3' % (dbse)] = -0.315
BIND_NBC10A['%s-MeMe-4.4' % (dbse)] = -0.279
BIND_NBC10A['%s-MeMe-4.6' % (dbse)] = -0.217
BIND_NBC10A['%s-MeMe-4.8' % (dbse)] = -0.168
BIND_NBC10A['%s-MeMe-5.0' % (dbse)] = -0.130
BIND_NBC10A['%s-MeMe-5.4' % (dbse)] = -0.080
BIND_NBC10A['%s-MeMe-5.8' % (dbse)] = -0.050
BIND_NBC10A['%s-PyPy_S2-3.1' % (dbse)] = 2.387
BIND_NBC10A['%s-PyPy_S2-3.3' % (dbse)] = -1.165
BIND_NBC10A['%s-PyPy_S2-3.4' % (dbse)] = -2.050
BIND_NBC10A['%s-PyPy_S2-3.5' % (dbse)] = -2.562
BIND_NBC10A['%s-PyPy_S2-3.6' % (dbse)] = -2.815
BIND_NBC10A['%s-PyPy_S2-3.7' % (dbse)] = -2.890 # PyPy_S2 minimum
BIND_NBC10A['%s-PyPy_S2-3.8' % (dbse)] = -2.849
BIND_NBC10A['%s-PyPy_S2-3.9' % (dbse)] = -2.733
BIND_NBC10A['%s-PyPy_S2-4.0' % (dbse)] = -2.573
BIND_NBC10A['%s-PyPy_S2-4.1' % (dbse)] = -2.391
BIND_NBC10A['%s-PyPy_S2-4.2' % (dbse)] = -2.201
BIND_NBC10A['%s-PyPy_S2-4.3' % (dbse)] = -2.012
BIND_NBC10A['%s-PyPy_S2-4.4' % (dbse)] = -1.830
BIND_NBC10A['%s-PyPy_S2-4.5' % (dbse)] = -1.660
BIND_NBC10A['%s-PyPy_S2-4.7' % (dbse)] = -1.357
BIND_NBC10A['%s-PyPy_S2-5.0' % (dbse)] = -1.002
BIND_NBC10A['%s-PyPy_S2-5.5' % (dbse)] = -0.619
BIND_NBC10A['%s-PyPy_S2-6.0' % (dbse)] = -0.402
BIND_NBC10A['%s-PyPy_S2-6.5' % (dbse)] = -0.276
BIND_NBC10A['%s-PyPy_S2-7.0' % (dbse)] = -0.200
BIND_NBC10A['%s-PyPy_T3-4.1' % (dbse)] = 9.341
BIND_NBC10A['%s-PyPy_T3-4.3' % (dbse)] = 1.991
BIND_NBC10A['%s-PyPy_T3-4.5' % (dbse)] = -1.377
BIND_NBC10A['%s-PyPy_T3-4.6' % (dbse)] = -2.203
BIND_NBC10A['%s-PyPy_T3-4.7' % (dbse)] = -2.673
BIND_NBC10A['%s-PyPy_T3-4.8' % (dbse)] = -2.896
BIND_NBC10A['%s-PyPy_T3-4.9' % (dbse)] = -2.954 # PyPy_T3 minimum
BIND_NBC10A['%s-PyPy_T3-5.0' % (dbse)] = -2.903
BIND_NBC10A['%s-PyPy_T3-5.1' % (dbse)] = -2.783
BIND_NBC10A['%s-PyPy_T3-5.2' % (dbse)] = -2.625
BIND_NBC10A['%s-PyPy_T3-5.3' % (dbse)] = -2.447
BIND_NBC10A['%s-PyPy_T3-5.4' % (dbse)] = -2.262
BIND_NBC10A['%s-PyPy_T3-5.5' % (dbse)] = -2.080
BIND_NBC10A['%s-PyPy_T3-5.7' % (dbse)] = -1.741
BIND_NBC10A['%s-PyPy_T3-6.0' % (dbse)] = -1.323
BIND_NBC10A['%s-PyPy_T3-6.5' % (dbse)] = -0.852
BIND_NBC10A['%s-PyPy_T3-7.0' % (dbse)] = -0.573
BIND_NBC10A['%s-PyPy_T3-8.0' % (dbse)] = -0.296
BIND_NBC10A['%s-PyPy_T3-9.0' % (dbse)] = -0.174
BIND_NBC10A['%s-BzBz_PD32-0.2' % (dbse)] = 3.241
BIND_NBC10A['%s-BzBz_PD32-0.4' % (dbse)] = 2.619
BIND_NBC10A['%s-BzBz_PD32-0.6' % (dbse)] = 1.726
BIND_NBC10A['%s-BzBz_PD32-0.8' % (dbse)] = 0.726
BIND_NBC10A['%s-BzBz_PD32-1.0' % (dbse)] = -0.222
BIND_NBC10A['%s-BzBz_PD32-1.2' % (dbse)] = -1.002
BIND_NBC10A['%s-BzBz_PD32-1.4' % (dbse)] = -1.553
BIND_NBC10A['%s-BzBz_PD32-1.5' % (dbse)] = -1.738
BIND_NBC10A['%s-BzBz_PD32-1.6' % (dbse)] = -1.868
BIND_NBC10A['%s-BzBz_PD32-1.7' % (dbse)] = -1.949
BIND_NBC10A['%s-BzBz_PD32-1.8' % (dbse)] = -1.988
BIND_NBC10A['%s-BzBz_PD32-1.9' % (dbse)] = -1.992 # BzBz_PD32 minimum
BIND_NBC10A['%s-BzBz_PD32-2.0' % (dbse)] = -1.971
BIND_NBC10A['%s-BzBz_PD32-2.2' % (dbse)] = -1.891
BIND_NBC10A['%s-BzBz_PD32-2.4' % (dbse)] = -1.795
BIND_NBC10A['%s-BzBz_PD32-2.6' % (dbse)] = -1.727
BIND_NBC10A['%s-BzBz_PD32-2.8' % (dbse)] = -1.702
BIND_NBC10A['%s-BzBz_PD32-3.0' % (dbse)] = -1.725
BIND_NBC10A['%s-BzBz_PD36-0.2' % (dbse)] = -1.321
BIND_NBC10A['%s-BzBz_PD36-0.4' % (dbse)] = -1.490
BIND_NBC10A['%s-BzBz_PD36-0.6' % (dbse)] = -1.735
BIND_NBC10A['%s-BzBz_PD36-0.8' % (dbse)] = -2.011
BIND_NBC10A['%s-BzBz_PD36-1.0' % (dbse)] = -2.273
BIND_NBC10A['%s-BzBz_PD36-1.2' % (dbse)] = -2.482
BIND_NBC10A['%s-BzBz_PD36-1.4' % (dbse)] = -2.619
BIND_NBC10A['%s-BzBz_PD36-1.5' % (dbse)] = -2.657
BIND_NBC10A['%s-BzBz_PD36-1.6' % (dbse)] = -2.674
BIND_NBC10A['%s-BzBz_PD36-1.7' % (dbse)] = -2.675 # BzBz_PD36 minimum
BIND_NBC10A['%s-BzBz_PD36-1.8' % (dbse)] = -2.662
BIND_NBC10A['%s-BzBz_PD36-1.9' % (dbse)] = -2.633
BIND_NBC10A['%s-BzBz_PD36-2.0' % (dbse)] = -2.593
BIND_NBC10A['%s-BzBz_PD36-2.2' % (dbse)] = -2.489
BIND_NBC10A['%s-BzBz_PD36-2.4' % (dbse)] = -2.371
BIND_NBC10A['%s-BzBz_PD36-2.6' % (dbse)] = -2.253
BIND_NBC10A['%s-BzBz_PD36-2.8' % (dbse)] = -2.143
BIND_NBC10A['%s-BzBz_PD36-3.0' % (dbse)] = -2.046
# Set default
BIND = BIND_NBC10A
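# A minimal sketch of how the benchmark dictionary is consumed (illustrative
# only; _curve_minimum is a hypothetical helper, not part of the database
# interface). Keys follow the '<dbse>-<system>-<distance>' pattern, so e.g.
# BIND['%s-BzBz_S-3.9' % dbse] is the sandwich benzene dimer at 3.9 A.
def _curve_minimum(bind, dbse, system, distances):
    """Return (distance, energy) at the lowest point of a dissociation curve."""
    return min(((d, bind['%s-%s-%s' % (dbse, system, d)]) for d in distances),
               key=lambda pair: pair[1])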
# <<< Comment Lines >>>
TAGL = {}
rxnpattern = re.compile(r'^(.+)-(.+)$')
for item in BzBz_S:
distance = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'Sandwich Benzene Dimer at %s A' % (distance.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Sandwich Benzene Dimer at %s A' % (distance.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Benzene from Sandwich Benzene Dimer at %s A' % (distance.group(2))
for item in BzBz_T:
distance = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'T-shaped Benzene Dimer at %s A' % (distance.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'T-shaped Benzene Dimer at %s A' % (distance.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Benzene from T-shaped Benzene Dimer at %s A' % (distance.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Benzene from T-shaped Benzene Dimer at %s A' % (distance.group(2))
for item in BzBz_PD34:
distance = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'Parallel Displaced Benzene Dimer Interplane 3.4 at %s A' % (distance.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Parallel Displaced Benzene Dimer Interplane 3.4 at %s A' % (distance.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Benzene from Parallel Displaced Benzene Dimer Interplane 3.4 at %s A' % (distance.group(2))
for item in BzH2S:
distance = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'Benzene-H2S at %s A' % (distance.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Benzene-H2S at %s A' % (distance.group(2))
    TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Benzene from Benzene-H2S at %s A' % (distance.group(2))
    TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Hydrogen Sulfide from Benzene-H2S at %s A' % (distance.group(2))
for item in BzMe:
distance = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'Benzene-Methane at %s A' % (distance.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Benzene-Methane at %s A' % (distance.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Benzene from Benzene-Methane at %s A' % (distance.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Methane from Benzene-Methane at %s A' % (distance.group(2))
for item in MeMe:
distance = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'Methane Dimer at %s A' % (distance.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Methane Dimer at %s A' % (distance.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Methane from Methane Dimer at %s A' % (distance.group(2))
for item in PyPy_S2:
distance = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'Pyridine Dimer S2 Configuration at %s A' % (distance.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Pyridine Dimer S2 Configuration at %s A' % (distance.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Pyridine from Pyridine Dimer S2 Configuration at %s A' % (distance.group(2))
for item in PyPy_T3:
distance = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'Pyridine Dimer T3 Configuration at %s A' % (distance.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Pyridine Dimer T3 Configuration at %s A' % (distance.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Pyridine from Pyridine Dimer T3 Configuration at %s A' % (distance.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Pyridine from Pyridine Dimer T3 Configuration at %s A' % (distance.group(2))
for item in BzBz_PD32:
distance = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'Parallel Displaced Benzene Dimer Interplane 3.2 at %s A' % (distance.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Parallel Displaced Benzene Dimer Interplane 3.2 at %s A' % (distance.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Benzene from Parallel Displaced Benzene Dimer Interplane 3.2 at %s A' % (distance.group(2))
for item in BzBz_PD36:
distance = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'Parallel Displaced Benzene Dimer Interplane 3.6 at %s A' % (distance.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Parallel Displaced Benzene Dimer Interplane 3.6 at %s A' % (distance.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Benzene from Parallel Displaced Benzene Dimer Interplane 3.6 at %s A' % (distance.group(2))
TAGL['%s-Bz-mono-unCP' % (dbse)] = 'Benzene'
TAGL['%s-H2S-mono-unCP' % (dbse)] = 'Hydrogen Sulfide'
TAGL['%s-Bz2-mono-unCP' % (dbse)] = 'Benzene (alt. geometry)'
TAGL['%s-Me-mono-unCP' % (dbse)] = 'Methane'
TAGL['%s-Py-mono-unCP' % (dbse)] = 'Pyridine'
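# A minimal, illustrative check of the label parsing above: rxnpattern
# splits a reaction name at its last hyphen into the system and the
# intermonomer distance (the first group is greedy).
_m = rxnpattern.match('BzBz_S-3.9')
assert (_m.group(1), _m.group(2)) == ('BzBz_S', '3.9')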
# <<< Geometry Specification Strings >>>
GEOS = {}
for rxn in BzBz_S:
molname = rxnpattern.match(rxn)
Rval = float(molname.group(2))
GEOS['%s-%s-%s' % (dbse, rxn, 'dimer')] = qcdb.Molecule("""
0 1
X
X 1 RXX
X 2 RXX 1 90.0
C 3 RCC 2 90.0 1 0.0
C 3 RCC 2 90.0 1 60.0
C 3 RCC 2 90.0 1 120.0
C 3 RCC 2 90.0 1 180.0
C 3 RCC 2 90.0 1 240.0
C 3 RCC 2 90.0 1 300.0
H 3 RCH 2 90.0 1 0.0
H 3 RCH 2 90.0 1 60.0
H 3 RCH 2 90.0 1 120.0
H 3 RCH 2 90.0 1 180.0
H 3 RCH 2 90.0 1 240.0
H 3 RCH 2 90.0 1 300.0
--
0 1
X 3 RXX 2 90.0 1 0.0
X 3 R 16 90.0 2 180.0
X 3 DRXX 16 90.0 2 180.0
X 18 RXX 17 90.0 16 0.0
C 17 RCC 18 90.0 19 0.0
C 17 RCC 18 90.0 19 60.0
C 17 RCC 18 90.0 19 120.0
C 17 RCC 18 90.0 19 180.0
C 17 RCC 18 90.0 19 240.0
C 17 RCC 18 90.0 19 300.0
H 17 RCH 18 90.0 19 0.0
H 17 RCH 18 90.0 19 60.0
H 17 RCH 18 90.0 19 120.0
H 17 RCH 18 90.0 19 180.0
H 17 RCH 18 90.0 19 240.0
H 17 RCH 18 90.0 19 300.0
RXX = 1.0
DRXX = 12.0
RCC = 1.3915
RCH = 2.4715
R = %(Rval)s
units angstrom
""" % vars())
for rxn in BzBz_T:
molname = rxnpattern.match(rxn)
Rval = float(molname.group(2))
GEOS['%s-%s-%s' % (dbse, rxn, 'dimer')] = qcdb.Molecule("""
0 1
X
X 1 RXX
X 2 RXX 1 90.0
C 3 RCC 2 90.0 1 0.0
C 3 RCC 2 90.0 1 60.0
C 3 RCC 2 90.0 1 120.0
C 3 RCC 2 90.0 1 180.0
C 3 RCC 2 90.0 1 240.0
C 3 RCC 2 90.0 1 300.0
H 3 RCH 2 90.0 1 0.0
H 3 RCH 2 90.0 1 60.0
H 3 RCH 2 90.0 1 120.0
H 3 RCH 2 90.0 1 180.0
H 3 RCH 2 90.0 1 240.0
H 3 RCH 2 90.0 1 300.0
--
0 1
X 3 RXX 2 90.0 1 0.0
X 3 R 16 90.0 1 0.0
X 17 RXX 3 90.0 16 180.0
X 18 RXX 17 90.0 3 0.0
C 17 RCC 18 90.0 19 0.0
C 17 RCC 18 90.0 19 60.0
C 17 RCC 18 90.0 19 120.0
C 17 RCC 18 90.0 19 180.0
C 17 RCC 18 90.0 19 240.0
C 17 RCC 18 90.0 19 300.0
H 17 RCH 18 90.0 19 0.0
H 17 RCH 18 90.0 19 60.0
H 17 RCH 18 90.0 19 120.0
H 17 RCH 18 90.0 19 180.0
H 17 RCH 18 90.0 19 240.0
H 17 RCH 18 90.0 19 300.0
RXX = 1.0
RCC = 1.3915
RCH = 2.4715
R = %(Rval)s
units angstrom
""" % vars())
for rxn in sum([BzBz_PD32, BzBz_PD34, BzBz_PD36], []):
molname = rxnpattern.match(rxn)
Rval = float(molname.group(2))
if rxn in BzBz_PD32:
R2val = 3.2
elif rxn in BzBz_PD34:
R2val = 3.4
elif rxn in BzBz_PD36:
R2val = 3.6
GEOS['%s-%s-%s' % (dbse, rxn, 'dimer')] = qcdb.Molecule("""
0 1
X
X 1 RXX
X 2 R2 1 90.0
C 3 RCC 2 90.0 1 0.0
C 3 RCC 2 90.0 1 60.0
C 3 RCC 2 90.0 1 120.0
C 3 RCC 2 90.0 1 180.0
C 3 RCC 2 90.0 1 240.0
C 3 RCC 2 90.0 1 300.0
H 3 RCH 2 90.0 1 0.0
H 3 RCH 2 90.0 1 60.0
H 3 RCH 2 90.0 1 120.0
H 3 RCH 2 90.0 1 180.0
H 3 RCH 2 90.0 1 240.0
H 3 RCH 2 90.0 1 300.0
--
0 1
X 3 RXX 2 90.0 1 0.0
X 2 R 3 90.0 16 90.0
X 17 RXX 2 90.0 1 90.0
X 18 RXX 17 90.0 2 90.0
C 17 RCC 18 90.0 19 0.0
C 17 RCC 18 90.0 19 60.0
C 17 RCC 18 90.0 19 120.0
C 17 RCC 18 90.0 19 180.0
C 17 RCC 18 90.0 19 240.0
C 17 RCC 18 90.0 19 300.0
H 17 RCH 18 90.0 19 0.0
H 17 RCH 18 90.0 19 60.0
H 17 RCH 18 90.0 19 120.0
H 17 RCH 18 90.0 19 180.0
H 17 RCH 18 90.0 19 240.0
H 17 RCH 18 90.0 19 300.0
RXX = 1.0
RCC = 1.3915
RCH = 2.4715
R = %(Rval)s
R2 = %(R2val)s
units angstrom
""" % vars())
for rxn in BzH2S:
molname = rxnpattern.match(rxn)
Rval = float(molname.group(2))
GEOS['%s-%s-%s' % (dbse, rxn, 'dimer')] = qcdb.Molecule("""
0 1
X
X 1 1.0
C 2 BZCX 1 90.0
C 2 BZCX 1 90.0 3 60.0
C 2 BZCX 1 90.0 4 60.0
C 2 BZCX 1 90.0 5 60.0
C 2 BZCX 1 90.0 6 60.0
C 2 BZCX 1 90.0 7 60.0
X 3 1.0 2 90.0 1 0.0
H 3 BZHC 9 90.0 2 180.0
H 4 BZHC 3 120.0 2 180.0
H 5 BZHC 4 120.0 2 180.0
H 6 BZHC 5 120.0 2 180.0
H 7 BZHC 6 120.0 2 180.0
H 8 BZHC 7 120.0 2 180.0
--
0 1
S 2 R 3 90.0 4 90.0
H 16 HS 2 HHSH 9 180.0
H 16 HS 2 HHSH 9 0.0
BZCX = 1.3915
BZHC = 1.0800
HS = 1.3356
HHSH = 46.06
R = %(Rval)s
units angstrom
""" % vars())
for rxn in BzMe:
molname = rxnpattern.match(rxn)
Rval = float(molname.group(2))
GEOS['%s-%s-%s' % (dbse, rxn, 'dimer')] = qcdb.Molecule("""
0 1
X
X 1 1.0
C 2 CQ 1 90.0
C 3 CQ 2 60.0 1 90.0
C 4 CQ 2 60.0 1 90.0
C 5 CQ 2 60.0 1 90.0
C 6 CQ 2 60.0 1 90.0
C 7 CQ 2 60.0 1 90.0
X 3 1.0 2 90.0 1 0.0
H 3 CH1 9 90.0 2 180.0
H 4 CH1 3 120.0 2 180.0
H 5 CH1 4 120.0 2 180.0
H 6 CH1 5 120.0 2 180.0
H 7 CH1 6 120.0 2 180.0
H 8 CH1 7 120.0 2 180.0
--
0 1
C 2 R 3 90.0 9 0.0
H 16 CH2 2 0.0 3 0.0
H 16 CH2 2 HCH 3 0.0
H 16 CH2 17 HCH 18 120.0
H 16 CH2 17 HCH 18 240.0
CQ = 1.405731
CH1 = 1.095210
CH2 = 1.099503
HCH = 109.471209
R = %(Rval)s
units angstrom
""" % vars())
for rxn in MeMe:
molname = rxnpattern.match(rxn)
Rval = float(molname.group(2))
GEOS['%s-%s-%s' % (dbse, rxn, 'dimer')] = qcdb.Molecule("""
0 1
C
H 1 CH2
H 1 CH2 2 HCH
H 1 CH2 2 HCH 3 120.0
H 1 CH2 2 HCH 3 240.0
--
0 1
C 1 R 2 180.0 4 120.0
H 6 CH2 2 180.0 4 120.0
H 6 CH2 7 HCH 3 180.0
H 6 CH2 7 HCH 4 180.0
H 6 CH2 7 HCH 5 180.0
CH2 = 1.099503
HCH = 109.471209
R = %(Rval)s
units angstrom
""" % vars())
for rxn in PyPy_S2:
molname = rxnpattern.match(rxn)
Rval = float(molname.group(2))
GEOS['%s-%s-%s' % (dbse, rxn, 'dimer')] = qcdb.Molecule("""
0 1
X
X 1 R
N 1 1.3980380 2 90.0
C 1 1.3371053 2 90.0 3 -58.504950
C 1 1.3822904 2 90.0 4 -61.640500
C 1 1.4067471 2 90.0 5 -59.854550
C 1 1.3822904 2 90.0 6 -59.854550
C 1 1.3371053 2 90.0 7 -61.640500
H 4 1.08650 3 116.01 8 180.0
H 5 1.08260 4 120.12 3 180.0
H 6 1.08180 3 180.00 4 0.0
H 7 1.08260 8 120.12 3 180.0
H 8 1.08650 3 116.01 4 180.0
--
0 1
N 2 1.3980380 1 90.0 3 theta
C 2 1.3371053 1 90.0 14 -58.504950
C 2 1.3822904 1 90.0 15 -61.640500
C 2 1.4067471 1 90.0 16 -59.854550
C 2 1.3822904 1 90.0 17 -59.854550
C 2 1.3371053 1 90.0 18 -61.640500
H 15 1.08650 14 116.01 19 180.0
H 16 1.08260 15 120.12 14 180.0
H 17 1.08180 14 180.00 15 0.0
H 18 1.08260 19 120.12 14 180.0
H 19 1.08650 14 116.01 15 180.0
theta = 180.0
R = %(Rval)s
units angstrom
""" % vars())
for rxn in PyPy_T3:
molname = rxnpattern.match(rxn)
Rval = float(molname.group(2))
GEOS['%s-%s-%s' % (dbse, rxn, 'dimer')] = qcdb.Molecule("""
0 1
X
X 1 R
N 1 1.3980380 2 90.0
C 1 1.3371053 2 90.0 3 -58.504950
C 1 1.3822904 2 90.0 4 -61.640500
C 1 1.4067471 2 90.0 5 -59.854550
C 1 1.3822904 2 90.0 6 -59.854550
C 1 1.3371053 2 90.0 7 -61.640500
H 4 1.08650 3 116.01 8 180.0
H 5 1.08260 4 120.12 3 180.0
H 6 1.08180 3 180.00 4 0.0
H 7 1.08260 8 120.12 3 180.0
H 8 1.08650 3 116.01 4 180.0
--
0 1
X 2 2.0000000 1 90.0 3 theta
N 2 1.3980380 14 90.0 1 updown
C 2 1.3371053 14 90.0 15 -58.504950
C 2 1.3822904 14 90.0 16 -61.640500
C 2 1.4067471 14 90.0 17 -59.854550
C 2 1.3822904 14 90.0 18 -59.854550
C 2 1.3371053 14 90.0 19 -61.640500
H 16 1.08650 15 116.01 20 180.0
H 17 1.08260 16 120.12 15 180.0
H 18 1.08180 15 180.00 16 0.0
H 19 1.08260 20 120.12 15 180.0
H 20 1.08650 15 116.01 16 180.0
theta = 90.0
updown = 270.0
R = %(Rval)s
units angstrom
""" % vars())
GEOS['%s-%s-%s' % (dbse, 'Bz', 'mono-unCP')] = qcdb.Molecule("""
0 1
X
X 1 RXX
X 2 RXX 1 90.0
C 3 RCC 2 90.0 1 0.0
C 3 RCC 2 90.0 1 60.0
C 3 RCC 2 90.0 1 120.0
C 3 RCC 2 90.0 1 180.0
C 3 RCC 2 90.0 1 240.0
C 3 RCC 2 90.0 1 300.0
H 3 RCH 2 90.0 1 0.0
H 3 RCH 2 90.0 1 60.0
H 3 RCH 2 90.0 1 120.0
H 3 RCH 2 90.0 1 180.0
H 3 RCH 2 90.0 1 240.0
H 3 RCH 2 90.0 1 300.0
RXX = 1.0
RCC = 1.3915
RCH = 2.4715
units angstrom
""" % vars())
GEOS['%s-%s-%s' % (dbse, 'H2S', 'mono-unCP')] = qcdb.Molecule("""
0 1
S
H 1 HS
H 1 HS 2 HSH
HS = 1.3356
HSH = 92.12
units angstrom
""" % vars())
GEOS['%s-%s-%s' % (dbse, 'Bz2', 'mono-unCP')] = qcdb.Molecule("""
0 1
X
X 1 1.0
C 2 CQ 1 90.0
C 3 CQ 2 60.0 1 90.0
C 4 CQ 2 60.0 1 90.0
C 5 CQ 2 60.0 1 90.0
C 6 CQ 2 60.0 1 90.0
C 7 CQ 2 60.0 1 90.0
X 3 1.0 2 90.0 1 0.0
H 3 CH1 9 90.0 2 180.0
H 4 CH1 3 120.0 2 180.0
H 5 CH1 4 120.0 2 180.0
H 6 CH1 5 120.0 2 180.0
H 7 CH1 6 120.0 2 180.0
H 8 CH1 7 120.0 2 180.0
CQ = 1.405731
CH1 = 1.095210
units angstrom
""" % vars())
GEOS['%s-%s-%s' % (dbse, 'Me', 'mono-unCP')] = qcdb.Molecule("""
0 1
C
H 1 CH2
H 1 CH2 2 HCH
H 1 CH2 2 HCH 3 120.0
H 1 CH2 2 HCH 3 240.0
CH2 = 1.099503
HCH = 109.471209
units angstrom
""" % vars())
GEOS['%s-%s-%s' % (dbse, 'Py', 'mono-unCP')] = qcdb.Molecule("""
0 1
X
X 1 RXX
N 1 1.3980380 2 90.0
C 1 1.3371053 2 90.0 3 -58.504950
C 1 1.3822904 2 90.0 4 -61.640500
C 1 1.4067471 2 90.0 5 -59.854550
C 1 1.3822904 2 90.0 6 -59.854550
C 1 1.3371053 2 90.0 7 -61.640500
H 4 1.08650 3 116.01 8 180.0
H 5 1.08260 4 120.12 3 180.0
H 6 1.08180 3 180.00 4 0.0
H 7 1.08260 8 120.12 3 180.0
H 8 1.08650 3 116.01 4 180.0
RXX = 1.0
units angstrom
""" % vars())
# <<< Derived Geometry Strings >>>
for rxn in HRXN:
GEOS['%s-%s-monoA-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1, 2)
GEOS['%s-%s-monoB-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2, 1)
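# Descriptive note: extract_fragments(real, ghost) carves each counterpoise
# monomer out of its dimer, keeping the first fragment as real atoms and
# retaining the second as ghost centers, so every monomer is evaluated in
# the full dimer basis (see the qcdb documentation for the authoritative
# signature).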
#########################################################################
# <<< Supplementary Quantum Chemical Results >>>
DATA = {}
DATA['NUCLEAR REPULSION ENERGY'] = {}
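# Reference nuclear repulsion energies for each geometry, keyed by the
# reaction/fragment label (values presumably in hartree, as is conventional
# for these supplementary results).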
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.2-dimer' ] = 652.58240326
DATA['NUCLEAR REPULSION ENERGY']['NBC1-Bz-mono-unCP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.3-dimer' ] = 647.08083072
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.4-dimer' ] = 641.79881504
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.5-dimer' ] = 636.72435401
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.6-dimer' ] = 631.84627841
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.7-dimer' ] = 627.15417831
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.8-dimer' ] = 622.63833806
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.9-dimer' ] = 618.28967853
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.0-dimer' ] = 614.09970566
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.1-dimer' ] = 610.06046424
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.2-dimer' ] = 606.16449631
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.5-dimer' ] = 595.26834684
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-5.0-dimer' ] = 579.39688238
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-5.5-dimer' ] = 565.87021271
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-6.0-dimer' ] = 554.22625379
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-6.5-dimer' ] = 544.11253672
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-10.0-dimer' ] = 499.16037479
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.4-dimer' ] = 613.04854518
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.5-dimer' ] = 608.81636557
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.6-dimer' ] = 604.74550671
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.7-dimer' ] = 600.82787505
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.8-dimer' ] = 597.05577907
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.9-dimer' ] = 593.42192782
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.0-dimer' ] = 589.91942332
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.1-dimer' ] = 586.54174882
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.2-dimer' ] = 583.28275414
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.3-dimer' ] = 580.13663931
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.4-dimer' ] = 577.09793714
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.5-dimer' ] = 574.16149552
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.6-dimer' ] = 571.32245963
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-6.0-dimer' ] = 560.85272572
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-6.5-dimer' ] = 549.47925556
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-7.0-dimer' ] = 539.65622514
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-7.5-dimer' ] = 531.09189940
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-8.0-dimer' ] = 523.56205991
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.2-dimer' ] = 641.59153721
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.4-dimer' ] = 640.97218086
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.6-dimer' ] = 639.94808010
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.8-dimer' ] = 638.53114770
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.0-dimer' ] = 636.73745247
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.2-dimer' ] = 634.58670201
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.4-dimer' ] = 632.10168144
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.5-dimer' ] = 630.74164257
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.6-dimer' ] = 629.30768985
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.7-dimer' ] = 627.80329032
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.8-dimer' ] = 626.23200316
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.9-dimer' ] = 624.59746513
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.0-dimer' ] = 622.90337667
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.2-dimer' ] = 619.35158842
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.4-dimer' ] = 615.60701452
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.6-dimer' ] = 611.70022314
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.8-dimer' ] = 607.66157487
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-3.0-dimer' ] = 603.52082284
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.2-dimer' ] = 332.50866690
DATA['NUCLEAR REPULSION ENERGY']['NBC1-H2S-mono-unCP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.4-dimer' ] = 326.76493049
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.5-dimer' ] = 324.08312886
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.6-dimer' ] = 321.51823084
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.7-dimer' ] = 319.06348175
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.8-dimer' ] = 316.71257239
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.9-dimer' ] = 314.45961051
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.0-dimer' ] = 312.29909326
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.1-dimer' ] = 310.22588084
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.2-dimer' ] = 308.23517159
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.5-dimer' ] = 302.71463310
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.75-dimer' ] = 298.57449040
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.0-dimer' ] = 294.79763877
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.25-dimer' ] = 291.34045574
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.5-dimer' ] = 288.16568982
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-6.0-dimer' ] = 282.54011405
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-6.5-dimer' ] = 277.71464354
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-7.0-dimer' ] = 273.53417452
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-7.5-dimer' ] = 269.88029141
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.2-dimer' ] = 277.70122037
DATA['NUCLEAR REPULSION ENERGY']['NBC1-Bz2-mono-unCP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-Me-mono-unCP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.3-dimer' ] = 276.14505886
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.4-dimer' ] = 274.65657480
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.5-dimer' ] = 273.23211647
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.6-dimer' ] = 271.86820659
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.7-dimer' ] = 270.56154682
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.8-dimer' ] = 269.30901798
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.9-dimer' ] = 268.10767718
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.0-dimer' ] = 266.95475267
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.1-dimer' ] = 265.84763738
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.2-dimer' ] = 264.78388141
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.4-dimer' ] = 262.77738579
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.6-dimer' ] = 260.91850385
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.8-dimer' ] = 259.19247204
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.0-dimer' ] = 257.58628148
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.2-dimer' ] = 256.08845607
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.4-dimer' ] = 254.68885527
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.6-dimer' ] = 253.37850109
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-6.0-dimer' ] = 250.99455064
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.2-dimer' ] = 42.94051671
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.3-dimer' ] = 42.46449704
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.4-dimer' ] = 42.01471911
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.5-dimer' ] = 41.58914043
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.6-dimer' ] = 41.18591734
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.7-dimer' ] = 40.80338247
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.8-dimer' ] = 40.44002498
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.9-dimer' ] = 40.09447330
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.0-dimer' ] = 39.76547998
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.1-dimer' ] = 39.45190844
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.2-dimer' ] = 39.15272123
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.3-dimer' ] = 38.86696980
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.4-dimer' ] = 38.59378540
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.6-dimer' ] = 38.08199453
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.8-dimer' ] = 37.61171219
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-5.0-dimer' ] = 37.17815187
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-5.4-dimer' ] = 36.40542136
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-5.8-dimer' ] = 35.73746090
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.1-dimer' ] = 664.74968142
DATA['NUCLEAR REPULSION ENERGY']['NBC1-Py-mono-unCP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.3-dimer' ] = 653.28897360
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.4-dimer' ] = 647.90584891
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.5-dimer' ] = 642.73711461
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.6-dimer' ] = 637.77107423
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.7-dimer' ] = 632.99683541
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.8-dimer' ] = 628.40424073
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.9-dimer' ] = 623.98380628
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.0-dimer' ] = 619.72666684
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.1-dimer' ] = 615.62452662
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.2-dimer' ] = 611.66961499
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.3-dimer' ] = 607.85464633
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.4-dimer' ] = 604.17278378
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.5-dimer' ] = 600.61760611
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.7-dimer' ] = 593.86352067
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-5.0-dimer' ] = 584.54275675
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-5.5-dimer' ] = 570.86466240
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-6.0-dimer' ] = 559.10620798
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-6.5-dimer' ] = 548.90465922
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-7.0-dimer' ] = 539.98032943
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.1-dimer' ] = 631.74018099
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.3-dimer' ] = 622.28221702
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.5-dimer' ] = 613.57422251
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.6-dimer' ] = 609.47520868
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.7-dimer' ] = 605.53368830
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.8-dimer' ] = 601.74111111
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.9-dimer' ] = 598.08951503
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.0-dimer' ] = 594.57147649
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.1-dimer' ] = 591.18006603
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.2-dimer' ] = 587.90880856
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.3-dimer' ] = 584.75164753
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.4-dimer' ] = 581.70291245
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.5-dimer' ] = 578.75728949
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.7-dimer' ] = 573.15574951
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-6.0-dimer' ] = 565.41165299
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-6.5-dimer' ] = 554.01089095
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-7.0-dimer' ] = 544.16644693
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-8.0-dimer' ] = 528.04095562
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-9.0-dimer' ] = 515.40150653
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.2-dimer' ] = 652.35026383
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.4-dimer' ] = 651.65685475
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.6-dimer' ] = 650.51106101
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.8-dimer' ] = 648.92723975
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.0-dimer' ] = 646.92462020
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.2-dimer' ] = 644.52659143
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.4-dimer' ] = 641.75995892
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.5-dimer' ] = 640.24755050
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.6-dimer' ] = 638.65423207
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.7-dimer' ] = 636.98400901
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.8-dimer' ] = 635.24097954
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.9-dimer' ] = 633.42931896
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.0-dimer' ] = 631.55326486
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.2-dimer' ] = 627.62515488
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.4-dimer' ] = 623.49127864
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.6-dimer' ] = 619.18640729
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.8-dimer' ] = 614.74502815
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-3.0-dimer' ] = 610.20089775
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.2-dimer' ] = 631.66053374
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.4-dimer' ] = 631.10536715
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.6-dimer' ] = 630.18691177
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.8-dimer' ] = 628.91516711
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.0-dimer' ] = 627.30369102
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.2-dimer' ] = 625.36921338
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.4-dimer' ] = 623.13120361
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.5-dimer' ] = 621.90509666
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.6-dimer' ] = 620.61142042
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.7-dimer' ] = 619.25317914
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.8-dimer' ] = 617.83346514
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.9-dimer' ] = 616.35544587
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.0-dimer' ] = 614.82235130
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.2-dimer' ] = 611.60409513
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.4-dimer' ] = 608.20532569
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.6-dimer' ] = 604.65291019
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.8-dimer' ] = 600.97358989
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-3.0-dimer' ] = 597.19362514
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.2-dimer' ] = 652.58240326
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.3-dimer' ] = 647.08083072
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.3-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.4-dimer' ] = 641.79881504
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.5-dimer' ] = 636.72435401
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.6-dimer' ] = 631.84627841
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.7-dimer' ] = 627.15417831
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.7-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.8-dimer' ] = 622.63833806
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.9-dimer' ] = 618.28967853
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.9-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.0-dimer' ] = 614.09970566
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.1-dimer' ] = 610.06046424
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.1-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.2-dimer' ] = 606.16449631
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.5-dimer' ] = 595.26834684
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-5.0-dimer' ] = 579.39688238
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-5.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-5.5-dimer' ] = 565.87021271
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-5.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-6.0-dimer' ] = 554.22625379
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-6.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-6.5-dimer' ] = 544.11253672
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-6.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-10.0-dimer' ] = 499.16037479
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-10.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.4-dimer' ] = 613.04854518
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.4-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.5-dimer' ] = 608.81636557
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.5-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.6-dimer' ] = 604.74550671
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.6-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.7-dimer' ] = 600.82787505
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.7-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.7-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.8-dimer' ] = 597.05577907
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.8-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.9-dimer' ] = 593.42192782
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.9-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.9-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.0-dimer' ] = 589.91942332
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.0-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.1-dimer' ] = 586.54174882
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.1-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.1-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.2-dimer' ] = 583.28275414
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.2-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.3-dimer' ] = 580.13663931
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.3-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.3-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.4-dimer' ] = 577.09793714
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.4-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.5-dimer' ] = 574.16149552
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.5-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.6-dimer' ] = 571.32245963
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.6-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-6.0-dimer' ] = 560.85272572
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-6.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-6.0-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-6.5-dimer' ] = 549.47925556
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-6.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-6.5-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-7.0-dimer' ] = 539.65622514
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-7.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-7.0-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-7.5-dimer' ] = 531.09189940
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-7.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-7.5-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-8.0-dimer' ] = 523.56205991
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-8.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-8.0-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.2-dimer' ] = 641.59153721
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.4-dimer' ] = 640.97218086
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.6-dimer' ] = 639.94808010
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.8-dimer' ] = 638.53114770
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.0-dimer' ] = 636.73745247
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.2-dimer' ] = 634.58670201
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.4-dimer' ] = 632.10168144
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.5-dimer' ] = 630.74164257
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.6-dimer' ] = 629.30768985
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.7-dimer' ] = 627.80329032
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.7-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.8-dimer' ] = 626.23200316
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.9-dimer' ] = 624.59746513
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.9-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.0-dimer' ] = 622.90337667
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.2-dimer' ] = 619.35158842
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.4-dimer' ] = 615.60701452
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.6-dimer' ] = 611.70022314
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.8-dimer' ] = 607.66157487
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-3.0-dimer' ] = 603.52082284
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-3.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.2-dimer' ] = 332.50866690
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.2-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.4-dimer' ] = 326.76493049
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.4-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.5-dimer' ] = 324.08312886
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.5-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.6-dimer' ] = 321.51823084
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.6-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.7-dimer' ] = 319.06348175
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.7-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.7-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.8-dimer' ] = 316.71257239
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.8-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.9-dimer' ] = 314.45961051
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.9-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.9-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.0-dimer' ] = 312.29909326
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.0-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.1-dimer' ] = 310.22588084
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.1-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.1-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.2-dimer' ] = 308.23517159
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.2-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.5-dimer' ] = 302.71463310
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.5-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.75-dimer' ] = 298.57449040
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.75-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.75-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.0-dimer' ] = 294.79763877
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.0-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.25-dimer' ] = 291.34045574
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.25-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.25-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.5-dimer' ] = 288.16568982
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.5-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-6.0-dimer' ] = 282.54011405
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-6.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-6.0-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-6.5-dimer' ] = 277.71464354
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-6.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-6.5-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-7.0-dimer' ] = 273.53417452
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-7.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-7.0-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-7.5-dimer' ] = 269.88029141
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-7.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-7.5-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.2-dimer' ] = 277.70122037
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.2-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.2-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.3-dimer' ] = 276.14505886
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.3-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.3-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.4-dimer' ] = 274.65657480
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.4-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.4-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.5-dimer' ] = 273.23211647
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.5-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.5-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.6-dimer' ] = 271.86820659
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.6-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.6-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.7-dimer' ] = 270.56154682
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.7-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.7-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.8-dimer' ] = 269.30901798
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.8-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.8-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.9-dimer' ] = 268.10767718
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.9-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.9-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.0-dimer' ] = 266.95475267
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.0-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.0-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.1-dimer' ] = 265.84763738
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.1-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.1-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.2-dimer' ] = 264.78388141
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.2-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.2-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.4-dimer' ] = 262.77738579
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.4-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.4-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.6-dimer' ] = 260.91850385
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.6-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.6-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.8-dimer' ] = 259.19247204
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.8-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.8-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.0-dimer' ] = 257.58628148
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.0-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.0-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.2-dimer' ] = 256.08845607
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.2-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.2-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.4-dimer' ] = 254.68885527
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.4-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.4-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.6-dimer' ] = 253.37850109
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.6-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.6-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-6.0-dimer' ] = 250.99455064
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-6.0-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-6.0-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.2-dimer' ] = 42.94051671
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.2-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.3-dimer' ] = 42.46449704
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.3-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.4-dimer' ] = 42.01471911
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.4-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.5-dimer' ] = 41.58914043
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.5-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.6-dimer' ] = 41.18591734
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.6-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.7-dimer' ] = 40.80338247
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.7-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.8-dimer' ] = 40.44002498
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.8-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.9-dimer' ] = 40.09447330
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.9-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.0-dimer' ] = 39.76547998
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.0-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.1-dimer' ] = 39.45190844
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.1-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.2-dimer' ] = 39.15272123
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.2-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.3-dimer' ] = 38.86696980
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.3-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.4-dimer' ] = 38.59378540
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.4-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.6-dimer' ] = 38.08199453
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.6-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.8-dimer' ] = 37.61171219
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.8-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-5.0-dimer' ] = 37.17815187
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-5.0-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-5.4-dimer' ] = 36.40542136
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-5.4-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-5.8-dimer' ] = 35.73746090
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-5.8-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.1-dimer' ] = 664.74968142
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.1-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.3-dimer' ] = 653.28897360
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.3-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.4-dimer' ] = 647.90584891
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.4-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.5-dimer' ] = 642.73711461
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.5-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.6-dimer' ] = 637.77107423
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.6-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.7-dimer' ] = 632.99683541
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.7-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.8-dimer' ] = 628.40424073
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.8-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.9-dimer' ] = 623.98380628
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.9-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.0-dimer' ] = 619.72666684
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.0-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.1-dimer' ] = 615.62452662
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.1-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.2-dimer' ] = 611.66961499
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.2-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.3-dimer' ] = 607.85464633
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.3-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.4-dimer' ] = 604.17278378
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.4-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.5-dimer' ] = 600.61760611
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.5-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.7-dimer' ] = 593.86352067
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.7-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-5.0-dimer' ] = 584.54275675
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-5.0-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-5.5-dimer' ] = 570.86466240
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-5.5-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-6.0-dimer' ] = 559.10620798
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-6.0-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-6.5-dimer' ] = 548.90465922
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-6.5-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-7.0-dimer' ] = 539.98032943
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-7.0-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.1-dimer' ] = 631.74018099
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.1-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.1-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.3-dimer' ] = 622.28221702
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.3-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.3-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.5-dimer' ] = 613.57422251
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.5-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.5-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.6-dimer' ] = 609.47520868
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.6-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.6-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.7-dimer' ] = 605.53368830
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.7-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.7-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.8-dimer' ] = 601.74111111
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.8-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.8-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.9-dimer' ] = 598.08951503
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.9-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.9-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.0-dimer' ] = 594.57147649
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.0-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.0-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.1-dimer' ] = 591.18006603
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.1-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.1-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.2-dimer' ] = 587.90880856
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.2-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.2-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.3-dimer' ] = 584.75164753
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.3-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.3-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.4-dimer' ] = 581.70291245
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.4-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.4-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.5-dimer' ] = 578.75728949
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.5-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.5-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.7-dimer' ] = 573.15574951
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.7-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.7-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-6.0-dimer' ] = 565.41165299
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-6.0-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-6.0-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-6.5-dimer' ] = 554.01089095
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-6.5-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-6.5-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-7.0-dimer' ] = 544.16644693
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-7.0-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-7.0-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-8.0-dimer' ] = 528.04095562
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-8.0-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-8.0-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-9.0-dimer' ] = 515.40150653
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-9.0-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-9.0-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.2-dimer' ] = 652.35026383
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.4-dimer' ] = 651.65685475
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.6-dimer' ] = 650.51106101
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.8-dimer' ] = 648.92723975
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.0-dimer' ] = 646.92462020
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.2-dimer' ] = 644.52659143
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.4-dimer' ] = 641.75995892
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.5-dimer' ] = 640.24755050
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.6-dimer' ] = 638.65423207
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.7-dimer' ] = 636.98400901
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.7-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.8-dimer' ] = 635.24097954
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.9-dimer' ] = 633.42931896
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.9-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.0-dimer' ] = 631.55326486
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.2-dimer' ] = 627.62515488
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.4-dimer' ] = 623.49127864
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.6-dimer' ] = 619.18640729
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.8-dimer' ] = 614.74502815
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-3.0-dimer' ] = 610.20089775
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-3.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.2-dimer' ] = 631.66053374
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.4-dimer' ] = 631.10536715
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.6-dimer' ] = 630.18691177
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.8-dimer' ] = 628.91516711
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.0-dimer' ] = 627.30369102
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.2-dimer' ] = 625.36921338
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.4-dimer' ] = 623.13120361
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.5-dimer' ] = 621.90509666
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.6-dimer' ] = 620.61142042
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.7-dimer' ] = 619.25317914
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.7-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.8-dimer' ] = 617.83346514
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.9-dimer' ] = 616.35544587
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.9-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.0-dimer' ] = 614.82235130
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.2-dimer' ] = 611.60409513
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.4-dimer' ] = 608.20532569
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.6-dimer' ] = 604.65291019
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.8-dimer' ] = 600.97358989
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-3.0-dimer' ] = 597.19362514
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-3.0-monoA-CP' ] = 204.01997321
|
lothian/psi4
|
psi4/share/psi4/databases/NBC10.py
|
Python
|
lgpl-3.0
| 98,454
|
[
"Psi4"
] |
e7d468491ad20d5cc61358389835abd419423141c6bf4301cbef1450bb4a9751
|
import logging
import glob
from opendrift.readers.reader_netCDF_CF_generic import Reader
def reader_from_url(url, timeout=10):
'''Make readers from URLs or paths to datasets'''
if isinstance(url, list):
return [reader_from_url(u) for u in url]
files = glob.glob(url)
for f in files: # Regular file
try:
r = Reader(f)
return r
except Exception:
logging.warning('%s is not a netCDF CF file recognised by '
'OpenDrift' % f)
try:
from opendrift.readers.reader_ROMS_native import Reader as Reader_ROMS_native
r = Reader_ROMS_native(f)
return r
except Exception:
logging.warning('%s is also not a ROMS netCDF file recognised by '
'OpenDrift' % f)
try:
from opendrift.readers.reader_grib import Reader as Reader_grib
r = Reader_grib(f)
return r
except Exception:
logging.warning('%s is also not a GRIB file recognised by '
'OpenDrift' % f)
if files == []: # Try with OPeNDAP URL
try: # Check URL accessibility/timeout
try:
# for python 3
import urllib.request as urllib_request
except ImportError:
# for python 2
import urllib2 as urllib_request
request = urllib_request.Request(url)
try: # netrc
import netrc
import base64
try:
from urllib.parse import urlparse # python 3
except ImportError:
from urlparse import urlparse # python 2
parts = urlparse(url)
login, account, password = netrc.netrc().authenticators(parts.netloc)
creds = base64.encodestring('%s:%s' % (login, password)).strip()
request.add_header("Authorization", "Basic %s" % creds)
logging.debug('Applied NETRC credentials')
except Exception:
logging.debug('Could not apply NETRC credentials')
urllib_request.urlopen(request, timeout=timeout)
except Exception as e:
# Error code 400 is expected!
if not isinstance(e, urllib_request.HTTPError) or e.code != 400:
logging.warning('URL %s not accessible: ' % url + str(e))
return None
try:
r = Reader(url)
return r
except Exception as e:
logging.warning('%s is not a netCDF file recognised '
'by OpenDrift: %s' % (url, str(e)))
try:
from opendrift.readers.reader_ROMS_native import Reader as Reader_ROMS_native
r = Reader_ROMS_native(url)
return r
except Exception as e:
logging.warning('%s is also not a ROMS netCDF file recognised by '
'OpenDrift: %s' % (url, str(e)))
return None
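# Illustrative usage sketch (added by the editor; not part of the original
# module). It exercises the fallback chain above: netCDF-CF, then ROMS
# native, then GRIB for local files, or the OPeNDAP branch when the glob
# matches nothing. The path below is a hypothetical placeholder.
def _example_reader_from_url():
    reader = reader_from_url('/data/roms/ocean_his_*.nc')
    if reader is None:
        logging.warning('no reader could be constructed')
    return reader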
|
knutfrode/opendrift
|
opendrift/readers/__init__.py
|
Python
|
gpl-2.0
| 3,051
|
[
"NetCDF"
] |
9a753edcc58178ef10b38fcfac740f3643702413f3c87abfa64938dd187509e1
|
#!/usr/bin/env python
import theano
import theano.tensor as T
import numpy
import numpy as np
import pickle as pkl
from collections import OrderedDict
import cPickle as pickle
from theano import config
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
floatX = config.floatX
def shared_to_cpu(shared_params, params):
for k, v in shared_params.iteritems():
params[k] = v.get_value()
def cpu_to_shared(params, shared_params):
for k, v in params.iteritems():
shared_params[k].set_value(v)
def save_model(filename, options, params, shared_params=None):
if shared_params is not None:
shared_to_cpu(shared_params, params)
model = OrderedDict()
model['options'] = options
model['params'] = params
pickle.dump(model, open(filename, 'wb'))
def load_model(filename):
model = pickle.load(open(filename, 'rb'))
options = model['options']
params = model['params']
shared_params = init_shared_params(params)
return options, params, shared_params
# return options, params, shared_params
def ortho_weight(ndim):
"""
Random orthogonal weights, we take
the right matrix in the SVD.
Remember in SVD, u has the same # rows as W
and v has the same # of cols as W. So we
are ensuring that the rows are
orthogonal.
"""
W = numpy.random.randn(ndim, ndim)
u, _, _ = numpy.linalg.svd(W)
return u.astype('float32')
def init_weight(n, d, options, activation='tanh'):
''' initialize weight matrix
options['init_type'] determines
gaussian or uniform initialization
'''
if options['init_type'] == 'gaussian' or activation == 'relu':
return (numpy.random.randn(n, d).astype(floatX)) * options['std']
elif options['init_type'] == 'uniform':
# [-range, range]
return ((numpy.random.rand(n, d) * 2 - 1) * \
options['range']).astype(floatX)
elif options['init_type'] == 'glorot uniform':
low = -1.0 * np.sqrt(6.0/(n + d))
high = 1.0 * np.sqrt(6.0/(n + d))
if activation == 'sigmoid':
low = low * 4.0
high = high * 4.0
return numpy.random.uniform(low,high,(n,d)).astype(floatX)
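# Minimal sketch (illustrative; the 'options' keys follow the conventions
# used above): Glorot-uniform bounds shrink as fan-in plus fan-out grows,
# e.g. n = d = 512 gives sqrt(6/1024) ~= 0.0765.
def _example_init_weight():
    options = {'init_type': 'glorot uniform'}
    W = init_weight(512, 512, options, activation='tanh')
    assert W.shape == (512, 512)
    assert abs(W).max() <= np.sqrt(6.0 / (512 + 512))
    return W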
layers = {'ff': ('init_fflayer', 'fflayer'),
'lstm': ('init_lstm_layer', 'lstm_layer'),
'lstm_append': (None, 'lstm_append_layer')}
def get_layer(name):
fns = layers[name]
return (eval(fns[0]), eval(fns[1]))
# initialize the parameters
def init_params(options):
''' Initialize all the parameters
'''
params = OrderedDict()
n_words = options['n_words']
n_emb = options['n_emb']
n_dim = options['n_dim']
n_image_feat = options['n_image_feat']
n_common_feat = options['n_common_feat']
n_output = options['n_output']
n_attention = options['n_attention']
# embedding weights
# params['w_emb'] = init_weight(n_words, n_emb, options)
## use the same initialization as BOW
# params['w_emb'] = ((numpy.random.rand(n_words, n_emb) * 2 - 1) * 0.5).astype(floatX)
embedding_matrix = pkl.load(open(options['embedding_file'], 'r'))[1:].astype(floatX)
params['w_emb'] = embedding_matrix
params = init_fflayer(params, n_image_feat, n_dim, options,
prefix='image_mlp')
params = init_fflayer(params, n_dim, n_dim, options,
prefix='F_func', activation='relu')
params = init_fflayer(params, 2*n_dim, 2*n_dim, options,
prefix='G_func', activation='relu')
params = init_fflayer(params, 4*n_dim, n_output, options,
prefix='scale_to_softmax')
return params
def init_shared_params(params):
''' return a shared version of all parameters
'''
shared_params = OrderedDict()
for k, p in params.iteritems():
shared_params[k] = theano.shared(params[k], name = k)
return shared_params
# activation function for ff layer
def tanh(x):
return T.tanh(x)
def relu(x):
return T.maximum(x, np.float32(0.))
def linear(x):
return x
def init_fflayer(params, nin, nout, options, prefix='ff', activation='tanh'):
''' initialize ff layer
'''
params[prefix + '_w'] = init_weight(nin, nout, options,activation)
params[prefix + '_b'] = np.zeros(nout, dtype='float32')
if activation == 'relu':
params[prefix + '_b'] = np.ones(nout, dtype='float32')
return params
def fflayer(shared_params, x, options, prefix='ff', act_func='tanh'):
''' fflayer: multiply weight then add bias
'''
return eval(act_func)(T.dot(x, shared_params[prefix + '_w']) +
shared_params[prefix + '_b'])
def dropout_layer(x, dropout, trng, drop_ratio=0.5):
''' dropout layer
'''
x_drop = T.switch(dropout,
(x * trng.binomial(x.shape,
p = 1 - drop_ratio,
n = 1,
dtype = x.dtype) \
/ (numpy.float32(1.0) - drop_ratio)),
x)
return x_drop
def batchedSoftmax(x, axis=1):
if axis == 1:
x = x.dimshuffle((0,2,1))
init_shape = x.shape
x = x.reshape((init_shape[0]*init_shape[1], init_shape[2]))
x = T.nnet.softmax(x)
x = x.reshape(init_shape)
if axis == 1:
x = x.dimshuffle((0,2,1))
return x
def build_model(shared_params, options):
trng = RandomStreams(1234)
drop_ratio = options['drop_ratio']
batch_size = options['batch_size']
n_dim = options['n_dim']
w_emb = shared_params['w_emb']
dropout = theano.shared(numpy.float32(0.))
image_feat = T.ftensor3('image_feat')
# T x batch_size
input_idx = T.imatrix('input_idx')
input_mask = T.matrix('input_mask')
# label is the TRUE label
label = T.ivector('label')
empty_word = theano.shared(value=np.zeros((1, options['n_emb']),
dtype='float32'),
name='empty_word')
w_emb_extend = T.concatenate([empty_word, shared_params['w_emb']],
axis=0)
a = w_emb_extend[input_idx] # T x bt_sz x n_dim
b = fflayer(shared_params, image_feat, options,
prefix='image_mlp',
act_func=options.get('image_mlp_act',
'tanh')) # bt_sz x num_regions x n_dim
Fb = fflayer(shared_params, b, options,
prefix='F_func',
act_func='relu') # bt x num_region x n_dim
Fa = fflayer(shared_params, a, options,
prefix='F_func',
act_func='relu') # T x bt_sz x n_dim
e = T.batched_dot(Fa.dimshuffle((1, 0, 2)), Fb.dimshuffle((0, 2, 1))) # bt x T x num_regions
alpha = batchedSoftmax(e,1) # bt x T x num_regions
beta = batchedSoftmax(e,2) # bt x T x num_regions
alpha = T.batched_dot(alpha.dimshuffle((0,2,1)), a.dimshuffle((1,0,2))) # bt x num_regions x ndim
beta = T.batched_dot(beta, b) # bt x T x ndim
a_beta = T.concatenate([a.dimshuffle((1,0,2)), beta], axis=2) # bt x T x 2*ndim
b_alpha = T.concatenate([b, alpha], axis=2) # bt x num_regions x 2*ndim
G_a_beta = fflayer(shared_params, a_beta, options,
prefix='G_func',
act_func='tanh') # bt x T x 2*ndim
G_b_alpha = fflayer(shared_params, b_alpha, options,
prefix='G_func',
act_func='tanh') # bt x num_regions x 2*ndim
V1 = T.sum(G_a_beta, axis=1) # bt x 2*ndim
V2 = T.sum(G_b_alpha, axis=1) # bt x 2*ndim
h_star = T.concatenate([V1, V2], axis=1) # bt x 4*dim
## Final Dense
combined_hidden = fflayer(shared_params, h_star, options,
prefix='scale_to_softmax',
act_func='tanh')
# drop the image output
prob = T.nnet.softmax(combined_hidden)
prob_y = prob[T.arange(prob.shape[0]), label]
pred_label = T.argmax(prob, axis=1)
confidence = T.max(prob, axis=1)
# sum or mean?
cost = -T.mean(T.log(prob_y))
accu = T.mean(T.eq(pred_label, label))
return image_feat, input_idx, input_mask, \
label, dropout, cost, accu, alpha, pred_label, confidence
# return image_feat, input_idx, input_mask, \
# label, dropout, cost, accu, pred_label
# h_encode, c_encode, h_decode, c_decode
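# Hedged usage sketch (illustrative; not in the original file): saving
# copies shared parameters back to plain numpy arrays first, and loading
# re-wraps them in theano.shared. 'model.pkl' is a placeholder filename.
def _example_save_load_roundtrip():
    options = {'init_type': 'gaussian', 'std': np.float32(0.01)}
    params = OrderedDict(w=init_weight(4, 4, options))
    shared_params = init_shared_params(params)
    save_model('model.pkl', options, params, shared_params)
    return load_model('model.pkl')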
|
codedecde/ImageQA
|
Src/TheanoModel/Code/simple_af_theano.py
|
Python
|
mit
| 8,603
|
[
"Gaussian"
] |
28ad9bb1d33debbd6134c27613525115635f9bfeb1a8df6c2aa484f95cca4c01
|
# WCS response decoder.
# Decodes response from a WCS (either a Coverages XML document or a Multipart MIME)
# and extracts the urls of the coverage data.
# Copyright (c) 2007 STFC <http://www.stfc.ac.uk>
# Author: Dominic Lowe, STFC
# contact email: d.lowe@rl.ac.uk
#
# Multipart MIME decoding based on http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/86676
# example: used in conjunction with ows lib wcs:
# from owslib import wcsdecoder
# u=wcs.getcoverage(identifier=['TuMYrRQ4'], timeSequence=['2792-06-01T00:00:00.0'], bbox=(-112,36,-106,41),
# format='application/netcdf', store='true')
# decoder=wcsdecoder.WCSDecoder(u)
# decoder.getCoverages()
import os
from owslib.etree import etree
import email
import errno
import mimetypes
class WCSDecoder(object):
def __init__(self, u):
''' Initialize with a urllib url object.'''
self.u = u
self._getType()
def _getType(self):
''' determine whether it is a Multipart Mime or a Coverages XML file'''
# what's the best way to test this?
# for now read start of file
tempu = self.u
if tempu.readline()[:14] == '<?xml version=':
self.urlType = 'XML'
else:
self.urlType = 'Multipart'
def getCoverages(self, unpackdir='./unpacked'):
if self.urlType == 'XML':
paths = []
u_xml = self.u.read()
u_tree = etree.fromstring(u_xml)
for ref in u_tree.findall(
'{http://www.opengis.net/wcs/1.1}Coverage/{http://www.opengis.net/wcs/1.1}Reference'):
path = ref.attrib['{http://www.w3.org/1999/xlink}href']
paths.append(path)
for ref in u_tree.findall(
'{http://www.opengis.net/wcs/1.1.0/owcs}Coverage/{http://www.opengis.net/wcs/1.1.0/owcs}Reference'): # noqa
path = ref.attrib['{http://www.w3.org/1999/xlink}href']
paths.append(path)
elif self.urlType == 'Multipart':
# Decode multipart mime and return fileobjects
u_mpart = self.u.read()
mpart = MpartMime(u_mpart)
paths = mpart.unpackToDir(unpackdir)
return paths
class MpartMime(object):
def __init__(self, mpartmime):
""" mpartmime is a multipart mime file that has already been read in."""
self.mpartmime = mpartmime
def unpackToDir(self, unpackdir):
""" unpacks contents of Multipart mime to a given directory"""
names = []
# create the directory if it doesn't exist:
try:
os.mkdir(unpackdir)
except OSError as e:
# Ignore directory exists error
if e.errno != errno.EEXIST:
raise
# now walk through the multipart mime and write out files
msg = email.message_from_string(self.mpartmime)
counter = 1
for part in msg.walk():
# multipart/* are just containers, ignore
if part.get_content_maintype() == 'multipart':
continue
# Applications should really check the given filename so that an
# email message can't be used to overwrite important files
filename = part.get_filename()
if not filename:
try:
ext = mimetypes.guess_extension(part.get_content_type())
except Exception:
ext = None
if not ext:
# Use a generic extension
ext = '.bin'
filename = 'part-%03d%s' % (counter, ext)
fullpath = os.path.join(unpackdir, filename)
names.append(fullpath)
fp = open(fullpath, 'wb')
fp.write(part.get_payload(decode=True))
fp.close()
return names
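# Illustrative sketch (added by the editor; not part of OWSLib). It mirrors
# the usage example in the header comments above; 'wcs_response' stands in
# for the file-like object returned by wcs.getcoverage().
def _example_decode(wcs_response):
    decoder = WCSDecoder(wcs_response)
    # XML responses yield xlink:href paths; multipart MIME responses are
    # unpacked to disk and the resulting file paths returned.
    return decoder.getCoverages(unpackdir='./unpacked')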
|
kalxas/OWSLib
|
owslib/coverage/wcsdecoder.py
|
Python
|
bsd-3-clause
| 3,848
|
[
"NetCDF"
] |
63adcaf487fa05bb0682c1c68d68edf0f47e1a4f1f3e83130be3edc6f9b69727
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import numpy as np
from fractions import Fraction
from math import gcd
from itertools import groupby, product
from string import ascii_lowercase
from warnings import warn
import logging
import math
import warnings
from monty.fractions import lcm
from monty.json import MSONable
from pymatgen.core.periodic_table import Element, Specie, get_el_sp, DummySpecie
from pymatgen.transformations.transformation_abc import AbstractTransformation
from pymatgen.transformations.standard_transformations import \
SubstitutionTransformation, OrderDisorderedStructureTransformation
from pymatgen.command_line.enumlib_caller import EnumlibAdaptor, EnumError
from pymatgen.analysis.ewald import EwaldSummation
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.structure_prediction.substitution_probability import \
SubstitutionPredictor
from pymatgen.analysis.structure_matcher import StructureMatcher, \
SpinComparator
from pymatgen.analysis.energy_models import SymmetryModel
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.core.surface import SlabGenerator
from pymatgen.electronic_structure.core import Spin
from pymatgen.analysis.gb.grain import GrainBoundaryGenerator
"""
This module implements more advanced transformations.
"""
__author__ = "Shyue Ping Ong, Stephen Dacek, Anubhav Jain, Matthew Horton"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jul 24, 2012"
logger = logging.getLogger(__name__)
class ChargeBalanceTransformation(AbstractTransformation):
"""
This is a transformation that disorders a structure to make it charge
balanced, given an oxidation state-decorated structure.
Args:
charge_balance_sp: specie to add or remove. Currently only removal
is supported
"""
def __init__(self, charge_balance_sp):
self.charge_balance_sp = str(charge_balance_sp)
def apply_transformation(self, structure):
charge = structure.charge
specie = get_el_sp(self.charge_balance_sp)
num_to_remove = charge / specie.oxi_state
num_in_structure = structure.composition[specie]
removal_fraction = num_to_remove / num_in_structure
if removal_fraction < 0:
raise ValueError("addition of specie not yet supported by "
"ChargeBalanceTransformation")
trans = SubstitutionTransformation(
{self.charge_balance_sp: {
self.charge_balance_sp: 1 - removal_fraction}})
return trans.apply_transformation(structure)
def __str__(self):
return "Charge Balance Transformation : " + \
"Species to remove = {}".format(str(self.charge_balance_sp))
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return False
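# Worked sketch of the arithmetic above (illustrative; not part of the
# original module): a structure with net charge +2, balanced by removing
# Li+ (oxi_state +1) from 8 Li sites, keeps a 0.75 Li occupancy per site.
def _example_charge_balance_fraction(charge=2.0, oxi_state=1.0, n_in_structure=8):
    num_to_remove = charge / oxi_state
    removal_fraction = num_to_remove / n_in_structure
    return 1 - removal_fraction  # 0.75 retained occupancy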
class SuperTransformation(AbstractTransformation):
"""
This is a transformation that is inherently one-to-many. It is constructed
from a list of transformations and returns one structure for each
transformation. The primary use for this class is extending a transmuter
object.
Args:
transformations ([transformations]): List of transformations to apply
to a structure. One transformation is applied to each output
structure.
nstructures_per_trans (int): If the transformations are one-to-many,
nstructures_per_trans structures from each transformation are
added to the full list. Defaults to 1, i.e., only the best structure.
"""
def __init__(self, transformations, nstructures_per_trans=1):
self._transformations = transformations
self.nstructures_per_trans = nstructures_per_trans
def apply_transformation(self, structure, return_ranked_list=False):
if not return_ranked_list:
raise ValueError("SuperTransformation has no single best structure"
" output. Must use return_ranked_list")
structures = []
for t in self._transformations:
if t.is_one_to_many:
for d in t.apply_transformation(
structure,
return_ranked_list=self.nstructures_per_trans):
d["transformation"] = t
structures.append(d)
else:
structures.append(
{"transformation": t,
"structure": t.apply_transformation(structure)})
return structures
def __str__(self):
return "Super Transformation : Transformations = " + \
"{}".format(" ".join([str(t) for t in self._transformations]))
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
class MultipleSubstitutionTransformation:
"""
Performs multiple substitutions on a structure. For example, can do a
fractional replacement of Ge in LiGePS with a list of species, creating one
structure for each substitution. Ordering is done using a dummy element so
only one ordering must be done per substitution oxidation state. Charge
balancing of the structure is optionally performed.
.. note::
There are no checks to make sure that removal fractions are possible
and rounding may occur. Currently charge balancing only works for
removal of species.
"""
def __init__(self, sp_to_replace, r_fraction, substitution_dict,
charge_balance_species=None, order=True):
"""
Performs multiple fractional substitutions on a transmuter.
Args:
sp_to_replace: species to be replaced
r_fraction: fraction of that specie to replace
substitution_dict: dictionary of the format
{2: ["Mg", "Ti", "V", "As", "Cr", "Ta", "N", "Nb"],
3: ["Ru", "Fe", "Co", "Ce", "As", "Cr", "Ta", "N", "Nb"],
4: ["Ru", "V", "Cr", "Ta", "N", "Nb"],
5: ["Ru", "W", "Mn"]
}
The number is the charge used for each of the list of elements
(an element can be present in multiple lists)
charge_balance_species: If specified, will balance the charge on
the structure using that specie.
"""
self.sp_to_replace = sp_to_replace
self.r_fraction = r_fraction
self.substitution_dict = substitution_dict
self.charge_balance_species = charge_balance_species
self.order = order
def apply_transformation(self, structure, return_ranked_list=False):
if not return_ranked_list:
raise ValueError("MultipleSubstitutionTransformation has no single"
" best structure output. Must use"
" return_ranked_list.")
outputs = []
for charge, el_list in self.substitution_dict.items():
mapping = {}
if charge > 0:
sign = "+"
else:
sign = "-"
dummy_sp = "X{}{}".format(str(charge), sign)
mapping[self.sp_to_replace] = {
self.sp_to_replace: 1 - self.r_fraction,
dummy_sp: self.r_fraction}
trans = SubstitutionTransformation(mapping)
dummy_structure = trans.apply_transformation(structure)
if self.charge_balance_species is not None:
cbt = ChargeBalanceTransformation(self.charge_balance_species)
dummy_structure = cbt.apply_transformation(dummy_structure)
if self.order:
trans = OrderDisorderedStructureTransformation()
dummy_structure = trans.apply_transformation(dummy_structure)
for el in el_list:
if charge > 0:
sign = "+"
else:
sign = "-"
st = SubstitutionTransformation(
{"X{}+".format(str(charge)): "{}{}{}".format(el, charge,
sign)})
new_structure = st.apply_transformation(dummy_structure)
outputs.append({"structure": new_structure})
return outputs
def __str__(self):
return "Multiple Substitution Transformation : Substitution on " + \
"{}".format(self.sp_to_replace)
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
class EnumerateStructureTransformation(AbstractTransformation):
"""
Order a disordered structure using enumlib. For complete orderings, this
generally produces fewer structures than the OrderDisorderedStructure
transformation, and at much greater speed.
Args:
min_cell_size:
The minimum cell size wanted. Must be an int. Defaults to 1.
max_cell_size:
The maximum cell size wanted. Must be an int. Defaults to 1.
symm_prec:
Tolerance to use for symmetry.
refine_structure:
This parameter has the same meaning as in enumlib_caller.
If you are starting from a structure that has been relaxed via
some electronic structure code, it is usually much better to
start with symmetry determination and then obtain a refined
structure. The refined structure has cell parameters and
atomic positions shifted to the expected symmetry positions,
which makes it much less sensitive to precision issues in enumlib.
If you are already starting from an experimental cif, refinement
should have already been done and it is not necessary. Defaults
to False.
enum_precision_parameter (float): Finite precision parameter for
enumlib. Default of 0.001 is usually ok, but you might need to
tweak it for certain cells.
check_ordered_symmetry (bool): Whether to check the symmetry of
the ordered sites. If the symmetry of the ordered sites is
lower, the lowest symmetry ordered sites is included in the
enumeration. This is important if the ordered sites break
symmetry in a way that is important for getting possible
structures. But sometimes including ordered sites
slows down enumeration to the point that it cannot be
completed. Switch to False in those cases. Defaults to True.
max_disordered_sites (int):
An alternate parameter to max_cell size. Will sequentially try
larger and larger cell sizes until (i) getting a result or (ii)
the number of disordered sites in the cell exceeds
max_disordered_sites. Must set max_cell_size to None when using
this parameter.
sort_criteria (str): Sort by Ewald energy ("ewald", must have oxidation
states and slow) or by number of sites ("nsites", much faster).
timeout (float): timeout in minutes to pass to EnumlibAdaptor
"""
def __init__(self, min_cell_size=1, max_cell_size=1, symm_prec=0.1,
refine_structure=False, enum_precision_parameter=0.001,
check_ordered_symmetry=True, max_disordered_sites=None,
sort_criteria="ewald", timeout=None):
self.symm_prec = symm_prec
self.min_cell_size = min_cell_size
self.max_cell_size = max_cell_size
self.refine_structure = refine_structure
self.enum_precision_parameter = enum_precision_parameter
self.check_ordered_symmetry = check_ordered_symmetry
self.max_disordered_sites = max_disordered_sites
self.sort_criteria = sort_criteria
self.timeout = timeout
if max_cell_size and max_disordered_sites:
raise ValueError("Cannot set both max_cell_size and "
"max_disordered_sites!")
def apply_transformation(self, structure, return_ranked_list=False):
"""
Return either a single ordered structure or a sequence of all ordered
structures.
Args:
structure: Structure to order.
return_ranked_list (bool): Whether or not multiple structures are
returned. If return_ranked_list is a number, that number of
structures is returned.
Returns:
Depending on returned_ranked list, either a transformed structure
or a list of dictionaries, where each dictionary is of the form
{"structure" = .... , "other_arguments"}
The list of ordered structures is ranked by ewald energy / atom, if
the input structure is an oxidation state decorated structure.
Otherwise, it is ranked by number of sites, with smallest number of
sites first.
"""
try:
num_to_return = int(return_ranked_list)
except ValueError:
num_to_return = 1
if self.refine_structure:
finder = SpacegroupAnalyzer(structure, self.symm_prec)
structure = finder.get_refined_structure()
contains_oxidation_state = all(
[hasattr(sp, "oxi_state") and sp.oxi_state != 0 for sp in
structure.composition.elements]
)
structures = None
if structure.is_ordered:
warn("Enumeration skipped for structure with composition {} "
"because it is ordered".format(structure.composition))
structures = [structure.copy()]
if self.max_disordered_sites:
ndisordered = sum([1 for site in structure if not site.is_ordered])
if ndisordered > self.max_disordered_sites:
raise ValueError(
"Too many disordered sites! ({} > {})".format(
ndisordered, self.max_disordered_sites))
max_cell_sizes = range(self.min_cell_size, int(
math.floor(self.max_disordered_sites / ndisordered)) + 1)
else:
max_cell_sizes = [self.max_cell_size]
for max_cell_size in max_cell_sizes:
adaptor = EnumlibAdaptor(
structure, min_cell_size=self.min_cell_size,
max_cell_size=max_cell_size,
symm_prec=self.symm_prec, refine_structure=False,
enum_precision_parameter=self.enum_precision_parameter,
check_ordered_symmetry=self.check_ordered_symmetry,
timeout=self.timeout)
try:
adaptor.run()
except EnumError:
warn("Unable to enumerate for max_cell_size = %d".format(
max_cell_size))
structures = adaptor.structures
if structures:
break
if structures is None:
raise ValueError("Unable to enumerate")
original_latt = structure.lattice
inv_latt = np.linalg.inv(original_latt.matrix)
ewald_matrices = {}
all_structures = []
for s in structures:
new_latt = s.lattice
transformation = np.dot(new_latt.matrix, inv_latt)
transformation = tuple([tuple([int(round(cell)) for cell in row])
for row in transformation])
if contains_oxidation_state and self.sort_criteria == "ewald":
if transformation not in ewald_matrices:
s_supercell = structure * transformation
ewald = EwaldSummation(s_supercell)
ewald_matrices[transformation] = ewald
else:
ewald = ewald_matrices[transformation]
energy = ewald.compute_sub_structure(s)
all_structures.append({"num_sites": len(s), "energy": energy,
"structure": s})
else:
all_structures.append({"num_sites": len(s), "structure": s})
def sort_func(s):
return s["energy"] / s["num_sites"] \
if contains_oxidation_state and self.sort_criteria == "ewald" \
else s["num_sites"]
self._all_structures = sorted(all_structures, key=sort_func)
if return_ranked_list:
return self._all_structures[0:num_to_return]
else:
return self._all_structures[0]["structure"]
def __str__(self):
return "EnumerateStructureTransformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
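# Usage sketch (illustrative): ordering a disordered structure and keeping
# the ten lowest-ranked orderings; 'disordered' is a placeholder Structure
# with partial occupancies.
def _example_enumerate(disordered):
    trans = EnumerateStructureTransformation(max_cell_size=2,
                                             sort_criteria="nsites")
    return trans.apply_transformation(disordered, return_ranked_list=10)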
class SubstitutionPredictorTransformation(AbstractTransformation):
"""
This transformation takes a structure and uses the structure
prediction module to find likely site substitutions.
Args:
threshold: Threshold for substitution.
**kwargs: Args for SubstitutionProbability class lambda_table, alpha
"""
def __init__(self, threshold=1e-2, scale_volumes=True, **kwargs):
self.kwargs = kwargs
self.threshold = threshold
self.scale_volumes = scale_volumes
self._substitutor = SubstitutionPredictor(threshold=threshold,
**kwargs)
def apply_transformation(self, structure, return_ranked_list=False):
if not return_ranked_list:
raise ValueError("SubstitutionPredictorTransformation doesn't"
" support returning 1 structure")
preds = self._substitutor.composition_prediction(
structure.composition, to_this_composition=False)
preds.sort(key=lambda x: x['probability'], reverse=True)
outputs = []
for pred in preds:
st = SubstitutionTransformation(pred['substitutions'])
output = {'structure': st.apply_transformation(structure),
'probability': pred['probability'],
'threshold': self.threshold, 'substitutions': {}}
# dictionary keys have to be converted to strings for JSON
for key, value in pred['substitutions'].items():
output['substitutions'][str(key)] = str(value)
outputs.append(output)
return outputs
def __str__(self):
return "SubstitutionPredictorTransformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
class MagOrderParameterConstraint(MSONable):
def __init__(self, order_parameter,
species_constraints=None,
site_constraint_name=None,
site_constraints=None):
"""
This class can be used to supply MagOrderingTransformation
to just a specific subset of species or sites that satisfy the
provided constraints. This can be useful for setting an order
parameters for, for example, ferrimagnetic structures which
might order on certain motifs, with the global order parameter
dependent on how many sites satisfy that motif.
:param order_parameter (float): any number from 0.0 to 1.0,
typically 0.5 (antiferromagnetic) or 1.0 (ferromagnetic)
:param species_constraints (list): str or list of strings
of Specie symbols that the constraint should apply to
:param site_constraint_name (str): name of the site property
that the constraint should apply to, e.g. "coordination_no"
:param site_constraints (list): list of values of the site
property that the constraints should apply to
"""
# validation
if site_constraints and site_constraints != [None] \
and not site_constraint_name:
raise ValueError("Specify the name of the site constraint.")
elif not site_constraints and site_constraint_name:
raise ValueError("Please specify some site constraints.")
if not isinstance(species_constraints, list):
species_constraints = [species_constraints]
if not isinstance(site_constraints, list):
site_constraints = [site_constraints]
if order_parameter > 1 or order_parameter < 0:
raise ValueError('Order parameter must lie between 0 and 1')
elif order_parameter != 0.5:
warnings.warn("Use care when using a non-standard order parameter, "
"though it can be useful in some cases it can also "
"lead to unintended behavior. Consult documentation.")
self.order_parameter = order_parameter
self.species_constraints = species_constraints
self.site_constraint_name = site_constraint_name
self.site_constraints = site_constraints
def satisfies_constraint(self, site):
"""
Checks if a periodic site satisfies the constraint.
"""
if not site.is_ordered:
return False
if self.species_constraints \
and str(site.specie) in self.species_constraints:
satisfies_constraints = True
else:
satisfies_constraints = False
if self.site_constraint_name \
and self.site_constraint_name in site.properties:
prop = site.properties[self.site_constraint_name]
if prop in self.site_constraints:
satisfies_constraints = True
else:
satisfies_constraints = False
return satisfies_constraints
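# Hedged example (illustrative): constraints for a ferrimagnetic ordering
# in which four-coordinate Fe orders ferromagnetically and six-coordinate
# Fe orders antiferromagnetically; "coordination_no" is a hypothetical
# site property of the kind described in the docstring above.
def _example_constraints():
    return [
        MagOrderParameterConstraint(1.0, species_constraints="Fe",
                                    site_constraint_name="coordination_no",
                                    site_constraints=[4]),
        MagOrderParameterConstraint(0.5, species_constraints="Fe",
                                    site_constraint_name="coordination_no",
                                    site_constraints=[6]),
    ]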
class MagOrderingTransformation(AbstractTransformation):
def __init__(self, mag_species_spin, order_parameter=0.5,
energy_model=SymmetryModel(), **kwargs):
"""
This transformation takes a structure and returns a list of collinear
magnetic orderings. For disordered structures, make an ordered
approximation first.
:param mag_species_spin: A mapping of elements/species to their
spin magnitudes, e.g. {"Fe3+": 5, "Mn3+": 4}
:param order_parameter (float or list): if float, specifies a
global order parameter and can take values from 0.0 to 1.0
(e.g. 0.5 for antiferromagnetic or 1.0 for ferromagnetic); if
a list, it has to be a list of
:class: `pymatgen.transformations.advanced_transformations.MagOrderParameterConstraint`
to specify more complicated orderings, see documentation for
MagOrderParameterConstraint for more details on usage
:param energy_model: Energy model to rank the returned structures,
see :mod: `pymatgen.analysis.energy_models` for more information (note
that this is not necessarily a physical energy). By default, returned
structures use SymmetryModel() which ranks structures from most
symmetric to least.
:param kwargs: Additional kwargs that are passed to
:class:`EnumerateStructureTransformation` such as min_cell_size etc.
"""
# checking for sensible order_parameter values
if isinstance(order_parameter, float):
# convert to constraint format
order_parameter = [MagOrderParameterConstraint(order_parameter=order_parameter,
species_constraints=
list(mag_species_spin.keys()))]
elif isinstance(order_parameter, list):
ops = [isinstance(item, MagOrderParameterConstraint) for item in order_parameter]
if not any(ops):
raise ValueError("Order parameter not correctly defined.")
else:
raise ValueError("Order parameter not correctly defined.")
self.mag_species_spin = mag_species_spin
# store order parameter constraints as dicts to save implementing
# to/from dict methods for MSONable compatibility
self.order_parameter = [op.as_dict() for op in order_parameter]
self.energy_model = energy_model
self.enum_kwargs = kwargs
@staticmethod
def determine_min_cell(disordered_structure):
"""
Determine the smallest supercell that is able to enumerate
the provided structure with the given order parameter
"""
def lcm(n1, n2):
"""
Find least common multiple of two numbers
"""
return n1 * n2 / gcd(n1, n2)
# assumes all order parameters for a given species are the same
mag_species_order_parameter = {}
mag_species_occurrences = {}
for idx, site in enumerate(disordered_structure):
if not site.is_ordered:
op = max(site.species.values())
# this very hacky bit of code only works because we know
# that on disordered sites in this class, all species are the same
# but have different spins, and this is comma-delimited
sp = str(list(site.species.keys())[0]).split(",")[0]
if sp in mag_species_order_parameter:
mag_species_occurrences[sp] += 1
else:
mag_species_order_parameter[sp] = op
mag_species_occurrences[sp] = 1
smallest_n = []
for sp, order_parameter in mag_species_order_parameter.items():
denom = Fraction(order_parameter).limit_denominator(100).denominator
num_atom_per_specie = mag_species_occurrences[sp]
n_gcd = gcd(denom, num_atom_per_specie)
smallest_n.append(lcm(int(n_gcd), denom) / n_gcd)
return max(smallest_n)
@staticmethod
def _add_dummy_species(structure, order_parameters):
"""
:param structure: ordered Structure
:param order_parameters: list of MagOrderParameterConstraints
:return: A structure decorated with disordered
DummySpecies on which to perform the enumeration.
Note that the DummySpecies are superimposed onto
the original sites, to make it easier to
retrieve the original site after enumeration is
performed (this approach is preferred over a simple
mapping since multiple species may have the same
DummySpecie, depending on the constraints specified).
This approach can also preserve site properties even after
enumeration.
"""
dummy_struct = structure.copy()
def generate_dummy_specie():
"""
Generator which returns DummySpecie symbols Mma, Mmb, etc.
"""
subscript_length = 1
while True:
for subscript in product(ascii_lowercase, repeat=subscript_length):
yield "Mm" + "".join(subscript)
subscript_length += 1
dummy_species_gen = generate_dummy_specie()
# one dummy species for each order parameter constraint
dummy_species_symbols = [next(dummy_species_gen) for i in range(len(order_parameters))]
dummy_species = [{
DummySpecie(symbol, properties={'spin': Spin.up}): constraint.order_parameter,
DummySpecie(symbol, properties={'spin': Spin.down}): 1 - constraint.order_parameter
} for symbol, constraint in zip(dummy_species_symbols, order_parameters)]
sites_to_add = []
for idx, site in enumerate(dummy_struct):
satisfies_constraints = [c.satisfies_constraint(site) for c in order_parameters]
if satisfies_constraints.count(True) > 1:
# site should either not satisfy any constraints, or satisfy
# one constraint
raise ValueError("Order parameter constraints conflict for site: {}, {}"
.format(str(site.specie), site.properties))
elif any(satisfies_constraints):
dummy_specie_idx = satisfies_constraints.index(True)
dummy_struct.append(
dummy_species[dummy_specie_idx],
site.coords,
site.lattice
)
return dummy_struct
@staticmethod
def _remove_dummy_species(structure):
"""
:return: Structure with dummy species removed, but
their corresponding spin properties merged with the
original sites. Used after performing enumeration.
"""
if not structure.is_ordered:
raise Exception("Something went wrong with enumeration.")
sites_to_remove = []
logger.debug('Dummy species structure:\n{}'.format(str(structure)))
for idx, site in enumerate(structure):
if isinstance(site.specie, DummySpecie):
sites_to_remove.append(idx)
spin = site.specie._properties.get('spin', None)
neighbors = structure.get_neighbors(
site,
0.05, # arbitrary threshold, needs to be << any bond length
# but >> floating point precision issues
include_index=True
)
if len(neighbors) != 1:
raise Exception("This shouldn't happen, found neighbors: {}"
.format(neighbors))
orig_site_idx = neighbors[0][2]
orig_specie = structure[orig_site_idx].specie
new_specie = Specie(orig_specie.symbol,
getattr(orig_specie, 'oxi_state', None),
properties={'spin': spin})
structure.replace(orig_site_idx,
new_specie,
properties=structure[orig_site_idx].properties)
structure.remove_sites(sites_to_remove)
logger.debug('Structure with dummy species removed:\n{}'.format(str(structure)))
return structure
def _add_spin_magnitudes(self, structure):
"""
Replaces Spin.up/Spin.down with spin magnitudes specified
by mag_species_spin.
:param structure:
:return:
"""
for idx, site in enumerate(structure):
if getattr(site.specie, '_properties', None):
spin = site.specie._properties.get('spin', None)
sign = int(spin) if spin else 0
if spin:
new_properties = site.specie._properties.copy()
# this very hacky bit of code only works because we know
# that on disordered sites in this class, all species are the same
# but have different spins, and this is comma-delimited
sp = str(site.specie).split(",")[0]
new_properties.update({
'spin': sign * self.mag_species_spin.get(sp, 0)
})
new_specie = Specie(site.specie.symbol,
getattr(site.specie, 'oxi_state', None),
new_properties)
structure.replace(idx, new_specie,
properties=site.properties)
logger.debug('Structure with spin magnitudes:\n{}'.format(str(structure)))
return structure
def apply_transformation(self, structure, return_ranked_list=False):
"""
Apply MagOrderTransformation to an input structure.
:param structure: Any ordered structure.
:param return_ranked_list: As in other Transformations.
:return:
"""
if not structure.is_ordered:
raise ValueError("Create an ordered approximation of "
"your input structure first.")
# retrieve order parameters
order_parameters = [MagOrderParameterConstraint.from_dict(op_dict)
for op_dict in self.order_parameter]
# add dummy species on which to perform enumeration
structure = self._add_dummy_species(structure, order_parameters)
# trivial case
if structure.is_ordered:
structure = self._remove_dummy_species(structure)
return [structure] if return_ranked_list > 1 else structure
enum_kwargs = self.enum_kwargs.copy()
enum_kwargs["min_cell_size"] = max(
int(self.determine_min_cell(structure)),
enum_kwargs.get("min_cell_size", 1)
)
if enum_kwargs.get("max_cell_size", None):
if enum_kwargs["min_cell_size"] > enum_kwargs["max_cell_size"]:
warnings.warn("Specified max cell size ({}) is smaller "
"than the minimum enumerable cell size ({}), "
"changing max cell size to {}".format(enum_kwargs["max_cell_size"],
enum_kwargs["min_cell_size"],
enum_kwargs["min_cell_size"]))
enum_kwargs["max_cell_size"] = enum_kwargs["min_cell_size"]
else:
enum_kwargs["max_cell_size"] = enum_kwargs["min_cell_size"]
t = EnumerateStructureTransformation(**enum_kwargs)
alls = t.apply_transformation(structure,
return_ranked_list=return_ranked_list)
# handle the fact that EnumerateStructureTransformation can either
# return a single Structure or a list
if isinstance(alls, Structure):
# remove dummy species and replace Spin.up or Spin.down
# with spin magnitudes given in mag_species_spin arg
alls = self._remove_dummy_species(alls)
alls = self._add_spin_magnitudes(alls)
else:
for idx, _ in enumerate(alls):
alls[idx]["structure"] = self._remove_dummy_species(alls[idx]["structure"])
alls[idx]["structure"] = self._add_spin_magnitudes(alls[idx]["structure"])
try:
num_to_return = int(return_ranked_list)
except ValueError:
num_to_return = 1
if num_to_return == 1 or not return_ranked_list:
return alls[0]["structure"] if num_to_return else alls
# remove duplicate structures and group according to energy model
m = StructureMatcher(comparator=SpinComparator())
key = lambda x: SpacegroupAnalyzer(x, 0.1).get_space_group_number()
out = []
for _, g in groupby(sorted([d["structure"] for d in alls],
key=key), key):
g = list(g)
grouped = m.group_structures(g)
out.extend([{"structure": g[0],
"energy": self.energy_model.get_energy(g[0])}
for g in grouped])
self._all_structures = sorted(out, key=lambda d: d["energy"])
return self._all_structures[0:num_to_return]
def __str__(self):
return "MagOrderingTransformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
def _find_codopant(target, oxidation_state, allowed_elements=None):
"""
Finds the element from "allowed elements" that (i) possesses the desired
"oxidation state" and (ii) is closest in ionic radius to the target specie
Args:
target: (Specie) provides target ionic radius.
oxidation_state: (float) codopant oxidation state.
allowed_elements: ([str]) List of allowed elements. If None,
all elements are tried.
Returns:
(Specie) with oxidation_state that has ionic radius closest to
target.
"""
ref_radius = target.ionic_radius
candidates = []
symbols = allowed_elements or [el.symbol for el in Element]
for sym in symbols:
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
sp = Specie(sym, oxidation_state)
r = sp.ionic_radius
if r is not None:
candidates.append((r, sp))
except:
pass
return min(candidates, key=lambda l: abs(l[0] / ref_radius - 1))[1]
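# Illustrative sketch (not part of the original module): picking a 2+
# codopant closest in ionic radius to Mg2+, restricted to a hypothetical
# shortlist of elements.
def _example_find_codopant():
    target = Specie("Mg", 2)
    return _find_codopant(target, 2, allowed_elements=["Ca", "Sr", "Zn"])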
class DopingTransformation(AbstractTransformation):
"""
A transformation that performs doping of a structure.
"""
def __init__(self, dopant, ionic_radius_tol=float("inf"), min_length=10,
alio_tol=0, codopant=False, max_structures_per_enum=100,
allowed_doping_species=None, **kwargs):
"""
Args:
dopant (Specie-like): E.g., Al3+. Must have oxidation state.
ionic_radius_tol (float): E.g., Fractional allowable ionic radii
mismatch for dopant to fit into a site. Default of inf means
that any dopant with the right oxidation state is allowed.
min_length (float): Min. lattice parameter between periodic
images of dopant. Defaults to 10A for now.
alio_tol (int): If this is not 0, attempt will be made to dope
sites with oxidation_states +- alio_tol of the dopant. E.g.,
1 means that the ions like Ca2+ and Ti4+ are considered as
potential doping sites for Al3+.
codopant (bool): If True, doping will be carried out with a
codopant to maintain charge neutrality. Otherwise, vacancies
will be used.
max_structures_per_enum (float): Maximum number of structures to
return per enumeration. Note that there can be more than one
candidate doping site, and each site enumeration will return at
max max_structures_per_enum structures. Defaults to 100.
allowed_doping_species (list): Species that are allowed to be
doping sites. This is an inclusionary list. If specified,
any sites which are not in this list are excluded as potential doping sites.
\\*\\*kwargs:
Same keyword args as :class:`EnumerateStructureTransformation`,
i.e., min_cell_size, etc.
"""
self.dopant = get_el_sp(dopant)
self.ionic_radius_tol = ionic_radius_tol
self.min_length = min_length
self.alio_tol = alio_tol
self.codopant = codopant
self.max_structures_per_enum = max_structures_per_enum
self.allowed_doping_species = allowed_doping_species
self.kwargs = kwargs
def apply_transformation(self, structure, return_ranked_list=False):
"""
Args:
structure (Structure): Input structure to dope
Returns:
[{"structure": Structure, "energy": float}]
"""
comp = structure.composition
logger.info("Composition: %s" % comp)
for sp in comp:
try:
sp.oxi_state
except AttributeError:
analyzer = BVAnalyzer()
structure = analyzer.get_oxi_state_decorated_structure(
structure)
comp = structure.composition
break
ox = self.dopant.oxi_state
radius = self.dopant.ionic_radius
compatible_species = [
sp for sp in comp if sp.oxi_state == ox and
abs(sp.ionic_radius / radius - 1) < self.ionic_radius_tol]
if (not compatible_species) and self.alio_tol:
# We only consider aliovalent doping if there are no compatible
# isovalent species.
compatible_species = [
sp for sp in comp
if abs(sp.oxi_state - ox) <= self.alio_tol and
abs(sp.ionic_radius / radius - 1) < self.ionic_radius_tol and
sp.oxi_state * ox >= 0]
if self.allowed_doping_species is not None:
# Only keep allowed doping species.
compatible_species = [
sp for sp in compatible_species
if sp in [get_el_sp(s) for s in self.allowed_doping_species]]
logger.info("Compatible species: %s" % compatible_species)
lengths = structure.lattice.abc
scaling = [max(1, int(round(math.ceil(self.min_length / x))))
for x in lengths]
logger.info("Lengths are %s" % str(lengths))
logger.info("Scaling = %s" % str(scaling))
all_structures = []
t = EnumerateStructureTransformation(**self.kwargs)
for sp in compatible_species:
supercell = structure * scaling
nsp = supercell.composition[sp]
if sp.oxi_state == ox:
supercell.replace_species({sp: {sp: (nsp - 1) / nsp,
self.dopant: 1 / nsp}})
logger.info("Doping %s for %s at level %.3f" % (
sp, self.dopant, 1 / nsp))
elif self.codopant:
codopant = _find_codopant(sp, 2 * sp.oxi_state - ox)
supercell.replace_species({sp: {sp: (nsp - 2) / nsp,
self.dopant: 1 / nsp,
codopant: 1 / nsp}})
logger.info("Doping %s for %s + %s at level %.3f" % (
sp, self.dopant, codopant, 1 / nsp))
elif abs(sp.oxi_state) < abs(ox):
# Strategy: replace the target species with a
# combination of dopant and vacancy.
# We will choose the lowest oxidation state species as a
# vacancy compensation species as it is likely to be lower in
# energy
sp_to_remove = min([s for s in comp if s.oxi_state * ox > 0],
key=lambda ss: abs(ss.oxi_state))
if sp_to_remove == sp:
common_charge = lcm(int(abs(sp.oxi_state)), int(abs(ox)))
ndopant = common_charge / abs(ox)
nsp_to_remove = common_charge / abs(sp.oxi_state)
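# Worked example: doping Al3+ (ox = +3) onto Mg2+ sites gives
# lcm(2, 3) = 6, so ndopant = 6 / 3 = 2 and nsp_to_remove = 6 / 2 = 3,
# i.e. three Mg2+ are replaced by two Al3+ plus a vacancy (6+ = 6+).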
logger.info("Doping %d %s with %d %s." %
(nsp_to_remove, sp, ndopant, self.dopant))
supercell.replace_species(
{sp: {sp: (nsp - nsp_to_remove) / nsp,
self.dopant: ndopant / nsp}})
else:
ox_diff = int(abs(round(sp.oxi_state - ox)))
vac_ox = int(abs(sp_to_remove.oxi_state))
common_charge = lcm(vac_ox, ox_diff)
ndopant = common_charge / ox_diff
nx_to_remove = common_charge / vac_ox
nx = supercell.composition[sp_to_remove]
logger.info("Doping %d %s with %s and removing %d %s." %
(ndopant, sp, self.dopant,
nx_to_remove, sp_to_remove))
supercell.replace_species(
{sp: {sp: (nsp - ndopant) / nsp,
self.dopant: ndopant / nsp},
sp_to_remove: {
sp_to_remove: (nx - nx_to_remove) / nx}})
elif abs(sp.oxi_state) > abs(ox):
# Strategy: replace the target species with dopant and also
# remove some opposite charged species for charge neutrality
if ox > 0:
sp_to_remove = max(supercell.composition.keys(),
key=lambda el: el.X)
else:
sp_to_remove = min(supercell.composition.keys(),
key=lambda el: el.X)
# Confirm species are of opposite oxidation states.
assert sp_to_remove.oxi_state * sp.oxi_state < 0
ox_diff = int(abs(round(sp.oxi_state - ox)))
anion_ox = int(abs(sp_to_remove.oxi_state))
nx = supercell.composition[sp_to_remove]
common_charge = lcm(anion_ox, ox_diff)
ndopant = common_charge / ox_diff
nx_to_remove = common_charge / anion_ox
logger.info("Doping %d %s with %s and removing %d %s." %
(ndopant, sp, self.dopant,
nx_to_remove, sp_to_remove))
supercell.replace_species(
{sp: {sp: (nsp - ndopant) / nsp,
self.dopant: ndopant / nsp},
sp_to_remove: {sp_to_remove: (nx - nx_to_remove) / nx}})
ss = t.apply_transformation(
supercell, return_ranked_list=self.max_structures_per_enum)
logger.info("%s distinct structures" % len(ss))
all_structures.extend(ss)
logger.info("Total %s doped structures" % len(all_structures))
if return_ranked_list:
return all_structures[:return_ranked_list]
return all_structures[0]["structure"]
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
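# Illustrative usage sketch (not part of the original module; parameter
# values are hypothetical). Dopes Al3+ into an oxidation-state-decorated
# structure and returns up to five ranked candidate structures.
def _example_doping_usage(structure):
    trans = DopingTransformation("Al3+", min_length=10, alio_tol=1)
    return trans.apply_transformation(structure, return_ranked_list=5)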
class SlabTransformation(AbstractTransformation):
"""
A transformation that creates a slab from a structure.
"""
def __init__(self, miller_index, min_slab_size, min_vacuum_size,
lll_reduce=False, center_slab=False,
in_unit_planes=False, primitive=True,
max_normal_search=None, shift=0, tol=0.1):
"""
Args:
miller_index (3-tuple or list): miller index of slab
min_slab_size (float): minimum slab size in angstroms
min_vacuum_size (float): minimum size of vacuum
lll_reduce (bool): whether to apply LLL reduction
center_slab (bool): whether to center the slab
primitive (bool): whether to reduce slabs to most primitive cell
max_normal_search (int): maximum index to include in linear
combinations of indices to find c lattice vector orthogonal
to slab surface
shift (float): shift to get termination
tol (float): tolerance for primitive cell finding
"""
self.miller_index = miller_index
self.min_slab_size = min_slab_size
self.min_vacuum_size = min_vacuum_size
self.lll_reduce = lll_reduce
self.center_slab = center_slab
self.in_unit_planes = in_unit_planes
self.primitive = primitive
self.max_normal_search = max_normal_search
self.shift = shift
self.tol = tol
def apply_transformation(self, structure):
sg = SlabGenerator(structure, self.miller_index, self.min_slab_size,
self.min_vacuum_size, self.lll_reduce,
self.center_slab, self.in_unit_planes,
self.primitive, self.max_normal_search)
slab = sg.get_slab(self.shift, self.tol)
return slab
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return False
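# Illustrative usage sketch (not part of the original module; the Miller
# index and sizes are hypothetical). Cuts a (1, 1, 1) slab with 10 A of
# slab and 10 A of vacuum from a bulk structure.
def _example_slab_usage(structure):
    trans = SlabTransformation(miller_index=(1, 1, 1), min_slab_size=10,
                               min_vacuum_size=10, lll_reduce=True)
    return trans.apply_transformation(structure)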
class DisorderOrderedTransformation(AbstractTransformation):
"""
Not to be confused with OrderDisorderedTransformation,
this transformation attempts to obtain a
*disordered* structure from an input ordered structure.
This may or may not be physically plausible; further
inspection of the returned structures is advised.
The main purpose for this transformation is for structure
matching to crystal prototypes for structures that have
been derived from a parent prototype structure by
substitutions or alloying additions.
"""
def __init__(self, max_sites_to_merge=2):
"""
Args:
max_sites_to_merge (int): merge at most this many sites into a
single disordered site
"""
self.max_sites_to_merge = max_sites_to_merge
def apply_transformation(self, structure, return_ranked_list=False):
"""
Args:
structure: ordered structure
return_ranked_list: as in other pymatgen Transformations
Returns: transformed disordered structure(s)
"""
if not structure.is_ordered:
raise ValueError("This transformation is for ordered structures only.")
partitions = self._partition_species(structure.composition,
max_components=self.max_sites_to_merge)
disorder_mappings = self._get_disorder_mappings(structure.composition, partitions)
disordered_structures = []
for mapping in disorder_mappings:
disordered_structure = structure.copy()
disordered_structure.replace_species(mapping)
disordered_structures.append({'structure': disordered_structure,
'mapping': mapping})
if len(disordered_structures) == 0:
return None
elif not return_ranked_list:
return disordered_structures[0]['structure']
else:
if len(disordered_structures) > return_ranked_list:
disordered_structures = disordered_structures[0:return_ranked_list]
return disordered_structures
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
@staticmethod
def _partition_species(composition, max_components=2):
"""
Private method to split a list of species into
various partitions.
"""
def _partition(collection):
# thanks https://stackoverflow.com/a/30134039
if len(collection) == 1:
yield [collection]
return
first = collection[0]
for smaller in _partition(collection[1:]):
# insert `first` in each of the subpartition's subsets
for n, subset in enumerate(smaller):
yield smaller[:n] + [[first] + subset] + smaller[n + 1:]
# put `first` in its own subset
yield [[first]] + smaller
def _sort_partitions(partitions_to_sort):
"""
Sort partitions by those we want to check first
(typically, merging two sites into one is the
one to try first).
"""
partition_indices = [(idx, [len(p) for p in partition])
for idx, partition in enumerate(partitions_to_sort)]
# sort by maximum length of partition first (try smallest maximums first)
# and secondarily by number of partitions (most partitions first, i.e.
# create the 'least disordered' structures first)
partition_indices = sorted(partition_indices, key=lambda x: (max(x[1]), -len(x[1])))
# merge at most max_component sites,
# e.g. merge at most 2 species into 1 disordered site
partition_indices = [x for x in partition_indices if max(x[1]) <= max_components]
partition_indices.pop(0) # this is just the input structure
sorted_partitions = [partitions_to_sort[x[0]] for x in partition_indices]
return sorted_partitions
collection = list(composition.keys())
partitions = list(_partition(collection))
partitions = _sort_partitions(partitions)
return partitions
@staticmethod
def _get_disorder_mappings(composition, partitions):
"""
Private method to obtain the mapping to create
a disordered structure from a given partition.
"""
def _get_replacement_dict_from_partition(partition):
d = {} # to be passed to Structure.replace_species()
for sp_list in partition:
if len(sp_list) > 1:
total_occ = sum([composition[sp] for sp in sp_list])
merged_comp = {sp: composition[sp] / total_occ for sp in sp_list}
for sp in sp_list:
d[sp] = merged_comp
return d
disorder_mapping = [_get_replacement_dict_from_partition(p)
for p in partitions]
return disorder_mapping
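# Illustrative usage sketch (not part of the original module). For an
# ordered input structure, this returns up to three candidate disordered
# structures with at most two species merged onto one site.
def _example_disorder_usage(structure):
    trans = DisorderOrderedTransformation(max_sites_to_merge=2)
    return trans.apply_transformation(structure, return_ranked_list=3)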
class GrainBoundaryTransformation(AbstractTransformation):
"""
A transformation that creates a grain boundary (GB) from a bulk structure.
"""
def __init__(self, rotation_axis, rotation_angle, expand_times=4, vacuum_thickness=0.0,
ab_shift=[0, 0], normal=False, ratio=None, plane=None, max_search=50,
tol_coi=1.e-3):
"""
Args:
rotation_axis (list): Rotation axis of the GB as a list of integers,
e.g. [1, 1, 0].
rotation_angle (float, in degrees): rotation angle used to generate the GB.
Make sure the angle is accurate enough; you can use the enum* functions
in this class to extract accurate angles.
E.g., the rotation angle of a sigma 3 twist GB with rotation axis
[1, 1, 1] and GB plane (1, 1, 1) can be 60.000000000 degrees.
If you do not know the rotation angle but know the sigma value, the
function get_rotation_angle_from_sigma can return all the rotation
angles corresponding to the sigma value you provide.
expand_times (int): The number of times one unit grain is expanded into a
larger grain. This is used to tune the grain length of the GB so that
the two GBs in one cell do not interact with each other. Defaults to 4.
vacuum_thickness (float): The thickness of vacuum to insert between the
two grains of the GB. Defaults to 0.
ab_shift (list of float, in units of the a, b vectors of the GB): in-plane
shift of the two grains.
normal (bool):
whether to require the c axis of the top grain (first transformation
matrix) to be perpendicular to the surface.
Defaults to False.
ratio (list of integers):
lattice axial ratio.
For cubic systems, ratio is not needed.
For tetragonal systems, ratio = [mu, mv], a list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to None.
For orthorhombic systems, ratio = [mu, lam, mv], a list of three integers,
that is, mu:lam:mv = c2:b2:a2. If one axis is irrational, set that entry
to None; e.g., mu:lam:mv = c2:None:a2 means b2 is irrational.
For rhombohedral systems, ratio = [mu, mv], a list of two integers,
that is, mu/mv is the ratio (1 + 2*cos(alpha))/cos(alpha).
If irrational, set it to None.
For hexagonal systems, ratio = [mu, mv], a list of two integers,
that is, mu/mv = c2/a2. If it is irrational, set it to None.
plane (list): Grain boundary plane as a list of integers, e.g. [1, 2, 3].
If None, a twist GB is generated, with the plane perpendicular to the
rotation axis.
max_search (int): maximum search range for the GB lattice vectors that give
the smallest GB lattice. If normal is True, also the maximum search range
for a GB c vector perpendicular to the plane. For complex GBs, you can
reduce this value to speed up the search, but too small a value may lead
to errors.
tol_coi (float): tolerance for finding coincidence sites. When making
approximations to the ratio needed to generate the GB, you may need to
increase this tolerance to obtain the correct number of coincidence sites.
To check that the number of coincidence sites is correct, compare the
generated GB object's sigma with the enum* sigma values (what the user
expected).
Returns:
Grain boundary structure (a Gb object, a subclass of Structure).
"""
self.rotation_axis = rotation_axis
self.rotation_angle = rotation_angle
self.expand_times = expand_times
self.vacuum_thickness = vacuum_thickness
self.ab_shift = ab_shift
self.normal = normal
self.ratio = ratio
self.plane = plane
self.max_search = max_search
self.tol_coi = tol_coi
def apply_transformation(self, structure):
gbg = GrainBoundaryGenerator(structure)
gb_struct = gbg.gb_from_parameters(
self.rotation_axis,
self.rotation_angle,
self.expand_times,
self.vacuum_thickness,
self.ab_shift,
self.normal,
self.ratio,
self.plane,
self.max_search,
self.tol_coi)
return gb_struct
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return False
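# Illustrative usage sketch (not part of the original module; the angle shown
# is the sigma 5 [1, 0, 0] twist value for a cubic lattice, and all other
# parameter values are hypothetical).
def _example_gb_usage(structure):
    trans = GrainBoundaryTransformation(rotation_axis=[1, 0, 0],
                                        rotation_angle=36.87,
                                        expand_times=4,
                                        vacuum_thickness=0.0)
    return trans.apply_transformation(structure)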
|
dongsenfo/pymatgen
|
pymatgen/transformations/advanced_transformations.py
|
Python
|
mit
| 57,841
|
[
"CRYSTAL",
"pymatgen"
] |
cd029ff3c0bf7efe854785aa40ccbf5944963f1dbdfac86a365009e3dcefcece
|
#!/usr/bin/python
import httplib
import httplib2
import os
import random
import sys
import time
from apiclient.discovery import build
from apiclient.errors import HttpError
from apiclient.http import MediaFileUpload
from oauth2client.file import Storage
from oauth2client.client import flow_from_clientsecrets
from oauth2client.tools import run
from optparse import OptionParser
# Explicitly tell the underlying HTTP transport library not to retry, since
# we are handling retry logic ourselves.
httplib2.RETRIES = 1
# Maximum number of times to retry before giving up.
MAX_RETRIES = 10
# Always retry when these exceptions are raised.
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected,
httplib.IncompleteRead, httplib.ImproperConnectionState,
httplib.CannotSendRequest, httplib.CannotSendHeader,
httplib.ResponseNotReady, httplib.BadStatusLine)
# Always retry when an apiclient.errors.HttpError with one of these status
# codes is raised.
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]
# CLIENT_SECRETS_FILE, name of a file containing the OAuth 2.0 information for
# this application, including client_id and client_secret. You can acquire an
# ID/secret pair from the API Access tab on the Google APIs Console
# http://code.google.com/apis/console#access
# For more information about using OAuth2 to access Google APIs, please visit:
# https://developers.google.com/accounts/docs/OAuth2
# For more information about the client_secrets.json file format, please visit:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
# Please ensure that you have enabled the YouTube Data API for your project.
CLIENT_SECRETS_FILE = "client_secrets.json"
# A limited OAuth 2 access scope that allows for uploading files, but not other
# types of account access.
YOUTUBE_UPLOAD_SCOPE = "https://www.googleapis.com/auth/youtube.upload"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
# Helpful message to display if the CLIENT_SECRETS_FILE is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console
https://code.google.com/apis/console#access
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
CLIENT_SECRETS_FILE))
def get_authenticated_service():
flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=YOUTUBE_UPLOAD_SCOPE,
message=MISSING_CLIENT_SECRETS_MESSAGE)
storage = Storage("%s-oauth2.json" % sys.argv[0])
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(flow, storage)
return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
http=credentials.authorize(httplib2.Http()))
def initialize_upload(options):
youtube = get_authenticated_service()
tags = None
if options.keywords:
tags = options.keywords.split(",")
insert_request = youtube.videos().insert(
part="snippet,status",
body=dict(
snippet=dict(
title=options.title,
description=options.description,
tags=tags,
categoryId=options.category
),
status = dict(
privacyStatus=options.privacyStatus
)
),
media_body=MediaFileUpload(options.file, chunksize=-1, resumable=True)
)
resumable_upload(insert_request)
def resumable_upload(insert_request):
response = None
error = None
retry = 0
while response is None:
try:
print "Uploading file..."
status, response = insert_request.next_chunk()
if 'id' in response:
print "'%s' (video id: %s) was successfully uploaded." % (
options.title, response['id'])
else:
exit("The upload failed with an unexpected response: %s" % response)
except HttpError, e:
if e.resp.status in RETRIABLE_STATUS_CODES:
error = "A retriable HTTP error %d occurred:\n%s" % (e.resp.status,
e.content)
else:
raise
except RETRIABLE_EXCEPTIONS, e:
error = "A retriable error occurred: %s" % e
if error is not None:
print error
retry += 1
if retry > MAX_RETRIES:
exit("No longer attempting to retry.")
max_sleep = 2 ** retry
sleep_seconds = random.random() * max_sleep
print "Sleeping %f seconds and then retrying..." % sleep_seconds
time.sleep(sleep_seconds)
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("--file", dest="file", help="Video file to upload")
parser.add_option("--title", dest="title", help="Video title",
default="Test Title")
parser.add_option("--description", dest="description", help="Video description",
default="Test Description")
parser.add_option("--category", dest="category", help="Video category",
default="22")
parser.add_option("--keywords", dest="keywords",
help="Video keywords, comma separated", default="")
parser.add_option("--privacyStatus", dest="privacyStatus", help="Video privacy status",
default="unlisted")
(options, args) = parser.parse_args()
if options.file is None or not os.path.exists(options.file):
exit("Please specify a valid file using the --file= parameter.")
else:
initialize_upload(options)
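# Example invocation (sketch; the file name and metadata are hypothetical,
# and client_secrets.json must be configured as described above):
#
#   python upload_video.py --file=my_video.mp4 --title="Summer vacation" \
#       --description="Test upload" --keywords="surfing" --category=22 \
#       --privacyStatus=private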
|
lhongskie/yt-samples-python
|
upload_video.py
|
Python
|
apache-2.0
| 5,540
|
[
"VisIt"
] |
9c4e2d76726f21e5fb306a16dd8f7045a7ef58e61950c65fd796da66129b6f11
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import glob
import json
import mmap
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument("--rootdir", default=rootdir, help="root directory to examine")
default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument("--boilerplate-dir", default=default_boilerplate_dir)
args = parser.parse_args()
def get_refs():
refs = {}
for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
extension = os.path.basename(path).split(".")[1]
ref_file = open(path, 'r')
ref = ref_file.read().splitlines()
ref_file.close()
refs[extension] = ref
return refs
def file_passes(filename, refs, regexs):
try:
f = open(filename, 'r')
except:
return False
data = f.read()
f.close()
basename = os.path.basename(filename)
extension = file_extension(filename)
if extension != "":
ref = refs[extension]
else:
ref = refs[basename]
# remove build tags from the top of Go files
if extension == "go":
p = regexs["go_build_constraints"]
(data, found) = p.subn("", data, 1)
# remove shebang from the top of shell files
if extension == "sh":
p = regexs["shebang"]
(data, found) = p.subn("", data, 1)
data = data.splitlines()
# if our test file is smaller than the reference it surely fails!
if len(ref) > len(data):
return False
# trim our file to the same number of lines as the reference file
data = data[:len(ref)]
p = regexs["year"]
for d in data:
if p.search(d):
return False
# Replace all occurrences of the regex "2017|2016|2015|2014" with "YEAR"
p = regexs["date"]
for i, d in enumerate(data):
(data[i], found) = p.subn('YEAR', d)
if found != 0:
break
# if we don't match the reference at this point, fail
if ref != data:
return False
return True
def file_extension(filename):
return os.path.splitext(filename)[1].split(".")[-1].lower()
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git', 'cluster/env.sh', "vendor", "test/e2e/generated/bindata.go"]
def normalize_files(files):
newfiles = []
for pathname in files:
if any(x in pathname for x in skipped_dirs):
continue
newfiles.append(pathname)
for i, pathname in enumerate(newfiles):
if not os.path.isabs(pathname):
newfiles[i] = os.path.join(args.rootdir, pathname)
return newfiles
def get_files(extensions):
files = []
if len(args.filenames) > 0:
files = args.filenames
else:
for root, dirs, walkfiles in os.walk(args.rootdir):
# don't visit certain dirs. This is just a performance improvement
# as we would prune these later in normalize_files(). But doing it
# cuts down the amount of filesystem walking we do and cuts down
# the size of the file list
for d in skipped_dirs:
if d in dirs:
dirs.remove(d)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []
for pathname in files:
basename = os.path.basename(pathname)
extension = file_extension(pathname)
if extension in extensions or basename in extensions:
outfiles.append(pathname)
return outfiles
def get_regexs():
regexs = {}
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
regexs["year"] = re.compile( 'YEAR' )
# dates can be 2014, 2015, 2016 or 2017, company holder names can be anything
regexs["date"] = re.compile( '(2014|2015|2016|2017)' )
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
# strip #!.* from shell scripts
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
return regexs
def main():
regexs = get_regexs()
refs = get_refs()
filenames = get_files(refs.keys())
for filename in filenames:
if not file_passes(filename, refs, regexs):
print(filename, file=sys.stdout)
if __name__ == "__main__":
sys.exit(main())
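# Example invocations (sketch; paths are hypothetical). Check every file under
# the repo root, printing those whose header does not match the boilerplate:
#
#   hack/boilerplate/boilerplate.py --rootdir=/path/to/repo
#
# Or check specific files only:
#
#   hack/boilerplate/boilerplate.py pkg/foo/bar.go hack/baz.sh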
|
rawlingsj/gofabric8
|
vendor/k8s.io/minikube/hack/boilerplate/boilerplate.py
|
Python
|
apache-2.0
| 5,254
|
[
"VisIt"
] |
1644762af6684d5a9fc4840f9503f6eb8d1426de67b5b813c24a1a2dfc87c17e
|
# coding: utf-8
"""
Factory functions producing ABINIT workflows. Entry points for client code (high-level interface)
"""
from __future__ import unicode_literals, division, print_function
from .abiobjects import KSampling, Screening, SelfEnergy, ExcHamiltonian, HilbertTransform
from .strategies import ScfStrategy, NscfStrategy, ScreeningStrategy, SelfEnergyStrategy, MDFBSE_Strategy
from .workflows import BandStructureWorkflow, G0W0_Workflow, BSEMDF_Workflow
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__email__ = "gmatteo at gmail.com"
def bandstructure(structure, pseudos, scf_kppa, nscf_nband,
ndivsm, accuracy="normal", spin_mode="polarized",
smearing="fermi_dirac:0.1 eV", charge=0.0, scf_algorithm=None,
dos_kppa=None, workdir=None, manager=None, **extra_abivars):
"""
Returns a Workflow for bandstructure calculations.
Args:
structure:
Pymatgen structure.
pseudos:
List of `Pseudo` objects.
scf_kppa:
Defines the sampling used for the SCF run.
nscf_nband:
Number of bands included in the NSCF run.
ndivsm:
Number of divisions used to sample the smallest segment of the
k-path.
accuracy:
Accuracy of the calculation.
spin_mode:
Spin polarization.
smearing:
Smearing technique.
charge:
Electronic charge added to the unit cell.
scf_algorithm:
Algorithm used for solving the SCF cycle.
dos_kppa:
Defines the k-point sampling used for the computation of the DOS
(None if DOS is not wanted).
workdir:
Working directory.
manager:
`TaskManager` instance.
extra_abivars:
Dictionary with extra variables passed to ABINIT.
"""
# SCF calculation.
scf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0)
scf_strategy = ScfStrategy(structure, pseudos, scf_ksampling,
accuracy=accuracy, spin_mode=spin_mode,
smearing=smearing, charge=charge,
scf_algorithm=scf_algorithm, **extra_abivars)
# Band structure calculation.
nscf_ksampling = KSampling.path_from_structure(ndivsm, structure)
nscf_strategy = NscfStrategy(scf_strategy, nscf_ksampling, nscf_nband, **extra_abivars)
# DOS calculation.
dos_strategy = None
if dos_kppa is not None:
dos_ksampling = KSampling.automatic_density(structure, dos_kppa, chksymbreak=0)
#dos_ksampling = KSampling.monkhorst(dos_ngkpt, shiftk=dos_shiftk, chksymbreak=0)
dos_strategy = NscfStrategy(scf_strategy, dos_ksampling, nscf_nband, nscf_solver=None, **extra_abivars)
return BandStructureWorkflow(scf_strategy, nscf_strategy, dos_inputs=dos_strategy,
workdir=workdir, manager=manager)
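# Illustrative usage sketch (not part of the original module; the k-point
# density, band count and path divisions are hypothetical).
def _example_bandstructure_flow(structure, pseudos):
    return bandstructure(structure, pseudos, scf_kppa=1000, nscf_nband=20,
                         ndivsm=10, dos_kppa=None)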
#def relaxation(workdir, manager, structure, pseudos, scf_kppa,
# accuracy="normal", spin_mode="polarized",
# smearing="fermi_dirac:0.1 eV", charge=0.0, scf_algorithm=None, **extra_abivars):
# """
# Returns a Work object that performs structural relaxations.
#
# Args:
# workdir:
# Working directory.
# manager:
# `TaskManager` object.
# structure:
# Pymatgen structure.
# pseudos:
# List of `Pseudo` objects.
# scf_kppa:
# Defines the sampling used for the SCF run.
# accuracy:
# Accuracy of the calculation.
# spin_mode:
# Spin polarization.
# smearing:
# Smearing technique.
# charge:
# Electronic charge added to the unit cell.
# scf_algorithm:
# Algorithm used for solving the SCF cycle.
# """
# # SCF calculation.
# scf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0)
# relax_algo =
#
# relax_strategy = RelaxStrategy(structure, pseudos, scf_ksampling, relax_algo,
# accuracy=accuracy, spin_mode=spin_mode, smearing=smearing,
# charge=charge, scf_algorithm=scf_algorithm)
#
# #return Relaxation(relax_strategy, workdir=workdir, manager=manager)
def g0w0_with_ppmodel(structure, pseudos, scf_kppa, nscf_nband, ecuteps, ecutsigx,
accuracy="normal", spin_mode="polarized", smearing="fermi_dirac:0.1 eV",
ppmodel="godby", charge=0.0, scf_algorithm=None, inclvkb=2, scr_nband=None,
sigma_nband=None, gw_qprange=1, workdir=None, manager=None, **extra_abivars):
"""
Returns a Work object that performs G0W0 calculations for the given material.
Args:
structure:
Pymatgen structure.
pseudos:
List of `Pseudo` objects.
scf_kppa:
Defines the sampling used for the SCF run.
nscf_nband:
Number of bands included in the NSCF run.
ecuteps:
Cutoff energy [Ha] for the screening matrix.
ecutsigx:
Cutoff energy [Ha] for the exchange part of the self-energy.
accuracy:
Accuracy of the calculation.
spin_mode:
Spin polarization.
smearing:
Smearing technique.
ppmodel:
Plasmonpole technique.
charge:
Electronic charge added to the unit cell.
scf_algorithm:
Algorithm used for solving the SCF cycle.
inclvkb:
Treatment of the dipole matrix elements (see abinit variable).
scr_nband:
Number of bands used to compute the screening (default is nscf_nband)
sigma_nband:
Number of bands used to compute the self-energy (default is nscf_nband)
gw_qprange:
Option for the automatic selection of k-points and bands for GW corrections.
See Abinit docs for more detail. The default value makes the code compute the
QP energies for all the points in the IBZ and one band above and one band below the Fermi level.
workdir:
Working directory.
manager:
`TaskManager` instance.
extra_abivars:
Dictionary with extra variables passed to ABINIT.
"""
# TODO: Cannot use istwfk != 1.
if "istwfk" not in extra_abivars:
extra_abivars["istwfk"] = "*1"
scf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0)
scf_strategy = ScfStrategy(structure, pseudos, scf_ksampling,
accuracy=accuracy, spin_mode=spin_mode,
smearing=smearing, charge=charge,
scf_algorithm=scf_algorithm, **extra_abivars)
nscf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0)
nscf_strategy = NscfStrategy(scf_strategy, nscf_ksampling, nscf_nband, **extra_abivars)
if scr_nband is None: scr_nband = nscf_nband
if sigma_nband is None: sigma_nband = nscf_nband
screening = Screening(ecuteps, scr_nband, w_type="RPA", sc_mode="one_shot",
hilbert=None, ecutwfn=None, inclvkb=inclvkb)
self_energy = SelfEnergy("gw", "one_shot", sigma_nband, ecutsigx, screening,
gw_qprange=gw_qprange, ppmodel=ppmodel)
scr_strategy = ScreeningStrategy(scf_strategy, nscf_strategy, screening, **extra_abivars)
sigma_strategy = SelfEnergyStrategy(scf_strategy, nscf_strategy, scr_strategy, self_energy,
**extra_abivars)
return G0W0_Workflow(scf_strategy, nscf_strategy, scr_strategy, sigma_strategy,
workdir=workdir, manager=manager)
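# Illustrative usage sketch (not part of the original module; cutoffs in Ha
# and band counts are hypothetical). One-shot G0W0 with the Godby-Needs
# plasmon-pole model.
def _example_g0w0_flow(structure, pseudos):
    return g0w0_with_ppmodel(structure, pseudos, scf_kppa=1000, nscf_nband=50,
                             ecuteps=6, ecutsigx=12, ppmodel="godby")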
def g0w0_extended(structure, pseudos, scf_kppa, nscf_nband, ecuteps, ecutsigx, accuracy="normal", spin_mode="polarized",
smearing="fermi_dirac:0.1 eV", response_models=["godby"], charge=0.0, scf_algorithm=None, inclvkb=2,
scr_nband=None, sigma_nband=None, gw_qprange=1, workdir=None, manager=None, gamma=True,
**extra_abivars):
"""
Returns a Work object that performs G0W0 calculations for the given material.
Args:
structure:
Pymatgen structure.
pseudos:
List of `Pseudo` objects.
scf_kppa:
Defines the sampling used for the SCF run.
nscf_nband:
List of numbers of bands for the NSCF run (one calculation per value).
ecuteps:
List of cutoff energies [Ha] for the screening matrix (one calculation per value).
ecutsigx:
Cutoff energy [Ha] for the exchange part of the self-energy.
accuracy:
Accuracy of the calculation.
spin_mode:
Spin polarization.
smearing:
Smearing technique.
response_models:
List of response-function models: plasmon-pole model names (e.g. "godby")
and/or "cd" for contour deformation.
charge:
Electronic charge added to the unit cell.
scf_algorithm:
Algorithm used for solving the SCF cycle.
inclvkb:
Treatment of the dipole matrix elements (see abinit variable).
scr_nband:
Number of bands used to compute the screening (default is nscf_nband)
sigma_nband:
Number of bands used to compute the self-energy (default is nscf_nband)
workdir:
Working directory.
manager:
`TaskManager` instance.
extra_abivars:
Dictionary with extra variables passed to ABINIT.
"""
# TODO: Cannot use istwfk != 1.
if gamma:
if scf_kppa == 1:
scf_ksampling = KSampling.gamma_centered(kpts=(1, 1, 1))
nscf_ksampling = KSampling.gamma_centered(kpts=(1, 1, 1))
elif scf_kppa == 2:
scf_ksampling = KSampling.gamma_centered(kpts=(2, 2, 2))
nscf_ksampling = KSampling.gamma_centered(kpts=(2, 2, 2))
elif scf_kppa <= 10:
scf_ksampling = KSampling.gamma_centered(kpts=(scf_kppa, scf_kppa, scf_kppa))
nscf_ksampling = KSampling.gamma_centered(kpts=(scf_kppa, scf_kppa, scf_kppa))
else:
scf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0, shifts=(0, 0, 0))
nscf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0, shifts=(0, 0, 0))
else:
scf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0)
nscf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0)
if "istwfk" not in extra_abivars:
extra_abivars["istwfk"] = "*1"
scf_strategy = []
to_add = {}
extra_abivars.update(to_add)
for k in extra_abivars.keys():
if k[-2:] == '_s':
var = k[:len(k)-2]
values = extra_abivars.pop(k)
to_add.update({k: values[-1]})
for value in values:
extra_abivars[var] = value
extra_abivars['pawecutdg'] = extra_abivars['ecut']*2
scf_strategy.append(ScfStrategy(structure, pseudos, scf_ksampling, accuracy=accuracy, spin_mode=spin_mode,
smearing=smearing, charge=charge, scf_algorithm=None, **extra_abivars))
if len(scf_strategy) == 0:
scf_strategy.append(ScfStrategy(structure, pseudos, scf_ksampling, accuracy=accuracy, spin_mode=spin_mode,
smearing=smearing, charge=charge, scf_algorithm=None, **extra_abivars))
nscf_strategy = NscfStrategy(scf_strategy[-1], nscf_ksampling, max(nscf_nband), **extra_abivars)
if scr_nband is None:
scr_nband = nscf_nband
if sigma_nband is None:
sigma_nband = nscf_nband
if ecutsigx < max(ecuteps):
ecutsigx = max(ecuteps)
sigma_strategy = []
if 'cd' in response_models:
hilbert = HilbertTransform(nomegasf=100, domegasf=None, spmeth=1, nfreqre=None, freqremax=None, nfreqim=None,
freqremin=None)
for response_model in response_models:
for ecuteps_v in ecuteps:
for nscf_nband_v in nscf_nband:
scr_nband = nscf_nband_v
sigma_nband = nscf_nband_v
if response_model == 'cd':
screening = Screening(ecuteps_v, scr_nband, w_type="RPA", sc_mode="one_shot", hilbert=hilbert, ecutwfn=None, inclvkb=inclvkb)
self_energy = SelfEnergy("gw", "one_shot", sigma_nband, ecutsigx, screening, hilbert=hilbert)
else:
ppmodel = response_model
screening = Screening(ecuteps_v, scr_nband, w_type="RPA", sc_mode="one_shot", ecutwfn=None, inclvkb=inclvkb)
self_energy = SelfEnergy("gw", "one_shot", sigma_nband, ecutsigx, screening, ppmodel=ppmodel, gw_qprange=1)
scr_strategy = ScreeningStrategy(scf_strategy[-1], nscf_strategy, screening, **extra_abivars)
sigma_strategy.append(SelfEnergyStrategy(scf_strategy[-1], nscf_strategy, scr_strategy, self_energy, **extra_abivars))
return G0W0_Workflow(scf_strategy, nscf_strategy, scr_strategy, sigma_strategy, workdir=workdir, manager=manager)
#def g0w0_with_cd(structure, pseudos, scf_kppa, nscf_nband, ecuteps, ecutsigx, hilbert,
# accuracy="normal", spin_mode="polarized", smearing="fermi_dirac:0.1 eV",
# charge=0.0, scf_algorithm=None, inclvkb=2, scr_nband=None,
# sigma_nband=None, workdir=None, manager=None, **extra_abivars):
# """
# Returns a Work object that performs G0W0 calculations for the given the material.
#
# Args:
# structure:
# Pymatgen structure.
# pseudos:
# List of `Pseudo` objects.
# scf_kppa:
# Defines the sampling used for the SCF run.
# nscf_nband:
# Number of bands included in the NSCF run.
# ecuteps:
# Cutoff energy [Ha] for the screening matrix.
# ecutsigx:
# Cutoff energy [Ha] for the exchange part of the self-energy.
# hilbert:
# `HilbertTransform` object with the parameters defining the frequency mesh
# used for the spectral function and the frequency mesh used for the polarizability
# accuracy:
# Accuracy of the calculation.
# spin_mode:
# Spin polarization.
# smearing:
# Smearing technique.
# charge:
# Electronic charge added to the unit cell.
# scf_algorithm:
# Algorithm used for solving of the SCF cycle.
# inclvkb:
# Treatment of the dipole matrix elements (see abinit variable).
# scr_nband:
# Number of bands used to compute the screening (default is nscf_nband)
# sigma_nband:
# Number of bands used to compute the self-energy (default is nscf_nband)
# workdir:
# Working directory.
# manager:
# `TaskManager` instance.
# extra_abivars
# Dictionary with extra variables passed to ABINIT.
# """
# # TODO: Cannot use istwfk != 1.
# if "istwfk" not in extra_abivars:
# extra_abivars["istwfk"] = "*1"
#
# scf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0)
#
# scf_strategy = ScfStrategy(structure, pseudos, scf_ksampling,
# accuracy=accuracy, spin_mode=spin_mode,
# smearing=smearing, charge=charge,
# scf_algorithm=None, **extra_abivars)
#
# nscf_ksampling = KSampling.automatic_density(structure, 1, chksymbreak=0)
#
# nscf_strategy = NscfStrategy(scf_strategy, nscf_ksampling, nscf_nband, **extra_abivars)
#
# if scr_nband is None: scr_nband = nscf_nband
# if sigma_nband is None: sigma_nband = nscf_nband
#
# screening = Screening(ecuteps, scr_nband, w_type="RPA", sc_mode="one_shot",
# hilbert=hilbert, ecutwfn=None, inclvkb=inclvkb)
#
# self_energy = SelfEnergy("gw", "one_shot", sigma_nband, ecutsigx, screening,
# hilbert=hilbert)
#
# scr_strategy = ScreeningStrategy(scf_strategy, nscf_strategy, screening,
# **extra_abivars)
#
# sigma_strategy = SelfEnergyStrategy(scf_strategy, nscf_strategy, scr_strategy, self_energy,
# **extra_abivars)
#
# return G0W0_Workflow(scf_strategy, nscf_strategy, scr_strategy, sigma_strategy,
# workdir=workdir, manager=manager)
def bse_with_mdf(structure, pseudos, scf_kppa, nscf_nband, nscf_ngkpt, nscf_shiftk,
ecuteps, bs_loband, bs_nband, soenergy, mdf_epsinf,
exc_type="TDA", bs_algo="haydock", accuracy="normal", spin_mode="polarized",
smearing="fermi_dirac:0.1 eV", charge=0.0, scf_algorithm=None, workdir=None, manager=None,
**extra_abivars):
"""
Returns a `Workflow` object that performs a GS + NSCF + Bethe-Salpeter calculation.
The self-energy corrections are approximated with the scissors operator. The screening
is modeled by the model dielectric function.
Args:
structure:
`Structure` object.
pseudos:
List of `Pseudo` objects.
scf_kppa:
Defines the sampling used for the SCF run.
nscf_nband:
Number of bands included in the NSCF run.
nscf_ngkpt:
Division of the k-mesh used for the NSCF and the BSE run.
nscf_shiftk:
Shifts used for the NSCF and the BSE run.
ecuteps:
Cutoff energy [Ha] for the screening matrix.
bs_loband:
Index of the first occupied band included in the e-h basis set
(ABINIT convention i.e. first band starts at 1).
Can be scalar or array of shape (nsppol,)
bs_nband:
Highest band index used for the construction of the e-h basis set.
soenergy:
Scissor energy in Hartree.
mdf_epsinf:
Value of the macroscopic dielectric function used in expression for the model dielectric function.
exc_type:
Approximation used for the BSE Hamiltonian (Tamm-Dancoff or coupling).
bs_algo:
Algorithm for the computation of the macroscopic dielectric function.
accuracy:
Accuracy of the calculation.
spin_mode:
Spin polarization.
smearing:
Smearing technique.
charge:
Electronic charge added to the unit cell.
scf_algorithm:
Algorithm used for solving the SCF cycle.
workdir:
Working directory.
manager:
`TaskManager` instance.
extra_abivars:
Dictionary with extra variables passed to ABINIT.
"""
# TODO: Cannot use istwfk != 1.
if "istwfk" not in extra_abivars:
extra_abivars["istwfk"] = "*1"
# Ground-state strategy.
scf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0)
scf_strategy = ScfStrategy(structure, pseudos, scf_ksampling,
accuracy=accuracy, spin_mode=spin_mode,
smearing=smearing, charge=charge, scf_algorithm=None, **extra_abivars)
# NSCF calculation with the randomly-shifted k-mesh.
nscf_ksampling = KSampling.monkhorst(nscf_ngkpt, shiftk=nscf_shiftk, chksymbreak=0)
nscf_strategy = NscfStrategy(scf_strategy, nscf_ksampling, nscf_nband, **extra_abivars)
# Strategy for the BSE calculation.
exc_ham = ExcHamiltonian(bs_loband, bs_nband, soenergy, coulomb_mode="model_df", ecuteps=ecuteps,
spin_mode=spin_mode, mdf_epsinf=mdf_epsinf, exc_type=exc_type, algo=bs_algo,
bs_freq_mesh=None, with_lf=True, zcut=None)
bse_strategy = MDFBSE_Strategy(scf_strategy, nscf_strategy, exc_ham, **extra_abivars)
return BSEMDF_Workflow(scf_strategy, nscf_strategy, bse_strategy, workdir=workdir, manager=manager)
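# Illustrative usage sketch (not part of the original module; all numerical
# values are hypothetical). GS + NSCF + BSE with a scissors shift of 0.02 Ha
# and a model dielectric function with eps_inf = 12.
def _example_bse_flow(structure, pseudos):
    return bse_with_mdf(structure, pseudos, scf_kppa=1000, nscf_nband=20,
                        nscf_ngkpt=[4, 4, 4], nscf_shiftk=[0.1, 0.2, 0.3],
                        ecuteps=2, bs_loband=1, bs_nband=8,
                        soenergy=0.02, mdf_epsinf=12.0)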
|
yanikou19/pymatgen
|
pymatgen/io/abinitio/calculations.py
|
Python
|
mit
| 20,426
|
[
"ABINIT",
"pymatgen"
] |
7ef538a19a18ccbb1e798ac0ac5263f88aba23c30183c5e10c6ddc450405bd19
|
#!/usr/bin/env python
import os
try:
__IPYTHON__
import sys
del sys.argv[1:]
except:
pass
import srwl_bl
import srwlib
import srwlpy
import math
import srwl_uti_smp
def set_optics(v=None):
el = []
pp = []
names = ['Lens', 'Lens_CRL', 'CRL', 'CRL_Zone_Plate', 'Zone_Plate', 'Zone_Plate_Fiber', 'Fiber', 'Fiber_Aperture', 'Aperture', 'Aperture_Obstacle', 'Obstacle', 'Obstacle_Mask', 'Mask', 'Mask_Sample', 'Sample', 'Sample_Planar', 'Planar', 'Planar_Circular_Cylinder', 'Circular_Cylinder', 'Circular_Cylinder_Circular_Cylinder2', 'Circular_Cylinder2', 'Circular_Cylinder2_Elliptical_Cylinder', 'Elliptical_Cylinder', 'Elliptical_Cylinder_Elliptical_Cylinder2', 'Elliptical_Cylinder2', 'Elliptical_Cylinder2_Toroid', 'Toroid', 'Toroid_Toroid2', 'Toroid2', 'Toroid2_Crystal', 'Crystal', 'Crystal_Crystal2', 'Crystal2', 'Crystal2_Grating', 'Grating', 'Grating_Watchpoint', 'Watchpoint']
for el_name in names:
if el_name == 'Lens':
# Lens: lens 20.0m
el.append(srwlib.SRWLOptL(
_Fx=v.op_Lens_Fx,
_Fy=v.op_Lens_Fy,
_x=v.op_Lens_x,
_y=v.op_Lens_y,
))
pp.append(v.op_Lens_pp)
elif el_name == 'Lens_CRL':
# Lens_CRL: drift 20.0m
el.append(srwlib.SRWLOptD(
_L=v.op_Lens_CRL_L,
))
pp.append(v.op_Lens_CRL_pp)
elif el_name == 'CRL':
# CRL: crl 21.0m
el.append(srwlib.srwl_opt_setup_CRL(
_foc_plane=v.op_CRL_foc_plane,
_delta=v.op_CRL_delta,
_atten_len=v.op_CRL_atten_len,
_shape=v.op_CRL_shape,
_apert_h=v.op_CRL_apert_h,
_apert_v=v.op_CRL_apert_v,
_r_min=v.op_CRL_r_min,
_n=v.op_CRL_n,
_wall_thick=v.op_CRL_wall_thick,
_xc=v.op_CRL_x,
_yc=v.op_CRL_y,
))
pp.append(v.op_CRL_pp)
elif el_name == 'CRL_Zone_Plate':
# CRL_Zone_Plate: drift 21.0m
el.append(srwlib.SRWLOptD(
_L=v.op_CRL_Zone_Plate_L,
))
pp.append(v.op_CRL_Zone_Plate_pp)
elif el_name == 'Zone_Plate':
# Zone_Plate: zonePlate 22.0m
el.append(srwlib.SRWLOptZP(
_nZones=v.op_Zone_Plate_nZones,
_rn=v.op_Zone_Plate_rn,
_thick=v.op_Zone_Plate_thick,
_delta1=v.op_Zone_Plate_delta1,
_atLen1=v.op_Zone_Plate_atLen1,
_delta2=v.op_Zone_Plate_delta2,
_atLen2=v.op_Zone_Plate_atLen2,
_x=v.op_Zone_Plate_x,
_y=v.op_Zone_Plate_y,
))
pp.append(v.op_Zone_Plate_pp)
elif el_name == 'Zone_Plate_Fiber':
# Zone_Plate_Fiber: drift 22.0m
el.append(srwlib.SRWLOptD(
_L=v.op_Zone_Plate_Fiber_L,
))
pp.append(v.op_Zone_Plate_Fiber_pp)
elif el_name == 'Fiber':
# Fiber: fiber 23.0m
el.append(srwlib.srwl_opt_setup_cyl_fiber(
_foc_plane=v.op_Fiber_foc_plane,
_delta_ext=v.op_Fiber_delta_ext,
_delta_core=v.op_Fiber_delta_core,
_atten_len_ext=v.op_Fiber_atten_len_ext,
_atten_len_core=v.op_Fiber_atten_len_core,
_diam_ext=v.op_Fiber_externalDiameter,
_diam_core=v.op_Fiber_diam_core,
_xc=v.op_Fiber_xc,
_yc=v.op_Fiber_yc,
))
pp.append(v.op_Fiber_pp)
elif el_name == 'Fiber_Aperture':
# Fiber_Aperture: drift 23.0m
el.append(srwlib.SRWLOptD(
_L=v.op_Fiber_Aperture_L,
))
pp.append(v.op_Fiber_Aperture_pp)
elif el_name == 'Aperture':
# Aperture: aperture 24.0m
el.append(srwlib.SRWLOptA(
_shape=v.op_Aperture_shape,
_ap_or_ob='a',
_Dx=v.op_Aperture_Dx,
_Dy=v.op_Aperture_Dy,
_x=v.op_Aperture_x,
_y=v.op_Aperture_y,
))
pp.append(v.op_Aperture_pp)
elif el_name == 'Aperture_Obstacle':
# Aperture_Obstacle: drift 24.0m
el.append(srwlib.SRWLOptD(
_L=v.op_Aperture_Obstacle_L,
))
pp.append(v.op_Aperture_Obstacle_pp)
elif el_name == 'Obstacle':
# Obstacle: obstacle 25.0m
el.append(srwlib.SRWLOptA(
_shape=v.op_Obstacle_shape,
_ap_or_ob='o',
_Dx=v.op_Obstacle_Dx,
_Dy=v.op_Obstacle_Dy,
_x=v.op_Obstacle_x,
_y=v.op_Obstacle_y,
))
pp.append(v.op_Obstacle_pp)
elif el_name == 'Obstacle_Mask':
# Obstacle_Mask: drift 25.0m
el.append(srwlib.SRWLOptD(
_L=v.op_Obstacle_Mask_L,
))
pp.append(v.op_Obstacle_Mask_pp)
elif el_name == 'Mask':
# Mask: mask 26.0m
el.append(srwlib.srwl_opt_setup_mask(
_delta=v.op_Mask_delta,
_atten_len=v.op_Mask_atten_len,
_thick=v.op_Mask_thick,
_grid_sh=v.op_Mask_grid_sh,
_grid_dx=v.op_Mask_grid_dx,
_grid_dy=v.op_Mask_grid_dy,
_pitch_x=v.op_Mask_pitch_x,
_pitch_y=v.op_Mask_pitch_y,
_grid_nx=v.op_Mask_grid_nx,
_grid_ny=v.op_Mask_grid_ny,
_mask_Nx=v.op_Mask_mask_Nx,
_mask_Ny=v.op_Mask_mask_Ny,
_grid_angle=v.op_Mask_gridTiltAngle,
_hx=v.op_Mask_hx,
_hy=v.op_Mask_hy,
_mask_x0=v.op_Mask_mask_x0,
_mask_y0=v.op_Mask_mask_y0,
))
pp.append(v.op_Mask_pp)
elif el_name == 'Mask_Sample':
# Mask_Sample: drift 26.0m
el.append(srwlib.SRWLOptD(
_L=v.op_Mask_Sample_L,
))
pp.append(v.op_Mask_Sample_pp)
elif el_name == 'Sample':
# Sample: sample 27.0m
el.append(srwl_uti_smp.srwl_opt_setup_transm_from_file(
file_path=v.op_Sample_file_path,
resolution=v.op_Sample_resolution,
thickness=v.op_Sample_thick,
delta=v.op_Sample_delta,
atten_len=v.op_Sample_atten_len,
xc=v.op_Sample_xc,
yc=v.op_Sample_yc,
area=None if not v.op_Sample_cropArea else (
v.op_Sample_areaXStart,
v.op_Sample_areaXEnd,
v.op_Sample_areaYStart,
v.op_Sample_areaYEnd,
),
extTr=v.op_Sample_extTransm,
rotate_angle=v.op_Sample_rotateAngle,
rotate_reshape=bool(int(v.op_Sample_rotateReshape)),
cutoff_background_noise=v.op_Sample_cutoffBackgroundNoise,
background_color=v.op_Sample_backgroundColor,
tile=None if not v.op_Sample_tileImage else (
v.op_Sample_tileRows,
v.op_Sample_tileColumns,
),
shift_x=v.op_Sample_shiftX,
shift_y=v.op_Sample_shiftY,
invert=bool(int(v.op_Sample_invert)),
is_save_images=True,
prefix='Sample_sample',
output_image_format=v.op_Sample_outputImageFormat,
))
pp.append(v.op_Sample_pp)
elif el_name == 'Sample_Planar':
# Sample_Planar: drift 27.0m
el.append(srwlib.SRWLOptD(
_L=v.op_Sample_Planar_L,
))
pp.append(v.op_Sample_Planar_pp)
elif el_name == 'Planar':
# Planar: mirror 28.0m
mirror_file = v.op_Planar_hfn
assert os.path.isfile(mirror_file), \
'Missing input file {}, required by Planar beamline element'.format(mirror_file)
el.append(srwlib.srwl_opt_setup_surf_height_1d(
srwlib.srwl_uti_read_data_cols(mirror_file, "\t", 0, 1),
_dim=v.op_Planar_dim,
_ang=abs(v.op_Planar_ang),
_amp_coef=v.op_Planar_amp_coef,
_size_x=v.op_Planar_size_x,
_size_y=v.op_Planar_size_y,
))
pp.append(v.op_Planar_pp)
elif el_name == 'Planar_Circular_Cylinder':
# Planar_Circular_Cylinder: drift 28.0m
el.append(srwlib.SRWLOptD(
_L=v.op_Planar_Circular_Cylinder_L,
))
pp.append(v.op_Planar_Circular_Cylinder_pp)
elif el_name == 'Circular_Cylinder':
# Circular_Cylinder: sphericalMirror 29.0m
el.append(srwlib.SRWLOptMirSph(
_r=v.op_Circular_Cylinder_r,
_size_tang=v.op_Circular_Cylinder_size_tang,
_size_sag=v.op_Circular_Cylinder_size_sag,
_nvx=v.op_Circular_Cylinder_nvx,
_nvy=v.op_Circular_Cylinder_nvy,
_nvz=v.op_Circular_Cylinder_nvz,
_tvx=v.op_Circular_Cylinder_tvx,
_tvy=v.op_Circular_Cylinder_tvy,
_x=v.op_Circular_Cylinder_x,
_y=v.op_Circular_Cylinder_y,
))
pp.append(v.op_Circular_Cylinder_pp)
elif el_name == 'Circular_Cylinder_Circular_Cylinder2':
# Circular_Cylinder_Circular_Cylinder2: drift 29.0m
el.append(srwlib.SRWLOptD(
_L=v.op_Circular_Cylinder_Circular_Cylinder2_L,
))
pp.append(v.op_Circular_Cylinder_Circular_Cylinder2_pp)
elif el_name == 'Circular_Cylinder2':
# Circular_Cylinder2: sphericalMirror 29.5m
el.append(srwlib.SRWLOptMirSph(
_r=v.op_Circular_Cylinder2_r,
_size_tang=v.op_Circular_Cylinder2_size_tang,
_size_sag=v.op_Circular_Cylinder2_size_sag,
_nvx=v.op_Circular_Cylinder2_nvx,
_nvy=v.op_Circular_Cylinder2_nvy,
_nvz=v.op_Circular_Cylinder2_nvz,
_tvx=v.op_Circular_Cylinder2_tvx,
_tvy=v.op_Circular_Cylinder2_tvy,
_x=v.op_Circular_Cylinder2_x,
_y=v.op_Circular_Cylinder2_y,
))
pp.append(v.op_Circular_Cylinder2_pp)
mirror_file = v.op_Circular_Cylinder2_hfn
assert os.path.isfile(mirror_file), \
'Missing input file {}, required by Circular_Cylinder2 beamline element'.format(mirror_file)
el.append(srwlib.srwl_opt_setup_surf_height_1d(
srwlib.srwl_uti_read_data_cols(mirror_file, "\t", 0, 1),
_dim=v.op_Circular_Cylinder2_dim,
_ang=abs(v.op_Circular_Cylinder2_ang),
_amp_coef=v.op_Circular_Cylinder2_amp_coef,
))
pp.append([0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0])
elif el_name == 'Circular_Cylinder2_Elliptical_Cylinder':
# Circular_Cylinder2_Elliptical_Cylinder: drift 29.5m
el.append(srwlib.SRWLOptD(
_L=v.op_Circular_Cylinder2_Elliptical_Cylinder_L,
))
pp.append(v.op_Circular_Cylinder2_Elliptical_Cylinder_pp)
elif el_name == 'Elliptical_Cylinder':
# Elliptical_Cylinder: ellipsoidMirror 30.0m
el.append(srwlib.SRWLOptMirEl(
_p=v.op_Elliptical_Cylinder_p,
_q=v.op_Elliptical_Cylinder_q,
_ang_graz=v.op_Elliptical_Cylinder_ang,
_size_tang=v.op_Elliptical_Cylinder_size_tang,
_size_sag=v.op_Elliptical_Cylinder_size_sag,
_nvx=v.op_Elliptical_Cylinder_nvx,
_nvy=v.op_Elliptical_Cylinder_nvy,
_nvz=v.op_Elliptical_Cylinder_nvz,
_tvx=v.op_Elliptical_Cylinder_tvx,
_tvy=v.op_Elliptical_Cylinder_tvy,
_x=v.op_Elliptical_Cylinder_x,
_y=v.op_Elliptical_Cylinder_y,
))
pp.append(v.op_Elliptical_Cylinder_pp)
elif el_name == 'Elliptical_Cylinder_Elliptical_Cylinder2':
# Elliptical_Cylinder_Elliptical_Cylinder2: drift 30.0m
el.append(srwlib.SRWLOptD(
_L=v.op_Elliptical_Cylinder_Elliptical_Cylinder2_L,
))
pp.append(v.op_Elliptical_Cylinder_Elliptical_Cylinder2_pp)
elif el_name == 'Elliptical_Cylinder2':
# Elliptical_Cylinder2: ellipsoidMirror 30.5m
el.append(srwlib.SRWLOptMirEl(
_p=v.op_Elliptical_Cylinder2_p,
_q=v.op_Elliptical_Cylinder2_q,
_ang_graz=v.op_Elliptical_Cylinder2_ang,
_size_tang=v.op_Elliptical_Cylinder2_size_tang,
_size_sag=v.op_Elliptical_Cylinder2_size_sag,
_nvx=v.op_Elliptical_Cylinder2_nvx,
_nvy=v.op_Elliptical_Cylinder2_nvy,
_nvz=v.op_Elliptical_Cylinder2_nvz,
_tvx=v.op_Elliptical_Cylinder2_tvx,
_tvy=v.op_Elliptical_Cylinder2_tvy,
_x=v.op_Elliptical_Cylinder2_x,
_y=v.op_Elliptical_Cylinder2_y,
))
pp.append(v.op_Elliptical_Cylinder2_pp)
mirror_file = v.op_Elliptical_Cylinder2_hfn
assert os.path.isfile(mirror_file), \
'Missing input file {}, required by Elliptical_Cylinder2 beamline element'.format(mirror_file)
el.append(srwlib.srwl_opt_setup_surf_height_2d(
srwlib.srwl_uti_read_data_cols(mirror_file, "\t"),
_dim=v.op_Elliptical_Cylinder2_dim,
_ang=abs(v.op_Elliptical_Cylinder2_ang),
_amp_coef=v.op_Elliptical_Cylinder2_amp_coef,
))
pp.append([0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0])
elif el_name == 'Elliptical_Cylinder2_Toroid':
# Elliptical_Cylinder2_Toroid: drift 30.5m
el.append(srwlib.SRWLOptD(
_L=v.op_Elliptical_Cylinder2_Toroid_L,
))
pp.append(v.op_Elliptical_Cylinder2_Toroid_pp)
elif el_name == 'Toroid':
# Toroid: toroidalMirror 31.0m
el.append(srwlib.SRWLOptMirTor(
_rt=v.op_Toroid_rt,
_rs=v.op_Toroid_rs,
_size_tang=v.op_Toroid_size_tang,
_size_sag=v.op_Toroid_size_sag,
_x=v.op_Toroid_horizontalPosition,
_y=v.op_Toroid_verticalPosition,
_ap_shape=v.op_Toroid_ap_shape,
_nvx=v.op_Toroid_nvx,
_nvy=v.op_Toroid_nvy,
_nvz=v.op_Toroid_nvz,
_tvx=v.op_Toroid_tvx,
_tvy=v.op_Toroid_tvy,
))
pp.append(v.op_Toroid_pp)
elif el_name == 'Toroid_Toroid2':
# Toroid_Toroid2: drift 31.0m
el.append(srwlib.SRWLOptD(
_L=v.op_Toroid_Toroid2_L,
))
pp.append(v.op_Toroid_Toroid2_pp)
elif el_name == 'Toroid2':
# Toroid2: toroidalMirror 31.5m
el.append(srwlib.SRWLOptMirTor(
_rt=v.op_Toroid2_rt,
_rs=v.op_Toroid2_rs,
_size_tang=v.op_Toroid2_size_tang,
_size_sag=v.op_Toroid2_size_sag,
_x=v.op_Toroid2_horizontalPosition,
_y=v.op_Toroid2_verticalPosition,
_ap_shape=v.op_Toroid2_ap_shape,
_nvx=v.op_Toroid2_nvx,
_nvy=v.op_Toroid2_nvy,
_nvz=v.op_Toroid2_nvz,
_tvx=v.op_Toroid2_tvx,
_tvy=v.op_Toroid2_tvy,
))
pp.append(v.op_Toroid2_pp)
mirror_file = v.op_Toroid2_hfn
assert os.path.isfile(mirror_file), \
'Missing input file {}, required by Toroid2 beamline element'.format(mirror_file)
el.append(srwlib.srwl_opt_setup_surf_height_1d(
srwlib.srwl_uti_read_data_cols(mirror_file, "\t", 0, 1),
_dim=v.op_Toroid2_dim,
_ang=abs(v.op_Toroid2_ang),
_amp_coef=v.op_Toroid2_amp_coef,
))
pp.append([0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0])
elif el_name == 'Toroid2_Crystal':
# Toroid2_Crystal: drift 31.5m
el.append(srwlib.SRWLOptD(
_L=v.op_Toroid2_Crystal_L,
))
pp.append(v.op_Toroid2_Crystal_pp)
elif el_name == 'Crystal':
# Crystal: crystal 32.0m
crystal = srwlib.SRWLOptCryst(
_d_sp=v.op_Crystal_d_sp,
_psi0r=v.op_Crystal_psi0r,
_psi0i=v.op_Crystal_psi0i,
_psi_hr=v.op_Crystal_psiHr,
_psi_hi=v.op_Crystal_psiHi,
_psi_hbr=v.op_Crystal_psiHBr,
_psi_hbi=v.op_Crystal_psiHBi,
_tc=v.op_Crystal_tc,
_ang_as=v.op_Crystal_ang_as,
_nvx=v.op_Crystal_nvx,
_nvy=v.op_Crystal_nvy,
_nvz=v.op_Crystal_nvz,
_tvx=v.op_Crystal_tvx,
_tvy=v.op_Crystal_tvy,
_uc=v.op_Crystal_uc,
_e_avg=v.op_Crystal_energy,
_ang_roll=v.op_Crystal_diffractionAngle
)
el.append(crystal)
pp.append(v.op_Crystal_pp)
elif el_name == 'Crystal_Crystal2':
# Crystal_Crystal2: drift 32.0m
el.append(srwlib.SRWLOptD(
_L=v.op_Crystal_Crystal2_L,
))
pp.append(v.op_Crystal_Crystal2_pp)
elif el_name == 'Crystal2':
# Crystal2: crystal 32.5m
crystal = srwlib.SRWLOptCryst(
_d_sp=v.op_Crystal2_d_sp,
_psi0r=v.op_Crystal2_psi0r,
_psi0i=v.op_Crystal2_psi0i,
_psi_hr=v.op_Crystal2_psiHr,
_psi_hi=v.op_Crystal2_psiHi,
_psi_hbr=v.op_Crystal2_psiHBr,
_psi_hbi=v.op_Crystal2_psiHBi,
_tc=v.op_Crystal2_tc,
_ang_as=v.op_Crystal2_ang_as,
_nvx=v.op_Crystal2_nvx,
_nvy=v.op_Crystal2_nvy,
_nvz=v.op_Crystal2_nvz,
_tvx=v.op_Crystal2_tvx,
_tvy=v.op_Crystal2_tvy,
_uc=v.op_Crystal2_uc,
_e_avg=v.op_Crystal2_energy,
_ang_roll=v.op_Crystal2_diffractionAngle
)
el.append(crystal)
pp.append(v.op_Crystal2_pp)
mirror_file = v.op_Crystal2_hfn
assert os.path.isfile(mirror_file), \
'Missing input file {}, required by Crystal2 beamline element'.format(mirror_file)
el.append(srwlib.srwl_opt_setup_surf_height_1d(
srwlib.srwl_uti_read_data_cols(mirror_file, "\t", 0, 1),
_dim=v.op_Crystal2_dim,
_ang=abs(v.op_Crystal2_ang),
_amp_coef=v.op_Crystal2_amp_coef,
))
pp.append([0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0])
elif el_name == 'Crystal2_Grating':
# Crystal2_Grating: drift 32.5m
el.append(srwlib.SRWLOptD(
_L=v.op_Crystal2_Grating_L,
))
pp.append(v.op_Crystal2_Grating_pp)
elif el_name == 'Grating':
# Grating: grating 33.0m
mirror = srwlib.SRWLOptMirPl(
_size_tang=v.op_Grating_size_tang,
_size_sag=v.op_Grating_size_sag,
_nvx=v.op_Grating_nvx,
_nvy=v.op_Grating_nvy,
_nvz=v.op_Grating_nvz,
_tvx=v.op_Grating_tvx,
_tvy=v.op_Grating_tvy,
_x=v.op_Grating_x,
_y=v.op_Grating_y,
)
opEl=srwlib.SRWLOptG(
_mirSub=mirror,
_m=v.op_Grating_m,
_grDen=v.op_Grating_grDen,
_grDen1=v.op_Grating_grDen1,
_grDen2=v.op_Grating_grDen2,
_grDen3=v.op_Grating_grDen3,
_grDen4=v.op_Grating_grDen4,
_e_avg=v.op_Grating_e_avg,
_cff=v.op_Grating_cff,
_ang_graz=v.op_Grating_ang,
_ang_roll=v.op_Grating_rollAngle,
)
el.append(opEl)
pp.append(v.op_Grating_pp)
elif el_name == 'Grating_Watchpoint':
# Grating_Watchpoint: drift 33.0m
el.append(srwlib.SRWLOptD(
_L=v.op_Grating_Watchpoint_L,
))
pp.append(v.op_Grating_Watchpoint_pp)
elif el_name == 'Watchpoint':
# Watchpoint: watch 34.0m
pass
pp.append(v.op_fin_pp)
return srwlib.SRWLOptC(el, pp)
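# Illustrative driver sketch (assumption: the standard Sirepo-generated SRW
# pattern; kept as comments because varParam is defined below):
#
#   def main():
#       v = srwl_bl.srwl_uti_parse_options(varParam, use_sys_argv=True)
#       op = set_optics(v)
#       srwl_bl.SRWLBeamline(_name=v.name).calc_all(v, op)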
varParam = [
['name', 's', 'All optical elements', 'simulation name'],
#---Data Folder
['fdir', 's', '', 'folder (directory) name for reading-in input and saving output data files'],
#---Electron Beam
['ebm_nm', 's', '', 'standard electron beam name'],
['ebm_nms', 's', '', 'standard electron beam name suffix: e.g. can be Day1, Final'],
['ebm_i', 'f', 0.5, 'electron beam current [A]'],
['ebm_e', 'f', 3.0, 'electron beam average energy [GeV]'],
['ebm_de', 'f', 0.0, 'electron beam average energy deviation [GeV]'],
['ebm_x', 'f', 0.0, 'electron beam initial average horizontal position [m]'],
['ebm_y', 'f', 0.0, 'electron beam initial average vertical position [m]'],
['ebm_xp', 'f', 0.0, 'electron beam initial average horizontal angle [rad]'],
['ebm_yp', 'f', 0.0, 'electron beam initial average vertical angle [rad]'],
['ebm_z', 'f', 0., 'electron beam initial average longitudinal position [m]'],
['ebm_dr', 'f', -1.54, 'electron beam longitudinal drift [m] to be performed before a required calculation'],
['ebm_ens', 'f', 0.00089, 'electron beam relative energy spread'],
['ebm_emx', 'f', 5.500000000000001e-10, 'electron beam horizontal emittance [m]'],
['ebm_emy', 'f', 8e-12, 'electron beam vertical emittance [m]'],
# Definition of the beam through Twiss:
['ebm_betax', 'f', 2.02, 'horizontal beta-function [m]'],
['ebm_betay', 'f', 1.06, 'vertical beta-function [m]'],
['ebm_alphax', 'f', 0.0, 'horizontal alpha-function [rad]'],
['ebm_alphay', 'f', 0.0, 'vertical alpha-function [rad]'],
['ebm_etax', 'f', 0.0, 'horizontal dispersion function [m]'],
['ebm_etay', 'f', 0.0, 'vertical dispersion function [m]'],
['ebm_etaxp', 'f', 0.0, 'horizontal dispersion function derivative [rad]'],
['ebm_etayp', 'f', 0.0, 'vertical dispersion function derivative [rad]'],
#---Undulator
['und_bx', 'f', 0.0, 'undulator horizontal peak magnetic field [T]'],
['und_by', 'f', 0.88770981, 'undulator vertical peak magnetic field [T]'],
['und_phx', 'f', 0.0, 'initial phase of the horizontal magnetic field [rad]'],
['und_phy', 'f', 0.0, 'initial phase of the vertical magnetic field [rad]'],
['und_b2e', '', '', 'estimate undulator fundamental photon energy (in [eV]) for the amplitude of sinusoidal magnetic field defined by und_b or und_bx, und_by', 'store_true'],
['und_e2b', '', '', 'estimate undulator field amplitude (in [T]) for the photon energy defined by w_e', 'store_true'],
['und_per', 'f', 0.02, 'undulator period [m]'],
['und_len', 'f', 3.0, 'undulator length [m]'],
['und_zc', 'f', 0.0, 'undulator center longitudinal position [m]'],
['und_sx', 'i', 1, 'undulator horizontal magnetic field symmetry vs longitudinal position'],
['und_sy', 'i', -1, 'undulator vertical magnetic field symmetry vs longitudinal position'],
['und_g', 'f', 6.72, 'undulator gap [mm] (assumes availability of magnetic measurement or simulation data)'],
['und_ph', 'f', 0.0, 'shift of magnet arrays [mm] for which the field should be set up'],
['und_mdir', 's', '', 'name of magnetic measurements sub-folder'],
['und_mfs', 's', '', 'name of magnetic measurements for different gaps summary file'],
#---Calculation Types
# Electron Trajectory
['tr', '', '', 'calculate electron trajectory', 'store_true'],
['tr_cti', 'f', 0.0, 'initial time moment (c*t) for electron trajectory calculation [m]'],
['tr_ctf', 'f', 0.0, 'final time moment (c*t) for electron trajectory calculation [m]'],
['tr_np', 'f', 10000, 'number of points for trajectory calculation'],
['tr_mag', 'i', 1, 'magnetic field to be used for trajectory calculation: 1- approximate, 2- accurate'],
['tr_fn', 's', 'res_trj.dat', 'file name for saving calculated trajectory data'],
['tr_pl', 's', '', 'plot the resulting trajectory in graph(s): ""- dont plot, otherwise the string should list the trajectory components to plot'],
#Single-Electron Spectrum vs Photon Energy
['ss', '', '', 'calculate single-e spectrum vs photon energy', 'store_true'],
['ss_ei', 'f', 100.0, 'initial photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ef', 'f', 20000.0, 'final photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ne', 'i', 10000, 'number of points vs photon energy for single-e spectrum vs photon energy calculation'],
['ss_x', 'f', 0.0, 'horizontal position [m] for single-e spectrum vs photon energy calculation'],
['ss_y', 'f', 0.0, 'vertical position [m] for single-e spectrum vs photon energy calculation'],
['ss_meth', 'i', 1, 'method to use for single-e spectrum vs photon energy calculation: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['ss_prec', 'f', 0.01, 'relative precision for single-e spectrum vs photon energy calculation (nominal value is 0.01)'],
['ss_pol', 'i', 6, 'polarization component to extract after spectrum vs photon energy calculation: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['ss_mag', 'i', 1, 'magnetic field to be used for single-e spectrum vs photon energy calculation: 1- approximate, 2- accurate'],
['ss_ft', 's', 'f', 'presentation/domain: "f"- frequency (photon energy), "t"- time'],
['ss_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
['ss_fn', 's', 'res_spec_se.dat', 'file name for saving calculated single-e spectrum vs photon energy'],
['ss_pl', 's', '', 'plot the resulting single-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#Multi-Electron Spectrum vs Photon Energy (taking into account e-beam emittance, energy spread and collection aperture size)
['sm', '', '', 'calculate multi-e spectrum vs photon energy', 'store_true'],
['sm_ei', 'f', 100.0, 'initial photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ef', 'f', 20000.0, 'final photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ne', 'i', 10000, 'number of points vs photon energy for multi-e spectrum vs photon energy calculation'],
['sm_x', 'f', 0.0, 'horizontal center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_rx', 'f', 0.001, 'range of horizontal position / horizontal aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_nx', 'i', 1, 'number of points vs horizontal position for multi-e spectrum vs photon energy calculation'],
['sm_y', 'f', 0.0, 'vertical center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_ry', 'f', 0.001, 'range of vertical position / vertical aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_ny', 'i', 1, 'number of points vs vertical position for multi-e spectrum vs photon energy calculation'],
['sm_mag', 'i', 1, 'magnetic field to be used for calculation of multi-e spectrum or intensity distribution: 1- approximate, 2- accurate'],
['sm_hi', 'i', 1, 'initial UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
['sm_hf', 'i', 15, 'final UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
['sm_prl', 'f', 1.0, 'longitudinal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_pra', 'f', 1.0, 'azimuthal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_meth', 'i', -1, 'method to use for spectrum vs photon energy calculation in case of arbitrary input magnetic field: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler", -1- dont use this accurate integration method (rather use approximate if possible)'],
['sm_prec', 'f', 0.01, 'relative precision for spectrum vs photon energy calculation in case of arbitrary input magnetic field (nominal value is 0.01)'],
['sm_nm', 'i', 1, 'number of macro-electrons for calculation of spectrum in case of arbitrary input magnetic field'],
['sm_na', 'i', 5, 'number of macro-electrons to average on each node at parallel (MPI-based) calculation of spectrum in case of arbitrary input magnetic field'],
['sm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons) for intermediate intensity at calculation of multi-electron spectrum in case of arbitrary input magnetic field'],
['sm_type', 'i', 1, 'calculate flux (=1) or flux per unit surface (=2)'],
['sm_pol', 'i', 6, 'polarization component to extract after calculation of multi-e flux or intensity: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['sm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
['sm_fn', 's', 'res_spec_me.dat', 'file name for saving calculated multi-e spectrum vs photon energy'],
['sm_pl', 's', '', 'plot the resulting multi-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#to add options for the multi-e calculation from "accurate" magnetic field
#Power Density Distribution vs horizontal and vertical position
['pw', '', '', 'calculate SR power density distribution', 'store_true'],
['pw_x', 'f', 0.0, 'central horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_rx', 'f', 0.015, 'range of horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_nx', 'i', 100, 'number of points vs horizontal position for calculation of power density distribution'],
['pw_y', 'f', 0.0, 'central vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ry', 'f', 0.015, 'range of vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ny', 'i', 100, 'number of points vs vertical position for calculation of power density distribution'],
['pw_pr', 'f', 1.0, 'precision factor for calculation of power density distribution'],
['pw_meth', 'i', 1, 'power density computation method (1- "near field", 2- "far field")'],
['pw_zst', 'f', 0., 'initial longitudinal position along electron trajectory of power density distribution (effective if pw_zst < pw_zfi)'],
['pw_zfi', 'f', 0., 'final longitudinal position along electron trajectory of power density distribution (effective if pw_zst < pw_zfi)'],
['pw_mag', 'i', 1, 'magnetic field to be used for power density calculation: 1- approximate, 2- accurate'],
['pw_fn', 's', 'res_pow.dat', 'file name for saving calculated power density distribution'],
['pw_pl', 's', '', 'plot the resulting power density distribution in a graph: ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
#Single-Electron Intensity distribution vs horizontal and vertical position
['si', '', '', 'calculate single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position', 'store_true'],
#Single-Electron Wavefront Propagation
['ws', '', '', 'calculate single-electron (/ fully coherent) wavefront propagation', 'store_true'],
#Multi-Electron (partially-coherent) Wavefront Propagation
['wm', '', '', 'calculate multi-electron (/ partially coherent) wavefront propagation', 'store_true'],
['w_e', 'f', 9000.0, 'photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ef', 'f', -1.0, 'final photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ne', 'i', 1, 'number of points vs photon energy for calculation of intensity distribution'],
['w_x', 'f', 0.0, 'central horizontal position [m] for calculation of intensity distribution'],
['w_rx', 'f', 0.0004, 'range of horizontal position [m] for calculation of intensity distribution'],
['w_nx', 'i', 100, 'number of points vs horizontal position for calculation of intensity distribution'],
['w_y', 'f', 0.0, 'central vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ry', 'f', 0.0006, 'range of vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ny', 'i', 100, 'number of points vs vertical position for calculation of intensity distribution'],
['w_smpf', 'f', 1.0, 'sampling factor for calculation of intensity distribution vs horizontal and vertical position'],
['w_meth', 'i', 1, 'method to use for calculation of intensity distribution vs horizontal and vertical position: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['w_prec', 'f', 0.01, 'relative precision for calculation of intensity distribution vs horizontal and vertical position'],
['w_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
['si_pol', 'i', 6, 'polarization component to extract after calculation of intensity distribution: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['si_type', 'i', 0, 'type of a characteristic to be extracted after calculation of intensity distribution: 0- Single-Electron Intensity, 1- Multi-Electron Intensity, 2- Single-Electron Flux, 3- Multi-Electron Flux, 4- Single-Electron Radiation Phase, 5- Re(E): Real part of Single-Electron Electric Field, 6- Im(E): Imaginary part of Single-Electron Electric Field, 7- Single-Electron Intensity, integrated over Time or Photon Energy'],
['w_mag', 'i', 1, 'magnetic field to be used for calculation of intensity distribution vs horizontal and vertical position: 1- approximate, 2- accurate'],
['si_fn', 's', 'res_int_se.dat', 'file name for saving calculated single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position'],
['si_pl', 's', '', 'plot the input intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['ws_fni', 's', 'res_int_pr_se.dat', 'file name for saving propagated single-e intensity distribution vs horizontal and vertical position'],
['ws_pl', 's', '', 'plot the resulting intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['wm_nm', 'i', 100000, 'number of macro-electrons (coherent wavefronts) for calculation of multi-electron wavefront propagation'],
['wm_na', 'i', 5, 'number of macro-electrons (coherent wavefronts) to average on each node for parallel (MPI-based) calculation of multi-electron wavefront propagation'],
['wm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons / coherent wavefronts) for intermediate intensity at multi-electron wavefront propagation calculation'],
['wm_ch', 'i', 0, 'type of a characteristic to be extracted after calculation of multi-electron wavefront propagation: 0- intensity (s0); 1- four Stokes components; 2- mutual intensity cut vs x; 3- mutual intensity cut vs y; 40- intensity(s0), mutual intensity cuts and degree of coherence vs X & Y'],
['wm_ap', 'i', 0, 'switch specifying representation of the resulting Stokes parameters: coordinate (0) or angular (1)'],
['wm_x0', 'f', 0.0, 'horizontal center position for mutual intensity cut calculation'],
['wm_y0', 'f', 0.0, 'vertical center position for mutual intensity cut calculation'],
['wm_ei', 'i', 0, 'integration over photon energy is required (1) or not (0); if the integration is required, the limits are taken from w_e, w_ef'],
['wm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
['wm_am', 'i', 0, 'multi-electron integration approximation method: 0- no approximation (use the standard 5D integration method), 1- integrate numerically only over e-beam energy spread and use convolution to treat transverse emittance'],
['wm_fni', 's', 'res_int_pr_me.dat', 'file name for saving propagated multi-e intensity distribution vs horizontal and vertical position'],
['wm_fbk', '', '', 'create backup file(s) with propagated multi-e intensity distribution vs horizontal and vertical position and other radiation characteristics', 'store_true'],
#to add options
['op_r', 'f', 20.0, 'longitudinal position of the first optical element [m]'],
# Former appParam:
['rs_type', 's', 'u', 'source type: (u) idealized undulator, (t) tabulated undulator, (m) multipole, (g) gaussian beam'],
#---Beamline optics:
# Lens: lens
['op_Lens_Fx', 'f', 3.0, 'horizontalFocalLength'],
['op_Lens_Fy', 'f', 1e+23, 'verticalFocalLength'],
['op_Lens_x', 'f', 0.0, 'horizontalOffset'],
['op_Lens_y', 'f', 0.0, 'verticalOffset'],
# Lens_CRL: drift
['op_Lens_CRL_L', 'f', 1.0, 'length'],
# CRL: crl
['op_CRL_foc_plane', 'f', 2, 'focalPlane'],
['op_CRL_delta', 'f', 4.20757e-06, 'refractiveIndex'],
['op_CRL_atten_len', 'f', 0.007313, 'attenuationLength'],
['op_CRL_shape', 'f', 1, 'shape'],
['op_CRL_apert_h', 'f', 0.001, 'horizontalApertureSize'],
['op_CRL_apert_v', 'f', 0.001, 'verticalApertureSize'],
['op_CRL_r_min', 'f', 0.0015, 'tipRadius'],
['op_CRL_wall_thick', 'f', 8e-05, 'tipWallThickness'],
['op_CRL_x', 'f', 0.0, 'horizontalOffset'],
['op_CRL_y', 'f', 0.0, 'verticalOffset'],
['op_CRL_n', 'i', 3, 'numberOfLenses'],
# CRL_Zone_Plate: drift
['op_CRL_Zone_Plate_L', 'f', 1.0, 'length'],
# Zone_Plate: zonePlate
['op_Zone_Plate_rn', 'f', 0.0001, 'outerRadius'],
['op_Zone_Plate_thick', 'f', 1e-05, 'thickness'],
['op_Zone_Plate_delta1', 'f', 1e-06, 'mainRefractiveIndex'],
['op_Zone_Plate_atLen1', 'f', 0.1, 'mainAttenuationLength'],
['op_Zone_Plate_delta2', 'f', 0.0, 'complementaryRefractiveIndex'],
['op_Zone_Plate_atLen2', 'f', 1e-06, 'complementaryAttenuationLength'],
['op_Zone_Plate_x', 'f', 0.0, 'horizontalOffset'],
['op_Zone_Plate_y', 'f', 0.0, 'verticalOffset'],
['op_Zone_Plate_nZones', 'i', 100, 'numberOfZones'],
# Zone_Plate_Fiber: drift
['op_Zone_Plate_Fiber_L', 'f', 1.0, 'length'],
# Fiber: fiber
['op_Fiber_foc_plane', 'f', 1, 'focalPlane'],
['op_Fiber_delta_ext', 'f', 4.207568e-06, 'externalRefractiveIndex'],
['op_Fiber_delta_core', 'f', 4.207568e-06, 'coreRefractiveIndex'],
['op_Fiber_atten_len_ext', 'f', 0.007313, 'externalAttenuationLength'],
['op_Fiber_atten_len_core', 'f', 0.007313, 'coreAttenuationLength'],
['op_Fiber_externalDiameter', 'f', 0.0001, 'externalDiameter'],
['op_Fiber_diam_core', 'f', 1e-05, 'coreDiameter'],
['op_Fiber_xc', 'f', 0.0, 'horizontalCenterPosition'],
['op_Fiber_yc', 'f', 0.0, 'verticalCenterPosition'],
# Fiber_Aperture: drift
['op_Fiber_Aperture_L', 'f', 1.0, 'length'],
# Aperture: aperture
['op_Aperture_shape', 's', 'r', 'shape'],
['op_Aperture_Dx', 'f', 0.001, 'horizontalSize'],
['op_Aperture_Dy', 'f', 0.001, 'verticalSize'],
['op_Aperture_x', 'f', 0.0, 'horizontalOffset'],
['op_Aperture_y', 'f', 0.0, 'verticalOffset'],
# Aperture_Obstacle: drift
['op_Aperture_Obstacle_L', 'f', 1.0, 'length'],
# Obstacle: obstacle
['op_Obstacle_shape', 's', 'r', 'shape'],
['op_Obstacle_Dx', 'f', 0.0005, 'horizontalSize'],
['op_Obstacle_Dy', 'f', 0.0005, 'verticalSize'],
['op_Obstacle_x', 'f', 0.0, 'horizontalOffset'],
['op_Obstacle_y', 'f', 0.0, 'verticalOffset'],
# Obstacle_Mask: drift
['op_Obstacle_Mask_L', 'f', 1.0, 'length'],
# Mask: mask
['op_Mask_delta', 'f', 1.0, 'refractiveIndex'],
['op_Mask_atten_len', 'f', 1.0, 'attenuationLength'],
['op_Mask_thick', 'f', 1.0, 'maskThickness'],
['op_Mask_grid_sh', 'f', 0, 'gridShape'],
['op_Mask_grid_dx', 'f', 5e-06, 'horizontalGridDimension'],
['op_Mask_grid_dy', 'f', 5e-06, 'verticalGridDimension'],
['op_Mask_pitch_x', 'f', 2e-05, 'horizontalGridPitch'],
['op_Mask_pitch_y', 'f', 2e-05, 'verticalGridPitch'],
['op_Mask_gridTiltAngle', 'f', 0.4363323129985824, 'gridTiltAngle'],
['op_Mask_hx', 'f', 7.319999999999999e-07, 'horizontalSamplingInterval'],
['op_Mask_hy', 'f', 7.319999999999999e-07, 'verticalSamplingInterval'],
['op_Mask_mask_x0', 'f', 0.0, 'horizontalMaskCoordinate'],
['op_Mask_mask_y0', 'f', 0.0, 'verticalMaskCoordinate'],
['op_Mask_mask_Nx', 'i', 1024, 'horizontalPixelsNumber'],
['op_Mask_mask_Ny', 'i', 1024, 'verticalPixelsNumber'],
['op_Mask_grid_nx', 'i', 21, 'horizontalGridsNumber'],
['op_Mask_grid_ny', 'i', 21, 'verticalGridsNumber'],
# Mask_Sample: drift
['op_Mask_Sample_L', 'f', 1.0, 'length'],
# Sample: sample
['op_Sample_file_path', 's', 'sample.tif', 'imageFile'],
['op_Sample_outputImageFormat', 's', 'tif', 'outputImageFormat'],
['op_Sample_position', 'f', 27.0, 'position'],
['op_Sample_resolution', 'f', 2.480469e-09, 'resolution'],
['op_Sample_thick', 'f', 1e-05, 'thickness'],
['op_Sample_delta', 'f', 3.738856e-05, 'refractiveIndex'],
['op_Sample_atten_len', 'f', 3.38902e-06, 'attenuationLength'],
['op_Sample_xc', 'f', 0.0, 'horizontalCenterCoordinate'],
['op_Sample_yc', 'f', 0.0, 'verticalCenterCoordinate'],
['op_Sample_rotateAngle', 'f', 0.0, 'rotateAngle'],
['op_Sample_cutoffBackgroundNoise', 'f', 0.5, 'cutoffBackgroundNoise'],
['op_Sample_rx', 'f', 1e-05, 'rx'],
['op_Sample_ry', 'f', 1e-05, 'ry'],
['op_Sample_dens', 'f', 20000000.0, 'dens'],
['op_Sample_r_min_bw_obj', 'f', 1e-09, 'r_min_bw_obj'],
['op_Sample_edge_frac', 'f', 0.02, 'edge_frac'],
['op_Sample_obj_size_min', 'f', 1e-07, 'obj_size_min'],
['op_Sample_ang_min', 'f', 0.0, 'ang_min'],
['op_Sample_obj_size_max', 'f', 1.2e-07, 'obj_size_max'],
['op_Sample_ang_max', 'f', 45.0, 'ang_max'],
['op_Sample_obj_size_ratio', 'f', 0.5, 'obj_size_ratio'],
['op_Sample_cropArea', 'i', 1, 'cropArea'],
['op_Sample_extTransm', 'i', 1, 'transmissionImage'],
['op_Sample_areaXStart', 'i', 0, 'areaXStart'],
['op_Sample_areaXEnd', 'i', 1280, 'areaXEnd'],
['op_Sample_areaYStart', 'i', 0, 'areaYStart'],
['op_Sample_areaYEnd', 'i', 834, 'areaYEnd'],
['op_Sample_rotateReshape', 'i', 0, 'rotateReshape'],
['op_Sample_backgroundColor', 'i', 0, 'backgroundColor'],
['op_Sample_tileImage', 'i', 0, 'tileImage'],
['op_Sample_tileRows', 'i', 1, 'tileRows'],
['op_Sample_tileColumns', 'i', 1, 'tileColumns'],
['op_Sample_shiftX', 'i', 0, 'shiftX'],
['op_Sample_shiftY', 'i', 0, 'shiftY'],
['op_Sample_invert', 'i', 0, 'invert'],
['op_Sample_nx', 'i', 1001, 'nx'],
['op_Sample_ny', 'i', 1001, 'ny'],
['op_Sample_obj_type', 'i', 1, 'obj_type'],
['op_Sample_size_dist', 'i', 1, 'size_dist'],
['op_Sample_ang_dist', 'i', 1, 'ang_dist'],
['op_Sample_rand_alg', 'i', 1, 'rand_alg'],
['op_Sample_poly_sides', 'i', 6, 'poly_sides'],
['op_Sample_rand_shapes', 'i', [1, 2, 3, 4], 'rand_shapes'],
['op_Sample_rand_obj_size', 'b', False, 'rand_obj_size'],
['op_Sample_rand_poly_side', 'b', False, 'rand_poly_side'],
# Sample_Planar: drift
['op_Sample_Planar_L', 'f', 1.0, 'length'],
# Planar: mirror
['op_Planar_hfn', 's', 'mirror_1d.dat', 'heightProfileFile'],
['op_Planar_dim', 's', 'x', 'orientation'],
['op_Planar_ang', 'f', 0.0031415926, 'grazingAngle'],
['op_Planar_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_Planar_size_x', 'f', 0.001, 'horizontalTransverseSize'],
['op_Planar_size_y', 'f', 0.001, 'verticalTransverseSize'],
# Planar_Circular_Cylinder: drift
['op_Planar_Circular_Cylinder_L', 'f', 1.0, 'length'],
# Circular_Cylinder: sphericalMirror
['op_Circular_Cylinder_hfn', 's', '', 'heightProfileFile'],
['op_Circular_Cylinder_dim', 's', 'x', 'orientation'],
['op_Circular_Cylinder_r', 'f', 1049.0, 'radius'],
['op_Circular_Cylinder_size_tang', 'f', 0.3, 'tangentialSize'],
['op_Circular_Cylinder_size_sag', 'f', 0.11, 'sagittalSize'],
['op_Circular_Cylinder_ang', 'f', 0.0031415926, 'grazingAngle'],
['op_Circular_Cylinder_nvx', 'f', 0.9999950652020265, 'normalVectorX'],
['op_Circular_Cylinder_nvy', 'f', 0.0, 'normalVectorY'],
['op_Circular_Cylinder_nvz', 'f', -0.003141587432290035, 'normalVectorZ'],
['op_Circular_Cylinder_tvx', 'f', 0.003141587432290035, 'tangentialVectorX'],
['op_Circular_Cylinder_tvy', 'f', 0.0, 'tangentialVectorY'],
['op_Circular_Cylinder_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_Circular_Cylinder_x', 'f', 0.0, 'horizontalOffset'],
['op_Circular_Cylinder_y', 'f', 0.0, 'verticalOffset'],
# Circular_Cylinder_Circular_Cylinder2: drift
['op_Circular_Cylinder_Circular_Cylinder2_L', 'f', 0.5, 'length'],
# Circular_Cylinder2: sphericalMirror
['op_Circular_Cylinder2_hfn', 's', 'mirror_1d.dat', 'heightProfileFile'],
['op_Circular_Cylinder2_dim', 's', 'x', 'orientation'],
['op_Circular_Cylinder2_r', 'f', 1049.0, 'radius'],
['op_Circular_Cylinder2_size_tang', 'f', 0.3, 'tangentialSize'],
['op_Circular_Cylinder2_size_sag', 'f', 0.11, 'sagittalSize'],
['op_Circular_Cylinder2_ang', 'f', 0.0031415926, 'grazingAngle'],
['op_Circular_Cylinder2_nvx', 'f', 0.9999950652020265, 'normalVectorX'],
['op_Circular_Cylinder2_nvy', 'f', 0.0, 'normalVectorY'],
['op_Circular_Cylinder2_nvz', 'f', -0.003141587432290035, 'normalVectorZ'],
['op_Circular_Cylinder2_tvx', 'f', 0.003141587432290035, 'tangentialVectorX'],
['op_Circular_Cylinder2_tvy', 'f', 0.0, 'tangentialVectorY'],
['op_Circular_Cylinder2_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_Circular_Cylinder2_x', 'f', 0.0, 'horizontalOffset'],
['op_Circular_Cylinder2_y', 'f', 0.0, 'verticalOffset'],
# Circular_Cylinder2_Elliptical_Cylinder: drift
['op_Circular_Cylinder2_Elliptical_Cylinder_L', 'f', 0.5, 'length'],
# Elliptical_Cylinder: ellipsoidMirror
['op_Elliptical_Cylinder_hfn', 's', '', 'heightProfileFile'],
['op_Elliptical_Cylinder_dim', 's', 'x', 'orientation'],
['op_Elliptical_Cylinder_p', 'f', 30.0, 'firstFocusLength'],
['op_Elliptical_Cylinder_q', 'f', 1.7, 'focalLength'],
['op_Elliptical_Cylinder_ang', 'f', 0.0036, 'grazingAngle'],
['op_Elliptical_Cylinder_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_Elliptical_Cylinder_size_tang', 'f', 0.5, 'tangentialSize'],
['op_Elliptical_Cylinder_size_sag', 'f', 0.01, 'sagittalSize'],
['op_Elliptical_Cylinder_nvx', 'f', 0.9999935200069984, 'normalVectorX'],
['op_Elliptical_Cylinder_nvy', 'f', 0.0, 'normalVectorY'],
['op_Elliptical_Cylinder_nvz', 'f', -0.0035999922240050387, 'normalVectorZ'],
['op_Elliptical_Cylinder_tvx', 'f', -0.0035999922240050387, 'tangentialVectorX'],
['op_Elliptical_Cylinder_tvy', 'f', 0.0, 'tangentialVectorY'],
['op_Elliptical_Cylinder_x', 'f', 0.0, 'horizontalOffset'],
['op_Elliptical_Cylinder_y', 'f', 0.0, 'verticalOffset'],
# Elliptical_Cylinder_Elliptical_Cylinder2: drift
['op_Elliptical_Cylinder_Elliptical_Cylinder2_L', 'f', 0.5, 'length'],
# Elliptical_Cylinder2: ellipsoidMirror
['op_Elliptical_Cylinder2_hfn', 's', 'mirror_2d.dat', 'heightProfileFile'],
['op_Elliptical_Cylinder2_dim', 's', 'x', 'orientation'],
['op_Elliptical_Cylinder2_p', 'f', 35.0, 'firstFocusLength'],
['op_Elliptical_Cylinder2_q', 'f', 1.7, 'focalLength'],
['op_Elliptical_Cylinder2_ang', 'f', 0.0036, 'grazingAngle'],
['op_Elliptical_Cylinder2_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_Elliptical_Cylinder2_size_tang', 'f', 0.5, 'tangentialSize'],
['op_Elliptical_Cylinder2_size_sag', 'f', 0.01, 'sagittalSize'],
['op_Elliptical_Cylinder2_nvx', 'f', 0.9999935200069984, 'normalVectorX'],
['op_Elliptical_Cylinder2_nvy', 'f', 0.0, 'normalVectorY'],
['op_Elliptical_Cylinder2_nvz', 'f', -0.0035999922240050387, 'normalVectorZ'],
['op_Elliptical_Cylinder2_tvx', 'f', -0.0035999922240050387, 'tangentialVectorX'],
['op_Elliptical_Cylinder2_tvy', 'f', 0.0, 'tangentialVectorY'],
['op_Elliptical_Cylinder2_x', 'f', 0.0, 'horizontalOffset'],
['op_Elliptical_Cylinder2_y', 'f', 0.0, 'verticalOffset'],
# Elliptical_Cylinder2_Toroid: drift
['op_Elliptical_Cylinder2_Toroid_L', 'f', 0.5, 'length'],
# Toroid: toroidalMirror
['op_Toroid_hfn', 's', '', 'heightProfileFile'],
['op_Toroid_dim', 's', 'x', 'orientation'],
['op_Toroid_ap_shape', 's', 'r', 'apertureShape'],
['op_Toroid_rt', 'f', 7592.12, 'tangentialRadius'],
['op_Toroid_rs', 'f', 0.186, 'sagittalRadius'],
['op_Toroid_size_tang', 'f', 0.96, 'tangentialSize'],
['op_Toroid_size_sag', 'f', 0.08, 'sagittalSize'],
['op_Toroid_ang', 'f', 0.007, 'grazingAngle'],
['op_Toroid_horizontalPosition', 'f', 0.0, 'horizontalPosition'],
['op_Toroid_verticalPosition', 'f', 0.0, 'verticalPosition'],
['op_Toroid_nvx', 'f', 0.9999755001000415, 'normalVectorX'],
['op_Toroid_nvy', 'f', 0.0, 'normalVectorY'],
['op_Toroid_nvz', 'f', -0.006999942833473391, 'normalVectorZ'],
['op_Toroid_tvx', 'f', 0.006999942833473391, 'tangentialVectorX'],
['op_Toroid_tvy', 'f', 0.0, 'tangentialVectorY'],
['op_Toroid_amp_coef', 'f', 1.0, 'heightAmplification'],
# Toroid_Toroid2: drift
['op_Toroid_Toroid2_L', 'f', 0.5, 'length'],
# Toroid2: toroidalMirror
['op_Toroid2_hfn', 's', 'mirror2_1d.dat', 'heightProfileFile'],
['op_Toroid2_dim', 's', 'x', 'orientation'],
['op_Toroid2_ap_shape', 's', 'r', 'apertureShape'],
['op_Toroid2_rt', 'f', 7592.12, 'tangentialRadius'],
['op_Toroid2_rs', 'f', 0.186, 'sagittalRadius'],
['op_Toroid2_size_tang', 'f', 0.96, 'tangentialSize'],
['op_Toroid2_size_sag', 'f', 0.08, 'sagittalSize'],
['op_Toroid2_ang', 'f', 0.007, 'grazingAngle'],
['op_Toroid2_horizontalPosition', 'f', 0.0, 'horizontalPosition'],
['op_Toroid2_verticalPosition', 'f', 0.0, 'verticalPosition'],
['op_Toroid2_nvx', 'f', 0.9999755001000415, 'normalVectorX'],
['op_Toroid2_nvy', 'f', 0.0, 'normalVectorY'],
['op_Toroid2_nvz', 'f', -0.006999942833473391, 'normalVectorZ'],
['op_Toroid2_tvx', 'f', 0.006999942833473391, 'tangentialVectorX'],
['op_Toroid2_tvy', 'f', 0.0, 'tangentialVectorY'],
['op_Toroid2_amp_coef', 'f', 1.0, 'heightAmplification'],
# Toroid2_Crystal: drift
['op_Toroid2_Crystal_L', 'f', 0.5, 'length'],
# Crystal: crystal
['op_Crystal_hfn', 's', '', 'heightProfileFile'],
['op_Crystal_dim', 's', 'x', 'orientation'],
['op_Crystal_d_sp', 'f', 3.1355713563754857, 'dSpacing'],
['op_Crystal_psi0r', 'f', -1.2078420054211536e-05, 'psi0r'],
['op_Crystal_psi0i', 'f', 2.2634827546786884e-07, 'psi0i'],
['op_Crystal_psiHr', 'f', -6.385703370530202e-06, 'psiHr'],
['op_Crystal_psiHi', 'f', 1.580304012971715e-07, 'psiHi'],
['op_Crystal_psiHBr', 'f', -6.385703370530202e-06, 'psiHBr'],
['op_Crystal_psiHBi', 'f', 1.580304012971715e-07, 'psiHBi'],
['op_Crystal_tc', 'f', 0.01, 'crystalThickness'],
['op_Crystal_uc', 'f', 1, 'useCase'],
['op_Crystal_ang_as', 'f', 0.0, 'asymmetryAngle'],
['op_Crystal_nvx', 'f', 0.0, 'nvx'],
['op_Crystal_nvy', 'f', 0.9755673185032753, 'nvy'],
['op_Crystal_nvz', 'f', -0.21970072159264525, 'nvz'],
['op_Crystal_tvx', 'f', 0.0, 'tvx'],
['op_Crystal_tvy', 'f', 0.21970072159264525, 'tvy'],
['op_Crystal_ang', 'f', 0.2215076861825632, 'grazingAngle'],
['op_Crystal_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_Crystal_energy', 'f', 9000.0, 'energy'],
['op_Crystal_diffractionAngle', 'f', 0, 'diffractionAngle'],
# Crystal_Crystal2: drift
['op_Crystal_Crystal2_L', 'f', 0.5, 'length'],
# Crystal2: crystal
['op_Crystal2_hfn', 's', 'mirror_1d.dat', 'heightProfileFile'],
['op_Crystal2_dim', 's', 'x', 'orientation'],
['op_Crystal2_d_sp', 'f', 3.1355713563754857, 'dSpacing'],
['op_Crystal2_psi0r', 'f', -1.2078420054211536e-05, 'psi0r'],
['op_Crystal2_psi0i', 'f', 2.2634827546786884e-07, 'psi0i'],
['op_Crystal2_psiHr', 'f', -6.385703370530202e-06, 'psiHr'],
['op_Crystal2_psiHi', 'f', 1.580304012971715e-07, 'psiHi'],
['op_Crystal2_psiHBr', 'f', -6.385703370530202e-06, 'psiHBr'],
['op_Crystal2_psiHBi', 'f', 1.580304012971715e-07, 'psiHBi'],
['op_Crystal2_tc', 'f', 0.01, 'crystalThickness'],
['op_Crystal2_uc', 'f', 1, 'useCase'],
['op_Crystal2_ang_as', 'f', 0.0, 'asymmetryAngle'],
['op_Crystal2_nvx', 'f', 0.0, 'nvx'],
['op_Crystal2_nvy', 'f', 0.9755673185032753, 'nvy'],
['op_Crystal2_nvz', 'f', -0.21970072159264525, 'nvz'],
['op_Crystal2_tvx', 'f', 0.0, 'tvx'],
['op_Crystal2_tvy', 'f', 0.21970072159264525, 'tvy'],
['op_Crystal2_ang', 'f', 0.2215076861825632, 'grazingAngle'],
['op_Crystal2_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_Crystal2_energy', 'f', 9000.0, 'energy'],
['op_Crystal2_diffractionAngle', 'f', 0, 'diffractionAngle'],
# Crystal2_Grating: drift
['op_Crystal2_Grating_L', 'f', 0.5, 'length'],
# Grating: grating
['op_Grating_hfn', 's', '', 'heightProfileFile'],
['op_Grating_dim', 's', 'y', 'orientation'],
['op_Grating_size_tang', 'f', 0.2, 'tangentialSize'],
['op_Grating_size_sag', 'f', 0.015, 'sagittalSize'],
['op_Grating_nvx', 'f', 0.0, 'nvx'],
['op_Grating_nvy', 'f', 0.99991607766, 'nvy'],
['op_Grating_nvz', 'f', -0.012955216595673743, 'nvz'],
['op_Grating_tvx', 'f', 0.0, 'tvx'],
['op_Grating_tvy', 'f', 0.012955216595673743, 'tvy'],
['op_Grating_x', 'f', 0.0, 'horizontalOffset'],
['op_Grating_y', 'f', 0.0, 'verticalOffset'],
['op_Grating_m', 'f', 1.0, 'diffractionOrder'],
['op_Grating_grDen', 'f', 1800.0, 'grooveDensity0'],
['op_Grating_grDen1', 'f', 0.08997, 'grooveDensity1'],
['op_Grating_grDen2', 'f', 3.004e-06, 'grooveDensity2'],
['op_Grating_grDen3', 'f', 9.7e-11, 'grooveDensity3'],
['op_Grating_grDen4', 'f', 0.0, 'grooveDensity4'],
['op_Grating_e_avg', 'f', 9000.0, 'energyAvg'],
['op_Grating_cff', 'f', 1.9885286191675648, 'cff'],
['op_Grating_ang', 'f', 0.0129555790185373, 'grazingAngle'],
['op_Grating_rollAngle', 'f', 0.0, 'rollAngle'],
['op_Grating_outoptvx', 'f', 0.0, 'outoptvx'],
['op_Grating_outoptvy', 'f', 0.038710573855210595, 'outoptvy'],
['op_Grating_outoptvz', 'f', 0.9992504648344179, 'outoptvz'],
['op_Grating_outframevx', 'f', 1.0, 'outframevx'],
['op_Grating_outframevy', 'f', 0.0, 'outframevy'],
['op_Grating_computeParametersFrom', 'f', 2, 'computeParametersFrom'],
['op_Grating_amp_coef', 'f', 0.001, 'heightAmplification'],
# Grating_Watchpoint: drift
['op_Grating_Watchpoint_L', 'f', 1.0, 'length'],
#---Propagation parameters
['op_Lens_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Lens'],
['op_Lens_CRL_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Lens_CRL'],
['op_CRL_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'CRL'],
['op_CRL_Zone_Plate_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'CRL_Zone_Plate'],
['op_Zone_Plate_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Zone_Plate'],
['op_Zone_Plate_Fiber_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Zone_Plate_Fiber'],
['op_Fiber_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Fiber'],
['op_Fiber_Aperture_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Fiber_Aperture'],
['op_Aperture_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Aperture'],
['op_Aperture_Obstacle_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Aperture_Obstacle'],
['op_Obstacle_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Obstacle'],
['op_Obstacle_Mask_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Obstacle_Mask'],
['op_Mask_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Mask'],
['op_Mask_Sample_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Mask_Sample'],
['op_Sample_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Sample'],
['op_Sample_Planar_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Sample_Planar'],
['op_Planar_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Planar'],
['op_Planar_Circular_Cylinder_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Planar_Circular_Cylinder'],
['op_Circular_Cylinder_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Circular_Cylinder'],
['op_Circular_Cylinder_Circular_Cylinder2_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Circular_Cylinder_Circular_Cylinder2'],
['op_Circular_Cylinder2_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Circular_Cylinder2'],
['op_Circular_Cylinder2_Elliptical_Cylinder_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Circular_Cylinder2_Elliptical_Cylinder'],
['op_Elliptical_Cylinder_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Elliptical_Cylinder'],
['op_Elliptical_Cylinder_Elliptical_Cylinder2_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Elliptical_Cylinder_Elliptical_Cylinder2'],
['op_Elliptical_Cylinder2_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Elliptical_Cylinder2'],
['op_Elliptical_Cylinder2_Toroid_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Elliptical_Cylinder2_Toroid'],
['op_Toroid_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Toroid'],
['op_Toroid_Toroid2_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Toroid_Toroid2'],
['op_Toroid2_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Toroid2'],
['op_Toroid2_Crystal_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Toroid2_Crystal'],
['op_Crystal_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Crystal'],
['op_Crystal_Crystal2_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Crystal_Crystal2'],
['op_Crystal2_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Crystal2'],
['op_Crystal2_Grating_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Crystal2_Grating'],
['op_Grating_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Grating'],
['op_Grating_Watchpoint_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Grating_Watchpoint'],
['op_fin_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'final post-propagation (resize) parameters'],
#[ 0]: Auto-Resize (1) or not (0) Before propagation
#[ 1]: Auto-Resize (1) or not (0) After propagation
#[ 2]: Relative Precision for propagation with Auto-Resizing (1. is nominal)
#[ 3]: Allow (1) or not (0) for semi-analytical treatment of the quadratic (leading) phase terms at the propagation
#[ 4]: Do any Resizing on Fourier side, using FFT, (1) or not (0)
#[ 5]: Horizontal Range modification factor at Resizing (1. means no modification)
#[ 6]: Horizontal Resolution modification factor at Resizing
#[ 7]: Vertical Range modification factor at Resizing
#[ 8]: Vertical Resolution modification factor at Resizing
#[ 9]: Type of wavefront Shift before Resizing (not yet implemented)
#[10]: New Horizontal wavefront Center position after Shift (not yet implemented)
#[11]: New Vertical wavefront Center position after Shift (not yet implemented)
#[12]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Horizontal Coordinate
#[13]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Vertical Coordinate
#[14]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Longitudinal Coordinate
#[15]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Horizontal Coordinate
#[16]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Vertical Coordinate
]
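# --- Illustrative sketch (not part of the generated script) ---
# Each varParam entry is [name, type, value, description(, action)], and each
# *_pp value holds the 17-element propagation-parameter list documented in the
# comments above. The hypothetical helper below only shows how one might look
# up an entry by name, e.g. to toggle the semi-analytical quadratic-phase
# treatment (index [3] of a propagation-parameter list).
def _find_var_param(params, name):
    """Return the varParam entry whose first element matches name, else None."""
    for entry in params:
        if isinstance(entry, list) and entry and entry[0] == name:
            return entry
    return None
# Assumed usage: enable semi-analytical treatment for the Lens propagation:
# _find_var_param(varParam, 'op_Lens_pp')[2][3] = 1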
def main():
v = srwl_bl.srwl_uti_parse_options(srwl_bl.srwl_uti_ext_options(varParam), use_sys_argv=True)
op = set_optics(v)
v.ss = True
v.ss_pl = 'e'
v.sm = True
v.sm_pl = 'e'
v.pw = True
v.pw_pl = 'xy'
v.si = True
v.si_pl = 'xy'
v.tr = True
v.tr_pl = 'xz'
v.ws = True
v.ws_pl = 'xy'
mag = None
if v.rs_type == 'm':
mag = srwlib.SRWLMagFldC()
mag.arXc.append(0)
mag.arYc.append(0)
mag.arMagFld.append(srwlib.SRWLMagFldM(v.mp_field, v.mp_order, v.mp_distribution, v.mp_len))
mag.arZc.append(v.mp_zc)
srwl_bl.SRWLBeamline(_name=v.name, _mag_approx=mag).calc_all(v, op)
main()
|
mkeilman/sirepo
|
tests/template/srw_generate_data/srw-all-optics.py
|
Python
|
apache-2.0
| 64,879
|
[
"CRYSTAL",
"Gaussian"
] |
bef37b48756734b3a781f8dad3b8217578660a119ea940d8923011382e75025a
|
import pandas as pd
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
import matplotlib.dates as md
import requests
from netCDF4 import Dataset
from matplotlib import path
def tidal_currents(T, a1, a2, alpha):
"""Function estimates a depth averaged velocity
for a simple tidal channel
Inputs:
T = Tide period (hours)
a1 = tidal amplitude ocean side (m), greater than 0
a2 = tidal amplitude estuary side (m), greater than 0
alpha = a phase difference in degrees (º), greater than zero
L = channel length (m)
H = channel depth (m)
Outputs:
u_t = depth average current at several times between 0 and 4T"""
if alpha < 0:
raise ValueError('Alpha must be greater than 0')
if a1 < 0:
raise ValueError('Amplitude 1 must be greater than 0')
if a2 < 0:
raise ValueError('Amplitude 2 must be greater than 0')
g = 9.81
L = 200000
H = 200
alpha = alpha*np.pi/180
# pass period to seconds
T = T*3600
w = 2*np.pi/T
dt = T/100
t = np.arange(0, 4*T, dt)
u_t = g*a1/L*1/w*np.sin(w*t)-g*a2/L*1/w*np.sin(w*t+alpha)
return(u_t, t)
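# Usage sketch (illustrative values, not from the original module): an M2 tide
# (T = 12.42 h), 2 m ocean amplitude, 1 m estuary amplitude, 30º phase lag.
# u, t = tidal_currents(T=12.42, a1=2.0, a2=1.0, alpha=30.0)
# u is in m/s and t in seconds, spanning four tidal periods (400 samples).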
def plot_currents(T, a1, a2, alpha, N):
"""Plots results of analytical currents,
plots a time series of u(t), and a dot
in a specific velocity. Also plots an arrow
showing the direction of the current and its magnitude
Inputs:
T = Tide period (hours)
a1 = tidal amplitude ocean side (m)
a2 = tidal amplitude estuary side (m)
alpha = a phase difference in degrees (º), greater than zero
N = a time index at which to estimate u, must be less than 400"""
# Raise value error if N>400
if N > 400:
raise ValueError('N must be less than 400')
[u, time] = tidal_currents(T, a1, a2, alpha)
abs_u = np.absolute(u)
max_u = np.amax(abs_u)
u_single = u[N]
t_single = time[N]
fig, ax = plt.subplots(2, figsize=(10, 4))
# Arrow showing velocity
ax[0].set_ylim([-0.5, 0.5])
ax[0].set_xlim([-max_u-1, max_u+1])
if u_single > 0:
ax[0].arrow(0-u_single/2, 0, u_single, 0,
head_width=0.1, head_length=0.05, fc='g', ec='g')
ax[0].text(0, -0.3, 'Flood', horizontalalignment='center', color='g',
verticalalignment='center', fontsize=14, fontweight='bold')
else:
ax[0].arrow(0-u_single/2, 0, u_single, 0,
head_width=0.1, head_length=0.05, fc='r', ec='r')
ax[0].text(0, -0.3, 'Ebb', horizontalalignment='center', color='r',
verticalalignment='center', fontsize=14, fontweight='bold')
ax[0].text(-max_u, 0.3, 'Ocean', horizontalalignment='center',
verticalalignment='center', fontsize=14, fontweight='bold')
ax[0].text(max_u, 0.3, 'Estuary', horizontalalignment='center',
verticalalignment='center', fontsize=14, fontweight='bold')
ax[0].text(0, 0.45, 'V = ' + str(round(u_single, 1)) + ' m/s',
horizontalalignment='center', verticalalignment='center',
fontsize=14, fontweight='bold')
ax[0].axis('off')
# Time Series
ax[1].plot(time/3600, u, color='blue')
ax[1].plot(t_single/3600, u_single, color='blue',
marker='o', markersize=15)
ax[1].set_xlabel('Time (hours)')
ax[1].set_ylabel('Velocity (m/s)')
ax[1].set_ylim([-2.5, 2.5])
return
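# Usage sketch (assumed values): draw the arrow and time-series panels for the
# 150th time sample of the same illustrative tide as above.
# plot_currents(T=12.42, a1=2.0, a2=1.0, alpha=30.0, N=150)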
def ferry_data_download(URL):
"""Downloads the ADCP data from ferry
Inputs:
URL with the location of the ferry NetCDF file,
must end with .ncml
Outputs:
ferry: an xarray Dataset opened from the ferry data URL (NaN on failure)
file_downloaded: a boolean indicating whether the file was opened
explanation: a status message"""
explanation = 'File exists'
file_downloaded = True
# Request if the thredds server is working, add .html to URL
req = requests.get(URL + '.html')
if req.status_code == 200:
"""File exists and is good for download, so write file"""
print('File is ok')
explanation = 'Good URL, File downloaded'
file_downloaded = True
ferry = xr.open_dataset(URL)
else:
print('File not found or unavailable')
explanation = 'File not found or unavailable'
file_downloaded = False
ferry = np.nan
return (ferry, file_downloaded, explanation)
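# Usage sketch; the URL below is a placeholder, not a real endpoint -- the
# function expects any THREDDS address ending in .ncml.
# ferry, ok, msg = ferry_data_download(
#     'https://example.org/thredds/dodsC/ferry/adcp.ncml')
# if ok:
#     print(ferry)  # xarray Dataset with the ADCP variables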
def ferry_data_QC(ferry, TH_abs, TH_u, TH_d):
"""Gets the variables and pass a QC to estimate final velocities
Inputs:
ferry: ferry link object as an xarray DataSet
TH_abs: treshold for absolute velocities
TH_u: treshold for true velicities
TH_d: treshold for depth
Outputs:
ferryQC: Quality Controled ferry data in a xarray DataSet"""
if type(ferry) is not xr.core.dataset.Dataset:
raise ValueError('Ferry is not defined')
# QC1: set to NaN all absolute velocities with magnitude greater than TH_abs
abs_u = ferry.eastward_absolute_water_velocity.where(
(abs(ferry.eastward_absolute_water_velocity) < TH_abs) &
(abs(ferry.northward_absolute_water_velocity) < TH_abs))
abs_v = ferry.northward_absolute_water_velocity.where(
(abs(ferry.eastward_absolute_water_velocity) < TH_abs) &
(abs(ferry.northward_absolute_water_velocity) < TH_abs))
abs_w = ferry.vertical_absolute_water_velocity.where(
(abs(ferry.eastward_absolute_water_velocity) < TH_abs) &
(abs(ferry.northward_absolute_water_velocity) < TH_abs))
# Get bottom track velocity for reference
# and also clean for TH in abs velocity
east_btv = ferry.eastward_bottom_tracking_velocity.where(
(abs(ferry.eastward_absolute_water_velocity) < TH_abs) &
(abs(ferry.northward_absolute_water_velocity) < TH_abs))
north_btv = ferry.northward_bottom_tracking_velocity.where(
(abs(ferry.eastward_absolute_water_velocity) < TH_abs) &
(abs(ferry.northward_absolute_water_velocity) < TH_abs))
vert_btv = ferry.vertical_bottom_tracking_velocity.where(
(abs(ferry.eastward_absolute_water_velocity) < TH_abs) &
(abs(ferry.northward_absolute_water_velocity) < TH_abs))
# Estimate u_true = abs_u + east_bt_v
u_true = abs_u + east_btv
v_true = abs_v + north_btv
w_true = abs_w + vert_btv
U = np.sqrt(u_true**2 + v_true**2)
# QC2: check that u_true, v_true and the speed U are less than TH_u
u_true = u_true.where((u_true < TH_u) & (v_true < TH_u) & (U < TH_u))
v_true = v_true.where((u_true < TH_u) & (v_true < TH_u) & (U < TH_u))
w_true = w_true.where((u_true < TH_u) & (v_true < TH_u) & (U < TH_u))
U = U.where((u_true < TH_u) & (v_true < TH_u) & (U < TH_u))
# Add true velocity data to the dataset
ferry['u_true'] = u_true
ferry['v_true'] = v_true
ferry['w_true'] = w_true
ferry['Horizontal_speed'] = U
# Remove the first TH_d depth bins (contaminated near-transducer cells)
ferryQC = ferry.isel(depth=slice(TH_d, None))
goodQC = True
return(ferryQC, goodQC)
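# Usage sketch (thresholds are the ones quoted in the comments above):
# absolute velocities under 6.5 m/s, true velocities under 4 m/s, and the
# first 5 depth bins dropped.
# ferryQC, goodQC = ferry_data_QC(ferry, TH_abs=6.5, TH_u=4.0, TH_d=5)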
def count_route_num(ferryQc):
""" adds a variable to ferryQc dataset to mark the crossing number
INPUTS:
ferryQc - the xarray DataSet object with the ferry data
OUTPUTS:
ferryQc - xarray DataSet object with the ferry data, now with
the added variable, xing_num
"""
# initialize vars; zeros keep entries the loop below never visits defined
counter = 0
xing_num = np.zeros(ferryQc.time.size, dtype=int)
tdiff = np.diff(ferryQc.time.values)
# Loop through all time values. Time gaps greater than 10mins indicate
# ferry docking, and the start of a new transect
for ii in range(1, ferryQc.time.size-1):
if tdiff[ii] > np.timedelta64(10, 'm'):
xing_num[ii] = -9
counter = counter+1
else:
xing_num[ii] = counter
# add to ferry structure
ferryQc['xing_num'] = xr.DataArray(xing_num,
coords=ferryQc.time.indexes,
dims=['time'])
return(ferryQc)
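# Minimal self-contained sketch (illustrative timestamps) of the gap rule used
# above: successive samples more than 10 minutes apart mark a ferry docking
# and hence the start of a new crossing.
def _demo_gap_rule():
    times = np.array(['2017-01-01T00:00', '2017-01-01T00:01',
                      '2017-01-01T00:30'], dtype='datetime64[m]')
    gaps = np.diff(times) > np.timedelta64(10, 'm')
    return gaps  # array([False, True]): a new crossing starts at sample 2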
def plt_tide(pt_tide, time_index, start_date, end_date):
"""
plot tidal elevations at port townsend station over a specified
date range with a marker at a location specified by time_index
INPUTS:
pt_tide - pandas DataFrame with the Port Townsend tidal
elevation data
time_index - integer specifying iloc of vertical marker on plot
start_date - string with first date of display range
end_date - string with last date of display range
OUTPUTS: A matplotlib subplot with tidal elevations,
and a vertical marker
at the location specified in time_index
"""
if pt_tide[start_date:end_date].size < time_index:
raise ValueError('time_index out of specified date range')
# sub_selected time for vertical bar, as chosen by time_index
time = pt_tide[start_date:end_date].index[time_index]
# note conversion to meters
plt.plot(pt_tide[start_date:end_date]*0.3048)
y_max = 3.5
y_min = -0.5
# (newer matplotlib holds the current axes by default; plt.hold was removed)
# plot vertical line at location specified in time
plt.plot([time, time], [y_min, y_max])
# Clean up and label axes
plt.ylabel('Elevation [m]')
plt.gca().xaxis.tick_top()
def plt_ferry_and_tide(ferryQc, pt_tide, crossing_index, start_date, end_date):
""" plots Port Townsend tidal elevations and ferry crossing data on two
subplots.
INPUTS
ferryQc: xarray data set with quality controlled ferry data
pt_tide: port townsend tidal elevations as a pandas DataFrame
crossing_index: integer index of the ferry crossing to be displayed
start_date: start of tidal elevation time series to be plotted
end_date: end of tidal elevation time series to be plotted
OUTPUTS
Figure with two subplots
"""
# subsample ferryQc Data
ferry_subsample = ferryQc.sel(time=slice(start_date, end_date))
min_xing = min(ferry_subsample.xing_num
[ferry_subsample.xing_num != -9].values)
max_xing = max(ferry_subsample.xing_num
[ferry_subsample.xing_num != -9].values)
valid_xings = np.arange(min_xing, max_xing, 2)
# check cross_index is in range
if (crossing_index > valid_xings.size):
# print('Invalid Ferry Xing index')
# print(str(valid_xings.size) + ' number of crossings ' +
# 'for the chosen date range')
raise ValueError('Invalid Ferry Xing index')
chosen_xing_num = valid_xings[crossing_index]
# find indexing for both datasets
ferry_time_index = ferry_subsample['time'].to_index()
pt_time_index = pt_tide[start_date:end_date].index
# find crossing that matches time_index
mid_ferry_time = ferry_time_index[np.where(
ferry_subsample.xing_num == chosen_xing_num)]
mid_ferry_time = mid_ferry_time[int(mid_ferry_time.size/2)]
# find numeric index in the pt_tides DataFrame that matches
# the nearest in the ferry data
pt_time_val = pt_time_index.searchsorted(mid_ferry_time)
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(10, 4))
# plot tidal elevations
plt.sca(axes[0])
plt_tide(pt_tide, pt_time_val, start_date, end_date)
plt.gca().xaxis.tick_top()
# Ferry Plots
plt.sca(axes[1])
plt_index = ferry_subsample.xing_num.values == chosen_xing_num
# Check for existing values in array
if not np.all(np.isnan(
ferry_subsample.Horizontal_speed[plt_index].values)):
ferry_subsample.Horizontal_speed[plt_index].plot(x='time', y='depth')
else:
print('All NaN speed values')
# plot bottom track
ferry_subsample.bottom_tracking_depth_beam_1[plt_index].plot()
# format axes
plt.gca().invert_yaxis()
plt.ylabel('Depth [m]')
# plt.clim(0, 3)
# format xticks
xfmt = md.DateFormatter('%m-%d %H:%M')
axes[1].xaxis.set_major_formatter(xfmt)
return
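# Usage sketch (dates and index are illustrative): plot the Port Townsend tide
# over a date range together with one quality-controlled ferry crossing in it.
# plt_ferry_and_tide(ferryQc, pt_tide, crossing_index=2,
#                    start_date='2016-10-01', end_date='2016-10-03')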
|
mguerrap/tydal
|
tydal/module3_utils.py
|
Python
|
mit
| 12,090
|
[
"NetCDF"
] |
5c7e12fefa81be298f8e38e086cacc7de11490900c08b1bd3b5b59627ebad2b4
|