from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Densitymapbox(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "densitymapbox"
_valid_props = {
"autocolorscale",
"below",
"coloraxis",
"colorbar",
"colorscale",
"customdata",
"customdatasrc",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"lat",
"latsrc",
"legendgroup",
"legendgrouptitle",
"legendrank",
"lon",
"lonsrc",
"meta",
"metasrc",
"name",
"opacity",
"radius",
"radiussrc",
"reversescale",
"showlegend",
"showscale",
"stream",
"subplot",
"text",
"textsrc",
"type",
"uid",
"uirevision",
"visible",
"z",
"zauto",
"zmax",
"zmid",
"zmin",
"zsrc",
}
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# below
# -----
@property
def below(self):
"""
Determines if the densitymapbox trace will be inserted before
the layer with the specified ID. By default, densitymapbox
        traces are placed below the first layer of type symbol. If set
        to '', the layer will be inserted above every existing layer.
The 'below' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["below"]
@below.setter
def below(self, val):
self["below"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorbar
# --------
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.densitymapbox.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Supported dict properties:
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
                Sets the width (in px) of the border enclosing
                this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
                86400000.0. "date" also has special values;
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
                Sets the length of the color bar. This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
                or in "pixels". Use `len` to set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this
number. This only has an effect when
`tickformat` is "SI" or "B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
                Sets the thickness of the color bar. This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
                similar to those in Python. For numbers, see:
                https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
                And for dates see:
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two
items to d3's date formatter: "%h" for half of
the year as a decimal number as well as "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.density
mapbox.colorbar.Tickformatstop` instances or
dicts with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.densitymapbox.colorbar.tickformatstopdefaults
), sets the default property values to use for
elements of
densitymapbox.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of
the axis. The default value for inside tick
labels is *hide past domain*. In other cases
the default is *hide past div*.
ticklabelposition
Determines where tick labels are drawn relative
to the ticks. Left and right options are used
when `orientation` is "h", top and bottom when
`orientation` is "v".
ticklabelstep
Sets the spacing between tick labels as
compared to the spacing between ticks. A value
of 1 (default) means each tick gets a label. A
                value of 2 means every 2nd label is shown. A
larger value n means only every nth tick is
labeled. `tick0` determines which labels are
shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is
"array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for `ticktext`.
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for `tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.densitymapbox.colo
rbar.Title` instance or dict with compatible
properties
titlefont
Deprecated: Please use
densitymapbox.colorbar.title.font instead. Sets
this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
densitymapbox.colorbar.title.side instead.
Determines the location of color bar's title
with respect to the color bar. Defaults to
"top" when `orientation` if "v" and defaults
to "right" when `orientation` if "h". Note that
the title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction). Defaults to 1.02 when `orientation`
is "v" and 0.5 when `orientation` is "h".
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar. Defaults to "left" when `orientation` is
"v" and "center" when `orientation` is "h".
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction). Defaults to 0.5 when `orientation`
is "v" and 1.02 when `orientation` is "h".
yanchor
                Sets this color bar's vertical position anchor.
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
Defaults to "middle" when `orientation` is "v"
and "bottom" when `orientation` is "h".
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
plotly.graph_objs.densitymapbox.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale. The colorscale must be an array containing
arrays mapping a normalized value to an rgb, rgba, hex, hsl,
hsv, or named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the
        bounds of the colorscale in color space, use `zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,Electric,
Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,RdBu,Reds,Viridis,
YlGnBu,YlOrRd.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# customdata
# ----------
@property
def customdata(self):
"""
        Assigns extra data to each datum. This may be useful when
        listening to hover, click and selection events. Note that
        "scatter" traces also append customdata items to the markers'
        DOM elements.
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`customdata`.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
        Determines which trace information appears on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['lon', 'lat', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'lon+lat')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.densitymapbox.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
                only if the hover label text spans two or more
                lines.
alignsrc
Sets the source reference on Chart Studio Cloud
for `align`.
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for `bgcolor`.
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for `bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for `namelength`.
Returns
-------
plotly.graph_objs.densitymapbox.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
        Template string used for rendering the information that appears
        on the hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. The variables available in `hovertemplate`
are the ones emitted as event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-data.
        Additionally, every attribute that can be specified per-point
        (the ones that are `arrayOk: true`) is available. Anything
contained in tag `<extra>` is displayed in the secondary box,
for example "<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag `<extra></extra>`.
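        As an illustration (the variable names below are simply the
        per-point fields of this trace, not an exhaustive list), with
        `lat`, `lon` and `z` set, a template such as
        "lat: %{lat:.3f}<br>lon: %{lon:.3f}<br>weight: %{z}<extra></extra>"
        labels each point with its coordinates and weight.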
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
        Sets hover text elements associated with each (lon,lat) pair.
        If a single string, the same string appears over all the data
        points. If an array of strings, the items are mapped in order
        to this trace's (lon,lat) coordinates. To be seen, trace
        `hoverinfo` must contain a "text" flag.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
# hovertextsrc
# ------------
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertext`.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
# ids
# ---
@property
def ids(self):
"""
        Assigns id labels to each datum. These ids are used for object
        constancy of data points during animation. Should be an array
        of strings, not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ids`.
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# lat
# ---
@property
def lat(self):
"""
Sets the latitude coordinates (in degrees North).
The 'lat' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["lat"]
@lat.setter
def lat(self, val):
self["lat"] = val
# latsrc
# ------
@property
def latsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `lat`.
The 'latsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["latsrc"]
@latsrc.setter
def latsrc(self, val):
self["latsrc"] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
# legendgrouptitle
# ----------------
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.densitymapbox.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Supported dict properties:
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
plotly.graph_objs.densitymapbox.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
# legendrank
# ----------
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
        "reversed" `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
# lon
# ---
@property
def lon(self):
"""
Sets the longitude coordinates (in degrees East).
The 'lon' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["lon"]
@lon.setter
def lon(self, val):
self["lon"] = val
# lonsrc
# ------
@property
def lonsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `lon`.
The 'lonsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["lonsrc"]
@lonsrc.setter
def lonsrc(self, val):
self["lonsrc"] = val
# meta
# ----
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
        trace `name`, graph, axis and colorbar `title.text`, annotation
        `text`, `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
        `%{data[n].meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for `meta`.
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
# name
# ----
@property
def name(self):
"""
        Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# radius
# ------
@property
def radius(self):
"""
Sets the radius of influence of one `lon` / `lat` point in
pixels. Increasing the value makes the densitymapbox trace
smoother, but less detailed.
The 'radius' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["radius"]
@radius.setter
def radius(self, val):
self["radius"] = val
# radiussrc
# ---------
@property
def radiussrc(self):
"""
Sets the source reference on Chart Studio Cloud for `radius`.
The 'radiussrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["radiussrc"]
@radiussrc.setter
def radiussrc(self, val):
self["radiussrc"] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Reverses the color mapping if true. If true, `zmin` will
correspond to the last color in the array and `zmax` will
correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
# showscale
# ---------
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.densitymapbox.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
Returns
-------
plotly.graph_objs.densitymapbox.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
# subplot
# -------
@property
def subplot(self):
"""
Sets a reference between this trace's data coordinates and a
mapbox subplot. If "mapbox" (the default value), the data refer
to `layout.mapbox`. If "mapbox2", the data refer to
`layout.mapbox2`, and so on.
The 'subplot' property is an identifier of a particular
subplot, of type 'mapbox', that may be specified as the string 'mapbox'
optionally followed by an integer >= 1
(e.g. 'mapbox', 'mapbox1', 'mapbox2', 'mapbox3', etc.)
Returns
-------
str
"""
return self["subplot"]
@subplot.setter
def subplot(self, val):
self["subplot"] = val
# text
# ----
@property
def text(self):
"""
        Sets text elements associated with each (lon,lat) pair. If a
        single string, the same string appears over all the data
        points. If an array of strings, the items are mapped in order
        to this trace's (lon,lat) coordinates. If trace `hoverinfo`
        contains a "text" flag and "hovertext" is not set, these
        elements will be seen in the hover labels.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# textsrc
# -------
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `text`.
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
# uid
# ---
@property
def uid(self):
"""
        Assign an id to this trace. Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# z
# -
@property
def z(self):
"""
Sets the points' weight. For example, a value of 10 would be
        equivalent to having 10 points of weight 1 in the same spot.
The 'z' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
# zauto
# -----
@property
def zauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `z`) or the bounds set in
        `zmin` and `zmax`. Defaults to `false` when `zmin` and `zmax`
are set by the user.
The 'zauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["zauto"]
@zauto.setter
def zauto(self, val):
self["zauto"] = val
# zmax
# ----
@property
def zmax(self):
"""
Sets the upper bound of the color domain. Value should have the
same units as in `z` and if set, `zmin` must be set as well.
The 'zmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmax"]
@zmax.setter
def zmax(self, val):
self["zmax"] = val
# zmid
# ----
@property
def zmid(self):
"""
Sets the mid-point of the color domain by scaling `zmin` and/or
`zmax` to be equidistant to this point. Value should have the
same units as in `z`. Has no effect when `zauto` is `false`.
The 'zmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmid"]
@zmid.setter
def zmid(self, val):
self["zmid"] = val
# zmin
# ----
@property
def zmin(self):
"""
Sets the lower bound of the color domain. Value should have the
same units as in `z` and if set, `zmax` must be set as well.
The 'zmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmin"]
@zmin.setter
def zmin(self, val):
self["zmin"] = val
# zsrc
# ----
@property
def zsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `z`.
The 'zsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["zsrc"]
@zsrc.setter
def zsrc(self, val):
self["zsrc"] = val
# type
# ----
@property
def type(self):
return self._props["type"]
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
below
Determines if the densitymapbox trace will be inserted
before the layer with the specified ID. By default,
densitymapbox traces are placed below the first layer
            of type symbol. If set to '', the layer will be inserted
above every existing layer.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.densitymapbox.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
            colorscale in color space, use `zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note that
            "scatter" traces also append customdata items to the
            markers' DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
hoverinfo
            Determines which trace information appears on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.densitymapbox.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
            appears on the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            is available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Sets hover text elements associated with each (lon,lat)
            pair. If a single string, the same string appears over
            all the data points. If an array of strings, the items
            are mapped in order to this trace's (lon,lat)
coordinates. To be seen, trace `hoverinfo` must contain
a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should
            be an array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
lat
Sets the latitude coordinates (in degrees North).
latsrc
Sets the source reference on Chart Studio Cloud for
`lat`.
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.densitymapbox.Legendgroupt
itle` instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
lon
Sets the longitude coordinates (in degrees East).
lonsrc
Sets the source reference on Chart Studio Cloud for
`lon`.
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
radius
Sets the radius of influence of one `lon` / `lat` point
in pixels. Increasing the value makes the densitymapbox
trace smoother, but less detailed.
radiussrc
Sets the source reference on Chart Studio Cloud for
`radius`.
reversescale
Reverses the color mapping if true. If true, `zmin`
will correspond to the last color in the array and
`zmax` will correspond to the first color.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
stream
:class:`plotly.graph_objects.densitymapbox.Stream`
instance or dict with compatible properties
subplot
Sets a reference between this trace's data coordinates
and a mapbox subplot. If "mapbox" (the default value),
the data refer to `layout.mapbox`. If "mapbox2", the
data refer to `layout.mapbox2`, and so on.
text
            Sets text elements associated with each (lon,lat) pair.
            If a single string, the same string appears over all
            the data points. If an array of strings, the items are
            mapped in order to this trace's (lon,lat)
coordinates. If trace `hoverinfo` contains a "text"
flag and "hovertext" is not set, these elements will be
seen in the hover labels.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
uid
            Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
z
Sets the points' weight. For example, a value of 10
would be equivalent to having 10 points of weight 1 in
            the same spot.
zauto
Determines whether or not the color domain is computed
with respect to the input data (here in `z`) or the
            bounds set in `zmin` and `zmax`. Defaults to `false`
when `zmin` and `zmax` are set by the user.
zmax
Sets the upper bound of the color domain. Value should
have the same units as in `z` and if set, `zmin` must
be set as well.
zmid
Sets the mid-point of the color domain by scaling
`zmin` and/or `zmax` to be equidistant to this point.
Value should have the same units as in `z`. Has no
effect when `zauto` is `false`.
zmin
Sets the lower bound of the color domain. Value should
have the same units as in `z` and if set, `zmax` must
be set as well.
zsrc
Sets the source reference on Chart Studio Cloud for
`z`.
"""
def __init__(
self,
arg=None,
autocolorscale=None,
below=None,
coloraxis=None,
colorbar=None,
colorscale=None,
customdata=None,
customdatasrc=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
lat=None,
latsrc=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
lon=None,
lonsrc=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
radius=None,
radiussrc=None,
reversescale=None,
showlegend=None,
showscale=None,
stream=None,
subplot=None,
text=None,
textsrc=None,
uid=None,
uirevision=None,
visible=None,
z=None,
zauto=None,
zmax=None,
zmid=None,
zmin=None,
zsrc=None,
**kwargs
):
"""
Construct a new Densitymapbox object
Draws a bivariate kernel density estimation with a Gaussian
kernel from `lon` and `lat` coordinates and optional `z` values
using a colorscale.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Densitymapbox`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
below
Determines if the densitymapbox trace will be inserted
before the layer with the specified ID. By default,
densitymapbox traces are placed below the first layer
            of type symbol. If set to '', the layer will be inserted
above every existing layer.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.densitymapbox.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
            colorscale in color space, use `zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note that
            "scatter" traces also append customdata items to the
            markers' DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
hoverinfo
            Determines which trace information appears on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.densitymapbox.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
            appears on the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            is available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Sets hover text elements associated with each (lon,lat)
            pair. If a single string, the same string appears over
            all the data points. If an array of strings, the items
            are mapped in order to this trace's (lon,lat)
coordinates. To be seen, trace `hoverinfo` must contain
a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should
            be an array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
lat
Sets the latitude coordinates (in degrees North).
latsrc
Sets the source reference on Chart Studio Cloud for
`lat`.
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.densitymapbox.Legendgroupt
itle` instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
lon
Sets the longitude coordinates (in degrees East).
lonsrc
Sets the source reference on Chart Studio Cloud for
`lon`.
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
radius
Sets the radius of influence of one `lon` / `lat` point
in pixels. Increasing the value makes the densitymapbox
trace smoother, but less detailed.
radiussrc
Sets the source reference on Chart Studio Cloud for
`radius`.
reversescale
Reverses the color mapping if true. If true, `zmin`
will correspond to the last color in the array and
`zmax` will correspond to the first color.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
stream
:class:`plotly.graph_objects.densitymapbox.Stream`
instance or dict with compatible properties
subplot
Sets a reference between this trace's data coordinates
and a mapbox subplot. If "mapbox" (the default value),
the data refer to `layout.mapbox`. If "mapbox2", the
data refer to `layout.mapbox2`, and so on.
text
            Sets text elements associated with each (lon,lat) pair.
            If a single string, the same string appears over all
            the data points. If an array of strings, the items are
            mapped in order to this trace's (lon,lat)
coordinates. If trace `hoverinfo` contains a "text"
flag and "hovertext" is not set, these elements will be
seen in the hover labels.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
uid
            Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
z
Sets the points' weight. For example, a value of 10
would be equivalent to having 10 points of weight 1 in
            the same spot.
zauto
Determines whether or not the color domain is computed
with respect to the input data (here in `z`) or the
            bounds set in `zmin` and `zmax`. Defaults to `false`
when `zmin` and `zmax` are set by the user.
zmax
Sets the upper bound of the color domain. Value should
have the same units as in `z` and if set, `zmin` must
be set as well.
zmid
Sets the mid-point of the color domain by scaling
`zmin` and/or `zmax` to be equidistant to this point.
Value should have the same units as in `z`. Has no
effect when `zauto` is `false`.
zmin
Sets the lower bound of the color domain. Value should
have the same units as in `z` and if set, `zmax` must
be set as well.
zsrc
Sets the source reference on Chart Studio Cloud for
`z`.
Returns
-------
Densitymapbox
"""
super(Densitymapbox, self).__init__("densitymapbox")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Densitymapbox
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Densitymapbox`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autocolorscale", None)
_v = autocolorscale if autocolorscale is not None else _v
if _v is not None:
self["autocolorscale"] = _v
_v = arg.pop("below", None)
_v = below if below is not None else _v
if _v is not None:
self["below"] = _v
_v = arg.pop("coloraxis", None)
_v = coloraxis if coloraxis is not None else _v
if _v is not None:
self["coloraxis"] = _v
_v = arg.pop("colorbar", None)
_v = colorbar if colorbar is not None else _v
if _v is not None:
self["colorbar"] = _v
_v = arg.pop("colorscale", None)
_v = colorscale if colorscale is not None else _v
if _v is not None:
self["colorscale"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hovertemplate", None)
_v = hovertemplate if hovertemplate is not None else _v
if _v is not None:
self["hovertemplate"] = _v
_v = arg.pop("hovertemplatesrc", None)
_v = hovertemplatesrc if hovertemplatesrc is not None else _v
if _v is not None:
self["hovertemplatesrc"] = _v
_v = arg.pop("hovertext", None)
_v = hovertext if hovertext is not None else _v
if _v is not None:
self["hovertext"] = _v
_v = arg.pop("hovertextsrc", None)
_v = hovertextsrc if hovertextsrc is not None else _v
if _v is not None:
self["hovertextsrc"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("lat", None)
_v = lat if lat is not None else _v
if _v is not None:
self["lat"] = _v
_v = arg.pop("latsrc", None)
_v = latsrc if latsrc is not None else _v
if _v is not None:
self["latsrc"] = _v
_v = arg.pop("legendgroup", None)
_v = legendgroup if legendgroup is not None else _v
if _v is not None:
self["legendgroup"] = _v
_v = arg.pop("legendgrouptitle", None)
_v = legendgrouptitle if legendgrouptitle is not None else _v
if _v is not None:
self["legendgrouptitle"] = _v
_v = arg.pop("legendrank", None)
_v = legendrank if legendrank is not None else _v
if _v is not None:
self["legendrank"] = _v
_v = arg.pop("lon", None)
_v = lon if lon is not None else _v
if _v is not None:
self["lon"] = _v
_v = arg.pop("lonsrc", None)
_v = lonsrc if lonsrc is not None else _v
if _v is not None:
self["lonsrc"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("radius", None)
_v = radius if radius is not None else _v
if _v is not None:
self["radius"] = _v
_v = arg.pop("radiussrc", None)
_v = radiussrc if radiussrc is not None else _v
if _v is not None:
self["radiussrc"] = _v
_v = arg.pop("reversescale", None)
_v = reversescale if reversescale is not None else _v
if _v is not None:
self["reversescale"] = _v
_v = arg.pop("showlegend", None)
_v = showlegend if showlegend is not None else _v
if _v is not None:
self["showlegend"] = _v
_v = arg.pop("showscale", None)
_v = showscale if showscale is not None else _v
if _v is not None:
self["showscale"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("subplot", None)
_v = subplot if subplot is not None else _v
if _v is not None:
self["subplot"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
_v = arg.pop("textsrc", None)
_v = textsrc if textsrc is not None else _v
if _v is not None:
self["textsrc"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("z", None)
_v = z if z is not None else _v
if _v is not None:
self["z"] = _v
_v = arg.pop("zauto", None)
_v = zauto if zauto is not None else _v
if _v is not None:
self["zauto"] = _v
_v = arg.pop("zmax", None)
_v = zmax if zmax is not None else _v
if _v is not None:
self["zmax"] = _v
_v = arg.pop("zmid", None)
_v = zmid if zmid is not None else _v
if _v is not None:
self["zmid"] = _v
_v = arg.pop("zmin", None)
_v = zmin if zmin is not None else _v
if _v is not None:
self["zmin"] = _v
_v = arg.pop("zsrc", None)
_v = zsrc if zsrc is not None else _v
if _v is not None:
self["zsrc"] = _v
# Read-only literals
# ------------------
self._props["type"] = "densitymapbox"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
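# ----------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the generated
# module): shows how the `zauto`/`zmin`/`zmax` properties documented above
# pin the color domain instead of letting it follow the data range.
# Assumes plotly is installed; the lat/lon/z values below are made-up data.
if __name__ == "__main__":
    import plotly.graph_objects as go

    lats = [45.50, 45.52, 45.54]
    lons = [-73.57, -73.55, -73.53]
    vals = [1.0, 3.5, 5.0]

    fig = go.Figure(
        go.Densitymapbox(
            lat=lats,
            lon=lons,
            z=vals,
            radius=20,
            zauto=False,  # use the explicit bounds below, not the data range
            zmin=0,
            zmax=5,
            colorscale="Viridis",
        )
    )
    fig.update_layout(mapbox_style="open-street-map")
    fig.show()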
| plotly/plotly.py | packages/python/plotly/plotly/graph_objs/_densitymapbox.py | Python | mit | 80,557 | [
"Gaussian"
] | d4429adda8160c5314dae707e2f20c29027bf3123962f9db07e1bdb52f04d64e |
# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2017
Contact: t.hessels@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Function/Four
"""
def Fraction_Based(nc_outname, Startdate, Enddate):
"""
This function divides the total supply into a groundwater and a surface water component, using the fractions that are given in the Get_Dictionaries script
Parameters
----------
nc_outname : str
Path to the NetCDF file that contains the Landuse and Total_Supply datasets to be split
Startdate : str
Contains the start date of the model 'yyyy-mm-dd'
Enddate : str
Contains the end date of the model 'yyyy-mm-dd'
Returns
-------
DataCube_SW : Array
Array containing the surface water part of the supply [time,lat,lon]
DataCube_GW : Array
Array containing the groundwater part of the supply [time,lat,lon]
"""
# import water accounting plus modules
import wa.General.raster_conversions as RC
import wa.Functions.Start as Start
# import general modules
import numpy as np
# Open Arrays
DataCube_LU = RC.Open_nc_array(nc_outname, "Landuse")
DataCube_Parameter = RC.Open_nc_array(nc_outname, "Total_Supply", Startdate, Enddate)
# Get Classes
LU_Classes = Start.Get_Dictionaries.get_sheet5_classes()
LU_Classes_Keys = LU_Classes.keys()
# Get fractions
sw_supply_dict = Start.Get_Dictionaries.sw_supply_fractions()
# Create Array for consumed fractions
DataCube_Parameter_Fractions = np.ones(DataCube_LU.shape) * np.nan
# Create array with consumed_fractions
for Classes_LULC in LU_Classes_Keys:
Values_LULC = LU_Classes[Classes_LULC]
for Value_LULC in Values_LULC:
DataCube_Parameter_Fractions[DataCube_LU == Value_LULC] = sw_supply_dict[Classes_LULC]
# Calculate the Surface water and groundwater components based on the fraction
DataCube_SW_Parameter = DataCube_Parameter[:,:,:] * DataCube_Parameter_Fractions[None,:,:]
DataCube_GW_Parameter = DataCube_Parameter - DataCube_SW_Parameter
return(DataCube_SW_Parameter, DataCube_GW_Parameter) | wateraccounting/wa | Functions/Four/SplitGW_SW_Supply.py | Python | apache-2.0 | 2,179 | [
"NetCDF"
] | 27577c6d3e8bb2c3b6a36760035db719984925dc0d476ea75143094c53601bcb |
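# Editor's usage sketch for the Fraction_Based function above (file name and period
# are hypothetical; assumes the wa package and its Functions.Four module are importable):
#
#     from wa.Functions.Four.SplitGW_SW_Supply import Fraction_Based
#     sw_supply, gw_supply = Fraction_Based("Simulation_1.nc", "2010-01-01", "2010-12-31")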
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_analyticsprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of AnalyticsProfile Avi RESTful Object
description:
- This module is used to configure AnalyticsProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
apdex_response_threshold:
description:
- If a client receives an http response in less than the satisfactory latency threshold, the request is considered satisfied.
- It is considered tolerated if it is not satisfied and less than tolerated latency factor multiplied by the satisfactory latency threshold.
- Greater than this number and the client's request is considered frustrated.
- Allowed values are 1-30000.
- Default value when not specified in API or module is interpreted by Avi Controller as 500.
- Units(MILLISECONDS).
apdex_response_tolerated_factor:
description:
- Client tolerated response latency factor.
- Client must receive a response within this factor times the satisfactory threshold (apdex_response_threshold) to be considered tolerated.
- Allowed values are 1-1000.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
apdex_rtt_threshold:
description:
- Satisfactory client to avi round trip time(rtt).
- Allowed values are 1-2000.
- Default value when not specified in API or module is interpreted by Avi Controller as 250.
- Units(MILLISECONDS).
apdex_rtt_tolerated_factor:
description:
- Tolerated client to avi round trip time(rtt) factor.
- It is a multiple of apdex_rtt_tolerated_factor.
- Allowed values are 1-1000.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
apdex_rum_threshold:
description:
- If a client is able to load a page in less than the satisfactory latency threshold, the pageload is considered satisfied.
- It is considered tolerated if it is greater than satisfied but less than the tolerated latency factor multiplied by the satisfied latency.
- Greater than this number and the client's request is considered frustrated.
- A pageload includes the time for dns lookup, download of all http objects, and page render time.
- Allowed values are 1-30000.
- Default value when not specified in API or module is interpreted by Avi Controller as 5000.
- Units(MILLISECONDS).
apdex_rum_tolerated_factor:
description:
- Virtual service threshold factor for tolerated page load time (plt) as multiple of apdex_rum_threshold.
- Allowed values are 1-1000.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
apdex_server_response_threshold:
description:
- A server http response is considered satisfied if latency is less than the satisfactory latency threshold.
- The response is considered tolerated when it is greater than satisfied but less than the tolerated latency factor * s_latency.
- Greater than this number and the server response is considered frustrated.
- Allowed values are 1-30000.
- Default value when not specified in API or module is interpreted by Avi Controller as 400.
- Units(MILLISECONDS).
apdex_server_response_tolerated_factor:
description:
- Server tolerated response latency factor.
- Server must respond within this factor times the satisfactory threshold (apdex_server_response_threshold) to be considered tolerated.
- Allowed values are 1-1000.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
apdex_server_rtt_threshold:
description:
- Satisfactory client to avi round trip time(rtt).
- Allowed values are 1-2000.
- Default value when not specified in API or module is interpreted by Avi Controller as 125.
- Units(MILLISECONDS).
apdex_server_rtt_tolerated_factor:
description:
- Tolerated client to avi round trip time(rtt) factor.
- It is a multiple of apdex_rtt_tolerated_factor.
- Allowed values are 1-1000.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
client_log_config:
description:
- Configure which logs are sent to the avi controller from ses and how they are processed.
client_log_streaming_config:
description:
- Configure to stream logs to an external server.
- Field introduced in 17.1.1.
version_added: "2.4"
conn_lossy_ooo_threshold:
description:
- A connection between client and avi is considered lossy when more than this percentage of out of order packets are received.
- Allowed values are 1-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 50.
- Units(PERCENT).
conn_lossy_timeo_rexmt_threshold:
description:
- A connection between client and avi is considered lossy when more than this percentage of packets are retransmitted due to timeout.
- Allowed values are 1-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 20.
- Units(PERCENT).
conn_lossy_total_rexmt_threshold:
description:
- A connection between client and avi is considered lossy when more than this percentage of packets are retransmitted.
- Allowed values are 1-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 50.
- Units(PERCENT).
conn_lossy_zero_win_size_event_threshold:
description:
- A client connection is considered lossy when the percentage of times a packet could not be transmitted due to tcp zero window is above this threshold.
- Allowed values are 0-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.
- Units(PERCENT).
conn_server_lossy_ooo_threshold:
description:
- A connection between avi and server is considered lossy when more than this percentage of out of order packets are received.
- Allowed values are 1-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 50.
- Units(PERCENT).
conn_server_lossy_timeo_rexmt_threshold:
description:
- A connection between avi and server is considered lossy when more than this percentage of packets are retransmitted due to timeout.
- Allowed values are 1-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 20.
- Units(PERCENT).
conn_server_lossy_total_rexmt_threshold:
description:
- A connection between avi and server is considered lossy when more than this percentage of packets are retransmitted.
- Allowed values are 1-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 50.
- Units(PERCENT).
conn_server_lossy_zero_win_size_event_threshold:
description:
- A server connection is considered lossy when the percentage of times a packet could not be transmitted due to tcp zero window is above this threshold.
- Allowed values are 0-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.
- Units(PERCENT).
description:
description:
- User defined description for the object.
disable_se_analytics:
description:
- Disable node (service engine) level analytics for vs metrics.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
disable_server_analytics:
description:
- Disable analytics on backend servers.
- This may be desired in container environments where there are a large number of ephemeral servers.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_client_close_before_request_as_error:
description:
- Exclude client closed connection before an http request could be completed from being classified as an error.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_dns_policy_drop_as_significant:
description:
- Exclude dns policy drops from the list of errors.
- Field introduced in 17.2.2.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.5"
type: bool
exclude_gs_down_as_error:
description:
- Exclude queries to gslb services that are operationally down from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_http_error_codes:
description:
- List of http status codes to be excluded from being classified as an error.
- Error connections or responses impacts health score, are included as significant logs, and may be classified as part of a dos attack.
exclude_invalid_dns_domain_as_error:
description:
- Exclude dns queries to domains outside the domains configured in the dns application profile from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_invalid_dns_query_as_error:
description:
- Exclude invalid dns queries from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_no_dns_record_as_error:
description:
- Exclude queries to domains that did not have configured services/records from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_no_valid_gs_member_as_error:
description:
- Exclude queries to gslb services that have no available members from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_persistence_change_as_error:
description:
- Exclude 'persistence server changed while load balancing' from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_server_dns_error_as_error:
description:
- Exclude server dns error response from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_server_tcp_reset_as_error:
description:
- Exclude server tcp reset from errors.
- It is common for applications like ms exchange.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_syn_retransmit_as_error:
description:
- Exclude 'server unanswered syns' from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_tcp_reset_as_error:
description:
- Exclude tcp resets by client from the list of potential errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
exclude_unsupported_dns_query_as_error:
description:
- Exclude unsupported dns queries from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
hs_event_throttle_window:
description:
- Time window (in secs) within which only unique health change events should occur.
- Default value when not specified in API or module is interpreted by Avi Controller as 1209600.
hs_max_anomaly_penalty:
description:
- Maximum penalty that may be deducted from health score for anomalies.
- Allowed values are 0-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
hs_max_resources_penalty:
description:
- Maximum penalty that may be deducted from health score for high resource utilization.
- Allowed values are 0-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 25.
hs_max_security_penalty:
description:
- Maximum penalty that may be deducted from health score based on security assessment.
- Allowed values are 0-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 100.
hs_min_dos_rate:
description:
- Dos connection rate below which the dos security assessment will not kick in.
- Default value when not specified in API or module is interpreted by Avi Controller as 1000.
hs_performance_boost:
description:
- Adds free performance score credits to health score.
- It can be used for compensating health score for known slow applications.
- Allowed values are 0-100.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
hs_pscore_traffic_threshold_l4_client:
description:
- Threshold number of connections in 5min, below which apdexr, apdexc, rum_apdex, and other network quality metrics are not computed.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.0.
hs_pscore_traffic_threshold_l4_server:
description:
- Threshold number of connections in 5min, below which apdexr, apdexc, rum_apdex, and other network quality metrics are not computed.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.0.
hs_security_certscore_expired:
description:
- Score assigned when the certificate has expired.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.0.
hs_security_certscore_gt30d:
description:
- Score assigned when the certificate expires in more than 30 days.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.0.
hs_security_certscore_le07d:
description:
- Score assigned when the certificate expires in less than or equal to 7 days.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.0.
hs_security_certscore_le30d:
description:
- Score assigned when the certificate expires in less than or equal to 30 days.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
hs_security_chain_invalidity_penalty:
description:
- Penalty for allowing certificates with invalid chain.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
hs_security_cipherscore_eq000b:
description:
- Score assigned when the minimum cipher strength is 0 bits.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.0.
hs_security_cipherscore_ge128b:
description:
- Score assigned when the minimum cipher strength is greater than equal to 128 bits.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.0.
hs_security_cipherscore_lt128b:
description:
- Score assigned when the minimum cipher strength is less than 128 bits.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 3.5.
hs_security_encalgo_score_none:
description:
- Score assigned when no algorithm is used for encryption.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.0.
hs_security_encalgo_score_rc4:
description:
- Score assigned when rc4 algorithm is used for encryption.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.5.
hs_security_hsts_penalty:
description:
- Penalty for not enabling hsts.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
hs_security_nonpfs_penalty:
description:
- Penalty for allowing non-pfs handshakes.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
hs_security_selfsignedcert_penalty:
description:
- Deprecated.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
hs_security_ssl30_score:
description:
- Score assigned when supporting ssl3.0 encryption protocol.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 3.5.
hs_security_tls10_score:
description:
- Score assigned when supporting tls1.0 encryption protocol.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.0.
hs_security_tls11_score:
description:
- Score assigned when supporting tls1.1 encryption protocol.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.0.
hs_security_tls12_score:
description:
- Score assigned when supporting tls1.2 encryption protocol.
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.0.
hs_security_weak_signature_algo_penalty:
description:
- Penalty for allowing weak signature algorithm(s).
- Allowed values are 0-5.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
name:
description:
- The name of the analytics profile.
required: true
ranges:
description:
- List of http status code ranges to be excluded from being classified as an error.
resp_code_block:
description:
- Block of http response codes to be excluded from being classified as an error.
- Enum options - AP_HTTP_RSP_4XX, AP_HTTP_RSP_5XX.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the analytics profile.
extends_documentation_fragment:
- avi
'''
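# Editor's aside (illustrative only, not part of the Avi SDK or of this module):
# the apdex_* thresholds documented above classify each sample as satisfied
# (latency <= threshold), tolerated (<= tolerated_factor * threshold) or
# frustrated (anything slower); the resulting Apdex score is conventionally
# computed as (satisfied + tolerated/2) / total, as sketched below.
def _apdex_score_sketch(latencies_ms, threshold_ms=500, tolerated_factor=4.0):
    satisfied = sum(1 for l in latencies_ms if l <= threshold_ms)
    tolerated = sum(1 for l in latencies_ms
                    if threshold_ms < l <= tolerated_factor * threshold_ms)
    return (satisfied + 0.5 * tolerated) / float(len(latencies_ms))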
EXAMPLES = """
- name: Create a custom Analytics profile object
avi_analyticsprofile:
controller: '{{ controller }}'
username: '{{ username }}'
password: '{{ password }}'
apdex_response_threshold: 500
apdex_response_tolerated_factor: 4.0
apdex_rtt_threshold: 250
apdex_rtt_tolerated_factor: 4.0
apdex_rum_threshold: 5000
apdex_rum_tolerated_factor: 4.0
apdex_server_response_threshold: 400
apdex_server_response_tolerated_factor: 4.0
apdex_server_rtt_threshold: 125
apdex_server_rtt_tolerated_factor: 4.0
conn_lossy_ooo_threshold: 50
conn_lossy_timeo_rexmt_threshold: 20
conn_lossy_total_rexmt_threshold: 50
conn_lossy_zero_win_size_event_threshold: 2
conn_server_lossy_ooo_threshold: 50
conn_server_lossy_timeo_rexmt_threshold: 20
conn_server_lossy_total_rexmt_threshold: 50
conn_server_lossy_zero_win_size_event_threshold: 2
disable_se_analytics: false
disable_server_analytics: false
exclude_client_close_before_request_as_error: false
exclude_persistence_change_as_error: false
exclude_server_tcp_reset_as_error: false
exclude_syn_retransmit_as_error: false
exclude_tcp_reset_as_error: false
hs_event_throttle_window: 1209600
hs_max_anomaly_penalty: 10
hs_max_resources_penalty: 25
hs_max_security_penalty: 100
hs_min_dos_rate: 1000
hs_performance_boost: 20
hs_pscore_traffic_threshold_l4_client: 10.0
hs_pscore_traffic_threshold_l4_server: 10.0
hs_security_certscore_expired: 0.0
hs_security_certscore_gt30d: 5.0
hs_security_certscore_le07d: 2.0
hs_security_certscore_le30d: 4.0
hs_security_chain_invalidity_penalty: 1.0
hs_security_cipherscore_eq000b: 0.0
hs_security_cipherscore_ge128b: 5.0
hs_security_cipherscore_lt128b: 3.5
hs_security_encalgo_score_none: 0.0
hs_security_encalgo_score_rc4: 2.5
hs_security_hsts_penalty: 0.0
hs_security_nonpfs_penalty: 1.0
hs_security_selfsignedcert_penalty: 1.0
hs_security_ssl30_score: 3.5
hs_security_tls10_score: 5.0
hs_security_tls11_score: 5.0
hs_security_tls12_score: 5.0
hs_security_weak_signature_algo_penalty: 1.0
name: jason-analytics-profile
tenant_ref: Demo
"""
RETURN = '''
obj:
description: AnalyticsProfile (api/analyticsprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
apdex_response_threshold=dict(type='int',),
apdex_response_tolerated_factor=dict(type='float',),
apdex_rtt_threshold=dict(type='int',),
apdex_rtt_tolerated_factor=dict(type='float',),
apdex_rum_threshold=dict(type='int',),
apdex_rum_tolerated_factor=dict(type='float',),
apdex_server_response_threshold=dict(type='int',),
apdex_server_response_tolerated_factor=dict(type='float',),
apdex_server_rtt_threshold=dict(type='int',),
apdex_server_rtt_tolerated_factor=dict(type='float',),
client_log_config=dict(type='dict',),
client_log_streaming_config=dict(type='dict',),
conn_lossy_ooo_threshold=dict(type='int',),
conn_lossy_timeo_rexmt_threshold=dict(type='int',),
conn_lossy_total_rexmt_threshold=dict(type='int',),
conn_lossy_zero_win_size_event_threshold=dict(type='int',),
conn_server_lossy_ooo_threshold=dict(type='int',),
conn_server_lossy_timeo_rexmt_threshold=dict(type='int',),
conn_server_lossy_total_rexmt_threshold=dict(type='int',),
conn_server_lossy_zero_win_size_event_threshold=dict(type='int',),
description=dict(type='str',),
disable_se_analytics=dict(type='bool',),
disable_server_analytics=dict(type='bool',),
exclude_client_close_before_request_as_error=dict(type='bool',),
exclude_dns_policy_drop_as_significant=dict(type='bool',),
exclude_gs_down_as_error=dict(type='bool',),
exclude_http_error_codes=dict(type='list',),
exclude_invalid_dns_domain_as_error=dict(type='bool',),
exclude_invalid_dns_query_as_error=dict(type='bool',),
exclude_no_dns_record_as_error=dict(type='bool',),
exclude_no_valid_gs_member_as_error=dict(type='bool',),
exclude_persistence_change_as_error=dict(type='bool',),
exclude_server_dns_error_as_error=dict(type='bool',),
exclude_server_tcp_reset_as_error=dict(type='bool',),
exclude_syn_retransmit_as_error=dict(type='bool',),
exclude_tcp_reset_as_error=dict(type='bool',),
exclude_unsupported_dns_query_as_error=dict(type='bool',),
hs_event_throttle_window=dict(type='int',),
hs_max_anomaly_penalty=dict(type='int',),
hs_max_resources_penalty=dict(type='int',),
hs_max_security_penalty=dict(type='int',),
hs_min_dos_rate=dict(type='int',),
hs_performance_boost=dict(type='int',),
hs_pscore_traffic_threshold_l4_client=dict(type='float',),
hs_pscore_traffic_threshold_l4_server=dict(type='float',),
hs_security_certscore_expired=dict(type='float',),
hs_security_certscore_gt30d=dict(type='float',),
hs_security_certscore_le07d=dict(type='float',),
hs_security_certscore_le30d=dict(type='float',),
hs_security_chain_invalidity_penalty=dict(type='float',),
hs_security_cipherscore_eq000b=dict(type='float',),
hs_security_cipherscore_ge128b=dict(type='float',),
hs_security_cipherscore_lt128b=dict(type='float',),
hs_security_encalgo_score_none=dict(type='float',),
hs_security_encalgo_score_rc4=dict(type='float',),
hs_security_hsts_penalty=dict(type='float',),
hs_security_nonpfs_penalty=dict(type='float',),
hs_security_selfsignedcert_penalty=dict(type='float',),
hs_security_ssl30_score=dict(type='float',),
hs_security_tls10_score=dict(type='float',),
hs_security_tls11_score=dict(type='float',),
hs_security_tls12_score=dict(type='float',),
hs_security_weak_signature_algo_penalty=dict(type='float',),
name=dict(type='str', required=True),
ranges=dict(type='list',),
resp_code_block=dict(type='list',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'analyticsprofile',
set([]))
if __name__ == '__main__':
main()
| hryamzik/ansible | lib/ansible/modules/network/avi/avi_analyticsprofile.py | Python | gpl-3.0 | 29,130 | [
"VisIt"
] | e0b29afab8d95aa1ea0b697470837b43a1e162713969159e6c2dcfacdf147873 |
#!/usr/bin/env python
import matplotlib as mpl
import desdb
import numpy as np
import esutil
import pyfits
import sys
import argparse
import healpy as hp
import os
import functions2
import slr_zeropoint_shiftmap as slr
import numpy.lib.recfunctions as rf
import matplotlib.pyplot as plt
def NoSimFields(band='i'):
q = """
SELECT
balrog_index,
mag_auto,
flags
FROM
SUCHYTA1.balrog_sva1v2_nosim_%s
""" %(band)
return q
def SimFields(band='i',table='sva1v2'):
q = """
SELECT
t.tilename as tilename,
m.xwin_image as xwin_image,
m.ywin_image as ywin_image,
m.xmin_image as xmin_image,
m.ymin_image as ymin_image,
m.xmax_image as xmax_image,
m.ymax_image as ymax_image,
m.balrog_index as balrog_index,
m.alphawin_j2000 as ra,
m.deltawin_j2000 as dec,
m.mag_auto as mag_auto,
m.spread_model as spread_model,
m.spreaderr_model as spreaderr_model,
m.class_star as class_star,
m.mag_psf as mag_psf,
t.mag as truth_mag_auto,
m.flags as flags
FROM
SUCHYTA1.balrog_%s_sim_%s m
JOIN SUCHYTA1.balrog_%s_truth_%s t ON t.balrog_index = m.balrog_index
""" %(table, band, table, band)
return q
def DESFields(tilestuff, band='i'):
q = """
SELECT
tilename,
coadd_objects_id,
mag_auto_%s as mag_auto,
alphawin_j2000_%s as ra,
deltawin_j2000_%s as dec,
spread_model_%s as spread_model,
spreaderr_model_%s as spreaderr_model,
class_star_%s as class_star,
mag_psf_%s as mag_psf,
flags_%s as flags
FROM
sva1_coadd_objects
WHERE
tilename in %s
""" % (band,band,band,band,band,band,band,band,str(tuple(np.unique(tilestuff['tilename']))))
return q
def TruthFields(band='i', table = 'sva1v2'):
q = """
SELECT
balrog_index,
tilename,
ra,
dec,
objtype,
mag
FROM
SUCHYTA1.balrog_%s_truth_%s
"""%(table,band)
return q
def GetDESCat( depthmap, nside, tilestuff, tileinfo, band='i',depth = 0.0):
cur = desdb.connect()
q = DESFields(tileinfo, band=band)
detcat = cur.quick(q, array=True)
detcat = functions2.ValidDepth(depthmap, nside, detcat, rakey='ra', deckey='dec',depth = depth)
detcat = functions2.RemoveTileOverlap(tilestuff, detcat, col='tilename', rakey='ra', deckey='dec')
return detcat
def getTileInfo(catalog, HealConfig=None):
if HealConfig is None:
HealConfig = getHealConfig()
tiles = np.unique(catalog['tilename'])
cur = desdb.connect()
q = "SELECT tilename, udecll, udecur, urall, uraur FROM coaddtile"
tileinfo = cur.quick(q, array=True)
tilestuff = {}
for i in range(len(tileinfo)):
tilestuff[ tileinfo[i]['tilename'] ] = tileinfo[i]
max = np.power(map_nside/float(HealConfig['out_nside']), 2.0)
depthmap, nside = functions2.GetDepthMap(HealConfig['depthfile'])
return depthmap, nside
def cleanCatalog(catalog, tag='mag_auto'):
# We should get rid of obviously wrong things.
keep = np.where( (catalog[tag] > 15. ) & (catalog[tag] < 30.) & (catalog['flags'] < 2) )
return catalog[keep]
def removeBadTilesFromTruthCatalog(truth, tag='mag_auto', goodfrac = 0.8):
tileList = np.unique(truth['tilename'])
number = np.zeros(tileList.size)
for tile, i in zip(tileList,xrange(number.size)):
number[i] = np.sum(truth['tilename'] == tile)
tileList = tileList[number > goodfrac*np.max(number)]
keep = np.in1d( truth['tilename'], tileList )
return truth[keep]
def mergeCatalogsUsingPandas(sim=None, truth=None, key='balrog_index', suffixes = ['_sim','']):
import pandas as pd
simData = pd.DataFrame(sim)
truthData = pd.DataFrame(truth)
matched = pd.merge(simData, truthData, on=key, suffixes = suffixes)
matched_arr = matched.to_records(index=False)
# This last step is necessary because Pandas converts strings to Objects when eating structured arrays.
# And np.recfunctions flips out when it has one.
oldDtype = matched_arr.dtype.descr
newDtype = oldDtype
for thisOldType,i in zip(oldDtype, xrange(len(oldDtype) )):
if 'O' in thisOldType[1]:
newDtype[i] = (thisOldType[0], 'S12')
matched_arr = np.array(matched_arr,dtype=newDtype)
return matched_arr
def GetFromDB( band='i', depth = 0.0,tables =['sva1v2','sva1v3_2']): # tables =['sva1v2','sva1v3','sva1v3_2']
depthfile = '../sva1_gold_1.0.2-4_nside4096_nest_i_auto_weights.fits'
cur = desdb.connect()
q = "SELECT tilename, udecll, udecur, urall, uraur FROM coaddtile"
tileinfo = cur.quick(q, array=True)
tilestuff = {}
for i in range(len(tileinfo)):
tilestuff[ tileinfo[i]['tilename'] ] = tileinfo[i]
depthmap, nside = functions2.GetDepthMap(depthfile)
truths = []
sims = []
truthMatcheds = []
for tableName in tables:
q = TruthFields(band=band,table=tableName)
truth = cur.quick(q, array=True)
truth = removeBadTilesFromTruthCatalog(truth)
truth = functions2.ValidDepth(depthmap, nside, truth, depth = depth)
truth = functions2.RemoveTileOverlap(tilestuff, truth)
unique_binds, unique_inds = np.unique(truth['balrog_index'],return_index=True)
truth = truth[unique_inds]
q = SimFields(band=band, table=tableName)
sim = cur.quick(q, array=True)
sim = cleanCatalog(sim,tag='mag_auto')
unique_binds, unique_inds = np.unique(sim['balrog_index'],return_index=True)
sim = sim[unique_inds]
truthMatched = mergeCatalogsUsingPandas(sim=sim,truth=truth)
sim = sim[np.in1d(sim['balrog_index'],truthMatched['balrog_index'])]
sim.sort(order='balrog_index')
truthMatched.sort(order='balrog_index')
truthMatcheds.append(truthMatched)
truths.append(truth)
sims.append(sim)
sim = np.hstack(sims)
truth = np.hstack(truths)
truthMatched = np.hstack(truthMatcheds)
des = GetDESCat(depthmap, nside, tilestuff, sim, band=band,depth = depth)
des = cleanCatalog(des, tag='mag_auto')
return des, sim, truthMatched, truth, tileinfo
def getSingleFilterCatalogs(reload=False,band='i'):
# Check to see whether the catalog files exist. If they do, then
# use the files. If at least one does not, then get what we need
# from the database
fileNames = ['desCatalogFile-'+band+'.fits','BalrogObsFile-'+band+'.fits',
'BalrogTruthFile-'+band+'.fits', 'BalrogTruthMatchedFile-'+band+'.fits',
'BalrogTileInfo.fits']
exists = True
for thisFile in fileNames:
print "Checking for existence of: "+thisFile
if not os.path.isfile(thisFile): exists = False
if exists and not reload:
desCat = esutil.io.read(fileNames[0])
BalrogObs = esutil.io.read(fileNames[1])
BalrogTruth = esutil.io.read(fileNames[2])
BalrogTruthMatched = esutil.io.read(fileNames[3])
BalrogTileInfo = esutil.io.read(fileNames[4])
else:
print "Cannot find files, or have been asked to reload. Getting data from DESDB."
desCat, BalrogObs, BalrogTruthMatched, BalrogTruth, BalrogTileInfo = GetFromDB(band=band)
esutil.io.write( fileNames[0], desCat , clobber=True)
esutil.io.write( fileNames[1], BalrogObs , clobber=True)
esutil.io.write( fileNames[2], BalrogTruth , clobber=True)
esutil.io.write( fileNames[3], BalrogTruthMatched , clobber=True)
esutil.io.write( fileNames[4], BalrogTileInfo, clobber=True)
return desCat, BalrogObs, BalrogTruthMatched, BalrogTruth, BalrogTileInfo
def modestify(data, band='i'):
modest = np.zeros(len(data), dtype=np.int32)
# Galaxies: clean flags and NOT (bright point-like source, small spread_model, or bad PSF magnitude).
# Note: use ~ (logical not) rather than unary - on boolean arrays, which newer numpy rejects.
galcut = (data['flags_%s'%(band)] <=3) & ~( ((data['class_star_%s'%(band)] > 0.3) & (data['mag_auto_%s'%(band)] < 18.0)) | ((data['spread_model_%s'%(band)] + 3*data['spreaderr_model_%s'%(band)]) < 0.003) | ((data['mag_psf_%s'%(band)] > 30.0) & (data['mag_auto_%s'%(band)] < 21.0)))
modest[galcut] = 1
# Stars: clean flags and either a bright point-like source or spread_model within +/- 0.003.
starcut = (data['flags_%s'%(band)] <=3) & ((data['class_star_%s'%(band)] > 0.3) & (data['mag_auto_%s'%(band)] < 18.0) & (data['mag_psf_%s'%(band)] < 30.0) | (((data['spread_model_%s'%(band)] + 3*data['spreaderr_model_%s'%(band)]) < 0.003) & ((data['spread_model_%s'%(band)] +3*data['spreaderr_model_%s'%(band)]) > -0.003)))
modest[starcut] = 3
# Everything else is left unclassified.
neither = ~(galcut | starcut)
modest[neither] = 5
data = rf.append_fields(data, 'modtype_%s'%(band), modest)
print len(data), np.sum(galcut), np.sum(starcut), np.sum(neither)
return data
def getMultiBandCatalogs(reload=False, band1 = 'g', band2 = 'i'):
des1, balrogObs1, balrogTruthMatched1, balrogTruth1, balrogTileInfo = getSingleFilterCatalogs(reload=reload, band=band1)
des2, balrogObs2, balrogTruthMatched2, balrogTruth2, _ = getSingleFilterCatalogs(reload=reload, band=band2)
# Now merge these across filters.
des = mergeCatalogsUsingPandas(des1, des2, key='coadd_objects_id', suffixes = ['_'+band1,'_'+band2])
balrogObs = mergeCatalogsUsingPandas(balrogObs1, balrogObs2, key='balrog_index', suffixes = ['_'+band1,'_'+band2])
balrogTruthMatched = mergeCatalogsUsingPandas(balrogTruthMatched1, balrogTruthMatched2, key='balrog_index', suffixes = ['_'+band1,'_'+band2])
balrogTruth = mergeCatalogsUsingPandas(balrogTruth1, balrogTruth2, key='balrog_index', suffixes = ['_'+band1,'_'+band2])
des = modestify(des, band=band1)
des = modestify(des, band=band2)
balrogObs = modestify(balrogObs,band=band1)
balrogObs = modestify(balrogObs,band=band2)
balrogTruthMatched = modestify(balrogTruthMatched,band=band1)
balrogTruthMatched = modestify(balrogTruthMatched,band=band2)
# Finally, add colors.
des = rf.append_fields(des, 'color_%s_%s'%(band1,band2), ( des['mag_auto_'+band1] - des['mag_auto_'+band2] ) )
balrogObs = rf.append_fields(balrogObs, 'color_%s_%s'%(band1,band2), ( balrogObs['mag_auto_'+band1] - balrogObs['mag_auto_'+band2] ) )
balrogTruthMatched = rf.append_fields(balrogTruthMatched, 'color_%s_%s'%(band1,band2), ( balrogTruthMatched['mag_auto_'+band1] - balrogTruthMatched['mag_auto_'+band2] ) )
balrogTruth = rf.append_fields(balrogTruth, 'color_%s_%s'%(band1,band2), ( balrogTruth['mag_'+band1] - balrogTruth['mag_'+band2] ) )
return des, balrogObs, balrogTruthMatched, balrogTruth, balrogTileInfo
def hpHEALPixelToRaDec(pixel, nside=4096, nest=True):
theta, phi = hp.pix2ang(nside, pixel, nest=nest)
# convert colatitude/longitude in radians back to RA/Dec in degrees
# (convertThetaPhiToRaDec is not defined in this script, so do it inline)
ra = np.degrees(phi)
dec = 90.0 - np.degrees(theta)
return ra, dec
def hpRaDecToHEALPixel(ra, dec, nside= 4096, nest= True):
phi = ra * np.pi / 180.0
theta = (90.0 - dec) * np.pi / 180.0
hpInd = hp.ang2pix(nside, theta, phi, nest= nest)
return hpInd
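# Editor's note: the conversion above uses the colatitude convention
# theta = (90 - dec) * pi/180 and phi = ra * pi/180 on a NESTED grid, so it is
# equivalent to calling healpy directly, e.g. (coordinates made up):
#     hp.ang2pix(4096, np.radians(90.0 - (-55.0)), np.radians(70.0), nest=True)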
def getGoodRegionIndices(catalog=None, badHPInds=None, nside=4096,band='i'):
hpInd = hpRaDecToHEALPixel(catalog['ra_'+band], catalog['dec_'+band], nside=nside, nest= True)
keep = ~np.in1d(hpInd, badHPInds)
return keep
def excludeBadRegions(des,balrogObs, balrogTruthMatched, balrogTruth, band='i'):
eliMap = hp.read_map("sva1_gold_1.0.4_goodregions_04_equ_nest_4096.fits", nest=True)
nside = hp.npix2nside(eliMap.size)
maskIndices = np.arange(eliMap.size)
badIndices = maskIndices[eliMap == 1]
obsKeepIndices = getGoodRegionIndices(catalog=balrogObs, badHPInds=badIndices, nside=nside, band=band)
truthKeepIndices = getGoodRegionIndices(catalog=balrogTruth, badHPInds=badIndices, nside=nside,band=band)
desKeepIndices = getGoodRegionIndices(catalog=des, badHPInds=badIndices, nside=nside,band=band)
balrogObs = balrogObs[obsKeepIndices]
balrogTruthMatched = balrogTruthMatched[obsKeepIndices]
balrogTruth = balrogTruth[truthKeepIndices]
des = des[desKeepIndices]
return des,balrogObs, balrogTruthMatched, balrogTruth
def main(argv):
band1 = 'g'
band2 = 'r'
des, balrogObs, balrogTruthMatched, balrogTruth, balrogTileInfo = getMultiBandCatalogs(reload=False, band1=band1,band2=band2)
des, balrogObs, balrogTruthMatched, balrogTruth = excludeBadRegions(des,balrogObs, balrogTruthMatched, balrogTruth,band=band2)
import MCMC
#truthcolumns = ['objtype_%s'%(band1), 'mag_%s'%(band1), 'mag_%s'%(band2)]
#truthbins = [np.arange(0.5,5,2.0), np.arange(17.5,27,0.5),np.arange(17.5,27,0.5)]
#measuredcolumns = ['modtype_%s'%(band1),'mag_auto_%s'%(band1), 'mag_auto_%s'%(band2)]
#measuredbins=[np.arange(0.5, 7, 2.0), np.arange(17.5,27,0.5), np.arange(17.5,27,0.5)]
truthcolumns = ['objtype_%s'%(band1), 'color_%s_%s'%(band1,band2), 'mag_%s'%(band2)]
truthbins = [np.arange(0.5,5,2.0), np.arange(-4,4,0.5),np.arange(17.5,27,0.5)]
measuredcolumns = ['modtype_%s'%(band1), 'color_%s_%s'%(band1,band2), 'mag_auto_%s'%(band2)]
measuredbins=[np.arange(0.5, 7, 2.0), np.arange(-4,4,0.25), np.arange(17.5,27,0.25)]
BalrogObject = MCMC.BalrogLikelihood(balrogTruth, balrogTruthMatched,
truthcolumns = truthcolumns,
truthbins = truthbins,
measuredcolumns= measuredcolumns,
measuredbins = measuredbins)
nWalkers = 2000
burnin = 1000
steps = 1000
ReconObject = MCMC.MCMCReconstruction(BalrogObject, des, MCMC.ObjectLogL,
truth=balrogTruth, nWalkers=nWalkers, reg=1.0e-10)
ReconObject.BurnIn(burnin)
ReconObject.Sample(steps)
print np.average(ReconObject.Sampler.acceptance_fraction)
fig = plt.figure(1,figsize=(14,7))
ax = fig.add_subplot(1,2, 1)
where = [0, None]
BalrogObject.PlotTruthHistogram1D(where=where, ax=ax, plotkwargs={'label':'BT-G', 'color':'Blue'})
BalrogObject.PlotMeasuredHistogram1D(where=where, ax=ax, plotkwargs={'label':'BO-G', 'color':'Red'})
ReconObject.PlotMeasuredHistogram1D(where=where, ax=ax, plotkwargs={'label':'DO-G', 'color':'Gray'})
ReconObject.PlotReconHistogram1D(where=where, ax=ax, plotkwargs={'label':'DR-G', 'color':'black', 'fmt':'o', 'markersize':3})
ax.legend(loc='best', ncol=2)
ax.set_yscale('log')
ax = fig.add_subplot(1,2, 2)
where = [1, None, 1]
BalrogObject.PlotTruthHistogram1D(where=where, ax=ax, plotkwargs={'label':'BT-G', 'color':'Blue'})
BalrogObject.PlotMeasuredHistogram1D(where=where, ax=ax, plotkwargs={'label':'BO-G', 'color':'Red'})
ReconObject.PlotMeasuredHistogram1D(where=where, ax=ax, plotkwargs={'label':'DO-G', 'color':'Gray'})
ReconObject.PlotReconHistogram1D(where=where, ax=ax, plotkwargs={'label':'DR-G', 'color':'black', 'fmt':'o', 'markersize':3})
ax.legend(loc='best', ncol=2)
ax.set_yscale('log')
fig.savefig("star-galaxy-magnitude-reconstruction")
plt.show(block=True)
fullRecon, fullReconErrs = ReconObject.GetReconstruction()
nBins = np.array([thing.size for thing in truthbins])-1
recon2d = np.reshape(fullRecon, nBins)
err2d = np.reshape(fullReconErrs, nBins)
stop  # intentional NameError: drops into the pdb post-mortem handler below when run as a script
if __name__ == "__main__":
import pdb, traceback
try:
main(sys.argv)
except:
thingtype, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
| emhuff/regularizedInversion | des_color_inference.py | Python | mit | 15,630 | [
"Galaxy"
] | 0c0a283724ef9a1553bf3a9918631d0337199c6a392754b6276a976d429ec676 |
#! /usr/bin/env python
import owyl
from owyl import blackboard
import rospy
class Tree():
def __init__(self):
self.blackboard = blackboard.Blackboard("breath tree")
self.tree = self.build_tree()
while True:
self.tree.next()
def build_tree(self):
eva_breath_tree = \
owyl.repeatAlways(
owyl.sequence(
# TODO: breathing behaviors (TBC)
self.breathe()
)
)
return owyl.visit(eva_breath_tree, blackboard=self.blackboard)
@owyl.taskmethod
def breathe(self, **kwargs):
# TODO
yield True
if __name__ == "__main__":
rospy.init_node("Eva_Breath")
tree = Tree()
| linas/eva_behavior | src/eva_breath.py | Python | lgpl-2.1 | 744 | [
"VisIt"
] | 7ad65f4d25cf0bdb46bcfa1d5ce458214a1f519734f37e6f977aa278774eec76 |
import numpy as np
import os
try:
import netCDF4 as netCDF
except:
import netCDF3 as netCDF
import matplotlib.pyplot as plt
import time
import datetime as dt
from matplotlib.dates import date2num, num2date
import pyroms
import pyroms_toolbox
import _remapping
class nctime(object):
pass
def remap_bdry_uv(src_fileuv, src_grd, dst_grd, dst_fileu, dst_filev, dmax=0, cdepth=0, kk=0, dst_dir='./'):
# CCS grid sub-sample
xrange=src_grd.xrange; yrange=src_grd.yrange
# get time
nctime.long_name = 'time'
nctime.units = 'days since 1900-01-01 00:00:00'
# get dimensions
Mp, Lp = dst_grd.hgrid.mask_rho.shape
# create destination file
print '\nCreating destination file', dst_fileu
if os.path.exists(dst_fileu) is True:
os.remove(dst_fileu)
pyroms_toolbox.nc_create_roms_file(dst_fileu, dst_grd, nctime)
print 'Creating destination file', dst_filev
if os.path.exists(dst_filev) is True:
os.remove(dst_filev)
pyroms_toolbox.nc_create_roms_bdry_file(dst_filev, dst_grd, nctime)
# open destination file
ncu = netCDF.Dataset(dst_fileu, 'a', format='NETCDF3_64BIT')
ncv = netCDF.Dataset(dst_filev, 'a', format='NETCDF3_64BIT')
#load var
cdfuv = netCDF.Dataset(src_fileuv)
src_varu = cdfuv.variables['u']
src_varv = cdfuv.variables['v']
tmp = cdfuv.variables['time'][:]
if len(tmp) > 1:
print 'error : multiple frames in input file' ; exit()
else:
time = tmp[0]
# we need to correct the time axis
ref_soda = dt.datetime(1980,1,1,0,0)
ref_roms = dt.datetime(1900,1,1,0,0)
ndays = (ref_soda - ref_roms).days
time = time + ndays
#get missing value
spval = src_varu.missing_value
# CCS grid sub-sample
src_varu = src_varu[0,:, yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
src_varv = src_varv[0,:, yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
# get weights file
wts_file = 'remap_weights_' + src_grd.name + '_to_' + dst_grd.name + '_bilinear_uv_to_rho.nc'
# build intermediate zgrid
zlevel = -src_grd.z_t[::-1]
nzlevel = len(zlevel)
dst_zcoord = pyroms.vgrid.z_coordinate(dst_grd.vgrid.h, zlevel, nzlevel)
dst_grdz = pyroms.grid.ROMS_Grid(dst_grd.name+'_Z', dst_grd.hgrid, dst_zcoord)
# create variable in destination file
print 'Creating variable u_north'
ncu.createVariable('u_north', 'f8', ('ocean_time', 's_rho', 'xi_u'), fill_value=spval)
ncu.variables['u_north'].long_name = '3D u-momentum north boundary condition'
ncu.variables['u_north'].units = 'meter second-1'
ncu.variables['u_north'].field = 'u_north, scalar, series'
#ncu.variables['u_north']._FillValue = spval
print 'Creating variable u_south'
ncu.createVariable('u_south', 'f8', ('ocean_time', 's_rho', 'xi_u'), fill_value=spval)
ncu.variables['u_south'].long_name = '3D u-momentum south boundary condition'
ncu.variables['u_south'].units = 'meter second-1'
ncu.variables['u_south'].field = 'u_south, scalar, series'
#ncu.variables['u_south']._FillValue = spval
print 'Creating variable u_west'
ncu.createVariable('u_west', 'f8', ('ocean_time', 's_rho', 'eta_u'), fill_value=spval)
ncu.variables['u_west'].long_name = '3D u-momentum west boundary condition'
ncu.variables['u_west'].units = 'meter second-1'
ncu.variables['u_west'].field = 'u_east, scalar, series'
#ncu.variables['u_west']._FillValue = spval
# create variable in destination file
print 'Creating variable ubar_north'
ncu.createVariable('ubar_north', 'f8', ('ocean_time', 'xi_u'), fill_value=spval)
ncu.variables['ubar_north'].long_name = '2D u-momentum north boundary condition'
ncu.variables['ubar_north'].units = 'meter second-1'
ncu.variables['ubar_north'].field = 'ubar_north, scalar, series'
#ncu.variables['ubar_north']._FillValue = spval
print 'Creating variable ubar_south'
ncu.createVariable('ubar_south', 'f8', ('ocean_time', 'xi_u'), fill_value=spval)
ncu.variables['ubar_south'].long_name = '2D u-momentum south boundary condition'
ncu.variables['ubar_south'].units = 'meter second-1'
ncu.variables['ubar_south'].field = 'ubar_south, scalar, series'
#ncu.variables['ubar_south']._FillValue = spval
print 'Creating variable ubar_west'
ncu.createVariable('ubar_west', 'f8', ('ocean_time', 'eta_u'), fill_value=spval)
ncu.variables['ubar_west'].long_name = '2D u-momentum west boundary condition'
ncu.variables['ubar_west'].units = 'meter second-1'
ncu.variables['ubar_west'].field = 'ubar_east, scalar, series'
#ncu.variables['ubar_west']._FillValue = spval
print 'Creating variable v_north'
ncv.createVariable('v_north', 'f8', ('ocean_time', 's_rho', 'xi_v'), fill_value=spval)
ncv.variables['v_north'].long_name = '3D v-momentum north boundary condition'
ncv.variables['v_north'].units = 'meter second-1'
ncv.variables['v_north'].field = 'v_north, scalar, series'
#ncv.variables['v_north']._FillValue = spval
print 'Creating variable v_south'
ncv.createVariable('v_south', 'f8', ('ocean_time', 's_rho', 'xi_v'), fill_value=spval)
ncv.variables['v_south'].long_name = '3D v-momentum south boundary condition'
ncv.variables['v_south'].units = 'meter second-1'
ncv.variables['v_south'].field = 'v_south, scalar, series'
#ncv.variables['v_south']._FillValue = spval
print 'Creating variable v_west'
ncv.createVariable('v_west', 'f8', ('ocean_time', 's_rho', 'eta_v'), fill_value=spval)
ncv.variables['v_west'].long_name = '3D v-momentum west boundary condition'
ncv.variables['v_west'].units = 'meter second-1'
ncv.variables['v_west'].field = 'v_east, scalar, series'
#ncv.variables['v_west']._FillValue = spval
print 'Creating variable vbar_north'
ncv.createVariable('vbar_north', 'f8', ('ocean_time', 'xi_v'), fill_value=spval)
ncv.variables['vbar_north'].long_name = '2D v-momentum north boundary condition'
ncv.variables['vbar_north'].units = 'meter second-1'
ncv.variables['vbar_north'].field = 'vbar_north, scalar, series'
#ncv.variables['vbar_north']._FillValue = spval
print 'Creating variable vbar_south'
ncv.createVariable('vbar_south', 'f8', ('ocean_time', 'xi_v'), fill_value=spval)
ncv.variables['vbar_south'].long_name = '2D v-momentum south boundary condition'
ncv.variables['vbar_south'].units = 'meter second-1'
ncv.variables['vbar_south'].field = 'vbar_south, scalar, series'
#ncv.variables['vbar_south']._FillValue = spval
print 'Creating variable vbar_west'
ncv.createVariable('vbar_west', 'f8', ('ocean_time', 'eta_v'), fill_value=spval)
ncv.variables['vbar_west'].long_name = '2D v-momentum west boundary condition'
ncv.variables['vbar_west'].units = 'meter second-1'
ncv.variables['vbar_west'].field = 'vbar_east, scalar, series'
#ncv.variables['vbar_west']._FillValue = spval
# remaping
print 'remapping and rotating u and v from', src_grd.name, \
'to', dst_grd.name
# flood the grid
print 'flood the grid'
src_uz = pyroms_toolbox.BGrid_SODA.flood(src_varu, src_grd, Bpos='uv', \
spval=spval, dmax=dmax, cdepth=cdepth, kk=kk)
src_vz = pyroms_toolbox.BGrid_SODA.flood(src_varv, src_grd, Bpos='uv', \
spval=spval, dmax=dmax, cdepth=cdepth, kk=kk)
# horizontal interpolation using scrip weights
print 'horizontal interpolation using scrip weights'
dst_uz = pyroms.remapping.remap(src_uz, wts_file, \
spval=spval)
dst_vz = pyroms.remapping.remap(src_vz, wts_file, \
spval=spval)
# vertical interpolation from standard z level to sigma
print 'vertical interpolation from standard z level to sigma'
dst_u_north = pyroms.remapping.z2roms(dst_uz[::-1, Mp-2:Mp, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(Mp-2,Mp))
dst_u_south = pyroms.remapping.z2roms(dst_uz[::-1, 0:2, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(0,2))
dst_u_west = pyroms.remapping.z2roms(dst_uz[::-1, 0:Mp, 0:2], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,2), jrange=(0,Mp))
dst_v_north = pyroms.remapping.z2roms(dst_vz[::-1, Mp-2:Mp, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(Mp-2,Mp))
dst_v_south = pyroms.remapping.z2roms(dst_vz[::-1, 0:2, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(0,2))
dst_v_west = pyroms.remapping.z2roms(dst_vz[::-1, 0:Mp, 0:2], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,2), jrange=(0,Mp))
# rotate u,v fields
src_angle = np.zeros(dst_grd.hgrid.angle_rho.shape)
dst_angle = dst_grd.hgrid.angle_rho
angle = dst_angle - src_angle
angle = np.tile(angle, (dst_grd.vgrid.N, 1, 1))
U_north = dst_u_north + dst_v_north*1j
eitheta_north = np.exp(-1j*angle[:,Mp-2:Mp, 0:Lp])
U_north = U_north * eitheta_north
dst_u_north = np.real(U_north)
dst_v_north = np.imag(U_north)
U_south = dst_u_south + dst_v_south*1j
eitheta_south = np.exp(-1j*angle[:,0:2, 0:Lp])
U_south = U_south * eitheta_south
dst_u_south = np.real(U_south)
dst_v_south = np.imag(U_south)
U_west = dst_u_west + dst_v_west*1j
eitheta_west = np.exp(-1j*angle[:,0:Mp, 0:2])
U_west = U_west * eitheta_west
dst_u_west = np.real(U_west)
dst_v_west = np.imag(U_west)
# move back to u,v points
dst_u_north = 0.5 * np.squeeze(dst_u_north[:,-1,:-1] + dst_u_north[:,-1,1:])
dst_v_north = 0.5 * np.squeeze(dst_v_north[:,:-1,:] + dst_v_north[:,1:,:])
dst_u_south = 0.5 * np.squeeze(dst_u_south[:,0,:-1] + dst_u_south[:,0,1:])
dst_v_south = 0.5 * np.squeeze(dst_v_south[:,:-1,:] + dst_v_south[:,1:,:])
dst_u_west = 0.5 * np.squeeze(dst_u_west[:,:,:-1] + dst_u_west[:,:,1:])
dst_v_west = 0.5 * np.squeeze(dst_v_west[:,:-1,0] + dst_v_west[:,1:,0])
# spval
idxu_north = np.where(dst_grd.hgrid.mask_u[-1,:] == 0)
idxv_north = np.where(dst_grd.hgrid.mask_v[-1,:] == 0)
idxu_south = np.where(dst_grd.hgrid.mask_u[0,:] == 0)
idxv_south = np.where(dst_grd.hgrid.mask_v[0,:] == 0)
idxu_west = np.where(dst_grd.hgrid.mask_u[:,0] == 0)
idxv_west = np.where(dst_grd.hgrid.mask_v[:,0] == 0)
for n in range(dst_grd.vgrid.N):
dst_u_north[n, idxu_north[0]] = spval
dst_v_north[n, idxv_north[0]] = spval
dst_u_south[n, idxu_south[0]] = spval
dst_v_south[n, idxv_south[0]] = spval
dst_u_west[n, idxu_west[0]] = spval
dst_v_west[n, idxv_west[0]] = spval
# compute depth average velocity ubar and vbar
# get z at the right position
z_u_north = 0.5 * (dst_grd.vgrid.z_w[0,:,-1,:-1] + dst_grd.vgrid.z_w[0,:,-1,1:])
z_v_north = 0.5 * (dst_grd.vgrid.z_w[0,:,-1,:] + dst_grd.vgrid.z_w[0,:,-2,:])
z_u_south = 0.5 * (dst_grd.vgrid.z_w[0,:,0,:-1] + dst_grd.vgrid.z_w[0,:,0,1:])
z_v_south = 0.5 * (dst_grd.vgrid.z_w[0,:,0,:] + dst_grd.vgrid.z_w[0,:,1,:])
z_u_west = 0.5 * (dst_grd.vgrid.z_w[0,:,:,0] + dst_grd.vgrid.z_w[0,:,:,1])
z_v_west = 0.5 * (dst_grd.vgrid.z_w[0,:,:-1,0] + dst_grd.vgrid.z_w[0,:,1:,0])
dst_ubar_north = np.zeros(dst_u_north.shape[1])
dst_ubar_south = np.zeros(dst_u_south.shape[1])
dst_ubar_west = np.zeros(dst_u_west.shape[1])
dst_vbar_north = np.zeros(dst_v_north.shape[1])
dst_vbar_south = np.zeros(dst_v_south.shape[1])
dst_vbar_west = np.zeros(dst_v_west.shape[1])
for i in range(dst_u_north.shape[1]):
dst_ubar_north[i] = (dst_u_north[:,i] * np.diff(z_u_north[:,i])).sum() / -z_u_north[0,i]
dst_ubar_south[i] = (dst_u_south[:,i] * np.diff(z_u_south[:,i])).sum() / -z_u_south[0,i]
for i in range(dst_v_north.shape[1]):
dst_vbar_north[i] = (dst_v_north[:,i] * np.diff(z_v_north[:,i])).sum() / -z_v_north[0,i]
dst_vbar_south[i] = (dst_v_south[:,i] * np.diff(z_v_south[:,i])).sum() / -z_v_south[0,i]
for j in range(dst_u_west.shape[1]):
dst_ubar_west[j] = (dst_u_west[:,j] * np.diff(z_u_west[:,j])).sum() / -z_u_west[0,j]
for j in range(dst_v_west.shape[1]):
dst_vbar_west[j] = (dst_v_west[:,j] * np.diff(z_v_west[:,j])).sum() / -z_v_west[0,j]
#mask
dst_ubar_north = np.ma.masked_where(dst_grd.hgrid.mask_u[-1,:] == 0, dst_ubar_north)
dst_ubar_south = np.ma.masked_where(dst_grd.hgrid.mask_u[0,:] == 0, dst_ubar_south)
dst_ubar_west = np.ma.masked_where(dst_grd.hgrid.mask_u[:,0] == 0, dst_ubar_west)
dst_vbar_north = np.ma.masked_where(dst_grd.hgrid.mask_v[-1,:] == 0, dst_vbar_north)
dst_vbar_south = np.ma.masked_where(dst_grd.hgrid.mask_v[0,:] == 0, dst_vbar_south)
dst_vbar_west = np.ma.masked_where(dst_grd.hgrid.mask_v[:,0] == 0, dst_vbar_west)
# write data in destination file
print 'write data in destination file'
ncu.variables['ocean_time'][0] = time
ncu.variables['u_north'][0] = dst_u_north
ncu.variables['u_south'][0] = dst_u_south
ncu.variables['u_west'][0] = dst_u_west
ncu.variables['ubar_north'][0] = dst_ubar_north
ncu.variables['ubar_south'][0] = dst_ubar_south
ncu.variables['ubar_west'][0] = dst_ubar_west
ncv.variables['ocean_time'][0] = time
ncv.variables['v_north'][0] = dst_v_north
ncv.variables['v_south'][0] = dst_v_south
ncv.variables['v_west'][0] = dst_v_west
ncv.variables['vbar_north'][0] = dst_vbar_north
ncv.variables['vbar_south'][0] = dst_vbar_south
ncv.variables['vbar_west'][0] = dst_vbar_west
# close file
ncu.close()
ncv.close()
cdfuv.close()
| kshedstrom/pyroms | examples/CCS1_SODA3.3.1/Boundary/remap_bdry_uv.py | Python | bsd-3-clause | 14,025 | [
"NetCDF"
] | 0f6ee67988e6e0856245bcf19e9b66d79d053dc6714d1dd902682ea4a9c92c97 |
# IPython log file
import numpy as np
from IPython import get_ipython
from numba import cuda
cuda.detect()
n = int(1e6)
a = np.random.rand(n)
b = np.random.rand(n)
out = np.zeros_like(a)
@cuda.jit
def add_gpu(a, b, out):
i = cuda.threadIdx.x
if i < a.size:
out[i] = a[i] + b[i]
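# Hedged note, not part of the original session: cuda.threadIdx.x only indexes
# threads inside a single block, so a [1, n] launch covers at most one block of
# up to 1024 threads; n = 1e6 exceeds that limit.  A sketch of a kernel that uses
# the absolute thread index across the whole grid would look like:
@cuda.jit
def add_gpu_global(a, b, out):
    i = cuda.grid(1)          # blockIdx.x * blockDim.x + threadIdx.x
    if i < a.size:
        out[i] = a[i] + b[i]
# launched e.g. as add_gpu_global[(n + 255) // 256, 256](a, b, out)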
get_ipython().magic('timeit -r 1 -n 1 add_cuda[1, n](a, b, out)')
get_ipython().magic('timeit -r 1 -n 1 add_gpu[1, n](a, b, out)')
n = int(1e6)
n = int(1e3)
a = np.random.rand(n)
b = np.random.rand(n)
out = np.zeros_like(a)
get_ipython().magic('timeit -r 1 -n 1 add_gpu[1, n](a, b, out)')
get_ipython().magic('timeit -r 1 -n 1 add_gpu[1, n](a, b, out)')
np.all(out == a+b)
get_ipython().magic('timeit -r 1 -n 1 a + b')
cuda.blockDim
from numba import vectorize
import math
@vectorize(['float32(float32, float32, float32)',
'float64(float64, float64, float64)'],
target='cuda')
def cu_discriminant(a, b, c):
return math.sqrt(b**2 - 4*a*c)
N = 1e4
t = np.float32
get_ipython().magic('pinfo np.random.sample')
A = np.random.sample(N).astype(t)
N = int(1e4)
A = np.random.sample(N).astype(t)
B = np.random.sample(N).astype(t) + 10
C = np.random.sample(N).astype(t)
get_ipython().magic('timeit -r 1 -n 1 D = cu_discriminant(A, B, C)')
get_ipython().magic('timeit -r 1 -n 1 D = cu_discriminant(A, B, C)')
get_ipython().magic('timeit -r 1 -n 1 np.sqrt(B**2 - 4*A*C)')
N = int(1e6)
A = np.random.sample(N).astype(t)
B = np.random.sample(N).astype(t) + 10
C = np.random.sample(N).astype(t)
get_ipython().magic('timeit -r 1 -n 1 D = cu_discriminant(A, B, C)')
get_ipython().magic('timeit -r 1 -n 1 np.sqrt(B**2 - 4*A*C)')
N = int(1e8)
A = np.random.sample(N).astype(t)
B = np.random.sample(N).astype(t) + 10
C = np.random.sample(N).astype(t)
get_ipython().magic('timeit -r 1 -n 1 np.sqrt(B**2 - 4*A*C)')
get_ipython().magic('timeit -r 1 -n 1 D = cu_discriminant(A, B, C)')
N = int(1e8)
N = int(1e7)
A = np.random.sample(N).astype(t)
B = np.random.sample(N).astype(t) + 10
C = np.random.sample(N).astype(t)
get_ipython().magic('timeit -r 1 -n 1 D = cu_discriminant(A, B, C)')
get_ipython().magic('timeit -r 1 -n 1 np.sqrt(B**2 - 4*A*C)')
get_ipython().magic('timeit -r 1 -n 1 D = cu_discriminant(A, B, C)')
get_ipython().magic('timeit -r 1 -n 1 D = cu_discriminant(A, B, C)')
@vectorize(['float32(float32, float32, float32)',
'float64(float64, float64, float64)'],
target='cuda')
def cu_crazy_func(a, b, c):
return math.sqrt(b**2 - 4*math.log(a)*math.sin(c)) + b
get_ipython().magic('timeit -r 1 -n 1 np.sqrt(B**2 - 4*np.log(A)*np.sin(C)) + B')
get_ipython().magic('timeit -r 1 -n 1 D = cu_crazy_func(A, B, C)')
get_ipython().magic('timeit -r 1 -n 1 D = cu_crazy_func(A, B, C)')
cuda.detect()
cuda.config()
from skimage import util
get_ipython().magic('pinfo util.apply_parallel')
image = np.random.rand(4096, 4096)
from skimage import filters
get_ipython().magic('timeit g = util.apply_parallel(filters.gaussian, image,')
get_ipython().magic("timeit -r 1 -n 1 g = util.apply_parallel(filters.gaussian, image, chunks=1024, extra_arguments={'sigma': 5})")
import toolz as tz
gauss = tz.curry(filters.gaussian)
get_ipython().magic('timeit -r 1 -n 1 g = util.apply_parallel(gauss(sigma=5), image, chunks=1024)')
get_ipython().magic('timeit -r 1 -n 1 gauss(sigma=5)(image)')
| jni/useful-histories | numba-cuda.py | Python | bsd-3-clause | 3,324 | [
"Gaussian"
] | a2e9a4c893ffa128e740a93a309340ed9fac847b57705746984dd15311c13c12 |
"""
Calculate the average protein lengths from the blast output files.
"""
import os
import sys
import argparse
from roblib import bcolors, median
def av_protein_lengths(sample, blastfile, fractionout, summaryout, searchtype):
"""
Calculate the average length of the best hit of all the proteins
"""
q = {}
av = []
sys.stderr.write(f"{bcolors.GREEN}Average protein lengths for {sample} and {searchtype}{bcolors.ENDC}\n")
with open(blastfile, 'r') as f:
with open(fractionout, 'w') as out:
for l in f:
p = l.strip().split("\t")
if p[0] in q:
continue
q[p[0]] = int(p[12])/int(p[13])
av.append(q[p[0]])
out.write(f"{p[0]}\t{q[p[0]]}\n")
with open(summaryout, 'w') as out:
out.write(f"{sample}\tAverage {searchtype} protein lengths\t")
out.write("[num orfs, median proportional length, average proportional length]\t")
out.write(f"{len(av)}\t{median(av)}\t{sum(av)/len(av)}\n")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=" ")
parser.add_argument('-s', help='sample name used in output', required=True)
parser.add_argument('-b', help='blast m8 file', required=True)
parser.add_argument('-f', help='fractions output file', required=True)
parser.add_argument('-o', help='summary output file', required=True)
parser.add_argument('-t', help='search type (e.g. phage, bacteria) (used in output)', required=True)
args = parser.parse_args()
av_protein_lengths(args.s, args.b, args.f, args.o, args.t) | linsalrob/EdwardsLab | phage/phage_quality_assessment_scripts/av_protein_lengths.py | Python | mit | 1,633 | [
"BLAST"
] | 2a267e62a29d052131a40581c0fc73d4b3fc6dc333f68be055eae9cd8c69286a |
# $Id$
#
# Copyright (C) 2003-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""
unit testing code for the Smiles file handling stuff
"""
import unittest
from rdkit import Chem
from rdkit.six import next
from rdkit import RDLogger
class TestCase(unittest.TestCase):
def setUp(self):
self.smis = ['CC', 'CCC', 'CCCCC', 'CCCCCC', 'CCCCCCC', 'CC', 'CCCCOC']
self.nMolecules = len(self.smis)
def tearDown(self):
RDLogger.EnableLog('rdApp.error')
def assertMolecule(self, mol, i, msg=''):
""" Assert that we have a valid molecule """
self.assertIsNotNone(mol, '{0}read {1} failed'.format(msg, i))
self.assertGreater(mol.GetNumAtoms(), 0, '{0}no atoms in mol {1}'.format(msg, i))
def test_SmilesReaderIndex(self):
# tests lazy reads
supp = Chem.SmilesMolSupplierFromText('\n'.join(self.smis), ',', 0, -1, 0)
for i in range(4):
self.assertMolecule(next(supp), i)
i = len(supp) - 1
self.assertMolecule(supp[i], i)
# Use in a list comprehension
ms = [Chem.MolToSmiles(mol) for mol in supp]
self.assertEqual(ms, self.smis)
self.assertEqual(len(supp), self.nMolecules, 'bad supplier length')
# Despite iterating through the whole supplier, we can still access by index
i = self.nMolecules - 3
self.assertMolecule(supp[i - 1], i, msg='back index: ')
with self.assertRaises(IndexError):
_ = supp[self.nMolecules] # out of bound read must fail
# and we can access with negative numbers
mol1 = supp[len(supp) - 1]
mol2 = supp[-1]
self.assertEqual(Chem.MolToSmiles(mol1), Chem.MolToSmiles(mol2))
def test_SmilesReaderIterator(self):
# tests lazy reads using the iterator interface "
supp = Chem.SmilesMolSupplierFromText('\n'.join(self.smis), ',', 0, -1, 0)
nDone = 0
for mol in supp:
self.assertMolecule(mol, nDone)
nDone += 1
self.assertEqual(nDone, self.nMolecules, 'bad number of molecules')
self.assertEqual(len(supp), self.nMolecules, 'bad supplier length')
# Despite iterating through the whole supplier, we can still access by index
i = self.nMolecules - 3
self.assertMolecule(supp[i - 1], i, msg='back index: ')
with self.assertRaises(IndexError):
_ = supp[self.nMolecules]  # out of bound read must fail
def test_SmilesReaderBoundaryConditions(self):
# Suppress the error message due to the incorrect smiles
RDLogger.DisableLog('rdApp.error')
smis = ['CC', 'CCOC', 'fail', 'CCO']
supp = Chem.SmilesMolSupplierFromText('\n'.join(smis), ',', 0, -1, 0)
self.assertEqual(len(supp), 4)
self.assertIsNone(supp[2])
self.assertIsNotNone(supp[3])
supp = Chem.SmilesMolSupplierFromText('\n'.join(smis), ',', 0, -1, 0)
self.assertIsNone(supp[2])
self.assertIsNotNone(supp[3])
self.assertEqual(len(supp), 4)
with self.assertRaises(IndexError):
supp[4]
supp = Chem.SmilesMolSupplierFromText('\n'.join(smis), ',', 0, -1, 0)
self.assertEqual(len(supp), 4)
self.assertIsNotNone(supp[3])
with self.assertRaises(IndexError):
supp[4]
supp = Chem.SmilesMolSupplierFromText('\n'.join(smis), ',', 0, -1, 0)
with self.assertRaises(IndexError):
supp[4]
self.assertEqual(len(supp), 4)
self.assertIsNotNone(supp[3])
if __name__ == '__main__': # pragma: nocover
unittest.main()
| rvianello/rdkit | rdkit/Chem/Suppliers/UnitTestSmilesMolSupplier.py | Python | bsd-3-clause | 3,573 | [
"RDKit"
] | 714ef9c024f7e095950df42e12ca7ea7fc411182af2465d6fd65d94380b6bfad |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file defines RotatePictureExplorer, an explorer for
PictureSensor.
"""
from nupicvision.regions.PictureSensor import PictureSensor
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=
# RotatePictureExplorer
class RotatePictureExplorer(PictureSensor.PictureExplorer):
@classmethod
def queryRelevantParams(klass):
"""
Returns a sequence of parameter names that are relevant to
the operation of the explorer.
May be extended or overridden by sub-classes as appropriate.
"""
return super(RotatePictureExplorer, klass).queryRelevantParams() + \
( 'radialLength', 'radialStep' )
def initSequence(self, state, params):
self._presentNextRotation(state, params)
def updateSequence(self, state, params):
self._presentNextRotation(state, params)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Internal helper method(s)
def _presentNextRotation(self, state, params):
"""
We will visit each grid position. For each grid position,
we rotate the object in 2D
"""
# Compute iteration indices
numRotations = 1 + int((params['maxAngularPosn'] - params['minAngularPosn'])
/ params['minAngularVelocity'])
edgeLen = 2 * params['radialLength'] + 1
numItersPerCat = edgeLen * edgeLen * numRotations
numCats = self._getNumCategories()
numIters = numItersPerCat * numCats
catIndex = self._getIterCount() // numItersPerCat
index = self._getIterCount() % numItersPerCat
blockIndex = index // numRotations
rotationIndex = index % numRotations
# Compute position within onion block
posnX = ((blockIndex % edgeLen) - params['radialLength']) * params['radialStep']
posnY = ((blockIndex // edgeLen) - params['radialLength']) * params['radialStep']
# Compute rotation angle
angularPosn = params['maxAngularPosn'] - params['minAngularVelocity'] * rotationIndex
# Update state
state['posnX'] = posnX
state['posnY'] = posnY
state['velocityX'] = 0
state['velocityY'] = 0
state['angularVelocity'] = params['minAngularVelocity']
state['angularPosn'] = angularPosn
state['catIndex'] = catIndex
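# Hedged worked example, not part of the original code: with radialLength=1,
# radialStep=1, minAngularPosn=0, maxAngularPosn=350 and minAngularVelocity=10,
# numRotations = 1 + int(350/10) = 36, edgeLen = 3 and numItersPerCat = 3*3*36 = 324.
# Iteration 40 of category 0 then gives blockIndex = 40 // 36 = 1 and
# rotationIndex = 40 % 36 = 4, i.e. posnX = 0, posnY = -1 and
# angularPosn = 350 - 10*4 = 310.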
| neuroidss/nupic.vision | nupicvision/regions/PictureSensorExplorers/rotate_block.py | Python | gpl-3.0 | 3,166 | [
"VisIt"
] | f7c310f4a49324b981de43dc8ef27ab719fbbdc144c63ea7b0011df0891a126a |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft = python sts = 4 ts = 4 sw = 4 et:
"""Afni preprocessing interfaces
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
import os
import os.path as op
import re
import numpy as np
from .base import (AFNICommandBase, AFNICommand, AFNICommandInputSpec, AFNICommandOutputSpec,
Info, no_afni)
from ..base import (CommandLineInputSpec, CommandLine, Directory, TraitedSpec,
traits, isdefined, File, InputMultiPath, Undefined)
from ...external.six import string_types
from ...utils.filemanip import (load_json, save_json, split_filename)
class BlurToFWHMInputSpec(AFNICommandInputSpec):
in_file = File(desc='The dataset that will be smoothed', argstr='-input %s', mandatory=True, exists=True)
automask = traits.Bool(desc='Create an automask from the input dataset.', argstr='-automask')
fwhm = traits.Float(desc='Blur until the 3D FWHM reaches this value (in mm)', argstr='-FWHM %f')
fwhmxy = traits.Float(desc='Blur until the 2D (x,y)-plane FWHM reaches this value (in mm)', argstr='-FWHMxy %f')
blurmaster = File(desc='The dataset whose smoothness controls the process.', argstr='-blurmaster %s', exists=True)
mask = File(desc='Mask dataset, if desired. Voxels NOT in mask will be set to zero in output.', argstr='-mask %s', exists=True)
class BlurToFWHM(AFNICommand):
"""Blurs a 'master' dataset until it reaches a specified FWHM smoothness (approximately).
For complete details, see the `to3d Documentation
<https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dBlurToFWHM.html>`_
Examples
========
>>> from nipype.interfaces import afni
>>> blur = afni.preprocess.BlurToFWHM()
>>> blur.inputs.in_file = 'epi.nii'
>>> blur.inputs.fwhm = 2.5
>>> blur.cmdline #doctest: +ELLIPSIS
'3dBlurToFWHM -FWHM 2.500000 -input epi.nii -prefix epi_afni'
"""
_cmd = '3dBlurToFWHM'
input_spec = BlurToFWHMInputSpec
output_spec = AFNICommandOutputSpec
class To3DInputSpec(AFNICommandInputSpec):
out_file = File(name_template="%s", desc='output image file name',
argstr='-prefix %s', name_source=["in_folder"])
in_folder = Directory(desc='folder with DICOM images to convert',
argstr='%s/*.dcm',
position=-1,
mandatory=True,
exists=True)
filetype = traits.Enum('spgr', 'fse', 'epan', 'anat', 'ct', 'spct',
'pet', 'mra', 'bmap', 'diff',
'omri', 'abuc', 'fim', 'fith', 'fico', 'fitt', 'fift',
'fizt', 'fict', 'fibt',
'fibn', 'figt', 'fipt',
'fbuc', argstr='-%s', desc='type of datafile being converted')
skipoutliers = traits.Bool(desc='skip the outliers check',
argstr='-skip_outliers')
assumemosaic = traits.Bool(desc='assume that Siemens image is mosaic',
argstr='-assume_dicom_mosaic')
datatype = traits.Enum('short', 'float', 'byte', 'complex',
desc='set output file datatype', argstr='-datum %s')
funcparams = traits.Str(desc='parameters for functional data',
argstr='-time:zt %s alt+z2')
class To3D(AFNICommand):
"""Create a 3D dataset from 2D image files using AFNI to3d command
For complete details, see the `to3d Documentation
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/to3d.html>`_
Examples
========
>>> from nipype.interfaces import afni
>>> To3D = afni.To3D()
>>> To3D.inputs.datatype = 'float'
>>> To3D.inputs.in_folder = '.'
>>> To3D.inputs.out_file = 'dicomdir.nii'
>>> To3D.inputs.filetype = "anat"
>>> To3D.cmdline #doctest: +ELLIPSIS
'to3d -datum float -anat -prefix dicomdir.nii ./*.dcm'
>>> res = To3D.run() #doctest: +SKIP
"""
_cmd = 'to3d'
input_spec = To3DInputSpec
output_spec = AFNICommandOutputSpec
class TShiftInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dTShift',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_tshift", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
tr = traits.Str(desc='manually set the TR. ' +
'You can attach suffix "s" for seconds or "ms" for milliseconds.',
argstr='-TR %s')
tzero = traits.Float(desc='align each slice to given time offset',
argstr='-tzero %s',
xor=['tslice'])
tslice = traits.Int(desc='align each slice to time offset of given slice',
argstr='-slice %s',
xor=['tzero'])
ignore = traits.Int(desc='ignore the first set of points specified',
argstr='-ignore %s')
interp = traits.Enum(('Fourier', 'linear', 'cubic', 'quintic', 'heptic'),
desc='different interpolation methods (see 3dTShift for details)' +
' default = Fourier', argstr='-%s')
tpattern = traits.Str(desc='use specified slice time pattern rather than one in header',
argstr='-tpattern %s')
rlt = traits.Bool(desc='Before shifting, remove the mean and linear trend',
argstr="-rlt")
rltplus = traits.Bool(desc='Before shifting,' +
' remove the mean and linear trend and ' +
'later put back the mean',
argstr="-rlt+")
class TShift(AFNICommand):
"""Shifts voxel time series from input
so that separate slices are aligned to the same
temporal origin
For complete details, see the `3dTshift Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTshift.html>
Examples
========
>>> from nipype.interfaces import afni as afni
>>> tshift = afni.TShift()
>>> tshift.inputs.in_file = 'functional.nii'
>>> tshift.inputs.tpattern = 'alt+z'
>>> tshift.inputs.tzero = 0.0
>>> tshift.cmdline #doctest:
'3dTshift -prefix functional_tshift -tpattern alt+z -tzero 0.0 functional.nii'
>>> res = tshift.run() # doctest: +SKIP
"""
_cmd = '3dTshift'
input_spec = TShiftInputSpec
output_spec = AFNICommandOutputSpec
class RefitInputSpec(CommandLineInputSpec):
in_file = File(desc='input file to 3drefit',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=True)
deoblique = traits.Bool(desc='replace current transformation' +
' matrix with cardinal matrix',
argstr='-deoblique')
xorigin = traits.Str(desc='x distance for edge voxel offset',
argstr='-xorigin %s')
yorigin = traits.Str(desc='y distance for edge voxel offset',
argstr='-yorigin %s')
zorigin = traits.Str(desc='z distance for edge voxel offset',
argstr='-zorigin %s')
xdel = traits.Float(desc='new x voxel dimension in mm',
argstr='-xdel %f')
ydel = traits.Float(desc='new y voxel dimension in mm',
argstr='-ydel %f')
zdel = traits.Float(desc='new z voxel dimension in mm',
argstr='-zdel %f')
space = traits.Enum('TLRC', 'MNI', 'ORIG',
argstr='-space %s',
desc='Associates the dataset with a specific' +
' template type, e.g. TLRC, MNI, ORIG')
class Refit(AFNICommandBase):
"""Changes some of the information inside a 3D dataset's header
For complete details, see the `3drefit Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3drefit.html>
Examples
========
>>> from nipype.interfaces import afni as afni
>>> refit = afni.Refit()
>>> refit.inputs.in_file = 'structural.nii'
>>> refit.inputs.deoblique = True
>>> refit.cmdline
'3drefit -deoblique structural.nii'
>>> res = refit.run() # doctest: +SKIP
"""
_cmd = '3drefit'
input_spec = RefitInputSpec
output_spec = AFNICommandOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs["out_file"] = os.path.abspath(self.inputs.in_file)
return outputs
class WarpInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dWarp',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_warp", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
tta2mni = traits.Bool(desc='transform dataset from Talairach to MNI152',
argstr='-tta2mni')
mni2tta = traits.Bool(desc='transform dataset from MNI152 to Talaraich',
argstr='-mni2tta')
matparent = File(desc="apply transformation from 3dWarpDrive",
argstr="-matparent %s",
exists=True)
deoblique = traits.Bool(desc='transform dataset from oblique to cardinal',
argstr='-deoblique')
interp = traits.Enum(('linear', 'cubic', 'NN', 'quintic'),
desc='spatial interpolation methods [default = linear]',
argstr='-%s')
gridset = File(desc="copy grid of specified dataset",
argstr="-gridset %s",
exists=True)
newgrid = traits.Float(desc="specify grid of this size (mm)",
argstr="-newgrid %f")
zpad = traits.Int(desc="pad input dataset with N planes" +
" of zero on all sides.",
argstr="-zpad %d")
class Warp(AFNICommand):
"""Use 3dWarp for spatially transforming a dataset
For complete details, see the `3dWarp Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dWarp.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> warp = afni.Warp()
>>> warp.inputs.in_file = 'structural.nii'
>>> warp.inputs.deoblique = True
>>> warp.inputs.out_file = "trans.nii.gz"
>>> warp.cmdline
'3dWarp -deoblique -prefix trans.nii.gz structural.nii'
>>> warp_2 = afni.Warp()
>>> warp_2.inputs.in_file = 'structural.nii'
>>> warp_2.inputs.newgrid = 1.0
>>> warp_2.inputs.out_file = "trans.nii.gz"
>>> warp_2.cmdline
'3dWarp -newgrid 1.000000 -prefix trans.nii.gz structural.nii'
"""
_cmd = '3dWarp'
input_spec = WarpInputSpec
output_spec = AFNICommandOutputSpec
class ResampleInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dresample',
argstr='-inset %s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_resample", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
orientation = traits.Str(desc='new orientation code',
argstr='-orient %s')
resample_mode = traits.Enum('NN', 'Li', 'Cu', 'Bk',
argstr='-rmode %s',
desc="resampling method from set {'NN', 'Li', 'Cu', 'Bk'}. These are for 'Nearest Neighbor', 'Linear', 'Cubic' and 'Blocky' interpolation, respectively. Default is NN.")
voxel_size = traits.Tuple(*[traits.Float()] * 3,
argstr='-dxyz %f %f %f',
desc="resample to new dx, dy and dz")
master = traits.File(argstr='-master %s',
desc='align dataset grid to a reference file')
class Resample(AFNICommand):
"""Resample or reorient an image using AFNI 3dresample command
For complete details, see the `3dresample Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dresample.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> resample = afni.Resample()
>>> resample.inputs.in_file = 'functional.nii'
>>> resample.inputs.orientation= 'RPI'
>>> resample.inputs.outputtype = "NIFTI"
>>> resample.cmdline
'3dresample -orient RPI -prefix functional_resample.nii -inset functional.nii'
>>> res = resample.run() # doctest: +SKIP
"""
_cmd = '3dresample'
input_spec = ResampleInputSpec
output_spec = AFNICommandOutputSpec
class AutoTcorrelateInputSpec(AFNICommandInputSpec):
in_file = File(desc='timeseries x space (volume or surface) file',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
polort = traits.Int(
desc='Remove polynomial trend of order m or -1 for no detrending',
argstr="-polort %d")
eta2 = traits.Bool(desc='eta^2 similarity',
argstr="-eta2")
mask = File(exists=True, desc="mask of voxels",
argstr="-mask %s")
mask_only_targets = traits.Bool(desc="use mask only on targets voxels",
argstr="-mask_only_targets",
xor=['mask_source'])
mask_source = File(exists=True,
desc="mask for source voxels",
argstr="-mask_source %s",
xor=['mask_only_targets'])
out_file = File(name_template="%s_similarity_matrix.1D", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
class AutoTcorrelate(AFNICommand):
"""Computes the correlation coefficient between the time series of each
pair of voxels in the input dataset, and stores the output into a
new anatomical bucket dataset [scaled to shorts to save memory space].
Examples
========
>>> from nipype.interfaces import afni as afni
>>> corr = afni.AutoTcorrelate()
>>> corr.inputs.in_file = 'functional.nii'
>>> corr.inputs.polort = -1
>>> corr.inputs.eta2 = True
>>> corr.inputs.mask = 'mask.nii'
>>> corr.inputs.mask_only_targets = True
>>> corr.cmdline # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
'3dAutoTcorrelate -eta2 -mask mask.nii -mask_only_targets -prefix functional_similarity_matrix.1D -polort -1 functional.nii'
>>> res = corr.run() # doctest: +SKIP
"""
input_spec = AutoTcorrelateInputSpec
output_spec = AFNICommandOutputSpec
_cmd = '3dAutoTcorrelate'
def _overload_extension(self, value, name=None):
path, base, ext = split_filename(value)
if ext.lower() not in [".1d", ".nii.gz", ".nii"]:
ext = ext + ".1D"
return os.path.join(path, base + ext)
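# Hedged illustration, not part of the original code: with the rule above a
# requested output name like 'mat.out' becomes 'mat.out.1D', while names already
# ending in .1D, .nii or .nii.gz are left unchanged.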
class TStatInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dTstat',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_tstat", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
mask = File(desc='mask file',
argstr='-mask %s',
exists=True)
options = traits.Str(desc='selected statistical output',
argstr='%s')
class TStat(AFNICommand):
"""Compute voxel-wise statistics using AFNI 3dTstat command
For complete details, see the `3dTstat Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTstat.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> tstat = afni.TStat()
>>> tstat.inputs.in_file = 'functional.nii'
>>> tstat.inputs.args= '-mean'
>>> tstat.inputs.out_file = "stats"
>>> tstat.cmdline
'3dTstat -mean -prefix stats functional.nii'
>>> res = tstat.run() # doctest: +SKIP
"""
_cmd = '3dTstat'
input_spec = TStatInputSpec
output_spec = AFNICommandOutputSpec
class DetrendInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dDetrend',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_detrend", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
class Detrend(AFNICommand):
"""This program removes components from voxel time series using
linear least squares
For complete details, see the `3dDetrend Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dDetrend.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> detrend = afni.Detrend()
>>> detrend.inputs.in_file = 'functional.nii'
>>> detrend.inputs.args = '-polort 2'
>>> detrend.inputs.outputtype = "AFNI"
>>> detrend.cmdline
'3dDetrend -polort 2 -prefix functional_detrend functional.nii'
>>> res = detrend.run() # doctest: +SKIP
"""
_cmd = '3dDetrend'
input_spec = DetrendInputSpec
output_spec = AFNICommandOutputSpec
class DespikeInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dDespike',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_despike", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
class Despike(AFNICommand):
"""Removes 'spikes' from the 3D+time input dataset
For complete details, see the `3dDespike Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dDespike.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> despike = afni.Despike()
>>> despike.inputs.in_file = 'functional.nii'
>>> despike.cmdline
'3dDespike -prefix functional_despike functional.nii'
>>> res = despike.run() # doctest: +SKIP
"""
_cmd = '3dDespike'
input_spec = DespikeInputSpec
output_spec = AFNICommandOutputSpec
class CentralityInputSpec(AFNICommandInputSpec):
"""Common input spec class for all centrality-related commmands
"""
mask = File(desc='mask file to mask input data',
argstr="-mask %s",
exists=True)
thresh = traits.Float(desc='threshold to exclude connections where corr <= thresh',
argstr='-thresh %f')
polort = traits.Int(desc='', argstr='-polort %d')
autoclip = traits.Bool(desc='Clip off low-intensity regions in the dataset',
argstr='-autoclip')
automask = traits.Bool(desc='Mask the dataset to target brain-only voxels',
argstr='-automask')
class DegreeCentralityInputSpec(CentralityInputSpec):
"""DegreeCentrality inputspec
"""
in_file = File(desc='input file to 3dDegreeCentrality',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
sparsity = traits.Float(desc='only take the top percent of connections',
argstr='-sparsity %f')
oned_file = traits.Str(desc='output filepath to text dump of correlation matrix',
argstr='-out1D %s')
class DegreeCentralityOutputSpec(AFNICommandOutputSpec):
"""DegreeCentrality outputspec
"""
oned_file = File(desc='The text output of the similarity matrix computed'\
'after thresholding with one-dimensional and '\
'ijk voxel indices, correlations, image extents, '\
'and affine matrix')
class DegreeCentrality(AFNICommand):
"""Performs degree centrality on a dataset using a given maskfile
via 3dDegreeCentrality
For complete details, see the `3dDegreeCentrality Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dDegreeCentrality.html>
Examples
========
>>> from nipype.interfaces import afni as afni
>>> degree = afni.DegreeCentrality()
>>> degree.inputs.in_file = 'functional.nii'
>>> degree.inputs.mask = 'mask.nii'
>>> degree.inputs.sparsity = 1 # keep the top one percent of connections
>>> degree.inputs.out_file = 'out.nii'
>>> degree.cmdline
'3dDegreeCentrality -mask mask.nii -prefix out.nii -sparsity 1.000000 functional.nii'
>>> res = degree.run() # doctest: +SKIP
"""
_cmd = '3dDegreeCentrality'
input_spec = DegreeCentralityInputSpec
output_spec = DegreeCentralityOutputSpec
# Re-define generated inputs
def _list_outputs(self):
# Import packages
import os
# Update outputs dictionary if oned file is defined
outputs = super(DegreeCentrality, self)._list_outputs()
if self.inputs.oned_file:
outputs['oned_file'] = os.path.abspath(self.inputs.oned_file)
return outputs
class ECMInputSpec(CentralityInputSpec):
"""ECM inputspec
"""
in_file = File(desc='input file to 3dECM',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
sparsity = traits.Float(desc='only take the top percent of connections',
argstr='-sparsity %f')
full = traits.Bool(desc='Full power method; enables thresholding; '\
'automatically selected if -thresh or -sparsity '\
'are set',
argstr='-full')
fecm = traits.Bool(desc='Fast centrality method; substantial speed '\
'increase but cannot accommodate thresholding; '\
'automatically selected if -thresh or -sparsity '\
'are not set',
argstr='-fecm')
shift = traits.Float(desc='shift correlation coefficients in similarity '\
'matrix to enforce non-negativity, s >= 0.0; '\
'default = 0.0 for -full, 1.0 for -fecm',
argstr='-shift %f')
scale = traits.Float(desc='scale correlation coefficients in similarity '\
'matrix to after shifting, x >= 0.0; '\
'default = 1.0 for -full, 0.5 for -fecm',
argstr='-scale %f')
eps = traits.Float(desc='sets the stopping criterion for the power '\
'iteration; l2|v_old - v_new| < eps*|v_old|; '\
'default = 0.001',
argstr='-eps %f')
max_iter = traits.Int(desc='sets the maximum number of iterations to use '\
'in the power iteration; default = 1000',
argstr='-max_iter %d')
memory = traits.Float(desc='Limit memory consumption on system by setting '\
'the amount of GB to limit the algorithm to; '\
'default = 2GB',
argstr='-memory %f')
class ECM(AFNICommand):
"""Performs degree centrality on a dataset using a given maskfile
via the 3dLFCD command
For complete details, see the `3dECM Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dECM.html>
Examples
========
>>> from nipype.interfaces import afni as afni
>>> ecm = afni.ECM()
>>> ecm.inputs.in_file = 'functional.nii'
>>> ecm.inputs.mask = 'mask.nii'
>>> ecm.inputs.sparsity = 0.1 # keep top 0.1% of connections
>>> ecm.inputs.out_file = 'out.nii'
>>> ecm.cmdline
'3dECM -mask mask.nii -prefix out.nii -sparsity 0.100000 functional.nii'
>>> res = ecm.run() # doctest: +SKIP
"""
_cmd = '3dECM'
input_spec = ECMInputSpec
output_spec = AFNICommandOutputSpec
class LFCDInputSpec(CentralityInputSpec):
"""LFCD inputspec
"""
in_file = File(desc='input file to 3dLFCD',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
class LFCD(AFNICommand):
"""Performs degree centrality on a dataset using a given maskfile
via the 3dLFCD command
For complete details, see the `3dLFCD Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dLFCD.html>
Examples
========
>>> from nipype.interfaces import afni as afni
>>> lfcd = afni.LFCD()
>>> lfcd.inputs.in_file = 'functional.nii'
>>> lfcd.inputs.mask = 'mask.nii'
>>> lfcd.inputs.thresh = 0.8 # keep all connections with corr >= 0.8
>>> lfcd.inputs.out_file = 'out.nii'
>>> lfcd.cmdline
'3dLFCD -mask mask.nii -prefix out.nii -thresh 0.800000 functional.nii'
>>> res = lfcd.run() # doctest: +SKIP
"""
_cmd = '3dLFCD'
input_spec = LFCDInputSpec
output_spec = AFNICommandOutputSpec
class AutomaskInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dAutomask',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_mask", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
brain_file = File(name_template="%s_masked",
desc="output file from 3dAutomask",
argstr='-apply_prefix %s',
name_source="in_file")
clfrac = traits.Float(desc='sets the clip level fraction' +
' (must be 0.1-0.9). ' +
'A small value will tend to make the mask larger [default = 0.5].',
argstr="-clfrac %s")
dilate = traits.Int(desc='dilate the mask outwards',
argstr="-dilate %s")
erode = traits.Int(desc='erode the mask inwards',
argstr="-erode %s")
class AutomaskOutputSpec(TraitedSpec):
out_file = File(desc='mask file',
exists=True)
brain_file = File(desc='brain file (skull stripped)', exists=True)
class Automask(AFNICommand):
"""Create a brain-only mask of the image using AFNI 3dAutomask command
For complete details, see the `3dAutomask Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dAutomask.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> automask = afni.Automask()
>>> automask.inputs.in_file = 'functional.nii'
>>> automask.inputs.dilate = 1
>>> automask.inputs.outputtype = "NIFTI"
>>> automask.cmdline #doctest: +ELLIPSIS
'3dAutomask -apply_prefix functional_masked.nii -dilate 1 -prefix functional_mask.nii functional.nii'
>>> res = automask.run() # doctest: +SKIP
"""
_cmd = '3dAutomask'
input_spec = AutomaskInputSpec
output_spec = AutomaskOutputSpec
class VolregInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dvolreg',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_volreg", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
basefile = File(desc='base file for registration',
argstr='-base %s',
position=-6,
exists=True)
zpad = traits.Int(desc='Zeropad around the edges' +
' by \'n\' voxels during rotations',
argstr='-zpad %d',
position=-5)
md1d_file = File(name_template='%s_md.1D', desc='max displacement output file',
argstr='-maxdisp1D %s', name_source="in_file",
keep_extension=True, position=-4)
oned_file = File(name_template='%s.1D', desc='1D movement parameters output file',
argstr='-1Dfile %s',
name_source="in_file",
keep_extension=True)
verbose = traits.Bool(desc='more detailed description of the process',
argstr='-verbose')
timeshift = traits.Bool(desc='time shift to mean slice time offset',
argstr='-tshift 0')
copyorigin = traits.Bool(desc='copy base file origin coords to output',
argstr='-twodup')
oned_matrix_save = File(name_template='%s.aff12.1D',
desc='Save the matrix transformation',
argstr='-1Dmatrix_save %s',
keep_extension=True,
name_source="in_file")
class VolregOutputSpec(TraitedSpec):
out_file = File(desc='registered file', exists=True)
md1d_file = File(desc='max displacement info file', exists=True)
oned_file = File(desc='movement parameters info file', exists=True)
oned_matrix_save = File(desc='matrix transformation from base to input', exists=True)
class Volreg(AFNICommand):
"""Register input volumes to a base volume using AFNI 3dvolreg command
For complete details, see the `3dvolreg Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dvolreg.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> volreg = afni.Volreg()
>>> volreg.inputs.in_file = 'functional.nii'
>>> volreg.inputs.args = '-Fourier -twopass'
>>> volreg.inputs.zpad = 4
>>> volreg.inputs.outputtype = "NIFTI"
>>> volreg.cmdline #doctest: +ELLIPSIS
'3dvolreg -Fourier -twopass -1Dfile functional.1D -1Dmatrix_save functional.aff12.1D -prefix functional_volreg.nii -zpad 4 -maxdisp1D functional_md.1D functional.nii'
>>> res = volreg.run() # doctest: +SKIP
"""
_cmd = '3dvolreg'
input_spec = VolregInputSpec
output_spec = VolregOutputSpec
class MergeInputSpec(AFNICommandInputSpec):
in_files = InputMultiPath(
File(desc='input file to 3dmerge', exists=True),
argstr='%s',
position=-1,
mandatory=True,
copyfile=False)
out_file = File(name_template="%s_merge", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
doall = traits.Bool(desc='apply options to all sub-bricks in dataset',
argstr='-doall')
blurfwhm = traits.Int(desc='FWHM blur value (mm)',
argstr='-1blur_fwhm %d',
units='mm')
class Merge(AFNICommand):
"""Merge or edit volumes using AFNI 3dmerge command
For complete details, see the `3dmerge Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dmerge.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> merge = afni.Merge()
>>> merge.inputs.in_files = ['functional.nii', 'functional2.nii']
>>> merge.inputs.blurfwhm = 4
>>> merge.inputs.doall = True
>>> merge.inputs.out_file = 'e7.nii'
>>> res = merge.run() # doctest: +SKIP
"""
_cmd = '3dmerge'
input_spec = MergeInputSpec
output_spec = AFNICommandOutputSpec
class CopyInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dcopy',
argstr='%s',
position=-2,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_copy", desc='output image file name',
argstr='%s', position=-1, name_source="in_file")
class Copy(AFNICommand):
"""Copies an image of one type to an image of the same
or different type using 3dcopy command
For complete details, see the `3dcopy Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dcopy.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> copy3d = afni.Copy()
>>> copy3d.inputs.in_file = 'functional.nii'
>>> copy3d.cmdline
'3dcopy functional.nii functional_copy'
>>> from copy import deepcopy
>>> copy3d_2 = deepcopy(copy3d)
>>> copy3d_2.inputs.outputtype = 'NIFTI'
>>> copy3d_2.cmdline
'3dcopy functional.nii functional_copy.nii'
>>> copy3d_3 = deepcopy(copy3d)
>>> copy3d_3.inputs.outputtype = 'NIFTI_GZ'
>>> copy3d_3.cmdline
'3dcopy functional.nii functional_copy.nii.gz'
>>> copy3d_4 = deepcopy(copy3d)
>>> copy3d_4.inputs.out_file = 'new_func.nii'
>>> copy3d_4.cmdline
'3dcopy functional.nii new_func.nii'
"""
_cmd = '3dcopy'
input_spec = CopyInputSpec
output_spec = AFNICommandOutputSpec
class FourierInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dFourier',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_fourier", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
lowpass = traits.Float(desc='lowpass',
argstr='-lowpass %f',
position=0,
mandatory=True)
highpass = traits.Float(desc='highpass',
argstr='-highpass %f',
position=1,
mandatory=True)
class Fourier(AFNICommand):
"""Program to lowpass and/or highpass each voxel time series in a
dataset, via the FFT
For complete details, see the `3dFourier Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dfourier.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> fourier = afni.Fourier()
>>> fourier.inputs.in_file = 'functional.nii'
>>> fourier.inputs.args = '-retrend'
>>> fourier.inputs.highpass = 0.005
>>> fourier.inputs.lowpass = 0.1
>>> res = fourier.run() # doctest: +SKIP
"""
_cmd = '3dFourier'
input_spec = FourierInputSpec
output_spec = AFNICommandOutputSpec
class BandpassInputSpec(AFNICommandInputSpec):
in_file = File(
desc='input file to 3dBandpass',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(
name_template='%s_bp',
desc='output file from 3dBandpass',
argstr='-prefix %s',
position=1,
name_source='in_file',
genfile=True)
lowpass = traits.Float(
desc='lowpass',
argstr='%f',
position=-2,
mandatory=True)
highpass = traits.Float(
desc='highpass',
argstr='%f',
position=-3,
mandatory=True)
mask = File(
desc='mask file',
position=2,
argstr='-mask %s',
exists=True)
despike = traits.Bool(
argstr='-despike',
desc="""Despike each time series before other processing.
++ Hopefully, you don't actually need to do this,
which is why it is optional.""")
orthogonalize_file = InputMultiPath(
File(exists=True),
argstr="-ort %s",
desc="""Also orthogonalize input to columns in f.1D
++ Multiple '-ort' options are allowed.""")
orthogonalize_dset = File(
exists=True,
argstr="-dsort %s",
desc="""Orthogonalize each voxel to the corresponding
voxel time series in dataset 'fset', which must
have the same spatial and temporal grid structure
as the main input dataset.
++ At present, only one '-dsort' option is allowed.""")
no_detrend = traits.Bool(
argstr='-nodetrend',
desc="""Skip the quadratic detrending of the input that
occurs before the FFT-based bandpassing.
++ You would only want to do this if the dataset
had been detrended already in some other program.""")
tr = traits.Float(
argstr="-dt %f",
desc="set time step (TR) in sec [default=from dataset header]")
nfft = traits.Int(
argstr='-nfft %d',
desc="set the FFT length [must be a legal value]")
normalize = traits.Bool(
argstr='-norm',
desc="""Make all output time series have L2 norm = 1
++ i.e., sum of squares = 1""")
automask = traits.Bool(
argstr='-automask',
desc="Create a mask from the input dataset")
blur = traits.Float(
argstr='-blur %f',
desc="""Blur (inside the mask only) with a filter
width (FWHM) of 'fff' millimeters.""")
localPV = traits.Float(
argstr='-localPV %f',
desc="""Replace each vector by the local Principal Vector
(AKA first singular vector) from a neighborhood
of radius 'rrr' millimeters.
++ Note that the PV time series is L2 normalized.
++ This option is mostly for Bob Cox to have fun with.""")
notrans = traits.Bool(
argstr='-notrans',
desc="""Don't check for initial positive transients in the data:
++ The test is a little slow, so skipping it is OK,
if you KNOW the data time series are transient-free.""")
class Bandpass(AFNICommand):
"""Program to lowpass and/or highpass each voxel time series in a
dataset, offering more/different options than Fourier
For complete details, see the `3dBandpass Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dbandpass.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> from nipype.testing import example_data
>>> bandpass = afni.Bandpass()
>>> bandpass.inputs.in_file = example_data('functional.nii')
>>> bandpass.inputs.highpass = 0.005
>>> bandpass.inputs.lowpass = 0.1
>>> res = bandpass.run() # doctest: +SKIP
"""
_cmd = '3dBandpass'
input_spec = BandpassInputSpec
output_spec = AFNICommandOutputSpec
class ZCutUpInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dZcutup',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_zcupup", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
keep = traits.Str(desc='slice range to keep in output',
argstr='-keep %s')
class ZCutUp(AFNICommand):
"""Cut z-slices from a volume using AFNI 3dZcutup command
For complete details, see the `3dZcutup Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dZcutup.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> zcutup = afni.ZCutUp()
>>> zcutup.inputs.in_file = 'functional.nii'
>>> zcutup.inputs.out_file = 'functional_zcutup.nii'
>>> zcutup.inputs.keep= '0 10'
>>> res = zcutup.run() # doctest: +SKIP
"""
_cmd = '3dZcutup'
input_spec = ZCutUpInputSpec
output_spec = AFNICommandOutputSpec
class AllineateInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dAllineate',
argstr='-source %s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
reference = File(
exists=True,
argstr='-base %s',
desc="""file to be used as reference, the first volume will be used
if not given the reference will be the first volume of in_file.""")
out_file = File(
desc='output file from 3dAllineate',
argstr='-prefix %s',
position=-2,
name_template='%s_allineate', name_source='in_file',
genfile=True)
out_param_file = File(
argstr='-1Dparam_save %s',
desc='Save the warp parameters in ASCII (.1D) format.')
in_param_file = File(
exists=True,
argstr='-1Dparam_apply %s',
desc="""Read warp parameters from file and apply them to
the source dataset, and produce a new dataset""")
out_matrix = File(
argstr='-1Dmatrix_save %s',
desc='Save the transformation matrix for each volume.')
in_matrix = File(desc='matrix to align input file',
argstr='-1Dmatrix_apply %s',
position=-3)
_cost_funcs = [
'leastsq', 'ls',
'mutualinfo', 'mi',
'corratio_mul', 'crM',
'norm_mutualinfo', 'nmi',
'hellinger', 'hel',
'corratio_add', 'crA',
'corratio_uns', 'crU']
cost = traits.Enum(
*_cost_funcs, argstr='-cost %s',
desc="""Defines the 'cost' function that defines the matching
between the source and the base""")
_interp_funcs = [
'nearestneighbour', 'linear', 'cubic', 'quintic', 'wsinc5']
interpolation = traits.Enum(
*_interp_funcs[:-1], argstr='-interp %s',
desc='Defines interpolation method to use during matching')
final_interpolation = traits.Enum(
*_interp_funcs, argstr='-final %s',
desc='Defines interpolation method used to create the output dataset')
# TECHNICAL OPTIONS (used for fine control of the program):
nmatch = traits.Int(
argstr='-nmatch %d',
desc='Use at most n scattered points to match the datasets.')
no_pad = traits.Bool(
argstr='-nopad',
desc='Do not use zero-padding on the base image.')
zclip = traits.Bool(
argstr='-zclip',
desc='Replace negative values in the input datasets (source & base) with zero.')
convergence = traits.Float(
argstr='-conv %f',
desc='Convergence test in millimeters (default 0.05mm).')
usetemp = traits.Bool(argstr='-usetemp', desc='temporary file use')
check = traits.List(
traits.Enum(*_cost_funcs), argstr='-check %s',
desc="""After cost functional optimization is done, start at the
final parameters and RE-optimize using this new cost functions.
If the results are too different, a warning message will be
printed. However, the final parameters from the original
optimization will be used to create the output dataset.""")
# ** PARAMETERS THAT AFFECT THE COST OPTIMIZATION STRATEGY **
one_pass = traits.Bool(
argstr='-onepass',
desc="""Use only the refining pass -- do not try a coarse
resolution pass first. Useful if you know that only
small amounts of image alignment are needed.""")
two_pass = traits.Bool(
argstr='-twopass',
desc="""Use a two pass alignment strategy for all volumes, searching
for a large rotation+shift and then refining the alignment.""")
two_blur = traits.Float(
argstr='-twoblur',
desc='Set the blurring radius for the first pass in mm.')
two_first = traits.Bool(
argstr='-twofirst',
desc="""Use -twopass on the first image to be registered, and
then on all subsequent images from the source dataset,
use results from the first image's coarse pass to start
the fine pass.""")
two_best = traits.Int(
argstr='-twobest %d',
desc="""In the coarse pass, use the best 'bb' set of initial
points to search for the starting point for the fine
pass. If bb==0, then no search is made for the best
starting point, and the identity transformation is
used as the starting point. [Default=5; min=0 max=11]""")
fine_blur = traits.Float(
argstr='-fineblur %f',
desc="""Set the blurring radius to use in the fine resolution
pass to 'x' mm. A small amount (1-2 mm?) of blurring at
the fine step may help with convergence, if there is
some problem, especially if the base volume is very noisy.
[Default == 0 mm = no blurring at the final alignment pass]""")
center_of_mass = traits.Str(
argstr='-cmass%s',
desc='Use the center-of-mass calculation to bracket the shifts.')
autoweight = traits.Str(
argstr='-autoweight%s',
desc="""Compute a weight function using the 3dAutomask
algorithm plus some blurring of the base image.""")
automask = traits.Int(
argstr='-automask+%d',
desc="""Compute a mask function, set a value for dilation or 0.""")
autobox = traits.Bool(
argstr='-autobox',
desc="""Expand the -automask function to enclose a rectangular
box that holds the irregular mask.""")
nomask = traits.Bool(
argstr='-nomask',
desc="""Don't compute the autoweight/mask; if -weight is not
also used, then every voxel will be counted equally.""")
weight_file = File(
argstr='-weight %s', exists=True,
desc="""Set the weighting for each voxel in the base dataset;
larger weights mean that voxel count more in the cost function.
Must be defined on the same grid as the base dataset""")
out_weight_file = traits.File(
argstr='-wtprefix %s',
desc="""Write the weight volume to disk as a dataset""")
source_mask = File(
exists=True, argstr='-source_mask %s',
desc='mask the input dataset')
source_automask = traits.Int(
argstr='-source_automask+%d',
desc='Automatically mask the source dataset with dilation or 0.')
warp_type = traits.Enum(
'shift_only', 'shift_rotate', 'shift_rotate_scale', 'affine_general',
argstr='-warp %s',
desc='Set the warp type.')
warpfreeze = traits.Bool(
argstr='-warpfreeze',
desc='Freeze the non-rigid body parameters after first volume.')
replacebase = traits.Bool(
argstr='-replacebase',
desc="""If the source has more than one volume, then after the first
volume is aligned to the base""")
replacemeth = traits.Enum(
*_cost_funcs,
argstr='-replacemeth %s',
desc="""After first volume is aligned, switch method for later volumes.
For use with '-replacebase'.""")
epi = traits.Bool(
argstr='-EPI',
desc="""Treat the source dataset as being composed of warped
EPI slices, and the base as comprising anatomically
'true' images. Only phase-encoding direction image
shearing and scaling will be allowed with this option.""")
master = File(
exists=True, argstr='-master %s',
desc='Write the output dataset on the same grid as this file')
newgrid = traits.Float(
argstr='-newgrid %f',
desc='Write the output dataset using isotropic grid spacing in mm')
# Non-linear experimental
_nwarp_types = ['bilinear',
'cubic', 'quintic', 'heptic', 'nonic',
'poly3', 'poly5', 'poly7', 'poly9'] # same non-hellenistic
nwarp = traits.Enum(
*_nwarp_types, argstr='-nwarp %s',
desc='Experimental nonlinear warping: bilinear or legendre poly.')
_dirs = ['X', 'Y', 'Z', 'I', 'J', 'K']
nwarp_fixmot = traits.List(
traits.Enum(*_dirs),
argstr='-nwarp_fixmot%s',
desc='To fix motion along directions.')
nwarp_fixdep = traits.List(
traits.Enum(*_dirs),
argstr='-nwarp_fixdep%s',
desc='To fix non-linear warp dependency along directions.')
class AllineateOutputSpec(TraitedSpec):
out_file = File(desc='output image file name')
matrix = File(desc='matrix to align input file')
class Allineate(AFNICommand):
"""Program to align one dataset (the 'source') to a base dataset
For complete details, see the `3dAllineate Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dAllineate.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> allineate = afni.Allineate()
>>> allineate.inputs.in_file = 'functional.nii'
>>> allineate.inputs.out_file= 'functional_allineate.nii'
>>> allineate.inputs.in_matrix= 'cmatrix.mat'
>>> res = allineate.run() # doctest: +SKIP
"""
_cmd = '3dAllineate'
input_spec = AllineateInputSpec
output_spec = AllineateOutputSpec
def _format_arg(self, name, trait_spec, value):
if name == 'nwarp_fixmot' or name == 'nwarp_fixdep':
arg = ' '.join([trait_spec.argstr % v for v in value])
return arg
return super(Allineate, self)._format_arg(name, trait_spec, value)
def _list_outputs(self):
outputs = self.output_spec().get()
if not isdefined(self.inputs.out_file):
outputs['out_file'] = self._gen_filename(self.inputs.in_file,
suffix=self.inputs.suffix)
else:
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
if isdefined(self.inputs.out_matrix):
outputs['matrix'] = os.path.abspath(os.path.join(os.getcwd(),\
self.inputs.out_matrix +".aff12.1D"))
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._list_outputs()[name]
class MaskaveInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dmaskave',
argstr='%s',
position=-2,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_maskave.1D", desc='output image file name',
keep_extension=True,
argstr="> %s", name_source="in_file", position=-1)
mask = File(desc='matrix to align input file',
argstr='-mask %s',
position=1,
exists=True)
quiet = traits.Bool(desc='matrix to align input file',
argstr='-quiet',
position=2)
class Maskave(AFNICommand):
"""Computes average of all voxels in the input dataset
which satisfy the criterion in the options list
For complete details, see the `3dmaskave Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dmaskave.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> maskave = afni.Maskave()
>>> maskave.inputs.in_file = 'functional.nii'
>>> maskave.inputs.mask= 'seed_mask.nii'
>>> maskave.inputs.quiet= True
>>> maskave.cmdline #doctest: +ELLIPSIS
'3dmaskave -mask seed_mask.nii -quiet functional.nii > functional_maskave.1D'
>>> res = maskave.run() # doctest: +SKIP
"""
_cmd = '3dmaskave'
input_spec = MaskaveInputSpec
output_spec = AFNICommandOutputSpec
class SkullStripInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dSkullStrip',
argstr='-input %s',
position=1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_skullstrip", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
class SkullStrip(AFNICommand):
"""A program to extract the brain from surrounding
tissue from MRI T1-weighted images
For complete details, see the `3dSkullStrip Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dSkullStrip.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> skullstrip = afni.SkullStrip()
>>> skullstrip.inputs.in_file = 'functional.nii'
>>> skullstrip.inputs.args = '-o_ply'
>>> res = skullstrip.run() # doctest: +SKIP
"""
_cmd = '3dSkullStrip'
_redirect_x = True
input_spec = SkullStripInputSpec
output_spec = AFNICommandOutputSpec
def __init__(self, **inputs):
super(SkullStrip, self).__init__(**inputs)
if not no_afni():
v = Info.version()
# As of AFNI 16.0.00, redirect_x is not needed
if isinstance(v[0], int) and v[0] > 15:
self._redirect_x = False
class TCatInputSpec(AFNICommandInputSpec):
in_files = InputMultiPath(
File(exists=True),
desc='input file to 3dTcat',
argstr=' %s',
position=-1,
mandatory=True,
copyfile=False)
out_file = File(name_template="%s_tcat", desc='output image file name',
argstr='-prefix %s', name_source="in_files")
rlt = traits.Str(desc='options', argstr='-rlt%s', position=1)
class TCat(AFNICommand):
"""Concatenate sub-bricks from input datasets into
one big 3D+time dataset
For complete details, see the `3dTcat Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTcat.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> tcat = afni.TCat()
>>> tcat.inputs.in_files = ['functional.nii', 'functional2.nii']
>>> tcat.inputs.out_file= 'functional_tcat.nii'
>>> tcat.inputs.rlt = '+'
>>> res = tcat.run() # doctest: +SKIP
"""
_cmd = '3dTcat'
input_spec = TCatInputSpec
output_spec = AFNICommandOutputSpec
class FimInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dfim+',
argstr=' -input %s',
position=1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_fim", desc='output image file name',
argstr='-bucket %s', name_source="in_file")
ideal_file = File(desc='ideal time series file name',
argstr='-ideal_file %s',
position=2,
mandatory=True,
exists=True)
fim_thr = traits.Float(desc='fim internal mask threshold value',
argstr='-fim_thr %f', position=3)
out = traits.Str(desc='Flag to output the specified parameter',
argstr='-out %s', position=4)
class Fim(AFNICommand):
"""Program to calculate the cross-correlation of
an ideal reference waveform with the measured FMRI
time series for each voxel
For complete details, see the `3dfim+ Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dfim+.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> fim = afni.Fim()
>>> fim.inputs.in_file = 'functional.nii'
>>> fim.inputs.ideal_file= 'seed.1D'
>>> fim.inputs.out_file = 'functional_corr.nii'
>>> fim.inputs.out = 'Correlation'
>>> fim.inputs.fim_thr = 0.0009
>>> res = fim.run() # doctest: +SKIP
"""
_cmd = '3dfim+'
input_spec = FimInputSpec
output_spec = AFNICommandOutputSpec
class TCorrelateInputSpec(AFNICommandInputSpec):
xset = File(desc='input xset',
argstr=' %s',
position=-2,
mandatory=True,
exists=True,
copyfile=False)
yset = File(desc='input yset',
argstr=' %s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_tcorr", desc='output image file name',
argstr='-prefix %s', name_source="xset")
pearson = traits.Bool(desc='Correlation is the normal' +
' Pearson correlation coefficient',
argstr='-pearson',
position=1)
polort = traits.Int(desc='Remove polynomical trend of order m',
argstr='-polort %d', position=2)
class TCorrelate(AFNICommand):
"""Computes the correlation coefficient between corresponding voxel
time series in two input 3D+time datasets 'xset' and 'yset'
For complete details, see the `3dTcorrelate Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTcorrelate.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> tcorrelate = afni.TCorrelate()
>>> tcorrelate.inputs.xset= 'u_rc1s1_Template.nii'
>>> tcorrelate.inputs.yset = 'u_rc1s2_Template.nii'
>>> tcorrelate.inputs.out_file = 'functional_tcorrelate.nii.gz'
>>> tcorrelate.inputs.polort = -1
>>> tcorrelate.inputs.pearson = True
>>> res = tcorrelate.run() # doctest: +SKIP
"""
_cmd = '3dTcorrelate'
input_spec = TCorrelateInputSpec
output_spec = AFNICommandOutputSpec
class TCorr1DInputSpec(AFNICommandInputSpec):
xset = File(desc='3d+time dataset input',
argstr=' %s',
position=-2,
mandatory=True,
exists=True,
copyfile=False)
y_1d = File(desc='1D time series file input',
argstr=' %s',
position=-1,
mandatory=True,
exists=True)
out_file = File(desc='output filename prefix',
name_template='%s_correlation.nii.gz',
argstr='-prefix %s',
name_source='xset',
keep_extension=True)
pearson = traits.Bool(desc='Correlation is the normal' +
' Pearson correlation coefficient',
argstr=' -pearson',
xor=['spearman', 'quadrant', 'ktaub'],
position=1)
spearman = traits.Bool(desc='Correlation is the' +
' Spearman (rank) correlation coefficient',
argstr=' -spearman',
xor=['pearson', 'quadrant', 'ktaub'],
position=1)
quadrant = traits.Bool(desc='Correlation is the' +
' quadrant correlation coefficient',
argstr=' -quadrant',
xor=['pearson', 'spearman', 'ktaub'],
position=1)
ktaub = traits.Bool(desc='Correlation is the' +
' Kendall\'s tau_b correlation coefficient',
argstr=' -ktaub',
xor=['pearson', 'spearman', 'quadrant'],
position=1)
class TCorr1DOutputSpec(TraitedSpec):
out_file = File(desc='output file containing correlations',
exists=True)
class TCorr1D(AFNICommand):
"""Computes the correlation coefficient between each voxel time series
in the input 3D+time dataset.
For complete details, see the `3dTcorr1D Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTcorr1D.html>`_
>>> from nipype.interfaces import afni as afni
>>> tcorr1D = afni.TCorr1D()
>>> tcorr1D.inputs.xset= 'u_rc1s1_Template.nii'
>>> tcorr1D.inputs.y_1d = 'seed.1D'
>>> tcorr1D.cmdline
'3dTcorr1D -prefix u_rc1s1_Template_correlation.nii.gz u_rc1s1_Template.nii seed.1D'
>>> res = tcorr1D.run() # doctest: +SKIP
"""
_cmd = '3dTcorr1D'
input_spec = TCorr1DInputSpec
output_spec = TCorr1DOutputSpec
class BrickStatInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dmaskave',
argstr='%s',
position=-1,
mandatory=True,
exists=True)
mask = File(desc='-mask dset = use dset as mask to include/exclude voxels',
argstr='-mask %s',
position=2,
exists=True)
min = traits.Bool(desc='print the minimum value in dataset',
argstr='-min',
position=1)
class BrickStatOutputSpec(TraitedSpec):
min_val = traits.Float(desc='output')
class BrickStat(AFNICommand):
"""Compute maximum and/or minimum voxel values of an input dataset
For complete details, see the `3dBrickStat Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dBrickStat.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> brickstat = afni.BrickStat()
>>> brickstat.inputs.in_file = 'functional.nii'
>>> brickstat.inputs.mask = 'skeleton_mask.nii.gz'
>>> brickstat.inputs.min = True
>>> res = brickstat.run() # doctest: +SKIP
"""
_cmd = '3dBrickStat'
input_spec = BrickStatInputSpec
output_spec = BrickStatOutputSpec
def aggregate_outputs(self, runtime=None, needed_outputs=None):
outputs = self._outputs()
outfile = os.path.join(os.getcwd(), 'stat_result.json')
if runtime is None:
try:
min_val = load_json(outfile)['stat']
except IOError:
return self.run().outputs
else:
min_val = []
for line in runtime.stdout.split('\n'):
if line:
values = line.split()
if len(values) > 1:
min_val.append([float(val) for val in values])
else:
min_val.extend([float(val) for val in values])
if len(min_val) == 1:
min_val = min_val[0]
save_json(outfile, dict(stat=min_val))
outputs.min_val = min_val
return outputs
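# Illustrative sketch, not part of the interface: aggregate_outputs above
# parses 3dBrickStat's stdout, turning a single-column line such as "2.5"
# into the scalar 2.5 and a multi-column line into a list of floats, then
# caches the value in stat_result.json so a later call without a runtime can
# reuse it. A stand-alone helper mirroring that loop (the example value in
# the trailing comment is hypothetical):
def _example_parse_brickstat_stdout(stdout):
    values = []
    for line in stdout.split('\n'):
        if line:
            columns = line.split()
            if len(columns) > 1:
                values.append([float(v) for v in columns])
            else:
                values.extend([float(v) for v in columns])
    # collapse a single value to a scalar, as aggregate_outputs does
    return values[0] if len(values) == 1 else values
# _example_parse_brickstat_stdout("2.5\n") -> 2.5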
class ClipLevelInputSpec(CommandLineInputSpec):
in_file = File(desc='input file to 3dClipLevel',
argstr='%s',
position=-1,
mandatory=True,
exists=True)
mfrac = traits.Float(desc='Use the number ff instead of 0.50 in the algorithm',
argstr='-mfrac %s',
position=2)
doall = traits.Bool(desc='Apply the algorithm to each sub-brick separately',
argstr='-doall',
position=3,
xor=('grad',))
grad = traits.File(desc='also compute a \'gradual\' clip level as a function of voxel position, and output that to a dataset',
argstr='-grad %s',
position=3,
xor=('doall',))
class ClipLevelOutputSpec(TraitedSpec):
clip_val = traits.Float(desc='output')
class ClipLevel(AFNICommandBase):
"""Estimates the value at which to clip the anatomical dataset so
that background regions are set to zero.
For complete details, see the `3dClipLevel Documentation.
<https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dClipLevel.html>`_
Examples
========
>>> from nipype.interfaces.afni import preprocess
>>> cliplevel = preprocess.ClipLevel()
>>> cliplevel.inputs.in_file = 'anatomical.nii'
>>> res = cliplevel.run() # doctest: +SKIP
"""
_cmd = '3dClipLevel'
input_spec = ClipLevelInputSpec
output_spec = ClipLevelOutputSpec
def aggregate_outputs(self, runtime=None, needed_outputs=None):
outputs = self._outputs()
outfile = os.path.join(os.getcwd(), 'stat_result.json')
if runtime is None:
try:
clip_val = load_json(outfile)['stat']
except IOError:
return self.run().outputs
else:
clip_val = []
for line in runtime.stdout.split('\n'):
if line:
values = line.split()
if len(values) > 1:
clip_val.append([float(val) for val in values])
else:
clip_val.extend([float(val) for val in values])
if len(clip_val) == 1:
clip_val = clip_val[0]
save_json(outfile, dict(stat=clip_val))
outputs.clip_val = clip_val
return outputs
class MaskToolInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file or files to 3dmask_tool',
argstr='-input %s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_mask", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
count = traits.Bool(desc='Instead of creating a binary 0/1 mask dataset, '+
'create one with counts of voxel overlap, i.e. '+
'each voxel will contain the number of masks ' +
'that it is set in.',
argstr='-count',
position=2)
datum = traits.Enum('byte','short','float',
argstr='-datum %s',
desc='specify data type for output. Valid types are '+
'\'byte\', \'short\' and \'float\'.')
dilate_inputs = traits.Str(desc='Use this option to dilate and/or erode '+
'datasets as they are read. ex. ' +
'\'5 -5\' to dilate and erode 5 times',
argstr='-dilate_inputs %s')
dilate_results = traits.Str(desc='dilate and/or erode combined mask at ' +
'the given levels.',
argstr='-dilate_results %s')
frac = traits.Float(desc='When combining masks (across datasets and ' +
'sub-bricks), use this option to restrict the ' +
'result to a certain fraction of the set of ' +
'volumes',
argstr='-frac %s')
inter = traits.Bool(desc='intersection, this means -frac 1.0',
argstr='-inter')
union = traits.Bool(desc='union, this means -frac 0',
argstr='-union')
fill_holes = traits.Bool(desc='This option can be used to fill holes ' +
'in the resulting mask, i.e. after all ' +
'other processing has been done.',
argstr='-fill_holes')
fill_dirs = traits.Str(desc='fill holes only in the given directions. ' +
'This option is for use with -fill_holes. ' +
'should be a single string that specifies ' +
'1-3 of the axes using {x,y,z} labels (i.e. '+
'dataset axis order), or using the labels ' +
'in {R,L,A,P,I,S}.',
argstr='-fill_dirs %s',
requires=['fill_holes'])
class MaskToolOutputSpec(TraitedSpec):
out_file = File(desc='mask file',
exists=True)
class MaskTool(AFNICommand):
"""3dmask_tool - for combining/dilating/eroding/filling masks
For complete details, see the `3dmask_tool Documentation.
<https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dmask_tool.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> masktool = afni.MaskTool()
>>> masktool.inputs.in_file = 'functional.nii'
>>> masktool.inputs.outputtype = "NIFTI"
>>> masktool.cmdline
'3dmask_tool -prefix functional_mask.nii -input functional.nii'
>>> res = masktool.run() # doctest: +SKIP
"""
_cmd = '3dmask_tool'
input_spec = MaskToolInputSpec
output_spec = MaskToolOutputSpec
class SegInputSpec(CommandLineInputSpec):
in_file = File(desc='ANAT is the volume to segment',
argstr='-anat %s',
position=-1,
mandatory=True,
exists=True,
copyfile=True)
mask = traits.Either(traits.Enum('AUTO'),
File(exists=True),
desc=('only non-zero voxels in mask are analyzed. '
'mask can either be a dataset or the string '
'"AUTO" which would use AFNI\'s automask '
'function to create the mask.'),
argstr='-mask %s',
position=-2,
mandatory=True)
blur_meth = traits.Enum('BFT', 'BIM',
argstr='-blur_meth %s',
desc='set the blurring method for bias field estimation')
bias_fwhm = traits.Float(desc='The amount of blurring used when estimating the field bias with the Wells method',
argstr='-bias_fwhm %f')
classes = traits.Str(desc='CLASS_STRING is a semicolon delimited string of class labels',
argstr='-classes %s')
bmrf = traits.Float(desc='Weighting factor controlling spatial homogeneity of the classifications',
argstr='-bmrf %f')
bias_classes = traits.Str(desc='A semicolon delimited string of classes that contribute to the estimation of the bias field',
argstr='-bias_classes %s')
prefix = traits.Str(desc='the prefix for the output folder containing all output volumes',
argstr='-prefix %s')
mixfrac = traits.Str(desc='MIXFRAC sets up the volume-wide (within mask) tissue fractions while initializing the segmentation (see IGNORE for exception)',
argstr='-mixfrac %s')
mixfloor = traits.Float(desc='Set the minimum value for any class\'s mixing fraction',
argstr='-mixfloor %f')
main_N = traits.Int(desc='Number of iterations to perform.',
argstr='-main_N %d')
class Seg(AFNICommandBase):
"""3dSeg segments brain volumes into tissue classes. The program allows
for adding a variety of global and voxelwise priors. However for the
moment, only mixing fractions and MRF are documented.
For complete details, see the `3dSeg Documentation.
<https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dSeg.html>
Examples
========
>>> from nipype.interfaces.afni import preprocess
>>> seg = preprocess.Seg()
>>> seg.inputs.in_file = 'structural.nii'
>>> seg.inputs.mask = 'AUTO'
>>> res = seg.run() # doctest: +SKIP
"""
_cmd = '3dSeg'
input_spec = SegInputSpec
output_spec = AFNICommandOutputSpec
def aggregate_outputs(self, runtime=None, needed_outputs=None):
import glob
outputs = self._outputs()
if isdefined(self.inputs.prefix):
outfile = os.path.join(os.getcwd(), self.inputs.prefix, 'Classes+*.BRIK')
else:
outfile = os.path.join(os.getcwd(), 'Segsy', 'Classes+*.BRIK')
outputs.out_file = glob.glob(outfile)[0]
return outputs
class ROIStatsInputSpec(CommandLineInputSpec):
in_file = File(desc='input file to 3dROIstats',
argstr='%s',
position=-1,
mandatory=True,
exists=True)
mask = File(desc='input mask',
argstr='-mask %s',
position=3,
exists=True)
mask_f2short = traits.Bool(
desc='Tells the program to convert a float mask ' +
'to short integers, by simple rounding.',
argstr='-mask_f2short',
position=2)
quiet = traits.Bool(desc='execute quietly',
argstr='-quiet',
position=1)
terminal_output = traits.Enum('file',
desc=('Control terminal output:'
'`file` - saves output into '
'a file'),
nohash=True, mandatory=True, usedefault=True)
class ROIStatsOutputSpec(TraitedSpec):
stats = File(desc='output tab separated values file', exists=True)
class ROIStats(AFNICommandBase):
"""Display statistics over masked regions
For complete details, see the `3dROIstats Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dROIstats.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> roistats = afni.ROIStats()
>>> roistats.inputs.in_file = 'functional.nii'
>>> roistats.inputs.mask = 'skeleton_mask.nii.gz'
>>> roistats.inputs.quiet=True
>>> res = roistats.run() # doctest: +SKIP
"""
_cmd = '3dROIstats'
input_spec = ROIStatsInputSpec
output_spec = ROIStatsOutputSpec
def aggregate_outputs(self, runtime=None, needed_outputs=None):
outputs = self._outputs()
output_filename = "roi_stats.csv"
f = open(output_filename, "w")
f.write(runtime.stdout)
f.close()
outputs.stats = os.path.abspath(output_filename)
return outputs
class CalcInputSpec(AFNICommandInputSpec):
in_file_a = File(desc='input file to 3dcalc',
argstr='-a %s', position=0, mandatory=True, exists=True)
in_file_b = File(desc='operand file to 3dcalc',
argstr=' -b %s', position=1, exists=True)
in_file_c = File(desc='operand file to 3dcalc',
argstr=' -c %s', position=2, exists=True)
out_file = File(name_template="%s_calc", desc='output image file name',
argstr='-prefix %s', name_source="in_file_a")
expr = traits.Str(desc='expr', argstr='-expr "%s"', position=3,
mandatory=True)
start_idx = traits.Int(desc='start index for in_file_a',
requires=['stop_idx'])
stop_idx = traits.Int(desc='stop index for in_file_a',
requires=['start_idx'])
single_idx = traits.Int(desc='volume index for in_file_a')
other = File(desc='other options', argstr='')
class Calc(AFNICommand):
"""This program does voxel-by-voxel arithmetic on 3D datasets
For complete details, see the `3dcalc Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dcalc.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> calc = afni.Calc()
>>> calc.inputs.in_file_a = 'functional.nii'
>>> calc.inputs.in_file_b = 'functional2.nii'
>>> calc.inputs.expr='a*b'
>>> calc.inputs.out_file = 'functional_calc.nii.gz'
>>> calc.inputs.outputtype = "NIFTI"
>>> calc.cmdline #doctest: +ELLIPSIS
'3dcalc -a functional.nii -b functional2.nii -expr "a*b" -prefix functional_calc.nii.gz'
"""
_cmd = '3dcalc'
input_spec = CalcInputSpec
output_spec = AFNICommandOutputSpec
def _format_arg(self, name, trait_spec, value):
if name == 'in_file_a':
arg = trait_spec.argstr % value
if isdefined(self.inputs.start_idx):
arg += '[%d..%d]' % (self.inputs.start_idx,
self.inputs.stop_idx)
if isdefined(self.inputs.single_idx):
arg += '[%d]' % (self.inputs.single_idx)
return arg
return super(Calc, self)._format_arg(name, trait_spec, value)
def _parse_inputs(self, skip=None):
"""Skip the arguments without argstr metadata
"""
return super(Calc, self)._parse_inputs(
skip=('start_idx', 'stop_idx', 'other'))
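# Illustrative sketch, not part of the interface: Calc._format_arg above folds
# the sub-brick selection into the -a argument itself rather than emitting
# separate flags. A hypothetical helper showing the same string handling:
def _example_subbrick_arg(fname, start_idx=None, stop_idx=None, single_idx=None):
    arg = '-a %s' % fname
    if start_idx is not None:
        arg += '[%d..%d]' % (start_idx, stop_idx)
    if single_idx is not None:
        arg += '[%d]' % single_idx
    return arg
# _example_subbrick_arg('functional.nii', 4, 9) -> '-a functional.nii[4..9]'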
class BlurInMaskInputSpec(AFNICommandInputSpec):
in_file = File(
desc='input file to 3dSkullStrip',
argstr='-input %s',
position=1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template='%s_blur', desc='output to the file', argstr='-prefix %s',
name_source='in_file', position=-1)
mask = File(
desc='Mask dataset, if desired. Blurring will occur only within the mask. Voxels NOT in the mask will be set to zero in the output.',
argstr='-mask %s')
multimask = File(
desc='Multi-mask dataset -- each distinct nonzero value in dataset will be treated as a separate mask for blurring purposes.',
argstr='-Mmask %s')
automask = traits.Bool(
desc='Create an automask from the input dataset.',
argstr='-automask')
fwhm = traits.Float(
desc='fwhm kernel size',
argstr='-FWHM %f',
mandatory=True)
preserve = traits.Bool(
desc='Normally, voxels not in the mask will be set to zero in the output. If you want the original values in the dataset to be preserved in the output, use this option.',
argstr='-preserve')
float_out = traits.Bool(
desc='Save dataset as floats, no matter what the input data type is.',
argstr='-float')
options = traits.Str(desc='options', argstr='%s', position=2)
class BlurInMask(AFNICommand):
""" Blurs a dataset spatially inside a mask. That's all. Experimental.
For complete details, see the `3dBlurInMask Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dBlurInMask.html>
Examples
========
>>> from nipype.interfaces import afni as afni
>>> bim = afni.BlurInMask()
>>> bim.inputs.in_file = 'functional.nii'
>>> bim.inputs.mask = 'mask.nii'
>>> bim.inputs.fwhm = 5.0
>>> bim.cmdline #doctest: +ELLIPSIS
'3dBlurInMask -input functional.nii -FWHM 5.000000 -mask mask.nii -prefix functional_blur'
>>> res = bim.run() # doctest: +SKIP
"""
_cmd = '3dBlurInMask'
input_spec = BlurInMaskInputSpec
output_spec = AFNICommandOutputSpec
class TCorrMapInputSpec(AFNICommandInputSpec):
in_file = File(exists=True, argstr='-input %s', mandatory=True, copyfile=False)
seeds = File(exists=True, argstr='-seed %s', xor=('seeds_width',))
mask = File(exists=True, argstr='-mask %s')
automask = traits.Bool(argstr='-automask')
polort = traits.Int(argstr='-polort %d')
bandpass = traits.Tuple((traits.Float(), traits.Float()),
argstr='-bpass %f %f')
regress_out_timeseries = traits.File(exists=True, argstr='-ort %s')
blur_fwhm = traits.Float(argstr='-Gblur %f')
seeds_width = traits.Float(argstr='-Mseed %f', xor=('seeds',))
# outputs
mean_file = File(argstr='-Mean %s', suffix='_mean', name_source="in_file")
zmean = File(argstr='-Zmean %s', suffix='_zmean', name_source="in_file")
qmean = File(argstr='-Qmean %s', suffix='_qmean', name_source="in_file")
pmean = File(argstr='-Pmean %s', suffix='_pmean', name_source="in_file")
_thresh_opts = ('absolute_threshold',
'var_absolute_threshold',
'var_absolute_threshold_normalize')
thresholds = traits.List(traits.Int())
absolute_threshold = File(
argstr='-Thresh %f %s', suffix='_thresh',
name_source="in_file", xor=_thresh_opts)
var_absolute_threshold = File(
argstr='-VarThresh %f %f %f %s', suffix='_varthresh',
name_source="in_file", xor=_thresh_opts)
var_absolute_threshold_normalize = File(
argstr='-VarThreshN %f %f %f %s', suffix='_varthreshn',
name_source="in_file", xor=_thresh_opts)
correlation_maps = File(
argstr='-CorrMap %s', name_source="in_file")
correlation_maps_masked = File(
argstr='-CorrMask %s', name_source="in_file")
_expr_opts = ('average_expr', 'average_expr_nonzero', 'sum_expr')
expr = traits.Str()
average_expr = File(
argstr='-Aexpr %s %s', suffix='_aexpr',
name_source='in_file', xor=_expr_opts)
average_expr_nonzero = File(
argstr='-Cexpr %s %s', suffix='_cexpr',
name_source='in_file', xor=_expr_opts)
sum_expr = File(
argstr='-Sexpr %s %s', suffix='_sexpr',
name_source='in_file', xor=_expr_opts)
histogram_bin_numbers = traits.Int()
histogram = File(
name_source='in_file', argstr='-Hist %d %s', suffix='_hist')
class TCorrMapOutputSpec(TraitedSpec):
mean_file = File()
zmean = File()
qmean = File()
pmean = File()
absolute_threshold = File()
var_absolute_threshold = File()
var_absolute_threshold_normalize = File()
correlation_maps = File()
correlation_maps_masked = File()
average_expr = File()
average_expr_nonzero = File()
sum_expr = File()
histogram = File()
class TCorrMap(AFNICommand):
""" For each voxel time series, computes the correlation between it
and all other voxels, and combines this set of values into the
output dataset(s) in some way.
For complete details, see the `3dTcorrMap Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTcorrMap.html>
Examples
========
>>> from nipype.interfaces import afni as afni
>>> tcm = afni.TCorrMap()
>>> tcm.inputs.in_file = 'functional.nii'
>>> tcm.inputs.mask = 'mask.nii'
>>> tcm.inputs.mean_file = 'functional_meancorr.nii'
>>> res = tcm.run() # doctest: +SKIP
"""
_cmd = '3dTcorrMap'
input_spec = TCorrMapInputSpec
output_spec = TCorrMapOutputSpec
_additional_metadata = ['suffix']
def _format_arg(self, name, trait_spec, value):
if name in self.inputs._thresh_opts:
return trait_spec.argstr % tuple(self.inputs.thresholds + [value])
elif name in self.inputs._expr_opts:
return trait_spec.argstr % (self.inputs.expr, value)
elif name == 'histogram':
return trait_spec.argstr % (self.inputs.histogram_bin_numbers,
value)
else:
return super(TCorrMap, self)._format_arg(name, trait_spec, value)
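# Illustrative note, not part of the interface: for the grouped options above,
# _format_arg interpolates the shared helper inputs together with the output
# filename, roughly '-Aexpr <expr> <fname>' for the *_expr outputs,
# '-Thresh <threshold> <fname>' for absolute_threshold, and
# '-Hist <histogram_bin_numbers> <fname>' for histogram; the filenames are
# generated from in_file via the suffix metadata (names here are hypothetical).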
class AutoboxInputSpec(AFNICommandInputSpec):
in_file = File(exists=True, mandatory=True, argstr='-input %s',
desc='input file', copyfile=False)
padding = traits.Int(
argstr='-npad %d',
desc='Number of extra voxels to pad on each side of box')
out_file = File(argstr="-prefix %s", name_source="in_file")
no_clustering = traits.Bool(
argstr='-noclust',
desc="""Don't do any clustering to find box. Any non-zero
voxel will be preserved in the cropped volume.
The default method uses some clustering to find the
cropping box, and will clip off small isolated blobs.""")
class AutoboxOuputSpec(TraitedSpec): # out_file not mandatory
x_min = traits.Int()
x_max = traits.Int()
y_min = traits.Int()
y_max = traits.Int()
z_min = traits.Int()
z_max = traits.Int()
out_file = File(desc='output file')
class Autobox(AFNICommand):
""" Computes size of a box that fits around the volume.
Also can be used to crop the volume to that box.
For complete details, see the `3dAutobox Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dAutobox.html>
Examples
========
>>> from nipype.interfaces import afni as afni
>>> abox = afni.Autobox()
>>> abox.inputs.in_file = 'structural.nii'
>>> abox.inputs.padding = 5
>>> res = abox.run() # doctest: +SKIP
"""
_cmd = '3dAutobox'
input_spec = AutoboxInputSpec
output_spec = AutoboxOuputSpec
def aggregate_outputs(self, runtime=None, needed_outputs=None):
outputs = self._outputs()
pattern = 'x=(?P<x_min>-?\d+)\.\.(?P<x_max>-?\d+) y=(?P<y_min>-?\d+)\.\.(?P<y_max>-?\d+) z=(?P<z_min>-?\d+)\.\.(?P<z_max>-?\d+)'
for line in runtime.stderr.split('\n'):
m = re.search(pattern, line)
if m:
d = m.groupdict()
for k in list(d.keys()):
d[k] = int(d[k])
outputs.set(**d)
outputs.set(out_file=self._gen_filename('out_file'))
return outputs
def _gen_filename(self, name):
if name == 'out_file' and (not isdefined(self.inputs.out_file)):
return Undefined
return super(Autobox, self)._gen_filename(name)
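# Illustrative sketch, not part of the interface: aggregate_outputs above
# scrapes the bounding box that 3dAutobox prints to stderr. A stand-alone
# parser using the same regular expression (the input line in the trailing
# comment is a hypothetical example of that stderr format):
def _example_parse_autobox_line(line):
    import re
    pattern = (r'x=(?P<x_min>-?\d+)\.\.(?P<x_max>-?\d+) '
               r'y=(?P<y_min>-?\d+)\.\.(?P<y_max>-?\d+) '
               r'z=(?P<z_min>-?\d+)\.\.(?P<z_max>-?\d+)')
    m = re.search(pattern, line)
    return dict((k, int(v)) for k, v in m.groupdict().items()) if m else None
# _example_parse_autobox_line('x=10..172 y=5..205 z=0..137')
# -> {'x_min': 10, 'x_max': 172, 'y_min': 5, 'y_max': 205, 'z_min': 0, 'z_max': 137}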
class RetroicorInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dretroicor',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(desc='output image file name', argstr='-prefix %s', mandatory=True, position=1)
card = File(desc='1D cardiac data file for cardiac correction',
argstr='-card %s',
position=-2,
exists=True)
resp = File(desc='1D respiratory waveform data for correction',
argstr='-resp %s',
position=-3,
exists=True)
threshold = traits.Int(desc='Threshold for detection of R-wave peaks in input (Make sure it is above the background noise level, Try 3/4 or 4/5 times range plus minimum)',
argstr='-threshold %d',
position=-4)
order = traits.Int(desc='The order of the correction (2 is typical)',
argstr='-order %s',
position=-5)
cardphase = File(desc='Filename for 1D cardiac phase output',
argstr='-cardphase %s',
position=-6,
hash_files=False)
respphase = File(desc='Filename for 1D resp phase output',
argstr='-respphase %s',
position=-7,
hash_files=False)
class Retroicor(AFNICommand):
"""Performs Retrospective Image Correction for physiological
motion effects, using a slightly modified version of the
RETROICOR algorithm
The durations of the physiological inputs are assumed to equal
the duration of the dataset. Any constant sampling rate may be
used, but 40 Hz seems to be acceptable. This program's cardiac
peak detection algorithm is rather simplistic, so you might try
using the scanner's cardiac gating output (transform it to a
spike wave if necessary).
This program uses slice timing information embedded in the
dataset to estimate the proper cardiac/respiratory phase for
each slice. It makes sense to run this program before any
program that may destroy the slice timings (e.g. 3dvolreg for
motion correction).
For complete details, see the `3dretroicor Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dretroicor.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> ret = afni.Retroicor()
>>> ret.inputs.in_file = 'functional.nii'
>>> ret.inputs.card = 'mask.1D'
>>> ret.inputs.resp = 'resp.1D'
>>> res = ret.run() # doctest: +SKIP
"""
_cmd = '3dretroicor'
input_spec = RetroicorInputSpec
output_spec = AFNICommandOutputSpec
class AFNItoNIFTIInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dAFNItoNIFTI',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s.nii", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
hash_files = False
class AFNItoNIFTI(AFNICommand):
"""Changes AFNI format files to NIFTI format using 3dAFNItoNIFTI
see AFNI Documentation: <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dAFNItoNIFTI.html>
This can also convert 2D or 1D data, to which you can apply numpy.squeeze() to remove extra dimensions.
Examples
========
>>> from nipype.interfaces import afni as afni
>>> a2n = afni.AFNItoNIFTI()
>>> a2n.inputs.in_file = 'afni_output.3D'
>>> a2n.inputs.out_file = 'afni_output.nii'
>>> a2n.cmdline
'3dAFNItoNIFTI -prefix afni_output.nii afni_output.3D'
"""
_cmd = '3dAFNItoNIFTI'
input_spec = AFNItoNIFTIInputSpec
output_spec = AFNICommandOutputSpec
def _overload_extension(self, value):
path, base, ext = split_filename(value)
if ext.lower() not in [".1d", ".nii.gz", ".1D"]:
ext = ext + ".nii"
return os.path.join(path, base + ext)
def _gen_filename(self, name):
return os.path.abspath(super(AFNItoNIFTI, self)._gen_filename(name))
class EvalInputSpec(AFNICommandInputSpec):
in_file_a = File(desc='input file to 1deval',
argstr='-a %s', position=0, mandatory=True, exists=True)
in_file_b = File(desc='operand file to 1deval',
argstr=' -b %s', position=1, exists=True)
in_file_c = File(desc='operand file to 1deval',
argstr=' -c %s', position=2, exists=True)
out_file = File(name_template="%s_calc", desc='output image file name',
argstr='-prefix %s', name_source="in_file_a")
out1D = traits.Bool(desc="output in 1D",
argstr='-1D')
expr = traits.Str(desc='expr', argstr='-expr "%s"', position=3,
mandatory=True)
start_idx = traits.Int(desc='start index for in_file_a',
requires=['stop_idx'])
stop_idx = traits.Int(desc='stop index for in_file_a',
requires=['start_idx'])
single_idx = traits.Int(desc='volume index for in_file_a')
other = File(desc='other options', argstr='')
class Eval(AFNICommand):
"""Evaluates an expression that may include columns of data from one or more text files
see AFNI Documentation: <http://afni.nimh.nih.gov/pub/dist/doc/program_help/1deval.html>
Examples
========
>>> from nipype.interfaces import afni as afni
>>> eval = afni.Eval()
>>> eval.inputs.in_file_a = 'seed.1D'
>>> eval.inputs.in_file_b = 'resp.1D'
>>> eval.inputs.expr='a*b'
>>> eval.inputs.out1D = True
>>> eval.inputs.out_file = 'data_calc.1D'
>>> eval.cmdline #doctest: +SKIP
'1deval -a seed.1D -b resp.1D -expr "a*b" -1D -prefix data_calc.1D'
"""
_cmd = '1deval'
input_spec = EvalInputSpec
output_spec = AFNICommandOutputSpec
def _format_arg(self, name, trait_spec, value):
if name == 'in_file_a':
arg = trait_spec.argstr % value
if isdefined(self.inputs.start_idx):
arg += '[%d..%d]' % (self.inputs.start_idx,
self.inputs.stop_idx)
if isdefined(self.inputs.single_idx):
arg += '[%d]' % (self.inputs.single_idx)
return arg
return super(Eval, self)._format_arg(name, trait_spec, value)
def _parse_inputs(self, skip=None):
"""Skip the arguments without argstr metadata
"""
return super(Eval, self)._parse_inputs(
skip=('start_idx', 'stop_idx', 'out1D', 'other'))
class MeansInputSpec(AFNICommandInputSpec):
in_file_a = File(desc='input file to 3dMean',
argstr='%s',
position=0,
mandatory=True,
exists=True)
in_file_b = File(desc='another input file to 3dMean',
argstr='%s',
position=1,
exists=True)
out_file = File(name_template="%s_mean", desc='output image file name',
argstr='-prefix %s', name_source="in_file_a")
scale = traits.Str(desc='scaling of output', argstr='-%sscale')
non_zero = traits.Bool(desc='use only non-zero values', argstr='-non_zero')
std_dev = traits.Bool(desc='calculate std dev', argstr='-stdev')
sqr = traits.Bool(desc='mean square instead of value', argstr='-sqr')
summ = traits.Bool(desc='take sum, (not average)', argstr='-sum')
count = traits.Bool(desc='compute count of non-zero voxels', argstr='-count')
mask_inter = traits.Bool(desc='create intersection mask', argstr='-mask_inter')
mask_union = traits.Bool(desc='create union mask', argstr='-mask_union')
class Means(AFNICommand):
"""Takes the voxel-by-voxel mean of all input datasets using 3dMean
see AFNI Documentation: <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dMean.html>
Examples
========
>>> from nipype.interfaces import afni as afni
>>> means = afni.Means()
>>> means.inputs.in_file_a = 'im1.nii'
>>> means.inputs.in_file_b = 'im2.nii'
>>> means.inputs.out_file = 'output.nii'
>>> means.cmdline
'3dMean im1.nii im2.nii -prefix output.nii'
"""
_cmd = '3dMean'
input_spec = MeansInputSpec
output_spec = AFNICommandOutputSpec
class HistInputSpec(CommandLineInputSpec):
in_file = File(
desc='input file to 3dHist', argstr='-input %s', position=1, mandatory=True,
exists=True, copyfile=False)
out_file = File(
desc='Write histogram to niml file with this prefix', name_template='%s_hist',
keep_extension=False, argstr='-prefix %s', name_source=['in_file'])
showhist = traits.Bool(False, usedefault=True, desc='write a text visual histogram',
argstr='-showhist')
out_show = File(
name_template="%s_hist.out", desc='output image file name', keep_extension=False,
argstr="> %s", name_source="in_file", position=-1)
mask = File(desc='matrix to align input file', argstr='-mask %s', exists=True)
nbin = traits.Int(desc='number of bins', argstr='-nbin %d')
max_value = traits.Float(argstr='-max %f', desc='maximum intensity value')
min_value = traits.Float(argstr='-min %f', desc='minimum intensity value')
bin_width = traits.Float(argstr='-binwidth %f', desc='bin width')
class HistOutputSpec(TraitedSpec):
out_file = File(desc='output file', exists=True)
out_show = File(desc='output visual histogram')
class Hist(AFNICommandBase):
"""Computes average of all voxels in the input dataset
which satisfy the criterion in the options list
For complete details, see the `3dHist Documentation.
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dHist.html>`_
Examples
========
>>> from nipype.interfaces import afni as afni
>>> hist = afni.Hist()
>>> hist.inputs.in_file = 'functional.nii'
>>> hist.cmdline
'3dHist -input functional.nii -prefix functional_hist'
>>> res = hist.run() # doctest: +SKIP
"""
_cmd = '3dHist'
input_spec = HistInputSpec
output_spec = HistOutputSpec
_redirect_x = True
def __init__(self, **inputs):
super(Hist, self).__init__(**inputs)
if not no_afni():
version = Info.version()
# As of AFNI 16.0.00, redirect_x is not needed
if isinstance(version[0], int) and version[0] > 15:
self._redirect_x = False
def _parse_inputs(self, skip=None):
if not self.inputs.showhist:
if skip is None:
skip = []
skip += ['out_show']
return super(Hist, self)._parse_inputs(skip=skip)
def _list_outputs(self):
outputs = super(Hist, self)._list_outputs()
outputs['out_file'] += '.niml.hist'
if not self.inputs.showhist:
outputs['out_show'] = Undefined
return outputs
class FWHMxInputSpec(CommandLineInputSpec):
in_file = File(desc='input dataset', argstr='-input %s', mandatory=True, exists=True)
out_file = File(argstr='> %s', name_source='in_file', name_template='%s_fwhmx.out',
position=-1, keep_extension=False, desc='output file')
out_subbricks = File(argstr='-out %s', name_source='in_file', name_template='%s_subbricks.out',
keep_extension=False, desc='output file listing the subbricks FWHM')
mask = File(desc='use only voxels that are nonzero in mask', argstr='-mask %s', exists=True)
automask = traits.Bool(False, usedefault=True, argstr='-automask',
desc='compute a mask from THIS dataset, a la 3dAutomask')
detrend = traits.Either(
traits.Bool(), traits.Int(), default=False, argstr='-detrend', xor=['demed'], usedefault=True,
desc='instead of demed (0th order detrending), detrend to the specified order. If order '
'is not given, the program picks q=NT/30. -detrend disables -demed, and includes '
'-unif.')
demed = traits.Bool(
False, argstr='-demed', xor=['detrend'],
desc='If the input dataset has more than one sub-brick (e.g., has a time axis), then '
'subtract the median of each voxel\'s time series before processing FWHM. This will '
'tend to remove intrinsic spatial structure and leave behind the noise.')
unif = traits.Bool(False, argstr='-unif',
desc='If the input dataset has more than one sub-brick, then normalize each'
' voxel\'s time series to have the same MAD before processing FWHM.')
out_detrend = File(argstr='-detprefix %s', name_source='in_file', name_template='%s_detrend',
keep_extension=False, desc='Save the detrended file into a dataset')
geom = traits.Bool(argstr='-geom', xor=['arith'],
desc='if in_file has more than one sub-brick, compute the final estimate as'
'the geometric mean of the individual sub-brick FWHM estimates')
arith = traits.Bool(argstr='-arith', xor=['geom'],
desc='if in_file has more than one sub-brick, compute the final estimate as'
'the arithmetic mean of the individual sub-brick FWHM estimates')
combine = traits.Bool(argstr='-combine', desc='combine the final measurements along each axis')
compat = traits.Bool(argstr='-compat', desc='be compatible with the older 3dFWHM')
acf = traits.Either(
traits.Bool(), File(), traits.Tuple(File(exists=True), traits.Float()),
default=False, usedefault=True, argstr='-acf', desc='computes the spatial autocorrelation')
class FWHMxOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='output file')
out_subbricks = File(exists=True, desc='output file (subbricks)')
out_detrend = File(desc='output file, detrended')
fwhm = traits.Either(
traits.Tuple(traits.Float(), traits.Float(), traits.Float()),
traits.Tuple(traits.Float(), traits.Float(), traits.Float(), traits.Float()),
desc='FWHM along each axis')
acf_param = traits.Either(
traits.Tuple(traits.Float(), traits.Float(), traits.Float()),
traits.Tuple(traits.Float(), traits.Float(), traits.Float(), traits.Float()),
desc='fitted ACF model parameters')
out_acf = File(exists=True, desc='output acf file')
class FWHMx(AFNICommandBase):
"""
Unlike the older 3dFWHM, this program computes FWHMs for all sub-bricks
in the input dataset, each one separately. The output for each one is
written to the file specified by '-out'. The mean (arithmetic or geometric)
of all the FWHMs along each axis is written to stdout. (A non-positive
output value indicates something bad happened; e.g., FWHM in z is meaningless
for a 2D dataset; the estimation method computed incoherent intermediate results.)
Examples
--------
>>> from nipype.interfaces import afni as afp
>>> fwhm = afp.FWHMx()
>>> fwhm.inputs.in_file = 'functional.nii'
>>> fwhm.cmdline
'3dFWHMx -input functional.nii -out functional_subbricks.out > functional_fwhmx.out'
(Classic) METHOD:
* Calculate ratio of variance of first differences to data variance.
* Should be the same as 3dFWHM for a 1-brick dataset.
(But the output format is simpler to use in a script.)
.. note:: IMPORTANT NOTE [AFNI > 16]
A completely new method for estimating and using noise smoothness values is
now available in 3dFWHMx and 3dClustSim. This method is implemented in the
'-acf' options to both programs. 'ACF' stands for (spatial) AutoCorrelation
Function, and it is estimated by calculating moments of differences out to
a larger radius than before.
Notably, real FMRI data does not actually have a Gaussian-shaped ACF, so the
estimated ACF is then fit (in 3dFWHMx) to a mixed model (Gaussian plus
mono-exponential) of the form
.. math::
ACF(r) = a * exp(-r*r/(2*b*b)) + (1-a)*exp(-r/c)
where :math:`r` is the radius, and :math:`a, b, c` are the fitted parameters.
The apparent FWHM from this model is usually somewhat larger in real data
than the FWHM estimated from just the nearest-neighbor differences used
in the 'classic' analysis.
The longer tails provided by the mono-exponential are also significant.
3dClustSim has also been modified to use the ACF model given above to generate
noise random fields.
.. note:: TL;DR or summary
The take-away message is that the 'classic' 3dFWHMx and
3dClustSim analysis, using a pure Gaussian ACF, is not very correct for
FMRI data -- I cannot speak for PET or MEG data.
.. warning::
Do NOT use 3dFWHMx on the statistical results (e.g., '-bucket') from
3dDeconvolve or 3dREMLfit!!! The function of 3dFWHMx is to estimate
the smoothness of the time series NOISE, not of the statistics. This
proscription is especially true if you plan to use 3dClustSim next!!
.. note:: Recommendations
* For FMRI statistical purposes, you DO NOT want the FWHM to reflect
the spatial structure of the underlying anatomy. Rather, you want
the FWHM to reflect the spatial structure of the noise. This means
that the input dataset should not have anatomical (spatial) structure.
* One good form of input is the output of '3dDeconvolve -errts', which is
the dataset of residuals left over after the GLM fitted signal model is
subtracted out from each voxel's time series.
* If you don't want to go to that much trouble, use '-detrend' to approximately
subtract out the anatomical spatial structure, OR use the output of 3dDetrend
for the same purpose.
* If you do not use '-detrend', the program attempts to find non-zero spatial
structure in the input, and will print a warning message if it is detected.
.. note:: Notes on -demed
* I recommend this option, and it is not the default only for historical
compatibility reasons. It may become the default someday.
* It is already the default in program 3dBlurToFWHM. This is the same detrending
as done in 3dDespike; using 2*q+3 basis functions for q > 0.
* If you don't use '-detrend', the program now [Aug 2010] checks if a large number
of voxels have significant nonzero means. If so, the program will print a
warning message suggesting the use of '-detrend', since inherent spatial
structure in the image will bias the estimation of the FWHM of the image time
series NOISE (which is usually the point of using 3dFWHMx).
"""
_cmd = '3dFWHMx'
input_spec = FWHMxInputSpec
output_spec = FWHMxOutputSpec
_acf = True
def _parse_inputs(self, skip=None):
if not self.inputs.detrend:
if skip is None:
skip = []
skip += ['out_detrend']
return super(FWHMx, self)._parse_inputs(skip=skip)
def _format_arg(self, name, trait_spec, value):
if name == 'detrend':
if isinstance(value, bool):
if value:
return trait_spec.argstr
else:
return None
elif isinstance(value, int):
return trait_spec.argstr + ' %d' % value
if name == 'acf':
if isinstance(value, bool):
if value:
return trait_spec.argstr
else:
self._acf = False
return None
elif isinstance(value, tuple):
return trait_spec.argstr + ' %s %f' % value
elif isinstance(value, string_types):
return trait_spec.argstr + ' ' + value
return super(FWHMx, self)._format_arg(name, trait_spec, value)
def _list_outputs(self):
outputs = super(FWHMx, self)._list_outputs()
if self.inputs.detrend:
fname, ext = op.splitext(self.inputs.in_file)
if '.gz' in ext:
_, ext2 = op.splitext(fname)
ext = ext2 + ext
outputs['out_detrend'] += ext
else:
outputs['out_detrend'] = Undefined
sout = np.loadtxt(outputs['out_file']) #pylint: disable=E1101
if self._acf:
outputs['acf_param'] = tuple(sout[1])
sout = tuple(sout[0])
outputs['out_acf'] = op.abspath('3dFWHMx.1D')
if isinstance(self.inputs.acf, string_types):
outputs['out_acf'] = op.abspath(self.inputs.acf)
outputs['fwhm'] = tuple(sout)
return outputs
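# Illustrative helper, not part of the interface: the mixed Gaussian plus
# mono-exponential ACF model that the FWHMx docstring above describes,
# ACF(r) = a*exp(-r*r/(2*b*b)) + (1-a)*exp(-r/c), where (a, b, c) are the
# fitted parameters reported in the acf_param output.
def _example_acf_model(r, a, b, c):
    import numpy as np
    return a * np.exp(-r * r / (2.0 * b * b)) + (1.0 - a) * np.exp(-r / c)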
class OutlierCountInputSpec(CommandLineInputSpec):
in_file = File(argstr='%s', mandatory=True, exists=True, position=-2, desc='input dataset')
mask = File(exists=True, argstr='-mask %s', xor=['autoclip', 'automask'],
desc='only count voxels within the given mask')
qthr = traits.Range(value=1e-3, low=0.0, high=1.0, argstr='-qthr %.5f',
desc='indicate a value for q to compute alpha')
autoclip = traits.Bool(False, usedefault=True, argstr='-autoclip', xor=['mask'],
desc='clip off small voxels')
automask = traits.Bool(False, usedefault=True, argstr='-automask', xor=['mask'],
desc='clip off small voxels')
fraction = traits.Bool(False, usedefault=True, argstr='-fraction',
desc='write out the fraction of masked voxels'
' which are outliers at each timepoint')
interval = traits.Bool(False, usedefault=True, argstr='-range',
desc='write out the median + 3.5 MAD of outlier'
' count with each timepoint')
save_outliers = traits.Bool(False, usedefault=True, desc='enables out_file option')
outliers_file = File(
name_template="%s_outliers", argstr='-save %s', name_source=["in_file"],
output_name='out_outliers', keep_extension=True, desc='output image file name')
polort = traits.Int(argstr='-polort %d',
desc='detrend each voxel timeseries with polynomials')
legendre = traits.Bool(False, usedefault=True, argstr='-legendre',
desc='use Legendre polynomials')
out_file = File(
name_template='%s_outliers', name_source=['in_file'], argstr='> %s',
keep_extension=False, position=-1, desc='capture standard output')
class OutlierCountOutputSpec(TraitedSpec):
out_outliers = File(exists=True, desc='output image file name')
out_file = File(
name_template='%s_tqual', name_source=['in_file'], argstr='> %s',
keep_extension=False, position=-1, desc='capture standard output')
class OutlierCount(CommandLine):
"""Create a 3D dataset from 2D image files using AFNI to3d command
For complete details, see the `to3d Documentation
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/to3d.html>`_
Examples
========
>>> from nipype.interfaces import afni
>>> toutcount = afni.OutlierCount()
>>> toutcount.inputs.in_file = 'functional.nii'
>>> toutcount.cmdline #doctest: +ELLIPSIS
'3dToutcount functional.nii > functional_outliers'
>>> res = toutcount.run() #doctest: +SKIP
"""
_cmd = '3dToutcount'
input_spec = OutlierCountInputSpec
output_spec = OutlierCountOutputSpec
def _parse_inputs(self, skip=None):
if skip is None:
skip = []
if not self.inputs.save_outliers:
skip += ['outliers_file']
return super(OutlierCount, self)._parse_inputs(skip)
def _list_outputs(self):
outputs = self.output_spec().get()
if self.inputs.save_outliers:
outputs['out_outliers'] = op.abspath(self.inputs.outliers_file)
outputs['out_file'] = op.abspath(self.inputs.out_file)
return outputs
class QualityIndexInputSpec(CommandLineInputSpec):
in_file = File(argstr='%s', mandatory=True, exists=True, position=-2, desc='input dataset')
mask = File(exists=True, argstr='-mask %s', xor=['autoclip', 'automask'],
desc='compute correlation only across masked voxels')
spearman = traits.Bool(False, usedefault=True, argstr='-spearman',
desc='Quality index is 1 minus the Spearman (rank) '
'correlation coefficient of each sub-brick '
'with the median sub-brick. (default)')
quadrant = traits.Bool(False, usedefault=True, argstr='-quadrant',
desc='Similar to -spearman, but using 1 minus the '
'quadrant correlation coefficient as the '
'quality index.')
autoclip = traits.Bool(False, usedefault=True, argstr='-autoclip', xor=['mask'],
desc='clip off small voxels')
automask = traits.Bool(False, usedefault=True, argstr='-automask', xor=['mask'],
desc='clip off small voxels')
clip = traits.Float(argstr='-clip %f', desc='clip off values below')
interval = traits.Bool(False, usedefault=True, argstr='-range',
desc='write out the median + 3.5 MAD of outlier'
' count with each timepoint')
out_file = File(
name_template='%s_tqual', name_source=['in_file'], argstr='> %s',
keep_extension=False, position=-1, desc='capture standard output')
class QualityIndexOutputSpec(TraitedSpec):
out_file = File(desc='file containing the captured standard output')
class QualityIndex(CommandLine):
"""Create a 3D dataset from 2D image files using AFNI to3d command
For complete details, see the `to3d Documentation
<http://afni.nimh.nih.gov/pub/dist/doc/program_help/to3d.html>`_
Examples
========
>>> from nipype.interfaces import afni
>>> tqual = afni.QualityIndex()
>>> tqual.inputs.in_file = 'functional.nii'
>>> tqual.cmdline #doctest: +ELLIPSIS
'3dTqual functional.nii > functional_tqual'
>>> res = tqual.run() #doctest: +SKIP
"""
_cmd = '3dTqual'
input_spec = QualityIndexInputSpec
output_spec = QualityIndexOutputSpec
| FCP-INDI/nipype | nipype/interfaces/afni/preprocess.py | Python | bsd-3-clause | 112,150 | [
"Gaussian"
] | 0e7c9d556b0638c94347a647711a1eadeb68e3ad4f2afd0f5670eb3b2fefc689 |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides utility functions used with command line samples."""
# This module is used for version 2 of the Google Data APIs.
import sys
import getpass
import urllib
import gdata.gauth
__author__ = 'j.s@google.com (Jeff Scudder)'
CLIENT_LOGIN = 1
AUTHSUB = 2
OAUTH = 3
HMAC = 1
RSA = 2
class SettingsUtil(object):
"""Gather's user preferences from flags or command prompts.
An instance of this object stores the choices made by the user. At some
point it might be useful to save the user's preferences so that they do
not need to always set flags or answer preference prompts.
"""
def __init__(self, prefs=None):
self.prefs = prefs or {}
def get_param(self, name, prompt='', secret=False, ask=True, reuse=False):
# First, check in this object's stored preferences.
if name in self.prefs:
return self.prefs[name]
# Second, check for a command line parameter.
value = None
for i in xrange(len(sys.argv)):
if sys.argv[i].startswith('--%s=' % name):
value = sys.argv[i].split('=')[1]
elif sys.argv[i] == '--%s' % name:
value = sys.argv[i + 1]
# Third, if it was not on the command line, ask the user to input the
# value.
if value is None and ask:
prompt = '%s: ' % prompt
if secret:
value = getpass.getpass(prompt)
else:
value = raw_input(prompt)
# If we want to save the preference for reuse in future requests, add it
# to this object's prefs.
if value is not None and reuse:
self.prefs[name] = value
return value
def authorize_client(self, client, auth_type=None, service=None,
source=None, scopes=None, oauth_type=None,
consumer_key=None, consumer_secret=None):
"""Uses command line arguments, or prompts user for token values."""
if 'client_auth_token' in self.prefs:
return
if auth_type is None:
auth_type = int(self.get_param(
'auth_type', 'Please choose the authorization mechanism you want'
' to use.\n'
'1. to use your email address and password (ClientLogin)\n'
'2. to use a web browser to visit an auth web page (AuthSub)\n'
'3. if you have registered to use OAuth\n', reuse=True))
# Get the scopes for the services we want to access.
if auth_type == AUTHSUB or auth_type == OAUTH:
if scopes is None:
scopes = self.get_param(
'scopes', 'Enter the URL prefixes (scopes) for the resources you '
'would like to access.\nFor multiple scope URLs, place a comma '
'between each URL.\n'
'Example: http://www.google.com/calendar/feeds/,'
'http://www.google.com/m8/feeds/\n', reuse=True).split(',')
elif isinstance(scopes, (str, unicode)):
scopes = scopes.split(',')
if auth_type == CLIENT_LOGIN:
email = self.get_param('email', 'Please enter your username',
reuse=False)
password = self.get_param('password', 'Password', True, reuse=False)
if service is None:
service = self.get_param(
'service', 'What is the name of the service you wish to access?'
'\n(See list:'
' http://code.google.com/apis/gdata/faq.html#clientlogin)',
reuse=True)
if source is None:
source = self.get_param('source', ask=False, reuse=True)
client.client_login(email, password, source=source, service=service)
elif auth_type == AUTHSUB:
auth_sub_token = self.get_param('auth_sub_token', ask=False, reuse=True)
session_token = self.get_param('session_token', ask=False, reuse=True)
private_key = None
auth_url = None
single_use_token = None
rsa_private_key = self.get_param(
'rsa_private_key',
'If you want to use secure mode AuthSub, please provide the\n'
' location of your RSA private key which corresponds to the\n'
' certificate you have uploaded for your domain. If you do not\n'
' have an RSA key, simply press enter', reuse=True)
if rsa_private_key:
try:
private_key_file = open(rsa_private_key, 'rb')
private_key = private_key_file.read()
private_key_file.close()
except IOError:
print 'Unable to read private key from file'
if private_key is not None:
if client.auth_token is None:
if session_token:
client.auth_token = gdata.gauth.SecureAuthSubToken(
session_token, private_key, scopes)
self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
client.auth_token)
return
elif auth_sub_token:
client.auth_token = gdata.gauth.SecureAuthSubToken(
auth_sub_token, private_key, scopes)
client.upgrade_token()
self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
client.auth_token)
return
auth_url = gdata.gauth.generate_auth_sub_url(
'http://gauthmachine.appspot.com/authsub', scopes, True)
print 'with a private key, get ready for this URL', auth_url
else:
if client.auth_token is None:
if session_token:
client.auth_token = gdata.gauth.AuthSubToken(session_token,
scopes)
self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
client.auth_token)
return
elif auth_sub_token:
client.auth_token = gdata.gauth.AuthSubToken(auth_sub_token,
scopes)
client.upgrade_token()
self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
client.auth_token)
return
auth_url = gdata.gauth.generate_auth_sub_url(
'http://gauthmachine.appspot.com/authsub', scopes)
print 'Visit the following URL in your browser to authorize this app:'
print str(auth_url)
print 'After agreeing to authorize the app, copy the token value from'
print ' the URL. Example: "www.google.com/?token=ab12" token value is'
print ' ab12'
token_value = raw_input('Please enter the token value: ')
if private_key is not None:
single_use_token = gdata.gauth.SecureAuthSubToken(
token_value, private_key, scopes)
else:
single_use_token = gdata.gauth.AuthSubToken(token_value, scopes)
client.auth_token = single_use_token
client.upgrade_token()
elif auth_type == OAUTH:
if oauth_type is None:
oauth_type = int(self.get_param(
'oauth_type', 'Please choose the authorization mechanism you want'
' to use.\n'
'1. use an HMAC signature using your consumer key and secret\n'
'2. use RSA with your private key to sign requests\n',
reuse=True))
consumer_key = self.get_param(
'consumer_key', 'Please enter your OAuth consumer key '
'which identifies your app', reuse=True)
if oauth_type == HMAC:
consumer_secret = self.get_param(
'consumer_secret', 'Please enter your OAuth consumer secret '
'which you share with the OAuth provider', True, reuse=False)
# Swap out this code once the client supports requesting an oauth
# token.
# Get a request token.
request_token = client.get_oauth_token(
scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key,
consumer_secret=consumer_secret)
elif oauth_type == RSA:
rsa_private_key = self.get_param(
'rsa_private_key',
'Please provide the location of your RSA private key which\n'
' corresponds to the certificate you have uploaded for your'
' domain.',
reuse=True)
try:
private_key_file = open(rsa_private_key, 'rb')
private_key = private_key_file.read()
private_key_file.close()
except IOError:
print 'Unable to read private key from file'
request_token = client.get_oauth_token(
scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key,
rsa_private_key=private_key)
else:
print 'Invalid OAuth signature type'
return None
# Authorize the request token in the browser.
print 'Visit the following URL in your browser to authorize this app:'
print str(request_token.generate_authorization_url())
print 'After agreeing to authorize the app, copy the URL from the browser\'s'
print ' address bar.'
url = raw_input('Please enter the url: ')
gdata.gauth.authorize_request_token(request_token, url)
# Exchange for an access token.
client.auth_token = client.get_access_token(request_token)
else:
print 'Invalid authorization type.'
return None
if client.auth_token:
self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
client.auth_token)
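# Illustrative sketch, not part of the class: SettingsUtil.get_param above
# accepts a flag either as '--name=value' or as '--name value' before falling
# back to an interactive prompt. A stand-alone version of that argv scan (the
# argv list in the trailing comment is hypothetical):
def _example_scan_argv(argv, name):
    value = None
    for i in range(len(argv)):
        if argv[i].startswith('--%s=' % name):
            value = argv[i].split('=')[1]
        elif argv[i] == '--%s' % name:
            value = argv[i + 1]
    return value
# _example_scan_argv(['prog', '--email=user@example.com'], 'email')
# -> 'user@example.com'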
def get_param(name, prompt='', secret=False, ask=True):
settings = SettingsUtil()
return settings.get_param(name=name, prompt=prompt, secret=secret, ask=ask)
def authorize_client(client, auth_type=None, service=None, source=None,
scopes=None, oauth_type=None, consumer_key=None,
consumer_secret=None):
"""Uses command line arguments, or prompts user for token values."""
settings = SettingsUtil()
return settings.authorize_client(client=client, auth_type=auth_type,
service=service, source=source,
scopes=scopes, oauth_type=oauth_type,
consumer_key=consumer_key,
consumer_secret=consumer_secret)
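# Usage sketch (hedged; the client class and service name below are only an
# example, not something this module mandates): a sample script typically
# builds a gdata service client, then hands it to authorize_client, which
# prompts on stdin for any values that were not supplied as --flags.
#
# import gdata.docs.client
# client = gdata.docs.client.DocsClient(source='example-app')
# authorize_client(client, auth_type=CLIENT_LOGIN, service='writely',
#                  source='example-app')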
def print_options():
"""Displays usage information, available command line params."""
# TODO: fill in the usage description for authorizing the client.
print ''
| boxed/CMi | web_frontend/gdata/sample_util.py | Python | mit | 10,714 | [
"VisIt"
] | 24751ae2fb6827b35b2fcdbbfc22b1274c142d7b6261fc42b74101d7515eba9d |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from geonode.tests.base import GeoNodeBaseTestSupport
import base64
import json
import os
import shutil
import tempfile
from os.path import basename, splitext
from django.contrib.auth import get_user_model
from django.http import HttpRequest
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from guardian.shortcuts import assign_perm, get_anonymous_user
from geonode import geoserver
from geonode.decorators import on_ogc_backend
from geonode.geoserver.helpers import OGC_Servers_Handler, extract_name_from_sld
from geonode.layers.populate_layers_data import create_layer_data
from geonode.layers.models import Layer
san_andres_y_providencia_sld = """<?xml version="1.0" encoding="UTF-8"?>
<sld:StyledLayerDescriptor xmlns:sld="http://www.opengis.net/sld"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:ogc="http://www.opengis.net/ogc"
xmlns:gml="http://www.opengis.net/gml"
version="1.0.0"
xsi:schemaLocation="http://www.opengis.net/sld http://schemas.opengis.net/sld/1.0.0/StyledLayerDescriptor.xsd">
<sld:NamedLayer>
<sld:Name>geonode:san_andres_y_providencia_administrative</sld:Name>
<sld:UserStyle>
<sld:Name>san_andres_y_providencia_administrative</sld:Name>
<sld:Title>San Andres y Providencia Administrative</sld:Title>
<sld:IsDefault>1</sld:IsDefault>
<sld:FeatureTypeStyle>
<sld:Rule>
<sld:LineSymbolizer>
<sld:Stroke>
<sld:CssParameter name="stroke">#880000</sld:CssParameter>
<sld:CssParameter name="stroke-width">3</sld:CssParameter>
<sld:CssParameter name="stroke-dasharray">4.0 4.0</sld:CssParameter>
</sld:Stroke>
</sld:LineSymbolizer>
</sld:Rule>
</sld:FeatureTypeStyle>
<sld:FeatureTypeStyle>
<sld:Rule>
<sld:LineSymbolizer>
<sld:Stroke>
<sld:CssParameter name="stroke">#ffbbbb</sld:CssParameter>
<sld:CssParameter name="stroke-width">2</sld:CssParameter>
</sld:Stroke>
</sld:LineSymbolizer>
</sld:Rule>
</sld:FeatureTypeStyle>
</sld:UserStyle>
</sld:NamedLayer>
</sld:StyledLayerDescriptor>
"""
lac_sld = """<?xml version="1.0" encoding="UTF-8"?>
<StyledLayerDescriptor xmlns="http://www.opengis.net/sld"
xmlns:ogc="http://www.opengis.net/ogc"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
version="1.1.0" xmlns:xlink="http://www.w3.org/1999/xlink"
xsi:schemaLocation="http://www.opengis.net/sld http://schemas.opengis.net/sld/1.1.0/StyledLayerDescriptor.xsd"
xmlns:se="http://www.opengis.net/se">
<NamedLayer>
<se:Name>LAC_NonIndigenous_Access_to_Sanitation2</se:Name>
<UserStyle>
<se:Name>LAC NonIndigenous Access to Sanitation</se:Name>
<se:FeatureTypeStyle>
<se:Rule>
<se:Name> Low (25 - 40%) </se:Name>
<se:Description>
<se:Title> Low (25 - 40%) </se:Title>
</se:Description>
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">
<ogc:And>
<ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyName>NonInd</ogc:PropertyName>
<ogc:Literal>24.89999999999999858</ogc:Literal>
</ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyName>NonInd</ogc:PropertyName>
<ogc:Literal>39.89999999999999858</ogc:Literal>
</ogc:PropertyIsLessThanOrEqualTo>
</ogc:And>
</ogc:Filter>
<se:PolygonSymbolizer>
<se:Fill>
<se:SvgParameter name="fill">#ff8b16</se:SvgParameter>
</se:Fill>
<se:Stroke>
<se:SvgParameter name="stroke">#000001</se:SvgParameter>
<se:SvgParameter name="stroke-width">0.1</se:SvgParameter>
<se:SvgParameter name="stroke-linejoin">bevel</se:SvgParameter>
</se:Stroke>
</se:PolygonSymbolizer>
</se:Rule>
<se:Rule>
<se:Name> Medium Low (40 - 65 %)</se:Name>
<se:Description>
<se:Title> Medium Low (40 - 65 %)</se:Title>
</se:Description>
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">
<ogc:And>
<ogc:PropertyIsGreaterThan>
<ogc:PropertyName>NonInd</ogc:PropertyName>
<ogc:Literal>39.89999999999999858</ogc:Literal>
</ogc:PropertyIsGreaterThan>
<ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyName>NonInd</ogc:PropertyName>
<ogc:Literal>64.90000000000000568</ogc:Literal>
</ogc:PropertyIsLessThanOrEqualTo>
</ogc:And>
</ogc:Filter>
<se:PolygonSymbolizer>
<se:Fill>
<se:SvgParameter name="fill">#fffb0b</se:SvgParameter>
</se:Fill>
<se:Stroke>
<se:SvgParameter name="stroke">#000001</se:SvgParameter>
<se:SvgParameter name="stroke-width">0.1</se:SvgParameter>
<se:SvgParameter name="stroke-linejoin">bevel</se:SvgParameter>
</se:Stroke>
</se:PolygonSymbolizer>
</se:Rule>
<se:Rule>
<se:Name> Medium (65 - 70 %) </se:Name>
<se:Description>
<se:Title> Medium (65 - 70 %) </se:Title>
</se:Description>
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">
<ogc:And>
<ogc:PropertyIsGreaterThan>
<ogc:PropertyName>NonInd</ogc:PropertyName>
<ogc:Literal>64.90000000000000568</ogc:Literal>
</ogc:PropertyIsGreaterThan>
<ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyName>NonInd</ogc:PropertyName>
<ogc:Literal>69.90000000000000568</ogc:Literal>
</ogc:PropertyIsLessThanOrEqualTo>
</ogc:And>
</ogc:Filter>
<se:PolygonSymbolizer>
<se:Fill>
<se:SvgParameter name="fill">#55d718</se:SvgParameter>
</se:Fill>
<se:Stroke>
<se:SvgParameter name="stroke">#000001</se:SvgParameter>
<se:SvgParameter name="stroke-width">0.1</se:SvgParameter>
<se:SvgParameter name="stroke-linejoin">bevel</se:SvgParameter>
</se:Stroke>
</se:PolygonSymbolizer>
</se:Rule>
<se:Rule>
<se:Name> Medium High (70 - 85 %) </se:Name>
<se:Description>
<se:Title> Medium High (70 - 85 %) </se:Title>
</se:Description>
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">
<ogc:And>
<ogc:PropertyIsGreaterThan>
<ogc:PropertyName>NonInd</ogc:PropertyName>
<ogc:Literal>69.90000000000000568</ogc:Literal>
</ogc:PropertyIsGreaterThan>
<ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyName>NonInd</ogc:PropertyName>
<ogc:Literal>84.90000000000000568</ogc:Literal>
</ogc:PropertyIsLessThanOrEqualTo>
</ogc:And>
</ogc:Filter>
<se:PolygonSymbolizer>
<se:Fill>
<se:SvgParameter name="fill">#3f7122</se:SvgParameter>
</se:Fill>
<se:Stroke>
<se:SvgParameter name="stroke">#000001</se:SvgParameter>
<se:SvgParameter name="stroke-width">0.1</se:SvgParameter>
<se:SvgParameter name="stroke-linejoin">bevel</se:SvgParameter>
</se:Stroke>
</se:PolygonSymbolizer>
</se:Rule>
<se:Rule>
<se:Name> High (85 - 93 %) </se:Name>
<se:Description>
<se:Title> High (85 - 93 %) </se:Title>
</se:Description>
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">
<ogc:And>
<ogc:PropertyIsGreaterThan>
<ogc:PropertyName>NonInd</ogc:PropertyName>
<ogc:Literal>84.90000000000000568</ogc:Literal>
</ogc:PropertyIsGreaterThan>
<ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyName>NonInd</ogc:PropertyName>
<ogc:Literal>92.90000000000000568</ogc:Literal>
</ogc:PropertyIsLessThanOrEqualTo>
</ogc:And>
</ogc:Filter>
<se:PolygonSymbolizer>
<se:Fill>
<se:SvgParameter name="fill">#76ffff</se:SvgParameter>
</se:Fill>
<se:Stroke>
<se:SvgParameter name="stroke">#000001</se:SvgParameter>
<se:SvgParameter name="stroke-width">0.1</se:SvgParameter>
<se:SvgParameter name="stroke-linejoin">bevel</se:SvgParameter>
</se:Stroke>
</se:PolygonSymbolizer>
</se:Rule>
<se:Rule>
<se:Name> Very High (93 - 96 %) </se:Name>
<se:Description>
<se:Title> Very High (93 - 96 %) </se:Title>
</se:Description>
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">
<ogc:And>
<ogc:PropertyIsGreaterThan>
<ogc:PropertyName>NonInd</ogc:PropertyName>
<ogc:Literal>92.90000000000000568</ogc:Literal>
</ogc:PropertyIsGreaterThan>
<ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyName>NonInd</ogc:PropertyName>
<ogc:Literal>96.09999999999999432</ogc:Literal>
</ogc:PropertyIsLessThanOrEqualTo>
</ogc:And>
</ogc:Filter>
<se:PolygonSymbolizer>
<se:Fill>
<se:SvgParameter name="fill">#0a4291</se:SvgParameter>
</se:Fill>
<se:Stroke>
<se:SvgParameter name="stroke">#000001</se:SvgParameter>
<se:SvgParameter name="stroke-width">0.1</se:SvgParameter>
<se:SvgParameter name="stroke-linejoin">bevel</se:SvgParameter>
</se:Stroke>
</se:PolygonSymbolizer>
</se:Rule>
<se:Rule>
<se:Name> Country not surveyed</se:Name>
<se:Description>
<se:Title> Country not surveyed</se:Title>
</se:Description>
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">
<ogc:And>
<ogc:PropertyIsGreaterThan>
<ogc:PropertyName>NonInd</ogc:PropertyName>
<ogc:Literal>-999</ogc:Literal>
</ogc:PropertyIsGreaterThan>
<ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyName>NonInd</ogc:PropertyName>
<ogc:Literal>24.89999999999999858</ogc:Literal>
</ogc:PropertyIsLessThanOrEqualTo>
</ogc:And>
</ogc:Filter>
<se:PolygonSymbolizer>
<se:Fill>
<se:SvgParameter name="fill">#d9d9d9</se:SvgParameter>
</se:Fill>
<se:Stroke>
<se:SvgParameter name="stroke">#000001</se:SvgParameter>
<se:SvgParameter name="stroke-width">0.1</se:SvgParameter>
<se:SvgParameter name="stroke-linejoin">bevel</se:SvgParameter>
</se:Stroke>
</se:PolygonSymbolizer>
</se:Rule>
</se:FeatureTypeStyle>
</UserStyle>
</NamedLayer>
</StyledLayerDescriptor>
"""
freshgwabs2_sld = """<?xml version="1.0" encoding="UTF-8"?>
<sld:StyledLayerDescriptor xmlns:sld="http://www.opengis.net/sld"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:ogc="http://www.opengis.net/ogc"
xmlns:gml="http://www.opengis.net/gml"
version="1.0.0"
xsi:schemaLocation="http://www.opengis.net/sld http://schemas.opengis.net/sld/1.0.0/StyledLayerDescriptor.xsd">
<sld:NamedLayer>
<sld:Name>geonode:freshgwabs2</sld:Name>
<sld:UserStyle>
<sld:Name>freshgwabs2</sld:Name>
<sld:IsDefault>1</sld:IsDefault>
<sld:FeatureTypeStyle>
<sld:Rule>
         <sld:Name>&lt; 1112 million cubic metres</sld:Name>
         <sld:Title>&lt; 1112 million cubic metres</sld:Title>
<ogc:Filter>
<ogc:And>
<ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyName>y2007</ogc:PropertyName>
<ogc:Literal>0</ogc:Literal>
</ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyName>y2007</ogc:PropertyName>
<ogc:Literal>1112</ogc:Literal>
</ogc:PropertyIsLessThanOrEqualTo>
</ogc:And>
</ogc:Filter>
<sld:PolygonSymbolizer>
<sld:Fill>
<sld:CssParameter name="fill">#ffe9b1</sld:CssParameter>
</sld:Fill>
<sld:Stroke>
<sld:CssParameter name="stroke">#000001</sld:CssParameter>
<sld:CssParameter name="stroke-width">0.1</sld:CssParameter>
</sld:Stroke>
</sld:PolygonSymbolizer>
</sld:Rule>
<sld:Rule>
<sld:Name> 1112 - 4794 million cubic metres</sld:Name>
<sld:Title> 1112 - 4794 million cubic metres</sld:Title>
<ogc:Filter>
<ogc:And>
<ogc:PropertyIsGreaterThan>
<ogc:PropertyName>y2007</ogc:PropertyName>
<ogc:Literal>1112</ogc:Literal>
</ogc:PropertyIsGreaterThan>
<ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyName>y2007</ogc:PropertyName>
<ogc:Literal>4794</ogc:Literal>
</ogc:PropertyIsLessThanOrEqualTo>
</ogc:And>
</ogc:Filter>
<sld:PolygonSymbolizer>
<sld:Fill>
<sld:CssParameter name="fill">#eaad57</sld:CssParameter>
</sld:Fill>
<sld:Stroke>
<sld:CssParameter name="stroke">#000001</sld:CssParameter>
<sld:CssParameter name="stroke-width">0.1</sld:CssParameter>
</sld:Stroke>
</sld:PolygonSymbolizer>
</sld:Rule>
<sld:Rule>
<sld:Name> 4794 - 12096 million cubic metres</sld:Name>
<sld:Title> 4794 - 12096 million cubic metres</sld:Title>
<ogc:Filter>
<ogc:And>
<ogc:PropertyIsGreaterThan>
<ogc:PropertyName>y2007</ogc:PropertyName>
<ogc:Literal>4794</ogc:Literal>
</ogc:PropertyIsGreaterThan>
<ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyName>y2007</ogc:PropertyName>
<ogc:Literal>12096</ogc:Literal>
</ogc:PropertyIsLessThanOrEqualTo>
</ogc:And>
</ogc:Filter>
<sld:PolygonSymbolizer>
<sld:Fill>
<sld:CssParameter name="fill">#ff7f1d</sld:CssParameter>
</sld:Fill>
<sld:Stroke>
<sld:CssParameter name="stroke">#000001</sld:CssParameter>
<sld:CssParameter name="stroke-width">0.1</sld:CssParameter>
</sld:Stroke>
</sld:PolygonSymbolizer>
</sld:Rule>
<sld:Rule>
<sld:Name> 12096 - 28937 million cubic metres</sld:Name>
<sld:Title> 12096 - 28937 million cubic metres</sld:Title>
<ogc:Filter>
<ogc:And>
<ogc:PropertyIsGreaterThan>
<ogc:PropertyName>y2007</ogc:PropertyName>
<ogc:Literal>12096</ogc:Literal>
</ogc:PropertyIsGreaterThan>
<ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyName>y2007</ogc:PropertyName>
<ogc:Literal>28937</ogc:Literal>
</ogc:PropertyIsLessThanOrEqualTo>
</ogc:And>
</ogc:Filter>
<sld:PolygonSymbolizer>
<sld:Fill>
<sld:CssParameter name="fill">#af8a33</sld:CssParameter>
</sld:Fill>
<sld:Stroke>
<sld:CssParameter name="stroke">#000001</sld:CssParameter>
<sld:CssParameter name="stroke-width">0.1</sld:CssParameter>
</sld:Stroke>
</sld:PolygonSymbolizer>
</sld:Rule>
<sld:Rule>
<sld:Name>> 28937 million cubic metres</sld:Name>
<sld:Title>> 28937 million cubic metres</sld:Title>
<ogc:Filter>
<ogc:And>
<ogc:PropertyIsGreaterThan>
<ogc:PropertyName>y2007</ogc:PropertyName>
<ogc:Literal>28937</ogc:Literal>
</ogc:PropertyIsGreaterThan>
<ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyName>y2007</ogc:PropertyName>
<ogc:Literal>106910</ogc:Literal>
</ogc:PropertyIsLessThanOrEqualTo>
</ogc:And>
</ogc:Filter>
<sld:PolygonSymbolizer>
<sld:Fill>
<sld:CssParameter name="fill">#5b4b2d</sld:CssParameter>
</sld:Fill>
<sld:Stroke>
<sld:CssParameter name="stroke">#000001</sld:CssParameter>
<sld:CssParameter name="stroke-width">0.1</sld:CssParameter>
</sld:Stroke>
</sld:PolygonSymbolizer>
</sld:Rule>
<sld:Rule>
<sld:Name>No data for 2007</sld:Name>
<sld:Title>No data for 2007</sld:Title>
<ogc:Filter>
<ogc:And>
<ogc:PropertyIsGreaterThan>
<ogc:PropertyName>y2007</ogc:PropertyName>
<ogc:Literal>-99</ogc:Literal>
</ogc:PropertyIsGreaterThan>
<ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyName>y2007</ogc:PropertyName>
<ogc:Literal>0</ogc:Literal>
</ogc:PropertyIsLessThanOrEqualTo>
</ogc:And>
</ogc:Filter>
<sld:PolygonSymbolizer>
<sld:Fill>
<sld:CssParameter name="fill">#d9d9d9</sld:CssParameter>
</sld:Fill>
<sld:Stroke>
<sld:CssParameter name="stroke">#000001</sld:CssParameter>
</sld:Stroke>
</sld:PolygonSymbolizer>
</sld:Rule>
</sld:FeatureTypeStyle>
</sld:UserStyle>
</sld:NamedLayer>
</sld:StyledLayerDescriptor>
"""
raster_sld = """<?xml version="1.0" ?>
<sld:StyledLayerDescriptor version="1.0.0" xmlns="http://www.opengis.net/sld"
xmlns:gml="http://www.opengis.net/gml" xmlns:ogc="http://www.opengis.net/ogc"
xmlns:sld="http://www.opengis.net/sld">
<sld:UserLayer>
<sld:LayerFeatureConstraints>
<sld:FeatureTypeConstraint/>
</sld:LayerFeatureConstraints>
<sld:UserStyle>
<sld:Name>geonode-geonode_gwpollriskafriotest</sld:Name>
<sld:Title/>
<sld:FeatureTypeStyle>
<sld:Name/>
<sld:Rule>
<sld:RasterSymbolizer>
<sld:Geometry>
<ogc:PropertyName>grid</ogc:PropertyName>
</sld:Geometry>
<sld:Opacity>1</sld:Opacity>
<sld:ColorMap>
<sld:ColorMapEntry color="#000000" opacity="1.0" quantity="77"/>
<sld:ColorMapEntry color="#FFFFFF" opacity="1.0" quantity="214"/>
</sld:ColorMap>
</sld:RasterSymbolizer>
</sld:Rule>
</sld:FeatureTypeStyle>
</sld:UserStyle>
</sld:UserLayer>
</sld:StyledLayerDescriptor>
"""
line_sld = """<?xml version="1.0" encoding="UTF-8"?>
<StyledLayerDescriptor xmlns="http://www.opengis.net/sld"
xmlns:ogc="http://www.opengis.net/ogc"
xsi:schemaLocation="http://www.opengis.net/sld http://schemas.opengis.net/sld/1.1.0/StyledLayerDescriptor.xsd"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
version="1.1.0" xmlns:se="http://www.opengis.net/se">
<NamedLayer>
<se:Name>line_3</se:Name>
<UserStyle>
<se:Name>line 3</se:Name>
<se:FeatureTypeStyle>
<se:Rule>
<se:Name>Single symbol</se:Name>
<se:LineSymbolizer>
<se:Stroke>
<se:SvgParameter name="stroke">#db1e2a</se:SvgParameter>
<se:SvgParameter name="stroke-width">2</se:SvgParameter>
<se:SvgParameter name="stroke-linejoin">round</se:SvgParameter>
<se:SvgParameter name="stroke-linecap">round</se:SvgParameter>
<se:SvgParameter name="stroke-dasharray">2 7</se:SvgParameter>
</se:Stroke>
</se:LineSymbolizer>
</se:Rule>
</se:FeatureTypeStyle>
</UserStyle>
</NamedLayer>
</StyledLayerDescriptor>
"""
SLDS = {
'san_andres_y_providencia': san_andres_y_providencia_sld,
'lac': lac_sld,
'freshgwabs2': freshgwabs2_sld,
'raster': raster_sld,
'line': line_sld
}
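# SLDS maps each style basename to the SLD document defined above;
# test_style_validity_and_name writes these out as temporary "<basename>.sld"
# files and checks that extract_name_from_sld recovers the expected style name.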
class LayerTests(GeoNodeBaseTestSupport):
type = 'layer'
def setUp(self):
super(LayerTests, self).setUp()
self.user = 'admin'
self.passwd = 'admin'
create_layer_data()
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
def test_style_manager(self):
"""
Ensures the layer_style_manage route returns a 200.
"""
layer = Layer.objects.all()[0]
bob = get_user_model().objects.get(username='bobby')
assign_perm('change_layer_style', bob, layer)
logged_in = self.client.login(username='bobby', password='bob')
self.assertEquals(logged_in, True)
response = self.client.get(
reverse(
'layer_style_manage', args=(
layer.alternate,)))
self.assertEqual(response.status_code, 200)
form_data = {'default_style': 'polygon'}
response = self.client.post(
reverse(
'layer_style_manage', args=(
layer.alternate,)), data=form_data)
self.assertEquals(response.status_code, 302)
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
def test_style_validity_and_name(self):
# Check that including an SLD with a valid shapefile results in the SLD
# getting picked up
d = None
try:
d = tempfile.mkdtemp()
for f in ("san_andres_y_providencia.sld",
"lac.sld",
"freshgwabs2.sld",
"raster.sld",
"line.sld",):
path = os.path.join(d, f)
f = open(path, "wb")
f.write(SLDS[splitext(basename(path))[0]])
f.close()
# Test 'san_andres_y_providencia.sld'
san_andres_y_providencia_sld_file = os.path.join(
d, "san_andres_y_providencia.sld")
san_andres_y_providencia_sld_xml = open(
san_andres_y_providencia_sld_file).read()
san_andres_y_providencia_sld_name = extract_name_from_sld(
None, san_andres_y_providencia_sld_xml)
self.assertEquals(
san_andres_y_providencia_sld_name,
'san_andres_y_providencia_administrative')
# Test 'lac.sld'
lac_sld_file = os.path.join(d, "lac.sld")
lac_sld_xml = open(lac_sld_file).read()
lac_sld_name = extract_name_from_sld(
None, lac_sld_xml, sld_file=lac_sld_file)
self.assertEquals(lac_sld_name,
'LAC NonIndigenous Access to Sanitation')
# Test 'freshgwabs2.sld'
freshgwabs2_sld_file = os.path.join(d, "freshgwabs2.sld")
freshgwabs2_sld_xml = open(freshgwabs2_sld_file).read()
freshgwabs2_sld_name = extract_name_from_sld(
None, freshgwabs2_sld_xml, sld_file=freshgwabs2_sld_file)
self.assertEquals(freshgwabs2_sld_name, 'freshgwabs2')
# Test 'raster.sld'
raster_sld_file = os.path.join(d, "raster.sld")
raster_sld_xml = open(raster_sld_file).read()
raster_sld_name = extract_name_from_sld(
None, raster_sld_xml, sld_file=raster_sld_file)
self.assertEquals(
raster_sld_name,
'geonode-geonode_gwpollriskafriotest')
# Test 'line.sld'
line_sld_file = os.path.join(d, "line.sld")
line_sld_xml = open(line_sld_file).read()
line_sld_name = extract_name_from_sld(
None, line_sld_xml, sld_file=line_sld_file)
self.assertEquals(line_sld_name, 'line 3')
finally:
if d is not None:
shutil.rmtree(d)
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
def test_feature_edit_check(self):
"""Verify that the feature_edit_check view is behaving as expected
"""
# Setup some layer names to work with
valid_layer_typename = Layer.objects.all()[0].alternate
Layer.objects.all()[0].set_default_permissions()
invalid_layer_typename = "n0ch@nc3"
        # Test that an invalid layer.typename is handled properly
response = self.client.post(
reverse(
'feature_edit_check',
args=(
invalid_layer_typename,
)))
self.assertEquals(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEquals(response_json['authorized'], False)
# First test un-authenticated
response = self.client.post(
reverse(
'feature_edit_check',
args=(
valid_layer_typename,
)))
response_json = json.loads(response.content)
self.assertEquals(response_json['authorized'], False)
# Next Test with a user that does NOT have the proper perms
logged_in = self.client.login(username='bobby', password='bob')
self.assertEquals(logged_in, True)
response = self.client.post(
reverse(
'feature_edit_check',
args=(
valid_layer_typename,
)))
response_json = json.loads(response.content)
self.assertEquals(response_json['authorized'], True)
# Login as a user with the proper permission and test the endpoint
logged_in = self.client.login(username='admin', password='admin')
self.assertEquals(logged_in, True)
response = self.client.post(
reverse(
'feature_edit_check',
args=(
valid_layer_typename,
)))
response_json = json.loads(response.content)
self.assertEquals(response_json['authorized'], True)
layer = Layer.objects.all()[0]
layer.storeType = "dataStore"
layer.save()
# Test that the method returns authorized=True if it's a datastore
if settings.OGC_SERVER['default']['DATASTORE']:
# The check was moved from the template into the view
response = self.client.post(
reverse(
'feature_edit_check',
args=(
valid_layer_typename,
)))
response_json = json.loads(response.content)
self.assertEquals(response_json['authorized'], True)
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
def test_layer_acls(self):
""" Verify that the layer_acls view is behaving as expected
"""
# Test that HTTP_AUTHORIZATION in request.META is working properly
valid_uname_pw = '%s:%s' % ('bobby', 'bob')
invalid_uname_pw = '%s:%s' % ('n0t', 'v@l1d')
valid_auth_headers = {
'HTTP_AUTHORIZATION': 'basic ' + base64.b64encode(valid_uname_pw),
}
invalid_auth_headers = {
'HTTP_AUTHORIZATION': 'basic ' +
base64.b64encode(invalid_uname_pw),
}
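        # 'basic ' + base64("user:password") reproduces an HTTP Basic Authorization
        # header value, which the layer_acls view decodes to authenticate the request.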
bob = get_user_model().objects.get(username='bobby')
layer_ca = Layer.objects.get(alternate='geonode:CA')
assign_perm('change_layer_data', bob, layer_ca)
        # Test that a request supplying the geoserver credentials returns
        # the expected json
expected_result = {
u'email': u'bobby@bob.com',
u'fullname': u'bobby',
u'is_anonymous': False,
u'is_superuser': False,
u'name': u'bobby',
u'ro': [u'geonode:layer2',
u'geonode:mylayer',
u'geonode:foo',
u'geonode:whatever',
u'geonode:fooey',
u'geonode:quux',
u'geonode:fleem'],
u'rw': [u'geonode:CA']
}
response = self.client.get(reverse('layer_acls'), **valid_auth_headers)
response_json = json.loads(response.content)
# 'ro' and 'rw' are unsorted collections
self.assertEquals(sorted(expected_result), sorted(response_json))
        # Test that a request supplying invalid credentials returns the
        # appropriate error code
response = self.client.get(
reverse('layer_acls'),
**invalid_auth_headers)
self.assertEquals(response.status_code, 401)
        # Test logging in using Django's normal auth system
self.client.login(username='admin', password='admin')
# Basic check that the returned content is at least valid json
response = self.client.get(reverse('layer_acls'))
response_json = json.loads(response.content)
self.assertEquals('admin', response_json['fullname'])
self.assertEquals('ad@m.in', response_json['email'])
# TODO Lots more to do here once jj0hns0n understands the ACL system
# better
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
def test_resolve_user(self):
"""Verify that the resolve_user view is behaving as expected
"""
# Test that HTTP_AUTHORIZATION in request.META is working properly
valid_uname_pw = "%s:%s" % ('admin', 'admin')
invalid_uname_pw = "%s:%s" % ("n0t", "v@l1d")
valid_auth_headers = {
'HTTP_AUTHORIZATION': 'basic ' + base64.b64encode(valid_uname_pw),
}
invalid_auth_headers = {
'HTTP_AUTHORIZATION': 'basic ' +
base64.b64encode(invalid_uname_pw),
}
response = self.client.get(
reverse('layer_resolve_user'),
**valid_auth_headers)
response_json = json.loads(response.content)
self.assertEquals({'geoserver': False,
'superuser': True,
'user': 'admin',
'fullname': 'admin',
'email': 'ad@m.in'},
response_json)
        # Test that a request supplying invalid credentials returns the
        # appropriate error code
response = self.client.get(
reverse('layer_acls'),
**invalid_auth_headers)
self.assertEquals(response.status_code, 401)
        # Test logging in using Django's normal auth system
self.client.login(username='admin', password='admin')
# Basic check that the returned content is at least valid json
response = self.client.get(reverse('layer_resolve_user'))
response_json = json.loads(response.content)
self.assertEquals('admin', response_json['user'])
self.assertEquals('admin', response_json['fullname'])
self.assertEquals('ad@m.in', response_json['email'])
class UtilsTests(GeoNodeBaseTestSupport):
type = 'layer'
def setUp(self):
super(UtilsTests, self).setUp()
self.OGC_DEFAULT_SETTINGS = {
'default': {
'BACKEND': 'geonode.geoserver',
'LOCATION': 'http://localhost:8080/geoserver/',
'USER': 'admin',
'PASSWORD': 'geoserver',
'MAPFISH_PRINT_ENABLED': True,
'PRINT_NG_ENABLED': True,
'GEONODE_SECURITY_ENABLED': True,
'GEOFENCE_SECURITY_ENABLED': True,
'GEOGIG_ENABLED': False,
'WMST_ENABLED': False,
'BACKEND_WRITE_ENABLED': True,
'WPS_ENABLED': False,
'DATASTORE': str(),
'GEOGIG_DATASTORE_DIR': str(),
}
}
self.UPLOADER_DEFAULT_SETTINGS = {
'BACKEND': 'geonode.rest',
'OPTIONS': {
'TIME_ENABLED': False,
'MOSAIC_ENABLED': False,
'GEOGIG_ENABLED': False}}
self.DATABASE_DEFAULT_SETTINGS = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'development.db'}}
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
def test_ogc_server_settings(self):
"""
Tests the OGC Servers Handler class.
"""
with override_settings(OGC_SERVER=self.OGC_DEFAULT_SETTINGS, UPLOADER=self.UPLOADER_DEFAULT_SETTINGS):
OGC_SERVER = self.OGC_DEFAULT_SETTINGS.copy()
OGC_SERVER.update(
{'PUBLIC_LOCATION': 'http://localhost:8080/geoserver/'})
ogc_settings = OGC_Servers_Handler(OGC_SERVER)['default']
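            # The handler wraps the raw OGC_SERVER dict and exposes each settings key
            # as an attribute on the 'default' entry; the assertions below compare
            # those attributes against the plain dict values one by one.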
default = OGC_SERVER.get('default')
self.assertEqual(ogc_settings.server, default)
self.assertEqual(ogc_settings.BACKEND, default.get('BACKEND'))
self.assertEqual(ogc_settings.LOCATION, default.get('LOCATION'))
self.assertEqual(
ogc_settings.PUBLIC_LOCATION,
default.get('PUBLIC_LOCATION'))
self.assertEqual(ogc_settings.USER, default.get('USER'))
self.assertEqual(ogc_settings.PASSWORD, default.get('PASSWORD'))
self.assertEqual(ogc_settings.DATASTORE, str())
self.assertEqual(ogc_settings.credentials, ('admin', 'geoserver'))
self.assertTrue(ogc_settings.MAPFISH_PRINT_ENABLED)
self.assertTrue(ogc_settings.PRINT_NG_ENABLED)
self.assertTrue(ogc_settings.GEONODE_SECURITY_ENABLED)
self.assertFalse(ogc_settings.GEOGIG_ENABLED)
self.assertFalse(ogc_settings.WMST_ENABLED)
self.assertTrue(ogc_settings.BACKEND_WRITE_ENABLED)
self.assertFalse(ogc_settings.WPS_ENABLED)
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
def test_ogc_server_defaults(self):
"""
Tests that OGC_SERVER_SETTINGS are built if they do not exist in the settings.
"""
OGC_SERVER = {'default': dict()}
defaults = self.OGC_DEFAULT_SETTINGS.get('default')
ogc_settings = OGC_Servers_Handler(OGC_SERVER)['default']
self.assertEqual(ogc_settings.server, defaults)
self.assertEqual(ogc_settings.rest, defaults['LOCATION'] + 'rest')
self.assertEqual(ogc_settings.ows, defaults['LOCATION'] + 'ows')
# Make sure we get None vs a KeyError when the key does not exist
self.assertIsNone(ogc_settings.SFDSDFDSF)
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
def test_importer_configuration(self):
"""
Tests that the OGC_Servers_Handler throws an ImproperlyConfigured exception when using the importer
backend without a vector database and a datastore configured.
"""
database_settings = self.DATABASE_DEFAULT_SETTINGS.copy()
ogc_server_settings = self.OGC_DEFAULT_SETTINGS.copy()
uploader_settings = self.UPLOADER_DEFAULT_SETTINGS.copy()
uploader_settings['BACKEND'] = 'geonode.importer'
        self.assertTrue('geonode_imports' not in database_settings.keys())
with self.settings(UPLOADER=uploader_settings, OGC_SERVER=ogc_server_settings, DATABASES=database_settings):
# Test the importer backend without specifying a datastore or
# corresponding database.
with self.assertRaises(ImproperlyConfigured):
OGC_Servers_Handler(ogc_server_settings)['default']
ogc_server_settings['default']['DATASTORE'] = 'geonode_imports'
# Test the importer backend with a datastore but no corresponding
# database.
with self.settings(UPLOADER=uploader_settings, OGC_SERVER=ogc_server_settings, DATABASES=database_settings):
with self.assertRaises(ImproperlyConfigured):
OGC_Servers_Handler(ogc_server_settings)['default']
database_settings['geonode_imports'] = database_settings[
'default'].copy()
database_settings['geonode_imports'].update(
{'NAME': 'geonode_imports'})
# Test the importer backend with a datastore and a corresponding
# database, no exceptions should be thrown.
with self.settings(UPLOADER=uploader_settings, OGC_SERVER=ogc_server_settings, DATABASES=database_settings):
OGC_Servers_Handler(ogc_server_settings)['default']
class SecurityTest(GeoNodeBaseTestSupport):
type = 'layer'
"""
Tests for the Geonode security app.
"""
def setUp(self):
super(SecurityTest, self).setUp()
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
def test_login_middleware(self):
"""
Tests the Geonode login required authentication middleware.
"""
from geonode.security.middleware import LoginRequiredMiddleware
middleware = LoginRequiredMiddleware()
white_list = [
reverse('account_ajax_login'),
reverse('account_confirm_email', kwargs=dict(key='test')),
reverse('account_login'),
reverse('account_reset_password'),
reverse('forgot_username'),
reverse('layer_acls'),
reverse('layer_resolve_user'),
]
black_list = [
reverse('account_signup'),
reverse('document_browse'),
reverse('maps_browse'),
reverse('layer_browse'),
reverse('layer_detail', kwargs=dict(layername='geonode:Test')),
reverse('layer_remove', kwargs=dict(layername='geonode:Test')),
reverse('profile_browse'),
]
request = HttpRequest()
request.user = get_anonymous_user()
        # Requests should be redirected to the `redirect_to` path when an un-authenticated user attempts to visit
        # a black-listed url.
for path in black_list:
request.path = path
response = middleware.process_request(request)
self.assertEqual(response.status_code, 302)
self.assertTrue(
response.get('Location').startswith(
middleware.redirect_to))
# The middleware should return None when an un-authenticated user
# attempts to visit a white-listed url.
for path in white_list:
request.path = path
response = middleware.process_request(request)
self.assertIsNone(
response,
msg="Middleware activated for white listed path: {0}".format(path))
self.client.login(username='admin', password='admin')
admin = get_user_model().objects.get(username='admin')
self.assertTrue(admin.is_authenticated())
request.user = admin
# The middleware should return None when an authenticated user attempts
# to visit a black-listed url.
for path in black_list:
request.path = path
response = middleware.process_request(request)
self.assertIsNone(response)
| simod/geonode | geonode/geoserver/tests.py | Python | gpl-3.0 | 40,693 | [
"VisIt"
] | 0f21fec3d1686743f6b7a95b61228ab89c5616c3101248ce8edc8fe136d16adc |
import unittest
__author__ = 'Alexander Weigl <Alexander.Weigl@student.kit.edu>'
__date__ = "2015-02-19"
__version__ = "0.1"
import clictk, os.path
ROOT = os.path.dirname(os.path.abspath(__file__))
from xml.etree import ElementTree
from xml.dom import minidom
def prettify(elem):
"""Return a pretty-printed XML string for the Element.
"""
rough_string = ElementTree.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent="\t")
from clictk import *
class ReadTests(unittest.TestCase):
def test_compare_ok(self):
ref1 = Executable(executable=None, category='Registration.NiftyReg', title='RegAladin (NiftyReg)',
description='Module/executable for global registration (rigid and/or affine) based on a block-matching approach and a trimmed least squared optimisation.',
version='0.0.1', license='BSD', contributor='Marc Modat, Pankaj Daga, David Cash (UCL)',
acknowledgements=None, documentation_url=None, parameter_groups=[
ParameterGroup(label='Input images. Reference and floating images are mandatory',
description='Input images to perform the registration', advanced=False, parameters=[
Parameter(name='referenceImageName', type='image', default='required',
description='Reference image filename (also called Target or Fixed)', channel='input',
values=[], index=None, label='Reference image', longflag='ref',
file_ext='.nii,.nii.gz,.nrrd,.png'),
Parameter(name='floatingImageName', type='image', default='required',
description='Floating image filename (also called Source or moving)', channel='input',
values=[], index=None, label='Floating image', longflag='flo',
file_ext='.nii,.nii.gz,.nrrd,.png'),
Parameter(name='referenceMaskImageName', type='image', default='',
description='Filename of a mask image in the reference space', channel='input',
values=[], index=None, label='Ref. mask', longflag='rmask',
file_ext='.nii,.nii.gz,.nrrd,.png'),
Parameter(name='floatingMaskImageName', type='image', default='',
description='Filename of a mask image in the floating space. Only used when symmetric turned on',
channel='input', values=[], index=None, label='Flo. mask', longflag='fmask',
file_ext='.nii,.nii.gz,.nrrd,.png'),
Parameter(name='smoothReferenceWidth', type='float', default='0',
description='Standard deviation in mm (voxel if negative) of the Gaussian kernel used to smooth the reference image',
channel=None, values=[], index=None, label='Ref .Smooth', longflag='smooR',
file_ext=None), Parameter(name='smoothFloatingWidth', type='float', default='0',
description='Standard deviation in mm (voxel if negative) of the Gaussian kernel used to smooth the Floating image',
channel=None, values=[], index=None, label='Flo. smooth',
longflag='smooF', file_ext=None),
Parameter(name='referenceLowerThreshold', type='float', default='0',
description='Lower threshold value applied to the reference image', channel=None,
values=[], index=None, label='Ref. Low Thr.', longflag='refLowThr', file_ext=None),
Parameter(name='referenceUpperThreshold', type='float', default='0',
description='Upper threshold value applied to the reference image', channel=None,
values=[], index=None, label='Ref. Up Thr.', longflag='refUpThr', file_ext=None),
Parameter(name='floatingLowerThreshold', type='float', default='0',
description='Lower threshold value applied to the floating image', channel=None,
values=[], index=None, label='Flo. Low Thr.', longflag='floLowThr', file_ext=None),
Parameter(name='floatingUpperThreshold', type='float', default='0',
description='Upper threshold value applied to the floating image', channel=None,
values=[], index=None, label='Flo. Up Thr.', longflag='floUpThr', file_ext=None)])])
ref2 = Executable(executable=None, category='Registration.NiftyReg', title='RegAladin (NiftyReg)',
description='Module/executable for global registration (rigid and/or affine) based on a block-matching approach and a trimmed least squared optimisation.',
version='0.0.1', license='BSD', contributor='Marc Modat, Pankaj Daga, David Cash (UCL)',
acknowledgements=None, documentation_url=None, parameter_groups=[
ParameterGroup(label='Input images. Reference and floating images are mandatory',
description='Input images to perform the registration', advanced=False, parameters=[
Parameter(name='referenceImageName', type='image', default='required',
description='Reference image filename (also called Target or Fixed)', channel='input',
values=[], index=None, label='Reference image', longflag='ref',
file_ext='.nii,.nii.gz,.nrrd,.png'),
Parameter(name='floatingImageName', type='image', default='required',
description='Floating image filename (also called Source or moving)', channel='input',
values=[], index=None, label='Floating image', longflag='flo',
file_ext='.nii,.nii.gz,.nrrd,.png'),
Parameter(name='referenceMaskImageName', type='image', default='',
description='Filename of a mask image in the reference space', channel='input',
values=[], index=None, label='Ref. mask', longflag='rmask',
file_ext='.nii,.nii.gz,.nrrd,.png'),
Parameter(name='floatingMaskImageName', type='image', default='',
description='Filename of a mask image in the floating space. Only used when symmetric turned on',
channel='input', values=[], index=None, label='Flo. mask', longflag='fmask',
file_ext='.nii,.nii.gz,.nrrd,.png'),
Parameter(name='smoothReferenceWidth', type='float', default='0',
description='Standard deviation in mm (voxel if negative) of the Gaussian kernel used to smooth the reference image',
channel=None, values=[], index=None, label='Ref .Smooth', longflag='smooR',
file_ext=None), Parameter(name='smoothFloatingWidth', type='float', default='0',
description='Standard deviation in mm (voxel if negative) of the Gaussian kernel used to smooth the Floating image',
channel=None, values=[], index=None, label='Flo. smooth',
longflag='smooF', file_ext=None),
Parameter(name='referenceLowerThreshold', type='float', default='0',
description='Lower threshold value applied to the reference image', channel=None,
values=[], index=None, label='Ref. Low Thr.', longflag='refLowThr', file_ext=None),
Parameter(name='referenceUpperThreshold', type='float', default='0',
description='Upper threshold value applied to the reference image', channel=None,
values=[], index=None, label='Ref. Up Thr.', longflag='refUpThr', file_ext=None),
Parameter(name='floatingLowerThreshold', type='float', default='0',
description='Lower threshold value applied to the floating image', channel=None,
values=[], index=None, label='Flo. Low Thr.', longflag='floLowThr', file_ext=None),
Parameter(name='floatingUpperThreshold', type='float', default='0',
description='Upper threshold value applied to the floating image', channel=None,
values=[], index=None, label='Flo. Up Thr.', longflag='floUpThr', file_ext=None)])])
ref3 = Executable(executable=None, category='Registration.NiftyReg', title='RegF3D (NiftyReg)',
description='Module/executable for local registration (non-rigid) based on a cubic B-Spline deformation model',
version='0.0.1', license='BSD', contributor='Marc Modat, Pankaj Daga (UCL)',
acknowledgements=None, documentation_url=None, parameter_groups=[
ParameterGroup(label='Input images. Reference and floating images are mandatory',
description='Input images to perform the registration', advanced=False, parameters=[
Parameter(name='referenceImageName', type='image', default='required',
description='Reference image filename (also called Target or Fixed)', channel='input',
values=[], index=None, label='Reference image', longflag='ref',
file_ext='.nii,.nii.gz,.nrrd,.png'),
Parameter(name='floatingImageName', type='image', default='required',
description='Floating image filename (also called Source or moving)', channel='input',
values=[], index=None, label='Floating image', longflag='flo',
file_ext='.nii,.nii.gz,.nrrd,.png'),
Parameter(name='referenceMaskImageName', type='image', default='',
description='Reference mask image filename', channel='input', values=[], index=None,
label='Ref. mask', longflag='rmask', file_ext='.nii,.nii.gz,.nrrd,.png')])])
self.assertEquals(ref1, ref2)
self.assertNotEqual(ref1, ref3)
def test_reading_xml_reg_aladin(self):
executable = Executable.from_xml(os.path.join(ROOT, "reg_aladin.xml"))
reference = Executable(executable=None, category='Registration.NiftyReg', title='RegAladin (NiftyReg)',
description='Module/executable for global registration (rigid and/or affine) based on a block-matching approach and a trimmed least squared optimisation.',
version='0.0.1', license='BSD', contributor='Marc Modat, Pankaj Daga, David Cash (UCL)',
acknowledgements=None, documentation_url=None, parameter_groups=[
ParameterGroup(label='Input images. Reference and floating images are mandatory',
description='Input images to perform the registration', advanced=False, parameters=[
Parameter(name='referenceImageName', type='image', default='required',
description='Reference image filename (also called Target or Fixed)', channel='input',
values=[], index=None, label='Reference image', longflag='ref',
file_ext='.nii,.nii.gz,.nrrd,.png'),
Parameter(name='floatingImageName', type='image', default='required',
description='Floating image filename (also called Source or moving)', channel='input',
values=[], index=None, label='Floating image', longflag='flo',
file_ext='.nii,.nii.gz,.nrrd,.png'),
Parameter(name='referenceMaskImageName', type='image', default='',
description='Filename of a mask image in the reference space', channel='input',
values=[], index=None, label='Ref. mask', longflag='rmask',
file_ext='.nii,.nii.gz,.nrrd,.png'),
Parameter(name='floatingMaskImageName', type='image', default='',
description='Filename of a mask image in the floating space. Only used when symmetric turned on',
channel='input', values=[], index=None, label='Flo. mask', longflag='fmask',
file_ext='.nii,.nii.gz,.nrrd,.png'),
Parameter(name='smoothReferenceWidth', type='float', default='0',
description='Standard deviation in mm (voxel if negative) of the Gaussian kernel used to smooth the reference image',
channel=None, values=[], index=None, label='Ref .Smooth', longflag='smooR',
file_ext=None), Parameter(name='smoothFloatingWidth', type='float', default='0',
description='Standard deviation in mm (voxel if negative) of the Gaussian kernel used to smooth the Floating image',
channel=None, values=[], index=None, label='Flo. smooth',
longflag='smooF', file_ext=None),
Parameter(name='referenceLowerThreshold', type='float', default='0',
description='Lower threshold value applied to the reference image', channel=None,
values=[], index=None, label='Ref. Low Thr.', longflag='refLowThr', file_ext=None),
Parameter(name='referenceUpperThreshold', type='float', default='0',
description='Upper threshold value applied to the reference image', channel=None,
values=[], index=None, label='Ref. Up Thr.', longflag='refUpThr', file_ext=None),
Parameter(name='floatingLowerThreshold', type='float', default='0',
description='Lower threshold value applied to the floating image', channel=None,
values=[], index=None, label='Flo. Low Thr.', longflag='floLowThr', file_ext=None),
Parameter(name='floatingUpperThreshold', type='float', default='0',
description='Upper threshold value applied to the floating image', channel=None,
values=[], index=None, label='Flo. Up Thr.', longflag='floUpThr', file_ext=None)])])
self.assertEquals(reference, executable)
@unittest.expectedFailure
def test_call_reg_aladin_error(self):
executable = clictk.Executable.from_xml(os.path.join(ROOT, "reg_aladin.xml"))
executable.cmdline(abc="test") # should throw KeyError
def test_call_reg_aladin_ok(self):
executable = clictk.Executable.from_xml(os.path.join(ROOT, "reg_aladin.xml"))
args = executable.cmdline(referenceImageName="image.png", floatingImageName="floating.png")
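        # cmdline() is expected to return an argv-style list: the first entry stands in
        # for the executable path (None here, since the test XML does not set one),
        # followed by each long flag ('--ref', '--flo') and its supplied value.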
ref = [None, '--ref', 'image.png', '--flo', 'floating.png']
self.assertItemsEqual(ref, args)
def test_reading_xml_reg_f3d(self):
executable = clictk.Executable.from_xml(os.path.join(ROOT, "reg_f3d.xml"))
reference = Executable(executable=None, category='Registration.NiftyReg', title='RegF3D (NiftyReg)',
description='Module/executable for local registration (non-rigid) based on a cubic B-Spline deformation model',
version='0.0.1', license='BSD', contributor='Marc Modat, Pankaj Daga (UCL)',
acknowledgements=None, documentation_url=None, parameter_groups=[
ParameterGroup(label='Input images. Reference and floating images are mandatory',
description='Input images to perform the registration', advanced=False, parameters=[
Parameter(name='referenceImageName', type='image', default='required',
description='Reference image filename (also called Target or Fixed)', channel='input',
values=[], index=None, label='Reference image', longflag='ref',
file_ext='.nii,.nii.gz,.nrrd,.png'),
Parameter(name='floatingImageName', type='image', default='required',
description='Floating image filename (also called Source or moving)', channel='input',
values=[], index=None, label='Floating image', longflag='flo',
file_ext='.nii,.nii.gz,.nrrd,.png'),
Parameter(name='referenceMaskImageName', type='image', default='',
description='Reference mask image filename', channel='input', values=[], index=None,
label='Ref. mask', longflag='rmask', file_ext='.nii,.nii.gz,.nrrd,.png')])])
self.assertEquals(executable, reference)
def test_reading_xml_reg_jacobian(self):
executable = clictk.Executable.from_xml(os.path.join(ROOT, "reg_jacobian.xml"))
reference = Executable(executable=None, category='Registration.NiftyReg', title='RegJacobian (NiftyReg)',
description='NiftyReg module to create Jacobian-based images', version='0.0.1',
license='BSD', contributor='Marc Modat (UCL)', acknowledgements=None,
documentation_url=None, parameter_groups=[
ParameterGroup(label='Input reference image', description='Input images', advanced=False, parameters=[
Parameter(name='InTrans', type='file', default='required', description='Input transformation',
channel='input', values=[], index=None, label='Input Trans.', longflag='trans',
file_ext='.nii,.nii.gz,.nrrd,.txt,.mat'),
Parameter(name='referenceImageName', type='image', default='required',
description='Reference image filename, required if the transformation is a spline parametrisation',
channel='input', values=[], index=None, label='Reference image', longflag='ref',
file_ext='.nii,.nii..gz,.nrrd,.png')])])
self.assertEquals(reference, executable)
def test_reading_xml_reg_tools(self):
executable = clictk.Executable.from_xml(os.path.join(ROOT, "reg_tools.xml"))
reference = Executable(executable=None, category='Registration.NiftyReg', title='RegTools (NiftyReg)',
description='NiftyReg module under construction', version='0.0.1', license='BSD',
contributor='Marc Modat (UCL)', acknowledgements=None, documentation_url=None,
parameter_groups=[
ParameterGroup(label='Input and Output', description='Input image (mandatory)',
advanced=False, parameters=[
Parameter(name='inputImageName', type='image', default='required',
description='Input image filename', channel='input', values=[],
index=None, label='Input image', longflag='in',
file_ext='.nii,.nii.gz,.nrrd,.png')])])
self.assertEquals(executable, reference)
def test_exec_xml_stub(self):
return True
class ArgParseTest(unittest.TestCase):
def test_argparse_1(self):
executable = clictk.Executable.from_xml(os.path.join(ROOT, "reg_aladin.xml"))
argparse = build_argument_parser(executable)
argparse.print_help()
def test_docopt(self):
executable = clictk.Executable.from_xml(os.path.join(ROOT, "reg_aladin.xml"))
print build_docopt(executable) | CognitionGuidedSurgery/pyclictk | test/test_nifty.py | Python | lgpl-3.0 | 21,359 | [
"Gaussian"
] | f269d26bad0cf963a38ee3343ecbaea6c3a25d54d9522948fdebf13f87294d6f |
# Copyright (c) 2015,Vienna University of Technology,
# Department of Geodesy and Geoinformation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Vienna University of Technology,
# Department of Geodesy and Geoinformation nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL VIENNA UNIVERSITY OF TECHNOLOGY,
# DEPARTMENT OF GEODESY AND GEOINFORMATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Dataset Reader for EQUI-7 data
Created on Mon Jun 8 17:30:19 2015
'''
import netCDF4 as nc
import numpy as np
from smdc_perftests.datasets.esa_cci import ESACCI_netcdf
import pygeogrids.grids as grids
class EQUI_7(ESACCI_netcdf):
def __init__(self, fname, variables=None, avg_var=None, time_var='time', lat_var='x', lon_var='y'):
"""
Parameters
----------
self: type
description
fname: string
filename
variables: list, optional
if given only these variables will be read
avg_var: list, optional
            list of variables for which to calculate the average; if not given,
            it is calculated for all variables
time_var: string, optional
name of the time variable in the netCDF file
lat_var: string, optional
name of the latitude variable in the netCDF file
lon_var: string, optional
name of the longitude variable in the netCDF file
"""
self.fname = fname
self.ds = nc.Dataset(fname)
self.lat_var = lat_var
self.lon_var = lon_var
self.time_var = time_var
self.avg_var = avg_var
if variables is None:
self.variables = self.ds.variables.keys()
# exclude time, lat and lon from variable list
self.variables.remove(self.time_var)
self.variables.remove(self.lat_var)
self.variables.remove(self.lon_var)
else:
self.variables = variables
self._init_grid()
def _init_grid(self):
"""
initialize the grid of the dataset
"""
x = self.ds.variables[self.lat_var][:]
y = self.ds.variables[self.lon_var][:]
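        # Expand the two 1D coordinate axes into a full 2D grid and flatten it so that
        # BasicGrid receives one coordinate pair per cell; shape=(1200, 2400) lets
        # pygeogrids map the flat index back onto the original 2D layout.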
xs, ys = np.meshgrid(x, y)
self.grid = grids.BasicGrid(
xs.flatten(), ys.flatten(), shape=(1200, 2400))
| TUW-GEO/SMDC-performance | smdc_perftests/datasets/EQUI_7.py | Python | bsd-3-clause | 3,619 | [
"NetCDF"
] | f09eeb1b60d91589c46e8ef6eac33005156713144890b0453628872e2a301870 |
"""M-probability estimate"""
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from category_encoders.ordinal import OrdinalEncoder
import category_encoders.utils as util
from sklearn.utils.random import check_random_state
__author__ = 'Jan Motl'
class MEstimateEncoder(BaseEstimator, util.TransformerWithTargetMixin):
"""M-probability estimate of likelihood.
Supported targets: binomial and continuous. For polynomial target support, see PolynomialWrapper.
This is a simplified version of target encoder, which goes under names like m-probability estimate or
additive smoothing with known incidence rates. In comparison to target encoder, m-probability estimate
has only one tunable parameter (`m`), while target encoder has two tunable parameters (`min_samples_leaf`
and `smoothing`).
Parameters
----------
verbose: int
integer indicating verbosity of the output. 0 for none.
cols: list
a list of columns to encode, if None, all string columns will be encoded.
drop_invariant: bool
boolean for whether or not to drop encoded columns with 0 variance.
return_df: bool
boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array).
handle_missing: str
options are 'return_nan', 'error' and 'value', defaults to 'value', which returns the prior probability.
handle_unknown: str
options are 'return_nan', 'error' and 'value', defaults to 'value', which returns the prior probability.
randomized: bool,
adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing data are untouched).
sigma: float
standard deviation (spread or "width") of the normal distribution.
m: float
this is the "m" in the m-probability estimate. Higher value of m results into stronger shrinking.
M is non-negative.
Example
-------
>>> from category_encoders import *
>>> import pandas as pd
>>> from sklearn.datasets import load_boston
>>> bunch = load_boston()
>>> y = bunch.target > 22.5
>>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names)
>>> enc = MEstimateEncoder(cols=['CHAS', 'RAD']).fit(X, y)
>>> numeric_dataset = enc.transform(X)
>>> print(numeric_dataset.info())
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 506 entries, 0 to 505
Data columns (total 13 columns):
CRIM 506 non-null float64
ZN 506 non-null float64
INDUS 506 non-null float64
CHAS 506 non-null float64
NOX 506 non-null float64
RM 506 non-null float64
AGE 506 non-null float64
DIS 506 non-null float64
RAD 506 non-null float64
TAX 506 non-null float64
PTRATIO 506 non-null float64
B 506 non-null float64
LSTAT 506 non-null float64
dtypes: float64(13)
memory usage: 51.5 KB
None
References
----------
.. [1] A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems, equation 7, from
https://dl.acm.org/citation.cfm?id=507538
.. [2] On estimating probabilities in tree pruning, equation 1, from
https://link.springer.com/chapter/10.1007/BFb0017010
.. [3] Additive smoothing, from
https://en.wikipedia.org/wiki/Additive_smoothing#Generalized_to_the_case_of_known_incidence_rates
"""
def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True,
handle_unknown='value', handle_missing='value', random_state=None, randomized=False, sigma=0.05, m=1.0):
self.verbose = verbose
self.return_df = return_df
self.drop_invariant = drop_invariant
self.drop_cols = []
self.cols = cols
self.ordinal_encoder = None
self._dim = None
self.mapping = None
self.handle_unknown = handle_unknown
self.handle_missing = handle_missing
self._sum = None
self._count = None
self.random_state = random_state
self.randomized = randomized
self.sigma = sigma
self.m = m
self.feature_names = None
# noinspection PyUnusedLocal
def fit(self, X, y, **kwargs):
"""Fit encoder according to X and binary y.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Binary target values.
Returns
-------
self : encoder
Returns self.
"""
# Unite parameters into pandas types
X = util.convert_input(X)
y = util.convert_input_vector(y, X.index).astype(float)
# The lengths must be equal
if X.shape[0] != y.shape[0]:
raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")
self._dim = X.shape[1]
# If columns aren't passed, just use every string column
if self.cols is None:
self.cols = util.get_obj_cols(X)
else:
self.cols = util.convert_cols_to_list(self.cols)
if self.handle_missing == 'error':
if X[self.cols].isnull().any().any():
raise ValueError('Columns to be encoded can not contain null')
self.ordinal_encoder = OrdinalEncoder(
verbose=self.verbose,
cols=self.cols,
handle_unknown='value',
handle_missing='value'
)
self.ordinal_encoder = self.ordinal_encoder.fit(X)
X_ordinal = self.ordinal_encoder.transform(X)
# Training
self.mapping = self._train(X_ordinal, y)
X_temp = self.transform(X, override_return_df=True)
self.feature_names = X_temp.columns.tolist()
# Store column names with approximately constant variance on the training data
if self.drop_invariant:
self.drop_cols = []
generated_cols = util.get_generated_cols(X, X_temp, self.cols)
self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5]
try:
[self.feature_names.remove(x) for x in self.drop_cols]
except KeyError as e:
if self.verbose > 0:
print("Could not remove column from feature names."
"Not found in generated cols.\n{}".format(e))
return self
def transform(self, X, y=None, override_return_df=False):
"""Perform the transformation to new categorical data.
        When the data are used for model training, it is important to also pass the target so that the training-time randomization (noise) can be applied.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
        y : array-like, shape = [n_samples] when transforming training data
            None, when transforming without target information (such as the test set)
Returns
-------
p : array, shape = [n_samples, n_numeric + N]
Transformed values with encoding applied.
"""
if self.handle_missing == 'error':
if X[self.cols].isnull().any().any():
raise ValueError('Columns to be encoded can not contain null')
if self._dim is None:
raise ValueError('Must train encoder before it can be used to transform data.')
# Unite the input into pandas types
X = util.convert_input(X)
# Then make sure that it is the right size
if X.shape[1] != self._dim:
raise ValueError('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim,))
# If we are encoding the training data, we have to check the target
if y is not None:
y = util.convert_input_vector(y, X.index).astype(float)
if X.shape[0] != y.shape[0]:
raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")
if not list(self.cols):
return X
# Do not modify the input argument
X = X.copy(deep=True)
X = self.ordinal_encoder.transform(X)
if self.handle_unknown == 'error':
if X[self.cols].isin([-1]).any().any():
raise ValueError('Unexpected categories found in dataframe')
# Loop over the columns and replace the nominal values with the numbers
X = self._score(X, y)
# Postprocessing
# Note: We should not even convert these columns.
if self.drop_invariant:
for col in self.drop_cols:
X.drop(col, 1, inplace=True)
if self.return_df or override_return_df:
return X
else:
return X.values
def _train(self, X, y):
# Initialize the output
mapping = {}
# Calculate global statistics
self._sum = y.sum()
self._count = y.count()
prior = self._sum/self._count
for switch in self.ordinal_encoder.category_mapping:
col = switch.get('col')
values = switch.get('mapping')
# Calculate sum and count of the target for each unique value in the feature col
stats = y.groupby(X[col]).agg(['sum', 'count']) # Count of x_{i,+} and x_i
# Calculate the m-probability estimate
estimate = (stats['sum'] + prior * self.m) / (stats['count'] + self.m)
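            # This shrinks the per-category mean towards the global prior: e.g. with
            # m=1 and prior=0.5, a category with sum=3 over count=4 maps to
            # (3 + 0.5*1) / (4 + 1) = 0.7 rather than the raw mean 0.75
            # (illustrative numbers, not taken from any dataset).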
# Ignore unique columns. This helps to prevent overfitting on id-like columns
if len(stats['count']) == self._count:
estimate[:] = prior
if self.handle_unknown == 'return_nan':
estimate.loc[-1] = np.nan
elif self.handle_unknown == 'value':
estimate.loc[-1] = prior
if self.handle_missing == 'return_nan':
estimate.loc[values.loc[np.nan]] = np.nan
elif self.handle_missing == 'value':
estimate.loc[-2] = prior
# Store the m-probability estimate for transform() function
mapping[col] = estimate
return mapping
def _score(self, X, y):
for col in self.cols:
# Score the column
X[col] = X[col].map(self.mapping[col])
# Randomization is meaningful only for training data -> we do it only if y is present
if self.randomized and y is not None:
random_state_generator = check_random_state(self.random_state)
X[col] = (X[col] * random_state_generator.normal(1., self.sigma, X[col].shape[0]))
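                # Multiplicative Gaussian noise centred on 1.0 with spread `sigma`;
                # it is applied only when y is passed, i.e. only to training data.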
return X
def get_feature_names(self):
"""
Returns the names of all transformed / added columns.
Returns
-------
feature_names: list
A list with all feature names transformed or added.
Note: potentially dropped features are not included!
"""
if not isinstance(self.feature_names, list):
raise ValueError("Estimator has to be fitted to return feature names.")
else:
return self.feature_names
| scikit-learn-contrib/categorical-encoding | category_encoders/m_estimate.py | Python | bsd-3-clause | 11,352 | [
"Gaussian"
] | d442dcc7db29e39bc1386b62b70a8b3360a5a1ce9568003cb5e3f8e4c1b03705 |
#!/usr/bin/env python
## \file plot.py
# \brief python package for plotting
# \author T. Lukaczyk, F. Palacios
# \version 5.0.0 "Raven"
#
# SU2 Lead Developers: Dr. Francisco Palacios (Francisco.D.Palacios@boeing.com).
# Dr. Thomas D. Economon (economon@stanford.edu).
#
# SU2 Developers: Prof. Juan J. Alonso's group at Stanford University.
# Prof. Piero Colonna's group at Delft University of Technology.
# Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology.
# Prof. Alberto Guardone's group at Polytechnic University of Milan.
# Prof. Rafael Palacios' group at Imperial College London.
# Prof. Edwin van der Weide's group at the University of Twente.
# Prof. Vincent Terrapon's group at the University of Liege.
#
# Copyright (C) 2012-2017 SU2, the open-source CFD code.
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
def write_plot(filename,plot_format,data_plot,keys_plot=None):
    """ write_plot(filename,plot_format,data_plot,keys_plot=[])
        writes a tecplot or paraview plot of dictionary data
        data_plot is a dictionary of lists with equal length
        if data_plot is an ordered dictionary, will output in order
        otherwise use keys_plot to specify the order of output
    """

    default_spacing = 16
    indent_spacing = 0

    if keys_plot is None: keys_plot = []

    if not keys_plot:
        keys_plot = data_plot.keys()

    keys_print = [ '"'+key+'"' for key in keys_plot ]
    keys_space = [default_spacing] * len(keys_plot)

    header = ''
    if plot_format == 'TECPLOT':
        header = 'VARIABLES='
        indent_spacing += 10
    indent_spacing = ' '*indent_spacing

    n_lines = 0
    for i,key in enumerate(keys_plot):
        # check vector lengths
        value = data_plot[key]
        if i == 0:
            n_lines = len(value)
        else:
            assert n_lines == len(value) , 'unequal plot vector lengths'
        # check spacing
        if len(key) > keys_space[i]:
            keys_space[i] = len(key)
        keys_space[i] = "%-" + str(keys_space[i]) + "s"

    plotfile = open(filename,'w')

    plotfile.write(header)
    for i,key in enumerate(keys_print):
        if i > 0: plotfile.write(", ")
        plotfile.write(keys_space[i] % key)
    plotfile.write('\n')

    for i_line in range(n_lines):
        plotfile.write(indent_spacing)
        for j,key in enumerate(keys_plot):
            value = data_plot[key]
            if j > 0: plotfile.write(", ")
            plotfile.write(keys_space[j] % value[i_line])
        plotfile.write('\n')

    plotfile.close()

    return

def tecplot(filename,data_plot,keys_plot=[]):
    write_plot(filename,'TECPLOT',data_plot,keys_plot)

def paraview(filename,data_plot,keys_plot=[]):
    write_plot(filename,'PARAVIEW',data_plot,keys_plot)
| opfeifle/SU2 | SU2_PY/SU2/util/plot.py | Python | lgpl-2.1 | 3,559 | [
"ParaView"
] | da7b81b0e8c16a6120cda24e7f5faf27d85ccf3d90d66155afca37457e6b7348 |
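A minimal usage sketch for the write_plot() helper in the row above. The output file name and the history values are hypothetical, and the import path is assumed from the repository layout shown in the path column (SU2_PY/SU2/util/plot.py); an OrderedDict keeps the column order stable.

from collections import OrderedDict
from SU2.util.plot import write_plot   # assumed import path, per the repo layout above

data = OrderedDict()
data['Iteration'] = [1, 2, 3]
data['Residual'] = [1.0e-1, 1.0e-2, 1.0e-3]

# Writes a 'VARIABLES=' header line followed by one comma-separated row per sample.
write_plot('history.dat', 'TECPLOT', data)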
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""
Spack allows very fine-grained control over how packages are installed and
over how they are built and configured. To make this easy, it has its own
syntax for declaring a dependence. We call a descriptor of a particular
package configuration a "spec".
The syntax looks like this:
.. code-block:: sh
$ spack install mpileaks ^openmpi @1.2:1.4 +debug %intel @12.1 =bgqos_0
0 1 2 3 4 5 6
The first part of this is the command, 'spack install'. The rest of the
line is a spec for a particular installation of the mpileaks package.
0. The package to install
1. A dependency of the package, prefixed by ^
2. A version descriptor for the package. This can either be a specific
version, like "1.2", or it can be a range of versions, e.g. "1.2:1.4".
If multiple specific versions or multiple ranges are acceptable, they
can be separated by commas, e.g. if a package will only build with
versions 1.0, 1.2-1.4, and 1.6-1.8 of mvapich, you could say:
depends_on("mvapich@1.0,1.2:1.4,1.6:1.8")
3. A compile-time variant of the package. If you need openmpi to be
built in debug mode for your package to work, you can require it by
adding +debug to the openmpi spec when you depend on it. If you do
NOT want the debug option to be enabled, then replace this with -debug.
4. The name of the compiler to build with.
5. The versions of the compiler to build with. Note that the identifier
for a compiler version is the same '@' that is used for a package version.
A version list denoted by '@' is associated with the compiler only if
it comes immediately after the compiler name. Otherwise it will be
associated with the current package spec.
6. The architecture to build with. This is needed on machines where
cross-compilation is required
Here is the EBNF grammar for a spec::
spec-list = { spec [ dep-list ] }
dep_list = { ^ spec }
spec = id [ options ]
options = { @version-list | +variant | -variant | ~variant |
%compiler | arch=architecture | [ flag ]=value}
flag = { cflags | cxxflags | fcflags | fflags | cppflags |
ldflags | ldlibs }
variant = id
architecture = id
compiler = id [ version-list ]
version-list = version [ { , version } ]
version = id | id: | :id | id:id
id = [A-Za-z0-9_][A-Za-z0-9_.-]*
Identifiers using the <name>=<value> command, such as architectures and
compiler flags, require a space before the name.
There is one context-sensitive part: ids in versions may contain '.', while
other ids may not.
There is one ambiguity: since '-' is allowed in an id, you need to put
whitespace before -variant for it to be tokenized properly. You can
either use whitespace, or you can just use ~variant since it means the same
thing. Spack uses ~variant in directory names and in the canonical form of
specs to avoid ambiguity. Both are provided because ~ can cause shell
expansion when it is the first character in an id typed on the command line.
"""
import base64
import sys
import collections
import ctypes
import hashlib
import itertools
import os
import re
from operator import attrgetter
from six import StringIO
from six import string_types
from six import iteritems
from llnl.util.filesystem import find_headers, find_libraries, is_exe
from llnl.util.lang import key_ordering, HashableMap, ObjectWrapper, dedupe
from llnl.util.lang import check_kwargs
from llnl.util.tty.color import cwrite, colorize, cescape, get_color_when
import spack
import spack.architecture
import spack.compilers as compilers
import spack.error
import spack.parse
import spack.store
import spack.util.spack_json as sjson
import spack.util.spack_yaml as syaml
from spack.dependency import Dependency, all_deptypes, canonical_deptype
from spack.util.module_cmd import get_path_from_module, load_module
from spack.error import SpecError, UnsatisfiableSpecError
from spack.provider_index import ProviderIndex
from spack.util.crypto import prefix_bits
from spack.util.executable import Executable
from spack.util.prefix import Prefix
from spack.util.spack_yaml import syaml_dict
from spack.util.string import comma_or
from spack.variant import MultiValuedVariant, AbstractVariant
from spack.variant import BoolValuedVariant, substitute_abstract_variants
from spack.variant import VariantMap, UnknownVariantError
from spack.variant import DuplicateVariantError
from spack.variant import UnsatisfiableVariantSpecError
from spack.version import VersionList, VersionRange, Version, ver
from yaml.error import MarkedYAMLError
__all__ = [
'Spec',
'parse',
'parse_anonymous_spec',
'SpecError',
'SpecParseError',
'DuplicateDependencyError',
'DuplicateVariantError',
'DuplicateCompilerSpecError',
'UnsupportedCompilerError',
'UnknownVariantError',
'DuplicateArchitectureError',
'InconsistentSpecError',
'InvalidDependencyError',
'NoProviderError',
'MultipleProviderError',
'UnsatisfiableSpecError',
'UnsatisfiableSpecNameError',
'UnsatisfiableVersionSpecError',
'UnsatisfiableCompilerSpecError',
'UnsatisfiableVariantSpecError',
'UnsatisfiableCompilerFlagSpecError',
'UnsatisfiableArchitectureSpecError',
'UnsatisfiableProviderSpecError',
'UnsatisfiableDependencySpecError',
'AmbiguousHashError',
'InvalidHashError',
'NoSuchHashError',
'RedundantSpecError']
#: Valid pattern for an identifier in Spack
identifier_re = r'\w[\w-]*'
compiler_color = '@g' #: color for highlighting compilers
version_color = '@c' #: color for highlighting versions
architecture_color = '@m' #: color for highlighting architectures
enabled_variant_color = '@B' #: color for highlighting enabled variants
disabled_variant_color = '@r' #: color for highlighting disabled variants
dependency_color = '@.' #: color for highlighting dependencies
hash_color = '@K' #: color for highlighting package hashes
#: This map determines the coloring of specs when using color output.
#: We make the fields different colors to enhance readability.
#: See spack.color for descriptions of the color codes.
color_formats = {'%': compiler_color,
'@': version_color,
'=': architecture_color,
'+': enabled_variant_color,
'~': disabled_variant_color,
'^': dependency_color,
'#': hash_color}
#: Regex used for splitting by spec field separators.
#: These need to be escaped to avoid metacharacters in
#: ``color_formats.keys()``.
_separators = '[\\%s]' % '\\'.join(color_formats.keys())
#: Versionlist constant so we don't have to build a list
#: every time we call str()
_any_version = VersionList([':'])
#: Max integer helps avoid passing too large a value to cyaml.
maxint = 2 ** (ctypes.sizeof(ctypes.c_int) * 8 - 1) - 1
def colorize_spec(spec):
"""Returns a spec colorized according to the colors specified in
color_formats."""
class insert_color:
def __init__(self):
self.last = None
def __call__(self, match):
# ignore compiler versions (color same as compiler)
sep = match.group(0)
if self.last == '%' and sep == '@':
return cescape(sep)
self.last = sep
return '%s%s' % (color_formats[sep], cescape(sep))
return colorize(re.sub(_separators, insert_color(), str(spec)) + '@.')
@key_ordering
class ArchSpec(object):
""" The ArchSpec class represents an abstract architecture specification
that a package should be built with. At its core, each ArchSpec is
comprised of three elements: a platform (e.g. Linux), an OS (e.g.
RHEL6), and a target (e.g. x86_64).
"""
# TODO: Formalize the specifications for architectures and then use
# the appropriate parser here to read these specifications.
def __init__(self, *args):
to_attr_string = lambda s: str(s) if s and s != "None" else None
self.platform, self.platform_os, self.target = (None, None, None)
if len(args) == 1:
spec_like = args[0]
if isinstance(spec_like, ArchSpec):
self._dup(spec_like)
elif isinstance(spec_like, string_types):
spec_fields = spec_like.split("-")
if len(spec_fields) == 3:
self.platform, self.platform_os, self.target = tuple(
to_attr_string(f) for f in spec_fields)
else:
raise ValueError("%s is an invalid arch spec" % spec_like)
elif len(args) == 3:
self.platform = to_attr_string(args[0])
self.platform_os = to_attr_string(args[1])
self.target = to_attr_string(args[2])
elif len(args) != 0:
raise TypeError("Can't make arch spec from %s" % args)
def _autospec(self, spec_like):
if isinstance(spec_like, ArchSpec):
return spec_like
return ArchSpec(spec_like)
def _cmp_key(self):
return (self.platform, self.platform_os, self.target)
def _dup(self, other):
self.platform = other.platform
self.platform_os = other.platform_os
self.target = other.target
@property
def platform(self):
return self._platform
@platform.setter
def platform(self, value):
""" The platform of the architecture spec will be verified as a
supported Spack platform before it's set to ensure all specs
refer to valid platforms.
"""
value = str(value) if value is not None else None
self._platform = value
@property
def platform_os(self):
return self._platform_os
@platform_os.setter
def platform_os(self, value):
""" The OS of the architecture spec will update the platform field
if the OS is set to one of the reserved OS types so that the
default OS type can be resolved. Since the reserved OS
information is only available for the host machine, the platform
will be assumed to be the host machine's platform.
"""
value = str(value) if value is not None else None
if value in spack.architecture.Platform.reserved_oss:
curr_platform = str(spack.architecture.platform())
self.platform = self.platform or curr_platform
if self.platform != curr_platform:
raise ValueError(
"Can't set arch spec OS to reserved value '%s' when the "
"arch platform (%s) isn't the current platform (%s)" %
(value, self.platform, curr_platform))
spec_platform = spack.architecture.get_platform(self.platform)
value = str(spec_platform.operating_system(value))
self._platform_os = value
@property
def target(self):
return self._target
@target.setter
def target(self, value):
""" The target of the architecture spec will update the platform field
if the target is set to one of the reserved target types so that
the default target type can be resolved. Since the reserved target
information is only available for the host machine, the platform
will be assumed to be the host machine's platform.
"""
value = str(value) if value is not None else None
if value in spack.architecture.Platform.reserved_targets:
curr_platform = str(spack.architecture.platform())
self.platform = self.platform or curr_platform
if self.platform != curr_platform:
raise ValueError(
"Can't set arch spec target to reserved value '%s' when "
"the arch platform (%s) isn't the current platform (%s)" %
(value, self.platform, curr_platform))
spec_platform = spack.architecture.get_platform(self.platform)
value = str(spec_platform.target(value))
self._target = value
def satisfies(self, other, strict=False):
other = self._autospec(other)
sdict, odict = self.to_cmp_dict(), other.to_cmp_dict()
if strict or self.concrete:
return all(getattr(self, attr) == getattr(other, attr)
for attr in odict if odict[attr])
else:
return all(getattr(self, attr) == getattr(other, attr)
for attr in odict if sdict[attr] and odict[attr])
def constrain(self, other):
""" Projects all architecture fields that are specified in the given
spec onto the instance spec if they're missing from the instance
spec. This will only work if the two specs are compatible.
"""
other = self._autospec(other)
if not self.satisfies(other):
raise UnsatisfiableArchitectureSpecError(self, other)
constrained = False
for attr, svalue in iteritems(self.to_cmp_dict()):
ovalue = getattr(other, attr)
if svalue is None and ovalue is not None:
setattr(self, attr, ovalue)
constrained = True
return constrained
def copy(self):
clone = ArchSpec.__new__(ArchSpec)
clone._dup(self)
return clone
@property
def concrete(self):
return all(v for k, v in iteritems(self.to_cmp_dict()))
def to_cmp_dict(self):
"""Returns a dictionary that can be used for field comparison."""
return dict([
('platform', self.platform),
('platform_os', self.platform_os),
('target', self.target)])
def to_dict(self):
d = syaml_dict([
('platform', self.platform),
('platform_os', self.platform_os),
('target', self.target)])
return syaml_dict([('arch', d)])
@staticmethod
def from_dict(d):
"""Import an ArchSpec from raw YAML/JSON data.
This routine implements a measure of compatibility with older
versions of Spack. Spack releases before 0.10 used a single
string with no OS or platform identifiers. We import old Spack
architectures with platform ``spack09``, OS ``unknown``, and the
old arch string as the target.
Specs from `0.10` or later have a more fleshed out architecture
descriptor with a platform, an OS, and a target.
"""
if not isinstance(d['arch'], dict):
return ArchSpec('spack09', 'unknown', d['arch'])
d = d['arch']
return ArchSpec(d['platform'], d['platform_os'], d['target'])
def __str__(self):
return "%s-%s-%s" % (self.platform, self.platform_os, self.target)
def __repr__(self):
return str(self)
def __contains__(self, string):
return string in str(self)
@key_ordering
class CompilerSpec(object):
"""The CompilerSpec field represents the compiler or range of compiler
versions that a package should be built with. CompilerSpecs have a
name and a version list. """
def __init__(self, *args):
nargs = len(args)
if nargs == 1:
arg = args[0]
# If there is one argument, it's either another CompilerSpec
# to copy or a string to parse
if isinstance(arg, string_types):
c = SpecParser().parse_compiler(arg)
self.name = c.name
self.versions = c.versions
elif isinstance(arg, CompilerSpec):
self.name = arg.name
self.versions = arg.versions.copy()
else:
raise TypeError(
"Can only build CompilerSpec from string or " +
"CompilerSpec. Found %s" % type(arg))
elif nargs == 2:
name, version = args
self.name = name
self.versions = VersionList()
self.versions.add(ver(version))
else:
raise TypeError(
"__init__ takes 1 or 2 arguments. (%d given)" % nargs)
def _add_version(self, version):
self.versions.add(version)
def _autospec(self, compiler_spec_like):
if isinstance(compiler_spec_like, CompilerSpec):
return compiler_spec_like
return CompilerSpec(compiler_spec_like)
def satisfies(self, other, strict=False):
other = self._autospec(other)
return (self.name == other.name and
self.versions.satisfies(other.versions, strict=strict))
def constrain(self, other):
"""Intersect self's versions with other.
Return whether the CompilerSpec changed.
"""
other = self._autospec(other)
# ensure that other will actually constrain this spec.
if not other.satisfies(self):
raise UnsatisfiableCompilerSpecError(other, self)
return self.versions.intersect(other.versions)
@property
def concrete(self):
"""A CompilerSpec is concrete if its versions are concrete and there
is an available compiler with the right version."""
return self.versions.concrete
@property
def version(self):
if not self.concrete:
raise SpecError("Spec is not concrete: " + str(self))
return self.versions[0]
def copy(self):
clone = CompilerSpec.__new__(CompilerSpec)
clone.name = self.name
clone.versions = self.versions.copy()
return clone
def _cmp_key(self):
return (self.name, self.versions)
def to_dict(self):
d = syaml_dict([('name', self.name)])
d.update(self.versions.to_dict())
return syaml_dict([('compiler', d)])
@staticmethod
def from_dict(d):
d = d['compiler']
return CompilerSpec(d['name'], VersionList.from_dict(d))
def __str__(self):
out = self.name
if self.versions and self.versions != _any_version:
vlist = ",".join(str(v) for v in self.versions)
out += "@%s" % vlist
return out
def __repr__(self):
return str(self)
@key_ordering
class DependencySpec(object):
"""DependencySpecs connect two nodes in the DAG, and contain deptypes.
Dependencies can be one (or more) of several types:
- build: needs to be in the PATH at build time.
- link: is linked to and added to compiler flags.
- run: needs to be in the PATH for the package to run.
Fields:
- spec: Spec depended on by parent.
- parent: Spec that depends on `spec`.
- deptypes: list of strings, representing dependency relationships.
"""
def __init__(self, parent, spec, deptypes):
self.parent = parent
self.spec = spec
self.deptypes = tuple(sorted(set(deptypes)))
def update_deptypes(self, deptypes):
deptypes = set(deptypes)
deptypes.update(self.deptypes)
deptypes = tuple(sorted(deptypes))
changed = self.deptypes != deptypes
self.deptypes = deptypes
return changed
def copy(self):
return DependencySpec(self.parent, self.spec, self.deptypes)
def _cmp_key(self):
return (self.parent.name if self.parent else None,
self.spec.name if self.spec else None,
self.deptypes)
def __str__(self):
return "%s %s--> %s" % (self.parent.name if self.parent else None,
self.deptypes,
self.spec.name if self.spec else None)
_valid_compiler_flags = [
'cflags', 'cxxflags', 'fflags', 'ldflags', 'ldlibs', 'cppflags']
class FlagMap(HashableMap):
def __init__(self, spec):
super(FlagMap, self).__init__()
self.spec = spec
def satisfies(self, other, strict=False):
if strict or (self.spec and self.spec._concrete):
return all(f in self and set(self[f]) == set(other[f])
for f in other)
else:
return all(set(self[f]) == set(other[f])
for f in other if (other[f] != [] and f in self))
def constrain(self, other):
"""Add all flags in other that aren't in self to self.
Return whether the spec changed.
"""
if other.spec and other.spec._concrete:
for k in self:
if k not in other:
raise UnsatisfiableCompilerFlagSpecError(
self[k], '<absent>')
changed = False
for k in other:
if k in self and not set(self[k]) <= set(other[k]):
raise UnsatisfiableCompilerFlagSpecError(
' '.join(f for f in self[k]),
' '.join(f for f in other[k]))
elif k not in self:
self[k] = other[k]
changed = True
return changed
@staticmethod
def valid_compiler_flags():
return _valid_compiler_flags
def copy(self):
clone = FlagMap(None)
for name, value in self.items():
clone[name] = value
return clone
def _cmp_key(self):
return tuple((k, tuple(v)) for k, v in sorted(iteritems(self)))
def __str__(self):
sorted_keys = [k for k in sorted(self.keys()) if self[k] != []]
cond_symbol = ' ' if len(sorted_keys) > 0 else ''
return cond_symbol + ' '.join(
str(key) + '=\"' + ' '.join(
str(f) for f in self[key]) + '\"'
for key in sorted_keys) + cond_symbol
class DependencyMap(HashableMap):
"""Each spec has a DependencyMap containing specs for its dependencies.
The DependencyMap is keyed by name. """
def __str__(self):
return "{deps: %s}" % ', '.join(str(d) for d in sorted(self.values()))
def _command_default_handler(descriptor, spec, cls):
"""Default handler when looking for the 'command' attribute.
Tries to search for ``spec.name`` in the ``spec.prefix.bin`` directory.
Parameters:
descriptor (ForwardQueryToPackage): descriptor that triggered the call
spec (Spec): spec that is being queried
cls (type(spec)): type of spec, to match the signature of the
descriptor ``__get__`` method
Returns:
Executable: An executable of the command
Raises:
RuntimeError: If the command is not found
"""
path = os.path.join(spec.prefix.bin, spec.name)
if is_exe(path):
return Executable(path)
else:
msg = 'Unable to locate {0} command in {1}'
raise RuntimeError(msg.format(spec.name, spec.prefix.bin))
def _headers_default_handler(descriptor, spec, cls):
"""Default handler when looking for the 'headers' attribute.
Tries to search for ``*.h`` files recursively starting from
``spec.prefix.include``.
Parameters:
descriptor (ForwardQueryToPackage): descriptor that triggered the call
spec (Spec): spec that is being queried
cls (type(spec)): type of spec, to match the signature of the
descriptor ``__get__`` method
Returns:
HeaderList: The headers in ``prefix.include``
Raises:
RuntimeError: If no headers are found
"""
headers = find_headers('*', root=spec.prefix.include, recurse=True)
if headers:
return headers
else:
msg = 'Unable to locate {0} headers in {1}'
raise RuntimeError(msg.format(spec.name, spec.prefix.include))
def _libs_default_handler(descriptor, spec, cls):
"""Default handler when looking for the 'libs' attribute.
Tries to search for ``lib{spec.name}`` recursively starting from
``spec.prefix``.
Parameters:
descriptor (ForwardQueryToPackage): descriptor that triggered the call
spec (Spec): spec that is being queried
cls (type(spec)): type of spec, to match the signature of the
descriptor ``__get__`` method
Returns:
LibraryList: The libraries found
Raises:
RuntimeError: If no libraries are found
"""
# Variable 'name' is passed to function 'find_libraries', which supports
# glob characters. For example, we have a package with a name 'abc-abc'.
# Now, we don't know if the original name of the package is 'abc_abc'
# (and it generates a library 'libabc_abc.so') or 'abc-abc' (and it
# generates a library 'libabc-abc.so'). So, we tell the function
# 'find_libraries' to give us anything that matches 'libabc?abc' and it
# gives us either 'libabc-abc.so' or 'libabc_abc.so' (or an error)
# depending on which one exists (there is a possibility, of course, to
# get something like 'libabcXabc.so', but for now we consider this
# unlikely).
name = 'lib' + spec.name.replace('-', '?')
if '+shared' in spec:
libs = find_libraries(
name, root=spec.prefix, shared=True, recurse=True
)
elif '~shared' in spec:
libs = find_libraries(
name, root=spec.prefix, shared=False, recurse=True
)
else:
# Prefer shared
libs = find_libraries(
name, root=spec.prefix, shared=True, recurse=True
)
if libs:
return libs
libs = find_libraries(
name, root=spec.prefix, shared=False, recurse=True
)
if libs:
return libs
else:
msg = 'Unable to recursively locate {0} libraries in {1}'
raise RuntimeError(msg.format(spec.name, spec.prefix))
class ForwardQueryToPackage(object):
"""Descriptor used to forward queries from Spec to Package"""
def __init__(self, attribute_name, default_handler=None):
"""Create a new descriptor.
Parameters:
attribute_name (str): name of the attribute to be
searched for in the Package instance
default_handler (callable, optional): default function to be
called if the attribute was not found in the Package
instance
"""
self.attribute_name = attribute_name
# Turn the default handler into a function with the right
# signature that always returns None
if default_handler is None:
default_handler = lambda descriptor, spec, cls: None
self.default = default_handler
def __get__(self, instance, cls):
"""Retrieves the property from Package using a well defined chain
of responsibility.
The order of call is:
1. if the query was through the name of a virtual package try to
search for the attribute `{virtual_name}_{attribute_name}`
in Package
2. try to search for attribute `{attribute_name}` in Package
3. try to call the default handler
The first call that produces a value will stop the chain.
If no call can handle the request or a None value is produced,
then AttributeError is raised.
"""
pkg = instance.package
try:
query = instance.last_query
except AttributeError:
# There has been no query yet: this means
# a spec is trying to access its own attributes
_ = instance[instance.name] # NOQA: ignore=F841
query = instance.last_query
callbacks_chain = []
# First in the chain : specialized attribute for virtual packages
if query.isvirtual:
specialized_name = '{0}_{1}'.format(
query.name, self.attribute_name
)
callbacks_chain.append(lambda: getattr(pkg, specialized_name))
# Try to get the generic method from Package
callbacks_chain.append(lambda: getattr(pkg, self.attribute_name))
# Final resort : default callback
callbacks_chain.append(lambda: self.default(self, instance, cls))
# Trigger the callbacks in order, the first one producing a
# value wins
value = None
for f in callbacks_chain:
try:
value = f()
break
except AttributeError:
pass
# 'None' value raises AttributeError : this permits to 'disable'
# the call in a particular package by returning None from the
# queried attribute, or will trigger an exception if things
# searched for were not found
if value is None:
fmt = '\'{name}\' package has no relevant attribute \'{query}\'\n' # NOQA: ignore=E501
fmt += '\tspec : \'{spec}\'\n'
fmt += '\tqueried as : \'{spec.last_query.name}\'\n'
fmt += '\textra parameters : \'{spec.last_query.extra_parameters}\'\n' # NOQA: ignore=E501
message = fmt.format(
name=pkg.name,
query=self.attribute_name,
spec=instance
)
raise AttributeError(message)
return value
def __set__(self, instance, value):
cls_name = type(instance).__name__
msg = "'{0}' object attribute '{1}' is read-only"
raise AttributeError(msg.format(cls_name, self.attribute_name))
class SpecBuildInterface(ObjectWrapper):
command = ForwardQueryToPackage(
'command',
default_handler=_command_default_handler
)
headers = ForwardQueryToPackage(
'headers',
default_handler=_headers_default_handler
)
libs = ForwardQueryToPackage(
'libs',
default_handler=_libs_default_handler
)
def __init__(self, spec, name, query_parameters):
super(SpecBuildInterface, self).__init__(spec)
# Represents a query state in a BuildInterface object
QueryState = collections.namedtuple(
'QueryState', ['name', 'extra_parameters', 'isvirtual']
)
is_virtual = Spec.is_virtual(name)
self.last_query = QueryState(
name=name,
extra_parameters=query_parameters,
isvirtual=is_virtual
)
@key_ordering
class Spec(object):
@staticmethod
def from_literal(spec_dict, normal=True):
"""Builds a Spec from a dictionary containing the spec literal.
The dictionary must have a single top level key, representing the root,
and as many secondary level keys as needed in the spec.
The keys can be either a string or a Spec or a tuple containing the
Spec and the dependency types.
Args:
spec_dict (dict): the dictionary containing the spec literal
normal (bool): if True the same key appearing at different levels
of the ``spec_dict`` will map to the same object in memory.
Examples:
A simple spec ``foo`` with no dependencies:
.. code-block:: python
{'foo': None}
A spec ``foo`` with a ``(build, link)`` dependency ``bar``:
.. code-block:: python
{'foo':
{'bar:build,link': None}}
A spec with a diamond dependency and various build types:
.. code-block:: python
{'dt-diamond': {
'dt-diamond-left:build,link': {
'dt-diamond-bottom:build': None
},
'dt-diamond-right:build,link': {
'dt-diamond-bottom:build,link,run': None
}
}}
The same spec with a double copy of ``dt-diamond-bottom`` and
no diamond structure:
.. code-block:: python
{'dt-diamond': {
'dt-diamond-left:build,link': {
'dt-diamond-bottom:build': None
},
'dt-diamond-right:build,link': {
'dt-diamond-bottom:build,link,run': None
}
}, normal=False}
Constructing a spec using a Spec object as key:
.. code-block:: python
mpich = Spec('mpich')
libelf = Spec('libelf@1.8.11')
expected_normalized = Spec.from_literal({
'mpileaks': {
'callpath': {
'dyninst': {
'libdwarf': {libelf: None},
libelf: None
},
mpich: None
},
mpich: None
},
})
"""
# Maps a literal to a Spec, to be sure we are reusing the same object
spec_cache = LazySpecCache()
def spec_builder(d):
# The invariant is that the top level dictionary must have
# only one key
assert len(d) == 1
# Construct the top-level spec
spec_like, dep_like = next(iter(d.items()))
# If the requirements was for unique nodes (default)
# then re-use keys from the local cache. Otherwise build
# a new node every time.
if not isinstance(spec_like, Spec):
spec = spec_cache[spec_like] if normal else Spec(spec_like)
else:
spec = spec_like
if dep_like is None:
return spec
def name_and_dependency_types(s):
"""Given a key in the dictionary containing the literal,
extracts the name of the spec and its dependency types.
Args:
s (str): key in the dictionary containing the literal
"""
t = s.split(':')
if len(t) > 2:
msg = 'more than one ":" separator in key "{0}"'
raise KeyError(msg.format(s))
n = t[0]
if len(t) == 2:
dtypes = tuple(dt.strip() for dt in t[1].split(','))
else:
dtypes = ()
return n, dtypes
def spec_and_dependency_types(s):
"""Given a non-string key in the literal, extracts the spec
and its dependency types.
Args:
s (spec or tuple): either a Spec object or a tuple
composed of a Spec object and a string with the
dependency types
"""
if isinstance(s, Spec):
return s, ()
spec_obj, dtypes = s
return spec_obj, tuple(dt.strip() for dt in dtypes.split(','))
# Recurse on dependencies
for s, s_dependencies in dep_like.items():
if isinstance(s, string_types):
dag_node, dependency_types = name_and_dependency_types(s)
else:
dag_node, dependency_types = spec_and_dependency_types(s)
dependency_spec = spec_builder({dag_node: s_dependencies})
spec._add_dependency(dependency_spec, dependency_types)
return spec
return spec_builder(spec_dict)
def __init__(self, spec_like, **kwargs):
# Copy if spec_like is a Spec.
if isinstance(spec_like, Spec):
self._dup(spec_like)
return
# Parse if the spec_like is a string.
if not isinstance(spec_like, string_types):
raise TypeError("Can't make spec out of %s" % type(spec_like))
# parse string types *into* this spec
spec_list = SpecParser(self).parse(spec_like)
if len(spec_list) > 1:
raise ValueError("More than one spec in string: " + spec_like)
if len(spec_list) < 1:
raise ValueError("String contains no specs: " + spec_like)
# Specs are by default not assumed to be normal, but in some
# cases we've read them from a file want to assume normal.
# This allows us to manipulate specs that Spack doesn't have
# package.py files for.
self._normal = kwargs.get('normal', False)
self._concrete = kwargs.get('concrete', False)
# Allow a spec to be constructed with an external path.
self.external_path = kwargs.get('external_path', None)
self.external_module = kwargs.get('external_module', None)
@property
def external(self):
return bool(self.external_path) or bool(self.external_module)
def get_dependency(self, name):
dep = self._dependencies.get(name)
if dep is not None:
return dep
raise InvalidDependencyError(
self.name + " does not depend on " + comma_or(name))
def _find_deps(self, where, deptype):
deptype = canonical_deptype(deptype)
return [dep for dep in where.values()
if deptype and (not dep.deptypes or
any(d in deptype for d in dep.deptypes))]
def dependencies(self, deptype='all'):
return [d.spec
for d in self._find_deps(self._dependencies, deptype)]
def dependents(self, deptype='all'):
return [d.parent
for d in self._find_deps(self._dependents, deptype)]
def dependencies_dict(self, deptype='all'):
return dict((d.spec.name, d)
for d in self._find_deps(self._dependencies, deptype))
def dependents_dict(self, deptype='all'):
return dict((d.parent.name, d)
for d in self._find_deps(self._dependents, deptype))
#
# Private routines here are called by the parser when building a spec.
#
def _add_version(self, version):
"""Called by the parser to add an allowable version."""
self.versions.add(version)
def _add_flag(self, name, value):
"""Called by the parser to add a known flag.
Known flags currently include "arch"
"""
valid_flags = FlagMap.valid_compiler_flags()
if name == 'arch' or name == 'architecture':
parts = tuple(value.split('-'))
plat, os, tgt = parts if len(parts) == 3 else (None, None, value)
self._set_architecture(platform=plat, platform_os=os, target=tgt)
elif name == 'platform':
self._set_architecture(platform=value)
elif name == 'os' or name == 'operating_system':
self._set_architecture(platform_os=value)
elif name == 'target':
self._set_architecture(target=value)
elif name in valid_flags:
assert(self.compiler_flags is not None)
self.compiler_flags[name] = value.split()
else:
# FIXME:
# All other flags represent variants. 'foo=true' and 'foo=false'
# map to '+foo' and '~foo' respectively. As such they need a
# BoolValuedVariant instance.
if str(value).upper() == 'TRUE' or str(value).upper() == 'FALSE':
self.variants[name] = BoolValuedVariant(name, value)
else:
self.variants[name] = AbstractVariant(name, value)
def _set_architecture(self, **kwargs):
"""Called by the parser to set the architecture."""
arch_attrs = ['platform', 'platform_os', 'target']
if self.architecture and self.architecture.concrete:
raise DuplicateArchitectureError(
"Spec for '%s' cannot have two architectures." % self.name)
if not self.architecture:
new_vals = tuple(kwargs.get(arg, None) for arg in arch_attrs)
self.architecture = ArchSpec(*new_vals)
else:
new_attrvals = [(a, v) for a, v in iteritems(kwargs)
if a in arch_attrs]
for new_attr, new_value in new_attrvals:
if getattr(self.architecture, new_attr):
raise DuplicateArchitectureError(
"Spec for '%s' cannot have two '%s' specified "
"for its architecture" % (self.name, new_attr))
else:
setattr(self.architecture, new_attr, new_value)
def _set_compiler(self, compiler):
"""Called by the parser to set the compiler."""
if self.compiler:
raise DuplicateCompilerSpecError(
"Spec for '%s' cannot have two compilers." % self.name)
self.compiler = compiler
def _add_dependency(self, spec, deptypes):
"""Called by the parser to add another spec as a dependency."""
if spec.name in self._dependencies:
raise DuplicateDependencyError(
"Cannot depend on '%s' twice" % spec)
# create an edge and add to parent and child
dspec = DependencySpec(self, spec, deptypes)
self._dependencies[spec.name] = dspec
spec._dependents[self.name] = dspec
#
# Public interface
#
@property
def fullname(self):
return (
('%s.%s' % (self.namespace, self.name)) if self.namespace else
(self.name if self.name else ''))
@property
def root(self):
"""Follow dependent links and find the root of this spec's DAG.
Spack specs have a single root (the package being installed).
"""
if not self._dependents:
return self
return next(iter(self._dependents.values())).parent.root
@property
def package(self):
return spack.repo.get(self)
@property
def package_class(self):
"""Internal package call gets only the class object for a package.
Use this to just get package metadata.
"""
return spack.repo.get_pkg_class(self.fullname)
@property
def virtual(self):
"""Right now, a spec is virtual if no package exists with its name.
TODO: revisit this -- might need to use a separate namespace and
be more explicit about this.
Possible idea: just use convention and make virtual deps all
caps, e.g., MPI vs mpi.
"""
return Spec.is_virtual(self.name)
@staticmethod
def is_virtual(name):
"""Test if a name is virtual without requiring a Spec."""
return (name is not None) and (not spack.repo.exists(name))
@property
def concrete(self):
"""A spec is concrete if it describes a single build of a package.
More formally, a spec is concrete if concretize() has been called
on it and it has been marked `_concrete`.
Concrete specs either can be or have been built. All constraints
have been resolved, optional dependencies have been added or
removed, a compiler has been chosen, and all variants have
values.
"""
return self._concrete
def traverse(self, **kwargs):
direction = kwargs.get('direction', 'children')
depth = kwargs.get('depth', False)
get_spec = lambda s: s.spec
if direction == 'parents':
get_spec = lambda s: s.parent
if depth:
for d, dspec in self.traverse_edges(**kwargs):
yield d, get_spec(dspec)
else:
for dspec in self.traverse_edges(**kwargs):
yield get_spec(dspec)
def traverse_edges(self, visited=None, d=0, deptype='all',
dep_spec=None, **kwargs):
"""Generic traversal of the DAG represented by this spec.
This will yield each node in the spec. Options:
order [=pre|post]
Order to traverse spec nodes. Defaults to preorder traversal.
Options are:
'pre': Pre-order traversal; each node is yielded before its
children in the dependency DAG.
'post': Post-order traversal; each node is yielded after its
children in the dependency DAG.
cover [=nodes|edges|paths]
Determines how extensively to cover the dag. Possible values:
'nodes': Visit each node in the dag only once. Every node
yielded by this function will be unique.
'edges': If a node has been visited once but is reached along a
new path from the root, yield it but do not descend
into it. This traverses each 'edge' in the DAG once.
'paths': Explore every unique path reachable from the root.
This descends into visited subtrees and will yield
nodes twice if they're reachable by multiple paths.
depth [=False]
Defaults to False. When True, yields not just nodes in the
spec, but also their depth from the root in a (depth, node)
tuple.
key [=id]
Allow a custom key function to track the identity of nodes
in the traversal.
root [=True]
If False, this won't yield the root node, just its descendants.
direction [=children|parents]
If 'children', does a traversal of this spec's children. If
'parents', traverses upwards in the DAG towards the root.
"""
# get initial values for kwargs
depth = kwargs.get('depth', False)
key_fun = kwargs.get('key', id)
if isinstance(key_fun, string_types):
key_fun = attrgetter(key_fun)
yield_root = kwargs.get('root', True)
cover = kwargs.get('cover', 'nodes')
direction = kwargs.get('direction', 'children')
order = kwargs.get('order', 'pre')
deptype = canonical_deptype(deptype)
# Make sure kwargs have legal values; raise ValueError if not.
def validate(name, val, allowed_values):
if val not in allowed_values:
raise ValueError("Invalid value for %s: %s. Choices are %s"
% (name, val, ",".join(allowed_values)))
validate('cover', cover, ('nodes', 'edges', 'paths'))
validate('direction', direction, ('children', 'parents'))
validate('order', order, ('pre', 'post'))
if visited is None:
visited = set()
key = key_fun(self)
# Node traversal does not yield visited nodes.
if key in visited and cover == 'nodes':
return
def return_val(dspec):
if not dspec:
# make a fake dspec for the root.
if direction == 'parents':
dspec = DependencySpec(self, None, ())
else:
dspec = DependencySpec(None, self, ())
return (d, dspec) if depth else dspec
yield_me = yield_root or d > 0
# Preorder traversal yields before successors
if yield_me and order == 'pre':
yield return_val(dep_spec)
# Edge traversal yields but skips children of visited nodes
if not (key in visited and cover == 'edges'):
visited.add(key)
# This code determines direction and yields the children/parents
if direction == 'children':
where = self._dependencies
succ = lambda dspec: dspec.spec
elif direction == 'parents':
where = self._dependents
succ = lambda dspec: dspec.parent
else:
raise ValueError('Invalid traversal direction: %s' % direction)
for name, dspec in sorted(where.items()):
dt = dspec.deptypes
if dt and not any(d in deptype for d in dt):
continue
for child in succ(dspec).traverse_edges(
visited, d + 1, deptype, dspec, **kwargs):
yield child
# Postorder traversal yields after successors
if yield_me and order == 'post':
yield return_val(dep_spec)
@property
def short_spec(self):
"""Returns a version of the spec with the dependencies hashed
instead of completely enumerated."""
return self.format('$_$@$%@$+$=$/')
@property
def cshort_spec(self):
"""Returns an auto-colorized version of ``self.short_spec``."""
return self.cformat('$_$@$%@$+$=$/')
@property
def prefix(self):
return Prefix(spack.store.layout.path_for_spec(self))
def dag_hash(self, length=None):
"""Return a hash of the entire spec DAG, including connectivity."""
if self._hash:
return self._hash[:length]
else:
yaml_text = syaml.dump(
self.to_node_dict(), default_flow_style=True, width=maxint)
sha = hashlib.sha1(yaml_text.encode('utf-8'))
b32_hash = base64.b32encode(sha.digest()).lower()
if sys.version_info[0] >= 3:
b32_hash = b32_hash.decode('utf-8')
if self.concrete:
self._hash = b32_hash
return b32_hash[:length]
def dag_hash_bit_prefix(self, bits):
"""Get the first <bits> bits of the DAG hash as an integer type."""
return base32_prefix_bits(self.dag_hash(), bits)
def to_node_dict(self):
d = syaml_dict()
if self.versions:
d.update(self.versions.to_dict())
if self.architecture:
d.update(self.architecture.to_dict())
if self.compiler:
d.update(self.compiler.to_dict())
if self.namespace:
d['namespace'] = self.namespace
params = syaml_dict(
sorted(
v.yaml_entry() for _, v in self.variants.items()
)
)
params.update(sorted(self.compiler_flags.items()))
if params:
d['parameters'] = params
if self.external:
d['external'] = {
'path': self.external_path,
'module': bool(self.external_module)
}
# TODO: restore build dependencies here once we have less picky
# TODO: concretization.
deps = self.dependencies_dict(deptype=('link', 'run'))
if deps:
d['dependencies'] = syaml_dict([
(name,
syaml_dict([
('hash', dspec.spec.dag_hash()),
('type', sorted(str(s) for s in dspec.deptypes))])
) for name, dspec in sorted(deps.items())
])
return syaml_dict([(self.name, d)])
def to_dict(self):
node_list = []
for s in self.traverse(order='pre', deptype=('link', 'run')):
node = s.to_node_dict()
node[s.name]['hash'] = s.dag_hash()
node_list.append(node)
return syaml_dict([('spec', node_list)])
def to_yaml(self, stream=None):
return syaml.dump(
self.to_dict(), stream=stream, default_flow_style=False)
def to_json(self, stream=None):
return sjson.dump(self.to_dict(), stream)
@staticmethod
def from_node_dict(node):
name = next(iter(node))
node = node[name]
spec = Spec(name)
spec.namespace = node.get('namespace', None)
spec._hash = node.get('hash', None)
if 'version' in node or 'versions' in node:
spec.versions = VersionList.from_dict(node)
if 'arch' in node:
spec.architecture = ArchSpec.from_dict(node)
if 'compiler' in node:
spec.compiler = CompilerSpec.from_dict(node)
else:
spec.compiler = None
if 'parameters' in node:
for name, value in node['parameters'].items():
if name in _valid_compiler_flags:
spec.compiler_flags[name] = value
else:
spec.variants[name] = MultiValuedVariant.from_node_dict(
name, value)
elif 'variants' in node:
for name, value in node['variants'].items():
spec.variants[name] = MultiValuedVariant.from_node_dict(
name, value
)
for name in FlagMap.valid_compiler_flags():
spec.compiler_flags[name] = []
if 'external' in node:
spec.external_path = None
spec.external_module = None
# This conditional is needed because sometimes this function is
# called with a node already constructed that contains a 'versions'
# and 'external' field. Related to virtual packages provider
# indexes.
if node['external']:
spec.external_path = node['external']['path']
spec.external_module = node['external']['module']
if spec.external_module is False:
spec.external_module = None
else:
spec.external_path = None
spec.external_module = None
# Don't read dependencies here; from_node_dict() is used by
# from_yaml() to read the root *and* each dependency spec.
return spec
@staticmethod
def read_yaml_dep_specs(dependency_dict):
"""Read the DependencySpec portion of a YAML-formatted Spec.
This needs to be backward-compatible with older spack spec
formats so that reindex will work on old specs/databases.
"""
for dep_name, elt in dependency_dict.items():
if isinstance(elt, string_types):
# original format, elt is just the dependency hash.
dag_hash, deptypes = elt, ['build', 'link']
elif isinstance(elt, tuple):
# original deptypes format: (used tuples, not future-proof)
dag_hash, deptypes = elt
elif isinstance(elt, dict):
# new format: elements of dependency spec are keyed.
dag_hash, deptypes = elt['hash'], elt['type']
else:
raise SpecError("Couldn't parse dependency types in spec.")
yield dep_name, dag_hash, list(deptypes)
@staticmethod
def from_dict(data):
"""Construct a spec from YAML.
Parameters:
data -- a nested dict/list data structure read from YAML or JSON.
"""
nodes = data['spec']
# Read nodes out of list. Root spec is the first element;
# dependencies are the following elements.
dep_list = [Spec.from_node_dict(node) for node in nodes]
if not dep_list:
raise SpecError("YAML spec contains no nodes.")
deps = dict((spec.name, spec) for spec in dep_list)
spec = dep_list[0]
for node in nodes:
# get dependency dict from the node.
name = next(iter(node))
if 'dependencies' not in node[name]:
continue
yaml_deps = node[name]['dependencies']
for dname, dhash, dtypes in Spec.read_yaml_dep_specs(yaml_deps):
# Fill in dependencies by looking them up by name in deps dict
deps[name]._dependencies[dname] = DependencySpec(
deps[name], deps[dname], dtypes)
return spec
@staticmethod
def from_yaml(stream):
"""Construct a spec from YAML.
Parameters:
stream -- string or file object to read from.
"""
try:
data = syaml.load(stream)
return Spec.from_dict(data)
except MarkedYAMLError as e:
raise syaml.SpackYAMLError("error parsing YAML spec:", str(e))
@staticmethod
def from_json(stream):
"""Construct a spec from JSON.
Parameters:
stream -- string or file object to read from.
"""
try:
data = sjson.load(stream)
return Spec.from_dict(data)
except Exception as e:
raise sjson.SpackJSONError("error parsing JSON spec:", str(e))
def _concretize_helper(self, presets=None, visited=None):
"""Recursive helper function for concretize().
This concretizes everything bottom-up. As things are
concretized, they're added to the presets, and ancestors
will prefer the settings of their children.
"""
if presets is None:
presets = {}
if visited is None:
visited = set()
if self.name in visited:
return False
if self.concrete:
visited.add(self.name)
return False
changed = False
# Concretize deps first -- this is a bottom-up process.
for name in sorted(self._dependencies.keys()):
changed |= self._dependencies[
name].spec._concretize_helper(presets, visited)
if self.name in presets:
changed |= self.constrain(presets[self.name])
else:
# Concretize virtual dependencies last. Because they're added
# to presets below, their constraints will all be merged, but we'll
# still need to select a concrete package later.
if not self.virtual:
changed |= any(
(spack.concretizer.concretize_architecture(self),
spack.concretizer.concretize_compiler(self),
spack.concretizer.concretize_compiler_flags(
self), # has to be concretized after compiler
spack.concretizer.concretize_version(self),
spack.concretizer.concretize_variants(self)))
presets[self.name] = self
visited.add(self.name)
return changed
def _replace_with(self, concrete):
"""Replace this virtual spec with a concrete spec."""
assert(self.virtual)
for name, dep_spec in self._dependents.items():
dependent = dep_spec.parent
deptypes = dep_spec.deptypes
# remove self from all dependents, unless it is already removed
if self.name in dependent._dependencies:
del dependent._dependencies[self.name]
# add the replacement, unless it is already a dep of dependent.
if concrete.name not in dependent._dependencies:
dependent._add_dependency(concrete, deptypes)
def _expand_virtual_packages(self):
"""Find virtual packages in this spec, replace them with providers,
and normalize again to include the provider's (potentially virtual)
dependencies. Repeat until there are no virtual deps.
Precondition: spec is normalized.
.. todo::
If a provider depends on something that conflicts with
other dependencies in the spec being expanded, this can
produce a conflicting spec. For example, if mpich depends
on hwloc@:1.3 but something in the spec needs hwloc@1.4:,
then we should choose an MPI other than mpich. Cases like
this are infrequent, but we should implement this before it is
a problem.
"""
# Make an index of stuff this spec already provides
self_index = ProviderIndex(self.traverse(), restrict=True)
changed = False
done = False
while not done:
done = True
for spec in list(self.traverse()):
replacement = None
if spec.external:
continue
if spec.virtual:
replacement = self._find_provider(spec, self_index)
if replacement:
# TODO: may break if in-place on self but
# shouldn't happen if root is traversed first.
spec._replace_with(replacement)
done = False
break
if not replacement:
# Get a list of possible replacements in order of
# preference.
candidates = spack.concretizer.choose_virtual_or_external(
spec)
# Try the replacements in order, skipping any that cause
# satisfiability problems.
for replacement in candidates:
if replacement is spec:
break
# Replace spec with the candidate and normalize
copy = self.copy()
copy[spec.name]._dup(replacement, deps=False)
try:
# If there are duplicate providers or duplicate
# provider deps, consolidate them and merge
# constraints.
copy.normalize(force=True)
break
except SpecError:
# On error, we'll try the next replacement.
continue
# If replacement is external then trim the dependencies
if replacement.external:
if (spec._dependencies):
changed = True
spec._dependencies = DependencyMap()
replacement._dependencies = DependencyMap()
replacement.architecture = self.architecture
# TODO: could this and the stuff in _dup be cleaned up?
def feq(cfield, sfield):
return (not cfield) or (cfield == sfield)
if replacement is spec or (
feq(replacement.name, spec.name) and
feq(replacement.versions, spec.versions) and
feq(replacement.compiler, spec.compiler) and
feq(replacement.architecture, spec.architecture) and
feq(replacement._dependencies, spec._dependencies) and
feq(replacement.variants, spec.variants) and
feq(replacement.external_path,
spec.external_path) and
feq(replacement.external_module,
spec.external_module)):
continue
# Refine this spec to the candidate. This uses
# replace_with AND dup so that it can work in
# place. TODO: make this more efficient.
if spec.virtual:
spec._replace_with(replacement)
changed = True
if spec._dup(replacement, deps=False, cleardeps=False):
changed = True
spec._dependencies.owner = spec
self_index.update(spec)
done = False
break
return changed
def concretize(self):
"""A spec is concrete if it describes one build of a package uniquely.
This will ensure that this spec is concrete.
If this spec could describe more than one version, variant, or build
of a package, this will add constraints to make it concrete.
Some rigorous validation and checks are also performed on the spec.
Concretizing ensures that it is self-consistent and that it's
consistent with requirements of its packages. See flatten() and
normalize() for more details on this.
It also ensures that:
.. code-block:: python
for x in self.traverse():
assert x.package.spec == x
which may not be true *during* the concretization step.
"""
if not self.name:
raise SpecError("Attempting to concretize anonymous spec")
if self._concrete:
return
changed = True
force = False
while changed:
changes = (self.normalize(force),
self._expand_virtual_packages(),
self._concretize_helper())
changed = any(changes)
force = True
for s in self.traverse():
# After concretizing, assign namespaces to anything left.
# Note that this doesn't count as a "change". The repository
# configuration is constant throughout a spack run, and
# normalize and concretize evaluate Packages using Repo.get(),
# which respects precedence. So, a namespace assignment isn't
# changing how a package name would have been interpreted and
# we can do it as late as possible to allow as much
# compatibility across repositories as possible.
if s.namespace is None:
s.namespace = spack.repo.repo_for_pkg(s.name).namespace
if s.concrete:
continue
# Add any patches from the package to the spec.
patches = []
for cond, patch_list in s.package_class.patches.items():
if s.satisfies(cond):
for patch in patch_list:
patches.append(patch.sha256)
if patches:
mvar = s.variants.setdefault(
'patches', MultiValuedVariant('patches', ())
)
mvar.value = patches
# FIXME: Monkey patches mvar to store patches order
mvar._patches_in_order_of_appearance = patches
# Apply patches required on dependencies by depends_on(..., patch=...)
for dspec in self.traverse_edges(deptype=all,
cover='edges', root=False):
pkg_deps = dspec.parent.package_class.dependencies
if dspec.spec.name not in pkg_deps:
continue
if dspec.spec.concrete:
continue
patches = []
for cond, dependency in pkg_deps[dspec.spec.name].items():
if dspec.parent.satisfies(cond):
for pcond, patch_list in dependency.patches.items():
if dspec.spec.satisfies(pcond):
for patch in patch_list:
patches.append(patch.sha256)
if patches:
mvar = dspec.spec.variants.setdefault(
'patches', MultiValuedVariant('patches', ())
)
mvar.value = mvar.value + tuple(patches)
# FIXME: Monkey patches mvar to store patches order
p = getattr(mvar, '_patches_in_order_of_appearance', [])
mvar._patches_in_order_of_appearance = dedupe(p + patches)
for s in self.traverse():
if s.external_module:
compiler = spack.compilers.compiler_for_spec(
s.compiler, s.architecture)
for mod in compiler.modules:
load_module(mod)
s.external_path = get_path_from_module(s.external_module)
# Mark everything in the spec as concrete, as well.
self._mark_concrete()
# Now that the spec is concrete we should check if
# there are declared conflicts
matches = []
for x in self.traverse():
for conflict_spec, when_list in x.package.conflicts.items():
if x.satisfies(conflict_spec):
for when_spec, msg in when_list:
if x.satisfies(when_spec):
matches.append((x, conflict_spec, when_spec, msg))
if matches:
raise ConflictsInSpecError(self, matches)
# At this point the spec-package mutual references should
# be self-consistent
for x in self.traverse():
x.package.spec = x
def _mark_concrete(self, value=True):
"""Mark this spec and its dependencies as concrete.
Only for internal use -- client code should use "concretize"
unless there is a need to force a spec to be concrete.
"""
for s in self.traverse():
if (not value) and s.concrete and s.package.installed:
continue
s._normal = value
s._concrete = value
def concretized(self):
"""This is a non-destructive version of concretize(). First clones,
then returns a concrete version of this package without modifying
this package. """
clone = self.copy()
clone.concretize()
return clone
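    # A minimal usage sketch of the two entry points above (the package name
    # below is hypothetical, for illustration only):
    #
    #     s = Spec('zlib@1.2')
    #     t = s.concretized()   # 's' stays abstract; 't' is a concrete copy
    #     s.concretize()        # 's' itself is made concrete (destructive)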
def flat_dependencies(self, **kwargs):
"""Return a DependencyMap containing all of this spec's
dependencies with their constraints merged.
If copy is True, returns merged copies of its dependencies
without modifying the spec it's called on.
If copy is False, clears this spec's dependencies and
returns them.
"""
copy = kwargs.get('copy', True)
flat_deps = {}
try:
deptree = self.traverse(root=False)
for spec in deptree:
if spec.name not in flat_deps:
if copy:
spec = spec.copy(deps=False)
flat_deps[spec.name] = spec
else:
flat_deps[spec.name].constrain(spec)
if not copy:
for spec in flat_deps.values():
spec._dependencies.clear()
spec._dependents.clear()
self._dependencies.clear()
return flat_deps
except UnsatisfiableSpecError as e:
# Here, the DAG contains two instances of the same package
# with inconsistent constraints. Users cannot produce
# inconsistent specs like this on the command line: the
# parser doesn't allow it. Spack must be broken!
raise InconsistentSpecError("Invalid Spec DAG: %s" % e.message)
def index(self, deptype='all'):
"""Return DependencyMap that points to all the dependencies in this
spec."""
dm = DependencyMap()
for spec in self.traverse(deptype=deptype):
dm[spec.name] = spec
return dm
def _evaluate_dependency_conditions(self, name):
"""Evaluate all the conditions on a dependency with this name.
Args:
name (str): name of dependency to evaluate conditions on.
Returns:
(Dependency): new Dependency object combining all constraints.
If the package depends on <name> in the current spec
configuration, return the constrained dependency and
corresponding dependency types.
        If no conditions are True (and we don't depend on it), return
        ``None``.
"""
pkg = spack.repo.get(self.fullname)
conditions = pkg.dependencies[name]
substitute_abstract_variants(self)
# evaluate when specs to figure out constraints on the dependency.
dep = None
for when_spec, dependency in conditions.items():
if self.satisfies(when_spec, strict=True):
if dep is None:
dep = Dependency(self.name, Spec(name), type=())
try:
dep.merge(dependency)
except UnsatisfiableSpecError as e:
e.message = ("Conflicting conditional dependencies on"
"package %s for spec %s" % (self.name, self))
raise e
return dep
def _find_provider(self, vdep, provider_index):
"""Find provider for a virtual spec in the provider index.
Raise an exception if there is a conflicting virtual
dependency already in this spec.
"""
assert(vdep.virtual)
# note that this defensively copies.
providers = provider_index.providers_for(vdep)
# If there is a provider for the vpkg, then use that instead of
# the virtual package.
if providers:
# Remove duplicate providers that can concretize to the same
# result.
for provider in providers:
for spec in providers:
if spec is not provider and provider.satisfies(spec):
providers.remove(spec)
# Can't have multiple providers for the same thing in one spec.
if len(providers) > 1:
raise MultipleProviderError(vdep, providers)
return providers[0]
else:
# The user might have required something insufficient for
# pkg_dep -- so we'll get a conflict. e.g., user asked for
# mpi@:1.1 but some package required mpi@2.1:.
required = provider_index.providers_for(vdep.name)
if len(required) > 1:
raise MultipleProviderError(vdep, required)
elif required:
raise UnsatisfiableProviderSpecError(required[0], vdep)
def _merge_dependency(
self, dependency, visited, spec_deps, provider_index):
"""Merge dependency information from a Package into this Spec.
Args:
dependency (Dependency): dependency metadata from a package;
this is typically the result of merging *all* matching
dependency constraints from the package.
visited (set): set of dependency nodes already visited by
``normalize()``.
spec_deps (dict): ``dict`` of all dependencies from the spec
being normalized.
provider_index (dict): ``provider_index`` of virtual dep
providers in the ``Spec`` as normalized so far.
NOTE: Caller should assume that this routine owns the
``dependency`` parameter, i.e., it needs to be a copy of any
internal structures.
This is the core of ``normalize()``. There are some basic steps:
* If dep is virtual, evaluate whether it corresponds to an
existing concrete dependency, and merge if so.
* If it's real and it provides some virtual dep, see if it provides
what some virtual dependency wants and merge if so.
* Finally, if none of the above, merge dependency and its
constraints into this spec.
This method returns True if the spec was changed, False otherwise.
"""
changed = False
dep = dependency.spec
# If it's a virtual dependency, try to find an existing
# provider in the spec, and merge that.
if dep.virtual:
visited.add(dep.name)
provider = self._find_provider(dep, provider_index)
if provider:
dep = provider
else:
index = ProviderIndex([dep], restrict=True)
items = list(spec_deps.items())
for name, vspec in items:
if not vspec.virtual:
continue
if index.providers_for(vspec):
vspec._replace_with(dep)
del spec_deps[vspec.name]
changed = True
else:
required = index.providers_for(vspec.name)
if required:
raise UnsatisfiableProviderSpecError(required[0], dep)
provider_index.update(dep)
# If the spec isn't already in the set of dependencies, add it.
# Note: dep is always owned by this method. If it's from the
# caller, it's a copy from _evaluate_dependency_conditions. If it
# comes from a vdep, it's a defensive copy from _find_provider.
if dep.name not in spec_deps:
if self.concrete:
return False
spec_deps[dep.name] = dep
changed = True
else:
# merge package/vdep information into spec
try:
changed |= spec_deps[dep.name].constrain(dep)
except UnsatisfiableSpecError as e:
fmt = 'An unsatisfiable {0}'.format(e.constraint_type)
fmt += ' constraint has been detected for spec:'
fmt += '\n\n{0}\n\n'.format(spec_deps[dep.name].tree(indent=4))
fmt += 'while trying to concretize the partial spec:'
fmt += '\n\n{0}\n\n'.format(self.tree(indent=4))
fmt += '{0} requires {1} {2} {3}, but spec asked for {4}'
e.message = fmt.format(
self.name,
dep.name,
e.constraint_type,
e.required,
e.provided)
raise
# Add merged spec to my deps and recurse
spec_dependency = spec_deps[dep.name]
if dep.name not in self._dependencies:
self._add_dependency(spec_dependency, dependency.type)
changed |= spec_dependency._normalize_helper(
visited, spec_deps, provider_index)
return changed
def _normalize_helper(self, visited, spec_deps, provider_index):
"""Recursive helper function for _normalize."""
if self.name in visited:
return False
visited.add(self.name)
# if we descend into a virtual spec, there's nothing more
# to normalize. Concretize will finish resolving it later.
if self.virtual or self.external:
return False
# Combine constraints from package deps with constraints from
# the spec, until nothing changes.
any_change = False
changed = True
pkg = spack.repo.get(self.fullname)
while changed:
changed = False
for dep_name in pkg.dependencies:
                # Do we depend on dep_name? If so, the returned dep is not None.
dep = self._evaluate_dependency_conditions(dep_name)
# If dep is a needed dependency, merge it.
if dep and (spack.package_testing.check(self.name) or
set(dep.type) - set(['test'])):
changed |= self._merge_dependency(
dep, visited, spec_deps, provider_index)
any_change |= changed
return any_change
def normalize(self, force=False):
"""When specs are parsed, any dependencies specified are hanging off
the root, and ONLY the ones that were explicitly provided are there.
Normalization turns a partial flat spec into a DAG, where:
1. Known dependencies of the root package are in the DAG.
2. Each node's dependencies dict only contains its known direct
deps.
3. There is only ONE unique spec for each package in the DAG.
           * This includes virtual packages. If there is a non-virtual
package that provides a virtual package that is in the spec,
then we replace the virtual package with the non-virtual one.
TODO: normalize should probably implement some form of cycle
detection, to ensure that the spec is actually a DAG.
"""
if not self.name:
raise SpecError("Attempting to normalize anonymous spec")
# Set _normal and _concrete to False when forced
if force:
self._mark_concrete(False)
if self._normal:
return False
# Ensure first that all packages & compilers in the DAG exist.
self.validate_or_raise()
# Get all the dependencies into one DependencyMap
spec_deps = self.flat_dependencies(copy=False)
# Initialize index of virtual dependency providers if
# concretize didn't pass us one already
provider_index = ProviderIndex(
[s for s in spec_deps.values()], restrict=True)
# traverse the package DAG and fill out dependencies according
# to package files & their 'when' specs
visited = set()
any_change = self._normalize_helper(visited, spec_deps, provider_index)
# If there are deps specified but not visited, they're not
# actually deps of this package. Raise an error.
extra = set(spec_deps.keys()).difference(visited)
if extra:
raise InvalidDependencyError(
self.name + " does not depend on " + comma_or(extra))
# Mark the spec as normal once done.
self._normal = True
return any_change
def normalized(self):
"""
Return a normalized copy of this spec without modifying this spec.
"""
clone = self.copy()
clone.normalize()
return clone
def validate_or_raise(self):
"""Checks that names and values in this spec are real. If they're not,
it will raise an appropriate exception.
"""
# FIXME: this function should be lazy, and collect all the errors
# FIXME: before raising the exceptions, instead of being greedy and
# FIXME: raise just the first one encountered
for spec in self.traverse():
# raise an UnknownPackageError if the spec's package isn't real.
if (not spec.virtual) and spec.name:
spack.repo.get(spec.fullname)
# validate compiler in addition to the package name.
if spec.compiler:
if not compilers.supported(spec.compiler):
raise UnsupportedCompilerError(spec.compiler.name)
# Ensure correctness of variants (if the spec is not virtual)
if not spec.virtual:
pkg_cls = spec.package_class
pkg_variants = pkg_cls.variants
# reserved names are variants that may be set on any package
# but are not necessarily recorded by the package's class
not_existing = set(spec.variants) - (
set(pkg_variants) | set(spack.directives.reserved_names))
if not_existing:
raise UnknownVariantError(spec.name, not_existing)
substitute_abstract_variants(spec)
def constrain(self, other, deps=True):
"""Merge the constraints of other with self.
Returns True if the spec changed as a result, False if not.
"""
# If we are trying to constrain a concrete spec, either the spec
# already satisfies the constraint (and the method returns False)
# or it raises an exception
if self.concrete:
if self.satisfies(other):
return False
else:
raise UnsatisfiableSpecError(
self, other, 'constrain a concrete spec'
)
other = self._autospec(other)
if not (self.name == other.name or
(not self.name) or
(not other.name)):
raise UnsatisfiableSpecNameError(self.name, other.name)
if (other.namespace is not None and
self.namespace is not None and
other.namespace != self.namespace):
raise UnsatisfiableSpecNameError(self.fullname, other.fullname)
if not self.versions.overlaps(other.versions):
raise UnsatisfiableVersionSpecError(self.versions, other.versions)
for v in [x for x in other.variants if x in self.variants]:
if not self.variants[v].compatible(other.variants[v]):
raise UnsatisfiableVariantSpecError(
self.variants[v], other.variants[v]
)
# TODO: Check out the logic here
sarch, oarch = self.architecture, other.architecture
if sarch is not None and oarch is not None:
if sarch.platform is not None and oarch.platform is not None:
if sarch.platform != oarch.platform:
raise UnsatisfiableArchitectureSpecError(sarch, oarch)
if sarch.platform_os is not None and oarch.platform_os is not None:
if sarch.platform_os != oarch.platform_os:
raise UnsatisfiableArchitectureSpecError(sarch, oarch)
if sarch.target is not None and oarch.target is not None:
if sarch.target != oarch.target:
raise UnsatisfiableArchitectureSpecError(sarch, oarch)
changed = False
if self.compiler is not None and other.compiler is not None:
changed |= self.compiler.constrain(other.compiler)
elif self.compiler is None:
changed |= (self.compiler != other.compiler)
self.compiler = other.compiler
changed |= self.versions.intersect(other.versions)
changed |= self.variants.constrain(other.variants)
changed |= self.compiler_flags.constrain(other.compiler_flags)
old = str(self.architecture)
sarch, oarch = self.architecture, other.architecture
if sarch is None or other.architecture is None:
self.architecture = sarch or oarch
else:
if sarch.platform is None or oarch.platform is None:
self.architecture.platform = sarch.platform or oarch.platform
if sarch.platform_os is None or oarch.platform_os is None:
sarch.platform_os = sarch.platform_os or oarch.platform_os
if sarch.target is None or oarch.target is None:
sarch.target = sarch.target or oarch.target
changed |= (str(self.architecture) != old)
if deps:
changed |= self._constrain_dependencies(other)
return changed
def _constrain_dependencies(self, other):
"""Apply constraints of other spec's dependencies to this spec."""
other = self._autospec(other)
if not self._dependencies or not other._dependencies:
return False
# TODO: might want more detail than this, e.g. specific deps
# in violation. if this becomes a priority get rid of this
# check and be more specific about what's wrong.
if not other.satisfies_dependencies(self):
raise UnsatisfiableDependencySpecError(other, self)
# Handle common first-order constraints directly
changed = False
for name in self.common_dependencies(other):
changed |= self[name].constrain(other[name], deps=False)
if name in self._dependencies:
changed |= self._dependencies[name].update_deptypes(
other._dependencies[name].deptypes)
# Update with additional constraints from other spec
for name in other.dep_difference(self):
dep_spec_copy = other.get_dependency(name)
dep_copy = dep_spec_copy.spec
deptypes = dep_spec_copy.deptypes
self._add_dependency(dep_copy.copy(), deptypes)
changed = True
return changed
def common_dependencies(self, other):
"""Return names of dependencies that self an other have in common."""
common = set(
s.name for s in self.traverse(root=False))
common.intersection_update(
s.name for s in other.traverse(root=False))
return common
def constrained(self, other, deps=True):
"""Return a constrained copy without modifying this spec."""
clone = self.copy(deps=deps)
clone.constrain(other, deps)
return clone
def dep_difference(self, other):
"""Returns dependencies in self that are not in other."""
mine = set(s.name for s in self.traverse(root=False))
mine.difference_update(
s.name for s in other.traverse(root=False))
return mine
def _autospec(self, spec_like):
"""
        Used to convert arguments to specs. If spec_like is a spec, returns
        it. If it's a string, tries to parse it as a spec string. If that
        fails, tries to parse a local spec from it (i.e. the name is assumed
        to be self's name).
"""
if isinstance(spec_like, spack.spec.Spec):
return spec_like
try:
spec = spack.spec.Spec(spec_like)
if not spec.name:
raise SpecError(
"anonymous package -- this will always be handled")
return spec
except SpecError:
return parse_anonymous_spec(spec_like, self.name)
def satisfies(self, other, deps=True, strict=False, strict_deps=False):
"""Determine if this spec satisfies all constraints of another.
There are two senses for satisfies:
* `loose` (default): the absence of a constraint in self
implies that it *could* be satisfied by other, so we only
check that there are no conflicts with other for
constraints that this spec actually has.
* `strict`: strict means that we *must* meet all the
constraints specified on other.
"""
other = self._autospec(other)
# The only way to satisfy a concrete spec is to match its hash exactly.
if other.concrete:
return self.concrete and self.dag_hash() == other.dag_hash()
# A concrete provider can satisfy a virtual dependency.
if not self.virtual and other.virtual:
try:
pkg = spack.repo.get(self.fullname)
except spack.repository.UnknownEntityError:
# If we can't get package info on this spec, don't treat
# it as a provider of this vdep.
return False
if pkg.provides(other.name):
for provided, when_specs in pkg.provided.items():
if any(self.satisfies(when_spec, deps=False, strict=strict)
for when_spec in when_specs):
if provided.satisfies(other):
return True
return False
# Otherwise, first thing we care about is whether the name matches
if self.name != other.name and self.name and other.name:
return False
# namespaces either match, or other doesn't require one.
if (other.namespace is not None and
self.namespace is not None and
self.namespace != other.namespace):
return False
if self.versions and other.versions:
if not self.versions.satisfies(other.versions, strict=strict):
return False
elif strict and (self.versions or other.versions):
return False
# None indicates no constraints when not strict.
if self.compiler and other.compiler:
if not self.compiler.satisfies(other.compiler, strict=strict):
return False
elif strict and (other.compiler and not self.compiler):
return False
var_strict = strict
if (not self.name) or (not other.name):
var_strict = True
if not self.variants.satisfies(other.variants, strict=var_strict):
return False
# Architecture satisfaction is currently just string equality.
# If not strict, None means unconstrained.
if self.architecture and other.architecture:
if not self.architecture.satisfies(other.architecture, strict):
return False
elif strict and (other.architecture and not self.architecture):
return False
if not self.compiler_flags.satisfies(
other.compiler_flags,
strict=strict):
return False
# If we need to descend into dependencies, do it, otherwise we're done.
if deps:
deps_strict = strict
if self._concrete and not other.name:
# We're dealing with existing specs
deps_strict = True
return self.satisfies_dependencies(other, strict=deps_strict)
else:
return True
def satisfies_dependencies(self, other, strict=False):
"""
This checks constraints on common dependencies against each other.
"""
other = self._autospec(other)
# If there are no constraints to satisfy, we're done.
if not other._dependencies:
return True
if strict:
# if we have no dependencies, we can't satisfy any constraints.
if not self._dependencies:
return False
selfdeps = self.traverse(root=False)
otherdeps = other.traverse(root=False)
if not all(any(d.satisfies(dep) for d in selfdeps)
for dep in otherdeps):
return False
elif not self._dependencies:
# if not strict, this spec *could* eventually satisfy the
# constraints on other.
return True
# Handle first-order constraints directly
for name in self.common_dependencies(other):
if not self[name].satisfies(other[name], deps=False):
return False
# For virtual dependencies, we need to dig a little deeper.
self_index = ProviderIndex(self.traverse(), restrict=True)
other_index = ProviderIndex(other.traverse(), restrict=True)
# This handles cases where there are already providers for both vpkgs
if not self_index.satisfies(other_index):
return False
# These two loops handle cases where there is an overly restrictive
# vpkg in one spec for a provider in the other (e.g., mpi@3: is not
# compatible with mpich2)
for spec in self.virtual_dependencies():
if (spec.name in other_index and
not other_index.providers_for(spec)):
return False
for spec in other.virtual_dependencies():
if spec.name in self_index and not self_index.providers_for(spec):
return False
return True
def virtual_dependencies(self):
"""Return list of any virtual deps in this spec."""
return [spec for spec in self.traverse() if spec.virtual]
@property
def patches(self):
"""Return patch objects for any patch sha256 sums on this Spec.
This is for use after concretization to iterate over any patches
associated with this spec.
TODO: this only checks in the package; it doesn't resurrect old
patches from install directories, but it probably should.
"""
if 'patches' not in self.variants:
return []
patches = []
# FIXME: The private attribute below is attached after
# FIXME: concretization to store the order of patches somewhere.
# FIXME: Needs to be refactored in a cleaner way.
for sha256 in self.variants['patches']._patches_in_order_of_appearance:
patch = self.package.lookup_patch(sha256)
if patch:
patches.append(patch)
continue
# if not found in this package, check immediate dependents
# for dependency patches
for dep_spec in self._dependents.values():
patch = dep_spec.parent.package.lookup_patch(sha256)
if patch:
patches.append(patch)
return patches
def _dup(self, other, deps=True, cleardeps=True, caches=None):
"""Copy the spec other into self. This is an overwriting
copy. It does not copy any dependents (parents), but by default
copies dependencies.
To duplicate an entire DAG, call _dup() on the root of the DAG.
Args:
other (Spec): spec to be copied onto ``self``
deps (bool or Sequence): if True copies all the dependencies. If
False copies None. If a sequence of dependency types copy
only those types.
cleardeps (bool): if True clears the dependencies of ``self``,
before possibly copying the dependencies of ``other`` onto
``self``
caches (bool or None): preserve cached fields such as
``_normal``, ``_concrete``, and ``_cmp_key_cache``. By
default this is ``False`` if DAG structure would be
changed by the copy, ``True`` if it's an exact copy.
Returns:
True if ``self`` changed because of the copy operation,
False otherwise.
"""
# We don't count dependencies as changes here
changed = True
if hasattr(self, 'name'):
changed = (self.name != other.name and
self.versions != other.versions and
self.architecture != other.architecture and
self.compiler != other.compiler and
self.variants != other.variants and
self._normal != other._normal and
self.concrete != other.concrete and
self.external_path != other.external_path and
self.external_module != other.external_module and
self.compiler_flags != other.compiler_flags)
# Local node attributes get copied first.
self.name = other.name
self.versions = other.versions.copy()
self.architecture = other.architecture.copy() if other.architecture \
else None
self.compiler = other.compiler.copy() if other.compiler else None
if cleardeps:
self._dependents = DependencyMap()
self._dependencies = DependencyMap()
self.compiler_flags = other.compiler_flags.copy()
self.compiler_flags.spec = self
self.variants = other.variants.copy()
self.variants.spec = self
self.external_path = other.external_path
self.external_module = other.external_module
self.namespace = other.namespace
# Cached fields are results of expensive operations.
# If we preserved the original structure, we can copy them
# safely. If not, they need to be recomputed.
if caches is None:
caches = (deps is True or deps == all_deptypes)
# If we copy dependencies, preserve DAG structure in the new spec
if deps:
# If caller restricted deptypes to be copied, adjust that here.
# By default, just copy all deptypes
deptypes = all_deptypes
if isinstance(deps, (tuple, list)):
deptypes = deps
self._dup_deps(other, deptypes, caches)
if caches:
self._hash = other._hash
self._cmp_key_cache = other._cmp_key_cache
self._normal = other._normal
self._concrete = other._concrete
else:
self._hash = None
self._cmp_key_cache = None
self._normal = False
self._concrete = False
return changed
def _dup_deps(self, other, deptypes, caches):
new_specs = {self.name: self}
for dspec in other.traverse_edges(cover='edges',
root=False):
if (dspec.deptypes and
not any(d in deptypes for d in dspec.deptypes)):
continue
if dspec.parent.name not in new_specs:
new_specs[dspec.parent.name] = dspec.parent.copy(
deps=False, caches=caches)
if dspec.spec.name not in new_specs:
new_specs[dspec.spec.name] = dspec.spec.copy(
deps=False, caches=caches)
new_specs[dspec.parent.name]._add_dependency(
new_specs[dspec.spec.name], dspec.deptypes)
def copy(self, deps=True, **kwargs):
"""Make a copy of this spec.
Args:
deps (bool or tuple): Defaults to True. If boolean, controls
whether dependencies are copied (copied if True). If a
tuple is provided, *only* dependencies of types matching
those in the tuple are copied.
kwargs: additional arguments for internal use (passed to ``_dup``).
Returns:
A copy of this spec.
Examples:
            Deep copy with dependencies::
spec.copy()
spec.copy(deps=True)
Shallow copy (no dependencies)::
spec.copy(deps=False)
Only build and run dependencies::
deps=('build', 'run'):
"""
clone = Spec.__new__(Spec)
clone._dup(self, deps=deps, **kwargs)
return clone
@property
def version(self):
if not self.versions.concrete:
raise SpecError("Spec version is not concrete: " + str(self))
return self.versions[0]
def __getitem__(self, name):
"""Get a dependency from the spec by its name. This call implicitly
sets a query state in the package being retrieved. The behavior of
packages may be influenced by additional query parameters that are
passed after a colon symbol.
Note that if a virtual package is queried a copy of the Spec is
returned while for non-virtual a reference is returned.
"""
query_parameters = name.split(':')
if len(query_parameters) > 2:
msg = 'key has more than one \':\' symbol.'
msg += ' At most one is admitted.'
raise KeyError(msg)
name, query_parameters = query_parameters[0], query_parameters[1:]
if query_parameters:
# We have extra query parameters, which are comma separated
# values
csv = query_parameters.pop().strip()
query_parameters = re.split(r'\s*,\s*', csv)
try:
value = next(
itertools.chain(
# Regular specs
(x for x in self.traverse() if x.name == name),
(x for x in self.traverse()
if (not x.virtual) and x.package.provides(name))
)
)
except StopIteration:
raise KeyError("No spec with name %s in %s" % (name, self))
if self._concrete:
return SpecBuildInterface(value, name, query_parameters)
return value
def __contains__(self, spec):
"""True if this spec satisfies the provided spec, or if any dependency
does. If the spec has no name, then we parse this one first.
"""
spec = self._autospec(spec)
for s in self.traverse():
if s.satisfies(spec, strict=True):
return True
return False
def sorted_deps(self):
"""Return a list of all dependencies sorted by name."""
deps = self.flat_dependencies()
return tuple(deps[name] for name in sorted(deps))
def _eq_dag(self, other, vs, vo, deptypes):
"""Recursive helper for eq_dag and ne_dag. Does the actual DAG
traversal."""
vs.add(id(self))
vo.add(id(other))
if self.ne_node(other):
return False
if len(self._dependencies) != len(other._dependencies):
return False
ssorted = [self._dependencies[name]
for name in sorted(self._dependencies)]
osorted = [other._dependencies[name]
for name in sorted(other._dependencies)]
for s_dspec, o_dspec in zip(ssorted, osorted):
if deptypes and s_dspec.deptypes != o_dspec.deptypes:
return False
s, o = s_dspec.spec, o_dspec.spec
visited_s = id(s) in vs
visited_o = id(o) in vo
# Check for duplicate or non-equal dependencies
if visited_s != visited_o:
return False
# Skip visited nodes
if visited_s or visited_o:
continue
# Recursive check for equality
if not s._eq_dag(o, vs, vo, deptypes):
return False
return True
def eq_dag(self, other, deptypes=True):
"""True if the full dependency DAGs of specs are equal."""
return self._eq_dag(other, set(), set(), deptypes)
def ne_dag(self, other, deptypes=True):
"""True if the full dependency DAGs of specs are not equal."""
return not self.eq_dag(other, set(), set(), deptypes)
def _cmp_node(self):
"""Comparison key for just *this node* and not its deps."""
return (self.name,
self.namespace,
self.versions,
self.variants,
self.architecture,
self.compiler,
self.compiler_flags)
def eq_node(self, other):
"""Equality with another spec, not including dependencies."""
return self._cmp_node() == other._cmp_node()
def ne_node(self, other):
"""Inequality with another spec, not including dependencies."""
return self._cmp_node() != other._cmp_node()
def _cmp_key(self):
"""This returns a key for the spec *including* DAG structure.
The key is the concatenation of:
1. A tuple describing this node in the DAG.
2. The hash of each of this node's dependencies' cmp_keys.
"""
if self._cmp_key_cache:
return self._cmp_key_cache
dep_tuple = tuple(
(d.spec.name, hash(d.spec), tuple(sorted(d.deptypes)))
for name, d in sorted(self._dependencies.items()))
key = (self._cmp_node(), dep_tuple)
if self._concrete:
self._cmp_key_cache = key
return key
def colorized(self):
return colorize_spec(self)
def format(self, format_string='$_$@$%@+$+$=', **kwargs):
"""Prints out particular pieces of a spec, depending on what is
in the format string.
The format strings you can provide are::
$_ Package name
$. Full package name (with namespace)
$@ Version with '@' prefix
$% Compiler with '%' prefix
$%@ Compiler with '%' prefix & compiler version with '@' prefix
$%+ Compiler with '%' prefix & compiler flags prefixed by name
$%@+ Compiler, compiler version, and compiler flags with same
prefixes as above
$+ Options
$= Architecture prefixed by 'arch='
$/ 7-char prefix of DAG hash with '-' prefix
$$ $
You can also use full-string versions, which elide the prefixes::
${PACKAGE} Package name
${VERSION} Version
${COMPILER} Full compiler string
${COMPILERNAME} Compiler name
${COMPILERVER} Compiler version
${COMPILERFLAGS} Compiler flags
${OPTIONS} Options
${ARCHITECTURE} Architecture
            ${SHA1}          Dependencies 7-char hash prefix
${HASH:len} DAG hash with optional length specifier
${SPACK_ROOT} The spack root directory
${SPACK_INSTALL} The default spack install directory,
${SPACK_PREFIX}/opt
${PREFIX} The package prefix
Note these are case-insensitive: for example you can specify either
``${PACKAGE}`` or ``${package}``.
Optionally you can provide a width, e.g. ``$20_`` for a 20-wide name.
Like printf, you can provide '-' for left justification, e.g.
``$-20_`` for a left-justified name.
Anything else is copied verbatim into the output stream.
Args:
format_string (str): string containing the format to be expanded
**kwargs (dict): the following list of keywords is supported
- color (bool): True if returned string is colored
- transform (dict): maps full-string formats to a callable \
that accepts a string and returns another one
Examples:
The following line:
.. code-block:: python
s = spec.format('$_$@$+')
translates to the name, version, and options of the package, but no
dependencies, arch, or compiler.
TODO: allow, e.g., ``$6#`` to customize short hash length
TODO: allow, e.g., ``$//`` for full hash.
"""
color = kwargs.get('color', False)
# Dictionary of transformations for named tokens
token_transforms = {}
token_transforms.update(kwargs.get('transform', {}))
length = len(format_string)
out = StringIO()
named = escape = compiler = False
named_str = fmt = ''
def write(s, c):
f = color_formats[c] + cescape(s) + '@.'
cwrite(f, stream=out, color=color)
iterator = enumerate(format_string)
for i, c in iterator:
if escape:
fmt = '%'
if c == '-':
fmt += c
i, c = next(iterator)
while c in '0123456789':
fmt += c
i, c = next(iterator)
fmt += 's'
if c == '_':
name = self.name if self.name else ''
out.write(fmt % name)
elif c == '.':
out.write(fmt % self.fullname)
elif c == '@':
if self.versions and self.versions != _any_version:
write(fmt % (c + str(self.versions)), c)
elif c == '%':
if self.compiler:
write(fmt % (c + str(self.compiler.name)), c)
compiler = True
elif c == '+':
if self.variants:
write(fmt % str(self.variants), c)
elif c == '=':
if self.architecture and str(self.architecture):
a_str = ' arch' + c + str(self.architecture) + ' '
write(fmt % (a_str), c)
elif c == '/':
out.write('/' + fmt % (self.dag_hash(7)))
elif c == '$':
if fmt != '%s':
raise ValueError("Can't use format width with $$.")
out.write('$')
elif c == '{':
named = True
named_str = ''
escape = False
elif compiler:
if c == '@':
if (self.compiler and self.compiler.versions and
self.compiler.versions != _any_version):
write(c + str(self.compiler.versions), '%')
elif c == '+':
if self.compiler_flags:
write(fmt % str(self.compiler_flags), '%')
compiler = False
elif c == '$':
escape = True
compiler = False
else:
out.write(c)
compiler = False
elif named:
if not c == '}':
if i == length - 1:
raise ValueError("Error: unterminated ${ in format:"
"'%s'" % format_string)
named_str += c
continue
named_str = named_str.upper()
# Retrieve the token transformation from the dictionary.
#
# The default behavior is to leave the string unchanged
# (`lambda x: x` is the identity function)
token_transform = token_transforms.get(named_str, lambda x: x)
if named_str == 'PACKAGE':
name = self.name if self.name else ''
write(fmt % token_transform(name), '@')
if named_str == 'VERSION':
if self.versions and self.versions != _any_version:
write(fmt % token_transform(str(self.versions)), '@')
elif named_str == 'COMPILER':
if self.compiler:
write(fmt % token_transform(self.compiler), '%')
elif named_str == 'COMPILERNAME':
if self.compiler:
write(fmt % token_transform(self.compiler.name), '%')
elif named_str in ['COMPILERVER', 'COMPILERVERSION']:
if self.compiler:
write(
fmt % token_transform(self.compiler.versions),
'%'
)
elif named_str == 'COMPILERFLAGS':
if self.compiler:
write(
fmt % token_transform(str(self.compiler_flags)),
'%'
)
elif named_str == 'OPTIONS':
if self.variants:
write(fmt % token_transform(str(self.variants)), '+')
elif named_str == 'ARCHITECTURE':
if self.architecture and str(self.architecture):
write(
fmt % token_transform(str(self.architecture)),
'='
)
elif named_str == 'SHA1':
if self.dependencies:
out.write(fmt % token_transform(str(self.dag_hash(7))))
elif named_str == 'SPACK_ROOT':
out.write(fmt % token_transform(spack.prefix))
elif named_str == 'SPACK_INSTALL':
out.write(fmt % token_transform(spack.store.root))
elif named_str == 'PREFIX':
out.write(fmt % token_transform(self.prefix))
elif named_str.startswith('HASH'):
if named_str.startswith('HASH:'):
_, hashlen = named_str.split(':')
hashlen = int(hashlen)
else:
hashlen = None
out.write(fmt % (self.dag_hash(hashlen)))
named = False
elif c == '$':
escape = True
if i == length - 1:
raise ValueError("Error: unterminated $ in format: '%s'"
% format_string)
else:
out.write(c)
result = out.getvalue()
return result
def cformat(self, *args, **kwargs):
"""Same as format, but color defaults to auto instead of False."""
kwargs = kwargs.copy()
kwargs.setdefault('color', None)
return self.format(*args, **kwargs)
def dep_string(self):
return ''.join("^" + dep.format() for dep in self.sorted_deps())
def __str__(self):
ret = self.format() + self.dep_string()
return ret.strip()
def _install_status(self):
"""Helper for tree to print DB install status."""
if not self.concrete:
return None
try:
record = spack.store.db.get_record(self)
return record.installed
except KeyError:
return None
def _installed_explicitly(self):
"""Helper for tree to print DB install status."""
if not self.concrete:
return None
try:
record = spack.store.db.get_record(self)
return record.explicit
except KeyError:
return None
def tree(self, **kwargs):
"""Prints out this spec and its dependencies, tree-formatted
with indentation."""
color = kwargs.pop('color', get_color_when())
depth = kwargs.pop('depth', False)
hashes = kwargs.pop('hashes', False)
hlen = kwargs.pop('hashlen', None)
install_status = kwargs.pop('install_status', False)
cover = kwargs.pop('cover', 'nodes')
indent = kwargs.pop('indent', 0)
fmt = kwargs.pop('format', '$_$@$%@+$+$=')
prefix = kwargs.pop('prefix', None)
show_types = kwargs.pop('show_types', False)
deptypes = kwargs.pop('deptypes', ('build', 'link'))
check_kwargs(kwargs, self.tree)
out = ""
for d, dep_spec in self.traverse_edges(
order='pre', cover=cover, depth=True, deptypes=deptypes):
node = dep_spec.spec
if prefix is not None:
out += prefix(node)
out += " " * indent
if depth:
out += "%-4d" % d
if install_status:
status = node._install_status()
if status is None:
out += " " # Package isn't installed
elif status:
out += colorize("@g{[+]} ", color=color) # installed
else:
out += colorize("@r{[-]} ", color=color) # missing
if hashes:
out += colorize('@K{%s} ', color=color) % node.dag_hash(hlen)
if show_types:
out += '['
if dep_spec.deptypes:
for t in all_deptypes:
out += ''.join(t[0] if t in dep_spec.deptypes else ' ')
else:
out += ' ' * len(all_deptypes)
out += '] '
out += (" " * d)
if d > 0:
out += "^"
out += node.format(fmt, color=color) + "\n"
return out
def __repr__(self):
return str(self)
class LazySpecCache(collections.defaultdict):
"""Cache for Specs that uses a spec_like as key, and computes lazily
    the corresponding value ``Spec(spec_like)``.
"""
def __init__(self):
super(LazySpecCache, self).__init__(Spec)
def __missing__(self, key):
value = self.default_factory(key)
self[key] = value
return value
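# Illustrative usage of the cache above (the variable name is hypothetical):
# repeated lookups with the same spec_like key reuse one lazily-built Spec.
#
#     _spec_cache = LazySpecCache()
#     a = _spec_cache['zlib@1.2']
#     b = _spec_cache['zlib@1.2']
#     assert a is b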
#
# These are possible token types in the spec grammar.
#
HASH, DEP, AT, COLON, COMMA, ON, OFF, PCT, EQ, ID, VAL = range(11)
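# As a rough illustration, the spec string "zlib@1.2:1.3 +shared %gcc@7 ^cmake"
# lexes to approximately: ID AT ID COLON ID ON ID PCT ID AT ID DEP ID.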
class SpecLexer(spack.parse.Lexer):
"""Parses tokens that make up spack specs."""
def __init__(self):
super(SpecLexer, self).__init__([
(r'/', lambda scanner, val: self.token(HASH, val)),
(r'\^', lambda scanner, val: self.token(DEP, val)),
(r'\@', lambda scanner, val: self.token(AT, val)),
(r'\:', lambda scanner, val: self.token(COLON, val)),
(r'\,', lambda scanner, val: self.token(COMMA, val)),
(r'\+', lambda scanner, val: self.token(ON, val)),
(r'\-', lambda scanner, val: self.token(OFF, val)),
(r'\~', lambda scanner, val: self.token(OFF, val)),
(r'\%', lambda scanner, val: self.token(PCT, val)),
(r'\=', lambda scanner, val: self.token(EQ, val)),
# This is more liberal than identifier_re (see above).
# Checked by check_identifier() for better error messages.
(r'\w[\w.-]*', lambda scanner, val: self.token(ID, val)),
(r'\s+', lambda scanner, val: None)],
[EQ],
[(r'[\S].*', lambda scanner, val: self.token(VAL, val)),
(r'\s+', lambda scanner, val: None)],
[VAL])
# Lexer is always the same for every parser.
_lexer = SpecLexer()
class SpecParser(spack.parse.Parser):
def __init__(self, initial_spec=None):
"""Construct a new SpecParser.
Args:
initial_spec (Spec, optional): provide a Spec that we'll parse
directly into. This is used to avoid construction of a
superfluous Spec object in the Spec constructor.
"""
super(SpecParser, self).__init__(_lexer)
self.previous = None
self._initial = initial_spec
def do_parse(self):
specs = []
try:
while self.next:
# TODO: clean this parsing up a bit
if self.accept(ID):
self.previous = self.token
if self.accept(EQ):
# We're parsing an anonymous spec beginning with a
# key-value pair.
if not specs:
self.push_tokens([self.previous, self.token])
self.previous = None
specs.append(self.spec(None))
else:
if specs[-1].concrete:
# Trying to add k-v pair to spec from hash
raise RedundantSpecError(specs[-1],
'key-value pair')
# We should never end up here.
# This requires starting a new spec with ID, EQ
# After another spec that is not concrete
# If the previous spec is not concrete, this is
# handled in the spec parsing loop
# If it is concrete, see the if statement above
# If there is no previous spec, we don't land in
# this else case.
self.unexpected_token()
else:
# We're parsing a new spec by name
self.previous = None
specs.append(self.spec(self.token.value))
elif self.accept(HASH):
# We're finding a spec by hash
specs.append(self.spec_by_hash())
elif self.accept(DEP):
if not specs:
# We're parsing an anonymous spec beginning with a
# dependency. Push the token to recover after creating
# anonymous spec
self.push_tokens([self.token])
specs.append(self.spec(None))
else:
if self.accept(HASH):
# We're finding a dependency by hash for an
# anonymous spec
dep = self.spec_by_hash()
else:
# We're adding a dependency to the last spec
self.expect(ID)
dep = self.spec(self.token.value)
# Raise an error if the previous spec is already
# concrete (assigned by hash)
if specs[-1]._hash:
raise RedundantSpecError(specs[-1], 'dependency')
# command line deps get empty deptypes now.
# Real deptypes are assigned later per packages.
specs[-1]._add_dependency(dep, ())
else:
# If the next token can be part of a valid anonymous spec,
# create the anonymous spec
if self.next.type in (AT, ON, OFF, PCT):
# Raise an error if the previous spec is already
# concrete (assigned by hash)
if specs and specs[-1]._hash:
raise RedundantSpecError(specs[-1],
'compiler, version, '
'or variant')
specs.append(self.spec(None))
else:
self.unexpected_token()
except spack.parse.ParseError as e:
raise SpecParseError(e)
# If the spec has an os or a target and no platform, give it
# the default platform
platform_default = spack.architecture.platform().name
for spec in specs:
for s in spec.traverse():
if s.architecture and not s.architecture.platform and \
(s.architecture.platform_os or s.architecture.target):
s._set_architecture(platform=platform_default)
return specs
def parse_compiler(self, text):
self.setup(text)
return self.compiler()
def spec_by_hash(self):
self.expect(ID)
specs = spack.store.db.query()
matches = [spec for spec in specs if
spec.dag_hash()[:len(self.token.value)] == self.token.value]
if not matches:
raise NoSuchHashError(self.token.value)
if len(matches) != 1:
raise AmbiguousHashError(
"Multiple packages specify hash beginning '%s'."
% self.token.value, *matches)
return matches[0]
def spec(self, name):
"""Parse a spec out of the input. If a spec is supplied, initialize
and return it instead of creating a new one."""
if name:
spec_namespace, dot, spec_name = name.rpartition('.')
if not spec_namespace:
spec_namespace = None
self.check_identifier(spec_name)
else:
spec_namespace = None
spec_name = None
if self._initial is None:
# This will init the spec without calling Spec.__init__
spec = Spec.__new__(Spec)
else:
# this is used by Spec.__init__
spec = self._initial
self._initial = None
spec.name = spec_name
spec.versions = VersionList()
spec.variants = VariantMap(spec)
spec.architecture = None
spec.compiler = None
spec.external_path = None
spec.external_module = None
spec.compiler_flags = FlagMap(spec)
spec._dependents = DependencyMap()
spec._dependencies = DependencyMap()
spec.namespace = spec_namespace
spec._hash = None
spec._cmp_key_cache = None
spec._normal = False
spec._concrete = False
# record this so that we know whether version is
# unspecified or not.
added_version = False
while self.next:
if self.accept(AT):
vlist = self.version_list()
for version in vlist:
spec._add_version(version)
added_version = True
elif self.accept(ON):
name = self.variant()
spec.variants[name] = BoolValuedVariant(name, True)
elif self.accept(OFF):
name = self.variant()
spec.variants[name] = BoolValuedVariant(name, False)
elif self.accept(PCT):
spec._set_compiler(self.compiler())
elif self.accept(ID):
self.previous = self.token
if self.accept(EQ):
# We're adding a key-value pair to the spec
self.expect(VAL)
spec._add_flag(self.previous.value, self.token.value)
self.previous = None
else:
# We've found the start of a new spec. Go back to do_parse
# and read this token again.
self.push_tokens([self.token])
self.previous = None
break
elif self.accept(HASH):
# Get spec by hash and confirm it matches what we already have
hash_spec = self.spec_by_hash()
if hash_spec.satisfies(spec):
spec._dup(hash_spec)
break
else:
raise InvalidHashError(spec, hash_spec.dag_hash())
else:
break
        # If there was no version in the spec, consider it an open range
if not added_version and not spec._hash:
spec.versions = VersionList(':')
return spec
def variant(self, name=None):
if name:
return name
else:
self.expect(ID)
self.check_identifier()
return self.token.value
def version(self):
start = None
end = None
if self.accept(ID):
start = self.token.value
if self.accept(COLON):
if self.accept(ID):
if self.next and self.next.type is EQ:
# This is a start: range followed by a key=value pair
self.push_tokens([self.token])
else:
end = self.token.value
elif start:
# No colon, but there was a version.
return Version(start)
else:
# No colon and no id: invalid version.
self.next_token_error("Invalid version specifier")
if start:
start = Version(start)
if end:
end = Version(end)
return VersionRange(start, end)
def version_list(self):
vlist = []
vlist.append(self.version())
while self.accept(COMMA):
vlist.append(self.version())
return vlist
def compiler(self):
self.expect(ID)
self.check_identifier()
compiler = CompilerSpec.__new__(CompilerSpec)
compiler.name = self.token.value
compiler.versions = VersionList()
if self.accept(AT):
vlist = self.version_list()
for version in vlist:
compiler._add_version(version)
else:
compiler.versions = VersionList(':')
return compiler
def check_identifier(self, id=None):
"""The only identifiers that can contain '.' are versions, but version
ids are context-sensitive so we have to check on a case-by-case
basis. Call this if we detect a version id where it shouldn't be.
"""
if not id:
id = self.token.value
if '.' in id:
self.last_token_error(
"{0}: Identifier cannot contain '.'".format(id))
def parse(string):
"""Returns a list of specs from an input string.
For creating one spec, see Spec() constructor.
"""
return SpecParser().parse(string)
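# For example (hypothetical package names), parse('zlib@1.2 ^cmake libelf')
# returns two Spec objects: a zlib spec carrying a cmake dependency, and a
# separate libelf spec.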
def parse_anonymous_spec(spec_like, pkg_name):
"""Allow the user to omit the package name part of a spec if they
know what it has to be already.
e.g., provides('mpi@2', when='@1.9:') says that this package
    provides MPI-2 when its version is higher than 1.9.
"""
if not isinstance(spec_like, (str, Spec)):
raise TypeError('spec must be Spec or spec string. Found %s'
% type(spec_like))
if isinstance(spec_like, str):
try:
anon_spec = Spec(spec_like)
if anon_spec.name != pkg_name:
raise SpecParseError(spack.parse.ParseError(
"",
"",
"Expected anonymous spec for package %s but found spec for"
"package %s" % (pkg_name, anon_spec.name)))
except SpecParseError:
anon_spec = Spec(pkg_name + ' ' + spec_like)
if anon_spec.name != pkg_name:
raise ValueError(
"Invalid spec for package %s: %s" % (pkg_name, spec_like))
else:
anon_spec = spec_like.copy()
if anon_spec.name != pkg_name:
raise ValueError("Spec name '%s' must match package name '%s'"
% (anon_spec.name, pkg_name))
return anon_spec
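# For example (hypothetical): parse_anonymous_spec('@1.9:', 'mpich') and
# parse_anonymous_spec('+debug', 'mpich') return specs equivalent to writing
# 'mpich@1.9:' and 'mpich+debug' explicitly.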
def base32_prefix_bits(hash_string, bits):
"""Return the first <bits> bits of a base32 string as an integer."""
if bits > len(hash_string) * 5:
raise ValueError("Too many bits! Requested %d bit prefix of '%s'."
% (bits, hash_string))
hash_bytes = base64.b32decode(hash_string, casefold=True)
return prefix_bits(hash_bytes, bits)
class SpecParseError(SpecError):
"""Wrapper for ParseError for when we're parsing specs."""
def __init__(self, parse_error):
super(SpecParseError, self).__init__(parse_error.message)
self.string = parse_error.string
self.pos = parse_error.pos
class DuplicateDependencyError(SpecError):
"""Raised when the same dependency occurs in a spec twice."""
class DuplicateCompilerSpecError(SpecError):
"""Raised when the same compiler occurs in a spec twice."""
class UnsupportedCompilerError(SpecError):
"""Raised when the user asks for a compiler spack doesn't know about."""
def __init__(self, compiler_name):
super(UnsupportedCompilerError, self).__init__(
"The '%s' compiler is not yet supported." % compiler_name)
class DuplicateArchitectureError(SpecError):
"""Raised when the same architecture occurs in a spec twice."""
class InconsistentSpecError(SpecError):
"""Raised when two nodes in the same spec DAG have inconsistent
constraints."""
class InvalidDependencyError(SpecError):
"""Raised when a dependency in a spec is not actually a dependency
of the package."""
class NoProviderError(SpecError):
"""Raised when there is no package that provides a particular
virtual dependency.
"""
def __init__(self, vpkg):
super(NoProviderError, self).__init__(
"No providers found for virtual package: '%s'" % vpkg)
self.vpkg = vpkg
class MultipleProviderError(SpecError):
"""Raised when there is no package that provides a particular
virtual dependency.
"""
def __init__(self, vpkg, providers):
"""Takes the name of the vpkg"""
super(MultipleProviderError, self).__init__(
"Multiple providers found for '%s': %s"
% (vpkg, [str(s) for s in providers]))
self.vpkg = vpkg
self.providers = providers
class UnsatisfiableSpecNameError(UnsatisfiableSpecError):
"""Raised when two specs aren't even for the same package."""
def __init__(self, provided, required):
super(UnsatisfiableSpecNameError, self).__init__(
provided, required, "name")
class UnsatisfiableVersionSpecError(UnsatisfiableSpecError):
"""Raised when a spec version conflicts with package constraints."""
def __init__(self, provided, required):
super(UnsatisfiableVersionSpecError, self).__init__(
provided, required, "version")
class UnsatisfiableCompilerSpecError(UnsatisfiableSpecError):
"""Raised when a spec comiler conflicts with package constraints."""
def __init__(self, provided, required):
super(UnsatisfiableCompilerSpecError, self).__init__(
provided, required, "compiler")
class UnsatisfiableCompilerFlagSpecError(UnsatisfiableSpecError):
"""Raised when a spec variant conflicts with package constraints."""
def __init__(self, provided, required):
super(UnsatisfiableCompilerFlagSpecError, self).__init__(
provided, required, "compiler_flags")
class UnsatisfiableArchitectureSpecError(UnsatisfiableSpecError):
"""Raised when a spec architecture conflicts with package constraints."""
def __init__(self, provided, required):
super(UnsatisfiableArchitectureSpecError, self).__init__(
provided, required, "architecture")
class UnsatisfiableProviderSpecError(UnsatisfiableSpecError):
"""Raised when a provider is supplied but constraints don't match
a vpkg requirement"""
def __init__(self, provided, required):
super(UnsatisfiableProviderSpecError, self).__init__(
provided, required, "provider")
# TODO: get rid of this and be more specific about particular incompatible
# dep constraints
class UnsatisfiableDependencySpecError(UnsatisfiableSpecError):
"""Raised when some dependency of constrained specs are incompatible"""
def __init__(self, provided, required):
super(UnsatisfiableDependencySpecError, self).__init__(
provided, required, "dependency")
class AmbiguousHashError(SpecError):
def __init__(self, msg, *specs):
specs_str = '\n ' + '\n '.join(spec.format('$.$@$%@+$+$=$/')
for spec in specs)
super(AmbiguousHashError, self).__init__(msg + specs_str)
class InvalidHashError(SpecError):
def __init__(self, spec, hash):
super(InvalidHashError, self).__init__(
"The spec specified by %s does not match provided spec %s"
% (hash, spec))
class NoSuchHashError(SpecError):
def __init__(self, hash):
super(NoSuchHashError, self).__init__(
"No installed spec matches the hash: '%s'"
% hash)
class RedundantSpecError(SpecError):
def __init__(self, spec, addition):
super(RedundantSpecError, self).__init__(
"Attempting to add %s to spec %s which is already concrete."
" This is likely the result of adding to a spec specified by hash."
% (addition, spec))
class ConflictsInSpecError(SpecError, RuntimeError):
def __init__(self, spec, matches):
message = 'Conflicts in concretized spec "{0}"\n'.format(
spec.short_spec
)
visited = set()
long_message = ''
match_fmt_default = '{0}. "{1}" conflicts with "{2}"\n'
match_fmt_custom = '{0}. "{1}" conflicts with "{2}" [{3}]\n'
for idx, (s, c, w, msg) in enumerate(matches):
if s not in visited:
visited.add(s)
long_message += 'List of matching conflicts for spec:\n\n'
long_message += s.tree(indent=4) + '\n'
if msg is None:
long_message += match_fmt_default.format(idx + 1, c, w)
else:
long_message += match_fmt_custom.format(idx + 1, c, w, msg)
super(ConflictsInSpecError, self).__init__(message, long_message)
| lgarren/spack | lib/spack/spack/spec.py | Python | lgpl-2.1 | 139,642 | [
"VisIt"
] | f9156a59eb9717e48b64759c05610a4b56c2fceff322ba1d7b07f5dc6414de5e |
# - Coding UTF8 -
#
# Networked Decision Making
# Development Sites (source code):
# http://code.google.com/p/global-decision-making-system/
# http://github.com/NewGlobalStrategy/NetDecisionMaking
#
# Demo Sites (Google App Engine)
# http://netdecisionmaking.appspot.com
# http://globaldecisionmaking.appspot.com
#
# License Code: MIT
# License Content: Creative Commons Attribution 3.0
#
# Also visit: www.web2py.com
# or Groups: http://groups.google.com/group/web2py
# For details on the web framework used for this development
#
# Developed by Russ King (newglobalstrategy@gmail.com)
# Russ also blogs occasionally to pass the time at:
# http://proudofyourplanent.blogspot.com
# His general thinking on why this project is very important is available at
# http://www.scribd.com/doc/98216626/New-Global-Strategy
# With thanks to Guido, Massimo and many other that make this sort of thing
# much easier than it used to be
#
# Search now uses the haystack plugin and generates a full-text index on
# question text only; we might add a capability to filter by action or
# question later.
# It will allow generation of a list of up to x items to display the questions
# and their status, which may just be another method of populating the
# existing review function -- so it looks like the "in" operator would work
# for this. Most likely search is different enough that we keep it as a
# separate function to begin with, until the requirements are fully understood.
# By some means search will get a list of 20 items (which may be hard coded;
# GQL is limited to 30) and then go and retrieve the related ids, so there
# will always be two queries: one on the full-text search (which probably
# needs a bit of further understanding on my part) and a second one to get
# the questions, which would have pagination I think.
#
# Pagination is to be added here at some point, but currently a search limit
# of 20 and "refine search" will do. Aiming to support 3 searches; the default
# will be a simple Google-style search.
def newsearch():
#term = request.args[0]
#term = request.vars.keyword
#fields= ['searchstring','sortorder','showscope','scope', 'continent','country',
# 'subdivision','showcat','category']
fields = ['searchstring']
form = SQLFORM(db.viewscope, fields=fields)
results = None
if form.validate():
query = indsearch.search(questiontext=form.vars.searchstring)
results = db(query).select(db.question.id, db.question.status, db.question.questiontext,
db.question.correctanstext, db.question.category, db.question.activescope,
db.question.qtype, db.question.resolvedate, db.question.createdate, db.question.priority)
count = 3
if results:
session.networklist = [x.id for x in results]
else:
session.networklist = []
#topic_search=db(db.question.id>0).select(db.question.questiontext).find(
# lambda row:row.questiontext.find(term))
#count=len(topic_search)
return dict(form=form, results=results, count=count)
def gae_simple_search():
    # This aims to replace newsearch on GAE: rather than the search returning
    # question ids, it will bring back the document details that are in the
    # search system, and can therefore avoid using belongs(), which currently
    # doesn't work with the NDB api.
#fields= ['searchstring','sortorder','showscope','scope', 'continent','country',
# 'subdivision','showcat','category']
fields = ['searchstring']
form = SQLFORM(db.viewscope, fields=fields)
results = None
search_results = None
clean_results = []
clean_dict={}
count=3
fieldkeys = ['doc_id', 'status', 'questiontext', 'answers', 'category', 'activescope' ,'qtype', 'resolvedate', 'createdate']
for x in fieldkeys:
clean_dict[x] = ''
if form.validate():
search_results = indsearch.searchdocs(questiontext=form.vars.searchstring)
if search_results:
for doc in search_results:
doc_id = doc.doc_id
row_dict=clean_dict.copy()
row_dict['doc_id'] = doc_id[doc_id.index('.')+1:]
for field in doc.fields:
if field.name in fieldkeys:
row_dict[field.name]=field.value
clean_results.append(row_dict)
fullids = [str(doc.doc_id) for doc in search_results]
session.networklist = [docid[docid.index('.')+1:] for docid in fullids if docid.index('.') > 0]
else:
session.networklist = []
#topic_search=db(db.question.id>0).select(db.question.questiontext).find(
# lambda row:row.questiontext.find(term))
#count=len(topic_search)
return dict(form=form, search_results=search_results, count=count, clean_results=clean_results) | NewGlobalStrategy/NetDecisionMaking | controllers/search.py | Python | mit | 4,809 | [
"VisIt"
] | 99c0234379141afa9ad6900b684861ce84097acb17d29d1bba7fd16ac47a127b |
import glob
try: paraview.simple
except: from paraview.simple import *
paraview.simple._DisableFirstRenderCameraReset()
# TODO:
# 1. Set min/max value for every frame in AnimationScene
# See http://www.paraview.org/pipermail/paraview/2011-September/022682.html
#
LoadPlugin('/Users/schmitt/paraview/paraview/trunk/plugins/vtkLFMReader/build/libvtkLFMReader.dylib',ns=globals())
# Load LFM file(s)
#vtkLfmReaderObject = vtkLFMReader( FileNames=['/Users/schmitt/paraview/testData/LRs_mhd_1995-03-21T04-20-00Z.hdf'] )
#vtkLfmReaderObject = vtkLFMReader( FileNames=['/Users/schmitt/paraview/testData/doctoredAnimation/LMs_mhd_004.hdf',
# '/Users/schmitt/paraview/testData/doctoredAnimation/LMs_mhd_005.hdf',
# '/Users/schmitt/paraview/testData/doctoredAnimation/LMs_mhd_006.hdf',
# '/Users/schmitt/paraview/testData/doctoredAnimation/LMs_mhd_007.hdf',
# '/Users/schmitt/paraview/testData/doctoredAnimation/LMs_mhd_008.hdf',
# '/Users/schmitt/paraview/testData/doctoredAnimation/LMs_mhd_009.hdf',
# '/Users/schmitt/paraview/testData/doctoredAnimation/LMs_mhd_010.hdf',
# '/Users/schmitt/paraview/testData/doctoredAnimation/LMs_mhd_011.hdf',
# '/Users/schmitt/paraview/testData/doctoredAnimation/LMs_mhd_012.hdf',
# '/Users/schmitt/paraview/testData/doctoredAnimation/LMs_mhd_013.hdf'] )
#files = glob.glob('/Users/schmitt/paraview/testData/doctoredAnimation/LMs_mhd_*.hdf')
files = glob.glob('/Users/schmitt/paraview/testData/doctoredAnimation/orig/LMs_mhd_*.hdf')
files.sort()
vtkLfmReaderObject = vtkLFMReader(FileNames = files)
vtkLfmReaderObject.PointArrayStatus = []
vtkLfmReaderObject.CellArrayStatus = []
vtkLfmReaderObject.GridScaleFactor = 'Earth Radius: 6.5e8 cm'
vtkLfmReaderObject.CellArrayStatus = ['Plasma Density', 'Sound Speed', 'Velocity Vector','Magnetic Field Vector']
Show().Visibility = 0
##################
# Top-left panel #
########################################################################
# Orient the camera
TopLeftRenderView = GetRenderView()
TopLeftRenderView.CameraPosition = [-7.0, -70.0, 0.0]
TopLeftRenderView.CameraFocalPoint = [-7, 0.0, 0.0]
TopLeftRenderView.CameraViewUp = [0.0, 0.0, 1.0]
TopLeftRenderView.CameraClippingRange = [122.35967717295773, 129.70814347061219]
TopLeftRenderView.CameraParallelScale = 218.48459610631258
TopLeftRenderView.CenterOfRotation = [0.0, 0.0, 0.0]
# Add plane and map it to the dataset
XZVectorPlane = Plane()
XZVectorPlane.Origin = [-60.0, 0.0, -30.0]
XZVectorPlane.Point1 = [30.0, 0.0, -30.0]
XZVectorPlane.Point2 = [-60.0, 0.0, 30.0]
XZVectorPlane.XResolution = 20
XZVectorPlane.YResolution = 15
SetActiveSource(XZVectorPlane)
Show().Visibility = 0
# ResampleWithDataset
SetActiveSource(vtkLfmReaderObject)
XZField = ResampleWithDataset( Source=XZVectorPlane )
Show().Visibility = 0
# Render vector field
XZVectors = Glyph( GlyphType="Arrow", GlyphTransform="Transform2" )
XZVectors.SetScaleFactor = 9.0
XZVectors.Vectors = ['POINTS', 'Velocity Vector']
XZVectors.GlyphTransform = "Transform2"
XZVectors.GlyphType = "Arrow"
XZVectors.GlyphType.TipRadius = 0.04
XZVectors.GlyphType.TipLength = 0.15
XZVectors.GlyphType.ShaftRadius = 0.015
XZVectors.SetScaleFactor = 2.14564239898506e-07
DataRepresentation16 = Show()
DataRepresentation16.EdgeColor = [0.0, 0.0, 0.5019607843137255]
DataRepresentation16.ColorArrayName = ''
# XY cutplane for colormap
SetActiveSource(vtkLfmReaderObject)
XZSlice = Slice( SliceType="Plane" )
XZSlice.SliceOffsetValues = [0.0]
XZSlice.SliceType.Origin = [-151.826509475708, 0.0, 0.0]
XZSlice.SliceType = "Plane"
XZSlice.SliceType.Normal = [0.0, 1.0, 0.0]
# Calculator for pressure
Pressure = Calculator()
Pressure.AttributeMode = 'point_data'
Pressure.Function = 'Plasma Density*4.7619e23*Sound Speed*Sound Speed*3.75e8'
Pressure.ResultArrayName = 'Pressure'
PressureRepresentation = Show()
PressureRepresentation.EdgeColor = [0.0, 0.0, 0.5000076295109483]
PressureRepresentation.ColorArrayName = 'Pressure'
a1_Pressure_PVLookupTable = GetLookupTableForArray( "Pressure", 1, NanColor=[0.498039, 0.498039, 0.498039], RGBPoints=[7.232339585875363e+19, 0.0, 0.0, 1.0, 3.964840999531023e+24, 1.0, 0.0, 0.0], VectorMode='Magnitude', ColorSpace='HSV', ScalarRangeInitialized=1.0 )
a1_Pressure_PiecewiseFunction = CreatePiecewiseFunction()
PressureRepresentation.LookupTable = a1_Pressure_PVLookupTable
ScalarBarWidgetLog10Pressure = CreateScalarBar( Orientation='Horizontal', Title='Pressure', Position2=[0.5, 0.15], LabelFontSize=12, Enabled=1, TitleFontSize=12, Position=[0.25,0.85] )
TopLeftRenderView.Representations.append(ScalarBarWidgetLog10Pressure)
a1_Pressure_PVLookupTable = GetLookupTableForArray( "Pressure", 1, UseLogScale=1, RGBPoints=[1e+22, 0.0, 0.0, 1.0, 3.96484e+24, 1.0, 0.0, 0.0], LockScalarRange=1 )
TopLeftRenderView.CameraClippingRange = [119.96970760320372, 132.85099018726737]
ScalarBarWidgetLog10Pressure.LookupTable = a1_Pressure_PVLookupTable
# Describe the view
minValue = a1_Pressure_PVLookupTable.RGBPoints[0]
maxValue = a1_Pressure_PVLookupTable.RGBPoints[4]
TopLeftText = Text()
TopLeftText.Text = 'XZ (min=%e max=%e)' % (minValue, maxValue)
TextRep = GetDisplayProperties(TopLeftText)
TextRep.Visibility = 1
###################
# Top-Right panel #
########################################################################
TopRightRenderView = CreateRenderView()
#TopRightRenderView.CameraPosition = [-9.54128751659703, -1.5694684006493071, 150.56293391130203]
TopRightRenderView.CameraPosition = [-7, 0.0, 70]
#TopRightRenderView.CameraFocalPoint = [-9.54128751659703, -1.5694684006493071, 0.0]
TopRightRenderView.CameraFocalPoint = [-7, 0.0, 0.0]
TopRightRenderView.CameraViewUp = [0.0, 1.0, 0.0]
TopRightRenderView.CompressorConfig = 'vtkSquirtCompressor 0 3'
TopRightRenderView.UseLight = 1
TopRightRenderView.LightSwitch = 0
TopRightRenderView.RemoteRenderThreshold = 3.0
TopRightRenderView.ViewTime = 0.0
TopRightRenderView.Background = [0.31999694819562063, 0.3400015259021897, 0.4299992370489052]
TopRightRenderView.CenterAxesVisibility = 0
TopRightRenderView.CenterOfRotation = [0.0, 0.0, 0.0]
TopRightRenderView.CameraParallelScale = 317.214749894812
TopRightRenderView.CameraClippingRange = [149.05730362328296, 152.8213791144487]
# XY Cutplane
SetActiveSource(vtkLfmReaderObject)
# Subtract Dipole
BzMinusDipole = Calculator()
BzMinusDipole.AttributeMode = 'point_data'
BzMinusDipole.Function = '(Magnetic Field Vector_Z*1e5)+(3.05e4*((coordsX^2+coordsY^2+coordsZ^2)^(-1.5))*(2-(3*(coordsX^2+coordsY^2))/(coordsX^2+coordsY^2+coordsZ^2)))'
BzMinusDipole.ResultArrayName = 'Bz-Dipole'
BzNoDipole = Slice( SliceType="Plane" )
BzNoDipole.SliceOffsetValues = [0.0]
BzNoDipole.SliceType.Origin = [-151.826509475708, 0.0, 0.0]
BzNoDipole.SliceType = "Plane"
BzNoDipole.SliceType.Normal = [0.0, 0.0, 1.0]
DataRepresentation22 = Show()
DataRepresentation22.EdgeColor = [0.0, 0.0, 0.5000076295109483]
DataRepresentation22.EdgeColor = [0.0, 0.0, 0.5019607843137255]
a1_BzDipole_PVLookupTable = GetLookupTableForArray( "Bz-Dipole", 1, RGBPoints=[-20.0, 0.0, 0.0, 1.0, 20.0, 1.0, 0.0, 0.0], VectorMode='Magnitude', NanColor=[0.498039, 0.498039, 0.498039], ColorSpace='HSV', ScalarRangeInitialized=1.0, LockScalarRange=1 )
a1_BzDipole_PiecewiseFunction = CreatePiecewiseFunction()
DataRepresentation22.ColorArrayName = 'Bz-Dipole'
DataRepresentation22.LookupTable = a1_BzDipole_PVLookupTable
ScalarBarWidgetBzNoDipole = CreateScalarBar( Orientation='Horizontal',Title='Bz-Dipole', LabelFontSize=12,Position2=[0.5, 0.15], Enabled=1, TitleFontSize=12,Position=[0.25,0.85] )
TopRightRenderView.Representations.append(ScalarBarWidgetBzNoDipole)
a1_BzNoDip_PVLookupTable = GetLookupTableForArray( "Bz-Dipole", 1, UseLogScale=1 )
ScalarBarWidgetBzNoDipole.LookupTable = a1_BzNoDip_PVLookupTable
# Describe the view
minValue = a1_BzNoDip_PVLookupTable.RGBPoints[0]
maxValue = a1_BzNoDip_PVLookupTable.RGBPoints[4]
TopRightText = Text()
TopRightText.Text = 'XY (min=%e max=%e)' % (minValue, maxValue)
TextRep = GetDisplayProperties(TopRightText)
TextRep.Visibility = 1
#####################
# Bottom-left panel #
########################################################################
SetActiveView(TopLeftRenderView)
BotLeftRenderView = CreateRenderView()
#BotLeftRenderView.CameraPosition = [0.0, 0.0, 116.77367590722402]
#BotLeftRenderView.CameraFocalPoint = [-7, 0.0, 0.0]
#BotLeftRenderView.CameraViewUp = [0.0, 0.0, 1.0]
BotLeftRenderView.CameraPosition = [-7, 0.0, 70]
BotLeftRenderView.CameraFocalPoint = [-7, 0.0, 0.0]
BotLeftRenderView.CameraViewUp = [0.0, 1.0, 0.0]
BotLeftRenderView.CameraParallelScale = 1.7320508075688772
BotLeftRenderView.CompressorConfig = 'vtkSquirtCompressor 0 3'
BotLeftRenderView.UseLight = 1
BotLeftRenderView.LightSwitch = 0
BotLeftRenderView.RemoteRenderThreshold = 3.0
BotLeftRenderView.CameraClippingRange = [111.82555065103759, 126.028886930742]
BotLeftRenderView.LODResolution = 50.0
BotLeftRenderView.Background = [0.31999694819562063, 0.3400015259021897, 0.4299992370489052]
BotLeftRenderView.CenterAxesVisibility = 0
BotLeftRenderView.CenterOfRotation = [0.0, 0.0, 0.0]
# Add plane and map it to the dataset
XYVectorPlane = Plane()
XYVectorPlane.Origin = [-60.0, -30.0, 0.0]
XYVectorPlane.Point1 = [30.0, -30.0, 0.0]
XYVectorPlane.Point2 = [-60.0, 30.0, 0.0]
XYVectorPlane.XResolution = 20
XYVectorPlane.YResolution = 15
SetActiveSource(XYVectorPlane)
Show().Visibility = 0
RenameSource("XY Vector Plane", XYVectorPlane)
# ResampleWithDataset
SetActiveSource(vtkLfmReaderObject)
XYField = ResampleWithDataset( Source=XYVectorPlane )
Show().Visibility = 0
# Render vector field
XYVectors = Glyph( GlyphType="Arrow", GlyphTransform="Transform2" )
XYVectors.SetScaleFactor = 9.0
XYVectors.Vectors = ['POINTS', 'Velocity Vector']
XYVectors.GlyphTransform = "Transform2"
XYVectors.GlyphType = "Arrow"
XYVectors.GlyphType.TipRadius = 0.04
XYVectors.GlyphType.TipLength = 0.15
XYVectors.GlyphType.ShaftRadius = 0.015
XYVectors.SetScaleFactor = 2.14564239898506e-07
DataRepresentation16 = Show()
DataRepresentation16.EdgeColor = [0.0, 0.0, 0.5019607843137255]
DataRepresentation16.ColorArrayName = ''
# XY cutplane for colormap
SetActiveSource(vtkLfmReaderObject)
XYSlice = Slice( SliceType="Plane" )
XYSlice.SliceOffsetValues = [0.0]
XYSlice.SliceType.Origin = [-151.826509475708, 0.0, 0.0]
XYSlice.SliceType = "Plane"
XYSlice.SliceType.Normal = [0.0, 0.0, 1.0]
# Calculator for pressure
Pressure = Calculator()
Pressure.AttributeMode = 'point_data'
Pressure.Function = 'Plasma Density*4.7619e23*Sound Speed*Sound Speed*3.75e8'
Pressure.ResultArrayName = 'Pressure'
PressureRepresentation = Show()
PressureRepresentation.EdgeColor = [0.0, 0.0, 0.5000076295109483]
PressureRepresentation.ColorArrayName = 'Pressure'
a1_Pressure_PVLookupTable = GetLookupTableForArray( "Pressure", 1, NanColor=[0.498039, 0.498039, 0.498039], RGBPoints=[7.232339585875363e+19, 0.0, 0.0, 1.0, 3.964840999531023e+24, 1.0, 0.0, 0.0], VectorMode='Magnitude', ColorSpace='HSV', ScalarRangeInitialized=1.0 )
a1_Pressure_PiecewiseFunction = CreatePiecewiseFunction()
PressureRepresentation.LookupTable = a1_Pressure_PVLookupTable
ScalarBarWidgetLog10Pressure = CreateScalarBar( Orientation='Horizontal', Title='Pressure', Position2=[0.5, 0.15],LabelFontSize=12, Enabled=1, TitleFontSize=12,Position=[0.25,0.85] )
BotLeftRenderView.Representations.append(ScalarBarWidgetLog10Pressure)
a1_Pressure_PVLookupTable = GetLookupTableForArray( "Pressure", 1, UseLogScale=1,RGBPoints=[1e+22, 0.0, 0.0, 1.0, 3.96484e+24, 1.0, 0.0, 0.0], LockScalarRange=1 )
ScalarBarWidgetLog10Pressure.LookupTable = a1_Pressure_PVLookupTable
# Describe the view
minValue = a1_Pressure_PVLookupTable.RGBPoints[0]
maxValue = a1_Pressure_PVLookupTable.RGBPoints[4]
BotLeftText = Text()
BotLeftText.Text = 'XY (min=%e max=%e)' % (minValue, maxValue)
TextRep = GetDisplayProperties(BotLeftText)
TextRep.Visibility = 1
######################
# Bottom-Right panel #
########################################################################
SetActiveView(TopRightRenderView)
BotRightRenderView = CreateRenderView()
#BotRightRenderView.CameraPosition = [-8.45319037422091, 0.7965184288563187, 127.82383156323988]
#BotRightRenderView.CameraFocalPoint = [-8.45319037422091, 0.7965184288563187, 0.0]
BotRightRenderView.CameraPosition = [-7, 0.0, 70]
BotRightRenderView.CameraFocalPoint = [-7, 0.0, 0.0]
BotRightRenderView.CameraViewUp = [0.0, 1.0, 0.0]
BotRightRenderView.CompressorConfig = 'vtkSquirtCompressor 0 3'
BotRightRenderView.UseLight = 1
BotRightRenderView.LightSwitch = 0
BotRightRenderView.RemoteRenderThreshold = 3.0
BotRightRenderView.LODResolution = 50.0
BotRightRenderView.Background = [0.31999694819562063, 0.3400015259021897, 0.4299992370489052]
BotRightRenderView.CenterAxesVisibility = 0
BotRightRenderView.CameraClippingRange = [126.54559229870145, 129.7411902311656]
BotRightRenderView.CenterOfRotation = [0.0, 0.0, 0.0]
BotRightRenderView.CameraParallelScale = 325.86109001049476
SetActiveSource(vtkLfmReaderObject)
# XY Cutplane
Slice5 = Slice( SliceType="Plane" )
Slice5.SliceOffsetValues = [0.0]
Slice5.SliceType.Origin = [-151.826509475708, 0.0, 0.0]
Slice5.SliceType = "Plane"
Slice5.SliceType.Normal = [0.0, 0.0, 1.0]
DataRepresentation23 = Show()
DataRepresentation23.EdgeColor = [0.0, 0.0, 0.5000076295109483]
a3_VelocityVector_PVLookupTable = GetLookupTableForArray( "Velocity Vector", 3, NanColor=[0.498039, 0.498039, 0.498039], RGBPoints=[6236.560207233221, 0.0, 0.0, 1.0, 59331831.819066755, 1.0, 0.0, 0.0], VectorMode='Magnitude', ColorSpace='HSV', ScalarRangeInitialized=1.0 )
a3_VelocityVector_PiecewiseFunction = CreatePiecewiseFunction()
DataRepresentation23.ColorArrayName = 'Velocity Vector'
DataRepresentation23.LookupTable = a3_VelocityVector_PVLookupTable
ScalarBarWidgetVelocity = CreateScalarBar( ComponentTitle='Magnitude', Orientation='Horizontal', Title='Velocity Vector', Position2=[0.5, 0.15], Enabled=1, LabelFontSize=12, TitleFontSize=12,Position=[0.25,0.85] )
BotRightRenderView.Representations.append(ScalarBarWidgetVelocity)
a3_VelocityVector_PVLookupTable = GetLookupTableForArray( "Velocity Vector", 3, RGBPoints=[0.0, 0.0, 0.0, 1.0, 50000000.0, 1.0, 0.0, 0.0], LockScalarRange=1 )
ScalarBarWidgetVelocity.LookupTable = a3_VelocityVector_PVLookupTable
# Describe the view
minValue = a3_VelocityVector_PVLookupTable.RGBPoints[0]
maxValue = a3_VelocityVector_PVLookupTable.RGBPoints[4]
BotRightText = Text()
BotRightText.Text = 'XY (min=%e max=%e)' % (minValue, maxValue)
TextRep = GetDisplayProperties(BotRightText)
TextRep.Visibility = 1
#################################
# Global visualization settings #
########################################################################
AnimationScene = GetAnimationScene()
AnimationScene.ViewModules = [ TopLeftRenderView, TopRightRenderView, BotLeftRenderView, BotRightRenderView ]
#WriteAnimation('/Users/schmitt/paraview/scripts/testAnimation.jpg', Magnification=1, Quality=2, FrameRate=1.000000)
Render()
#WriteImage('/Users/schmitt/paraview/scripts/LRs_mhd_1995-03-21T04-20-00Z.png')
#### Animate from 1st time step to last
###AnimationScene.StartTime = vtkLfmReaderObject.TimestepValues.GetData()[0]
###AnimationScene.EndTime = vtkLfmReaderObject.TimestepValues.GetData()[-1]
###
###for idx, cur_time in enumerate(vtkLfmReaderObject.TimestepValues.GetData()):
### AnimationScene.AnimationTime = cur_time
### vtkLfmReaderObject.UpdatePipelineInformation()
###
### WriteImage("testAnimation_topLeft_%03d.png" % idx, TopLeftRenderView);
### #WriteImage("testAnimation_topright_%03d.png" % idx, TopRightRenderView);
### #WriteImage("testAnimation_botLeft_%03d.png" % idx, BotLeftRenderView);
### #WriteImage("testAnimation_botRight_%03d.png" % idx, BotRightRenderView);
| ghost-kit/archive | scripts/LFM_mhd_ParaView.py | Python | bsd-3-clause | 16,165 | ["ParaView"] | 7b9f71d2334e688ccfb3135b79133b399381686fe8aedcf60e69b681615f0338 |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# set up espressopp basics
from espressopp.main._setup import *
# load espressopp into PMI
pmiimport('espressopp')
import _espressopp
from espressopp.Exceptions import *
from espressopp.Real3D import *
from espressopp.Quaternion import *
from espressopp.RealND import *
from espressopp.Tensor import *
from espressopp.Int3D import *
from espressopp.Particle import *
from espressopp.ParticleGroup import *
from espressopp.System import *
from espressopp.VerletList import *
from espressopp.VerletListTriple import *
from espressopp.VerletListAdress import *
from espressopp.FixedSingleList import *
from espressopp.FixedPairList import *
from espressopp.FixedPairDistList import *
from espressopp.FixedPairListAdress import *
from espressopp.FixedTripleList import *
from espressopp.FixedTripleAngleList import *
from espressopp.FixedTripleListAdress import *
from espressopp.FixedQuadrupleList import *
from espressopp.FixedQuadrupleListAdress import *
from espressopp.FixedQuadrupleAngleList import *
from espressopp.FixedTupleList import *
from espressopp.FixedTupleListAdress import *
from espressopp.FixedLocalTupleList import *
from espressopp.MultiSystem import *
from espressopp.ParallelTempering import *
from espressopp.Version import *
from espressopp.PLogger import *
infinity=float("inf")
nan=float("nan")
auto='auto'
# fetch the different subpackages
from espressopp import esutil, bc, storage, integrator, interaction, analysis, tools, standard_system, external, check, io
if pmi.isController :
# make sure that the workers exit when the script ends
pmi.registerAtExit()
# the script continues after this call
else :
pmi.startWorkerLoop()
# the script will usually not reach this point on the workers
| MrTheodor/espressopp | src/__init__.py | Python | gpl-3.0 | 2,605 | ["ESPResSo"] | 46bdd8e6bd54e2ca7b28f9f957f2fac735f9c7a4e379f21570244d8d32e46d68 |
# Audio Tools, a module and set of tools for manipulating audio data
# Copyright (C) 2007-2016 Brian Langenberger
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from audiotools import MetaData
class ID3v1Comment(MetaData):
"""a complete ID3v1.1 tag"""
ID3v1_FIELDS = {"track_name": "__track_name__",
"artist_name": "__artist_name__",
"album_name": "__album_name__",
"year": "__year__",
"comment": "__comment__",
"track_number": "__track_number__"}
FIELD_LENGTHS = {"track_name": 30,
"artist_name": 30,
"album_name": 30,
"year": 4,
"comment": 28}
def __init__(self, track_name=u"",
artist_name=u"",
album_name=u"",
year=u"",
comment=u"",
track_number=0,
genre=0):
"""fields are as follows:
| field | length |
|--------------+--------|
| track_name | 30 |
| artist_name | 30 |
| album_name | 30 |
| year | 4 |
| comment | 28 |
| track_number | 1 |
| genre | 1 |
|--------------+--------|
track_name, artist_name, album_name, year and
comment are unicode strings
track_number and genre are integers
"""
if len(track_name) > 30:
raise ValueError("track_name cannot be longer than 30 characters")
if len(artist_name) > 30:
raise ValueError("artist_name cannot be longer than 30 characters")
if len(album_name) > 30:
raise ValueError("album_name cannot be longer than 30 characters")
if len(year) > 4:
raise ValueError("year cannot be longer than 4 characters")
if len(comment) > 28:
raise ValueError("comment cannot be longer than 28 characters")
MetaData.__setattr__(self, "__track_name__", track_name)
MetaData.__setattr__(self, "__artist_name__", artist_name)
MetaData.__setattr__(self, "__album_name__", album_name)
MetaData.__setattr__(self, "__year__", year)
MetaData.__setattr__(self, "__comment__", comment)
MetaData.__setattr__(self, "__track_number__", track_number)
MetaData.__setattr__(self, "__genre__", genre)
def __repr__(self):
return "ID3v1Comment({!r},{!r},{!r},{!r},{!r},{!r},{!r})".format(
self.__track_name__,
self.__artist_name__,
self.__album_name__,
self.__year__,
self.__comment__,
self.__track_number__,
self.__genre__)
def __getattr__(self, attr):
if attr == "track_number":
number = self.__track_number__
if number > 0:
return number
else:
return None
elif attr in self.ID3v1_FIELDS:
value = getattr(self, self.ID3v1_FIELDS[attr])
if len(value) > 0:
return value
else:
return None
elif attr in self.FIELDS:
return None
else:
return MetaData.__getattribute__(self, attr)
def __setattr__(self, attr, value):
if attr == "track_number":
MetaData.__setattr__(
self,
"__track_number__",
min(0 if (value is None) else int(value), 0xFF))
elif attr in self.FIELD_LENGTHS:
if value is None:
delattr(self, attr)
else:
# all are text fields
MetaData.__setattr__(
self,
self.ID3v1_FIELDS[attr],
value[0:self.FIELD_LENGTHS[attr]])
elif attr in self.FIELDS:
# field not supported by ID3v1Comment, so ignore it
pass
else:
MetaData.__setattr__(self, attr, value)
def __delattr__(self, attr):
if attr == "track_number":
MetaData.__setattr__(self, "__track_number__", 0)
elif attr in self.FIELD_LENGTHS:
MetaData.__setattr__(self,
self.ID3v1_FIELDS[attr],
u"")
elif attr in self.FIELDS:
# field not supported by ID3v1Comment, so ignore it
pass
else:
MetaData.__delattr__(self, attr)
def raw_info(self):
"""returns a human-readable version of this metadata
as a unicode string"""
from os import linesep
return linesep.join(
[u"ID3v1.1:"] +
[u"{} = {}".format(label, getattr(self, attr))
for (label, attr) in [(u" track name", "track_name"),
(u" artist name", "artist_name"),
(u" album name", "album_name"),
(u" year", "year"),
(u" comment", "comment"),
(u"track number", "track_number")]
if (getattr(self, attr) is not None)] +
[u" genre = {:d}".format(self.__genre__)])
@classmethod
def parse(cls, mp3_file):
"""given an MP3 file, returns an ID3v1Comment
raises ValueError if the comment is invalid"""
from audiotools.bitstream import parse
def decode_string(s):
return s.rstrip(b"\x00").decode("ascii", "replace")
mp3_file.seek(-128, 2)
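        # The 128-byte ID3v1.1 tag at the end of the file is laid out as
        # follows (matching the bitstream format string below):
        #   3 bytes "TAG" marker, 30 bytes title, 30 bytes artist,
        #   30 bytes album, 4 bytes year, 28 bytes comment,
        #   1 zero pad byte (the ID3v1.1 marker), 1 byte track number,
        #   1 byte genre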
(tag,
track_name,
artist_name,
album_name,
year,
comment,
track_number,
genre) = parse("3b 30b 30b 30b 4b 28b 8p 8u 8u",
False,
mp3_file.read(128))
if tag != b'TAG':
raise ValueError(u"invalid ID3v1 tag")
return ID3v1Comment(track_name=decode_string(track_name),
artist_name=decode_string(artist_name),
album_name=decode_string(album_name),
year=decode_string(year),
comment=decode_string(comment),
track_number=track_number,
genre=genre)
def build(self, mp3_file):
"""given an MP3 file positioned at the file's end, generate a tag"""
from audiotools.bitstream import build
def encode_string(u, max_chars):
s = u.encode("ascii", "replace")
if len(s) >= max_chars:
return s[0:max_chars]
else:
return s + b"\x00" * (max_chars - len(s))
mp3_file.write(
build("3b 30b 30b 30b 4b 28b 8p 8u 8u",
False,
(b"TAG",
encode_string(self.__track_name__, 30),
encode_string(self.__artist_name__, 30),
encode_string(self.__album_name__, 30),
encode_string(self.__year__, 4),
encode_string(self.__comment__, 28),
self.__track_number__,
self.__genre__)))
@classmethod
def supports_images(cls):
"""returns False"""
return False
@classmethod
def converted(cls, metadata):
"""converts a MetaData object to an ID3v1Comment object"""
if metadata is None:
return None
elif isinstance(metadata, ID3v1Comment):
# duplicate all fields as-is
return ID3v1Comment(track_name=metadata.__track_name__,
artist_name=metadata.__artist_name__,
album_name=metadata.__album_name__,
year=metadata.__year__,
comment=metadata.__comment__,
track_number=metadata.__track_number__,
genre=metadata.__genre__)
else:
# convert fields using setattr
id3v1 = ID3v1Comment()
for attr in ["track_name",
"artist_name",
"album_name",
"year",
"comment",
"track_number"]:
setattr(id3v1, attr, getattr(metadata, attr))
return id3v1
def images(self):
"""returns an empty list of Image objects"""
return []
    def clean(self):
        """returns a new ID3v1Comment object that's been cleaned of problems"""
from audiotools.text import (CLEAN_REMOVE_TRAILING_WHITESPACE,
CLEAN_REMOVE_LEADING_WHITESPACE)
fixes_performed = []
fields = {}
for (attr,
name) in [("track_name", u"title"),
("artist_name", u"artist"),
("album_name", u"album"),
("year", u"year"),
("comment", u"comment")]:
            # strip leading/trailing whitespace from the text fields
initial_value = getattr(self, attr)
if initial_value is not None:
fix1 = initial_value.rstrip()
if fix1 != initial_value:
fixes_performed.append(
CLEAN_REMOVE_TRAILING_WHITESPACE.format(name))
fix2 = fix1.lstrip()
if fix2 != fix1:
fixes_performed.append(
CLEAN_REMOVE_LEADING_WHITESPACE.format(name))
                # keep the cleaned value
fields[attr] = fix2
# copy non-text fields as-is
return (ID3v1Comment(track_number=self.__track_number__,
genre=self.__genre__,
**fields), fixes_performed)
def intersection(self, metadata):
"""given a MetaData-compatible object,
returns a new MetaData object which contains
all the matching fields and images of this object and 'metadata'
"""
if type(metadata) is ID3v1Comment:
return ID3v1Comment(
genre=(self.__genre__ if
self.__genre__ == metadata.__genre__ else 0),
**{arg: getattr(self, field)
for arg, field in ID3v1Comment.ID3v1_FIELDS.items()
if getattr(self, field) == getattr(metadata, field)})
else:
return MetaData.intersection(self, metadata)
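# Illustrative usage sketch, not part of the original module; the field
# values below are made-up examples.  Build a tag in memory and check that
# a complete ID3v1.1 tag is always 128 bytes long.
if __name__ == "__main__":
    from io import BytesIO
    example_tag = ID3v1Comment(track_name=u"Example Song",
                               artist_name=u"Example Artist",
                               album_name=u"Example Album",
                               year=u"2016",
                               comment=u"demo tag",
                               track_number=1)
    buffer_ = BytesIO()
    example_tag.build(buffer_)
    print(len(buffer_.getvalue()))  # expected to be 128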
| tuffy/python-audio-tools | audiotools/id3v1.py | Python | gpl-2.0 | 11,355 | ["Brian"] | 45c93c586873470dbc111b1c30ce8aec4d08407d74ceaf219482bf46be1ec536 |
# -*- coding: utf-8 -*-
"""
End-to-end tests for survey of biz feature
"""
import codecs
import csv
import os
import shutil
from bok_choy.web_app_test import WebAppTest
from django.utils.crypto import get_random_string
from nose.plugins.attrib import attr
from common.test.acceptance.pages.biz.ga_contract import BizContractPage
from common.test.acceptance.pages.biz.ga_navigation import BizNavPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.course_nav import CourseNavPage
from common.test.acceptance.pages.lms.ga_instructor_dashboard import InstructorDashboardPage
from common.test.acceptance.pages.lms.ga_survey import SurveyPage
from common.test.acceptance.tests.biz import PLATFORMER_USER_INFO, \
GaccoBizTestMixin, A_DIRECTOR_USER_INFO, A_COMPANY, A_COMPANY_NAME, SUPER_USER_INFO
from ..helpers import load_data_str
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ...pages.lms.ga_django_admin import DjangoAdminPage
DOWNLOAD_DIR = '/tmp'
@attr('shard_ga_biz_1')
class BizSurveyTest(WebAppTest, GaccoBizTestMixin):
"""
Tests that the survey functionality of biz works
"""
def setUp(self):
super(BizSurveyTest, self).setUp()
# Install course
self.course = CourseFixture('plat', self._testMethodName, 'biz_test_run', 'Biz Course ' + self._testMethodName)
self.course.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('html', 'Test HTML', data=load_data_str('ga_survey.html'))
)
)
)
).install()
# Register a contract for A company
self.switch_to_user(PLATFORMER_USER_INFO)
self.contract = self.create_contract(BizContractPage(self.browser).visit(), 'PF', '2016/01/01',
'2100/01/01', contractor_organization=A_COMPANY,
detail_info=[self.course._course_key])
# Change login user and answer survey
acom_employees = [self.register_user(), self.register_user()]
self.answers = [
['1', '2', 'yes', u'意見 by {}_{}'.format(self._testMethodName, acom_employees[0]['username']),
acom_employees[0]['username']],
['2', '1', 'no', u'意見 by {}_{}'.format(self._testMethodName, acom_employees[1]['username']),
acom_employees[1]['username']]
]
self._answer_survey(acom_employees[0], self.answers[0])
self._answer_survey(acom_employees[1], self.answers[1])
def test_survey_as_director(self):
"""
Tests that director of contractor can download survey.
"""
self.switch_to_user(A_DIRECTOR_USER_INFO)
BizNavPage(self.browser).visit().change_role(A_COMPANY, self.contract['Contract Name'], self.course._course_key)\
.click_survey().check_encoding_utf8(False).click_download_button()
self._verify_csv_answers(self.answers, 'utf16')
def test_survey_as_director_utf8(self):
"""
Tests that director of contractor can download survey.
"""
self.switch_to_user(A_DIRECTOR_USER_INFO)
BizNavPage(self.browser).visit().change_role(A_COMPANY, self.contract['Contract Name'], self.course._course_key)\
.click_survey().check_encoding_utf8(True).click_download_button()
self._verify_csv_answers(self.answers, 'utf8')
def test_survey_as_staff(self):
"""
        Tests that staff of platformer can download survey.
"""
self.switch_to_user(SUPER_USER_INFO)
instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course._course_key).visit()
survey_page_section = instructor_dashboard_page.select_survey()
survey_page_section.check_encoding_utf8(False).click_download_button()
self._verify_csv_answers(self.answers, 'utf16')
def test_survey_as_staff_utf8(self):
"""
        Tests that staff of platformer can download survey.
"""
self.switch_to_user(SUPER_USER_INFO)
instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course._course_key).visit()
survey_page_section = instructor_dashboard_page.select_survey()
survey_page_section.check_encoding_utf8(True).click_download_button()
self._verify_csv_answers(self.answers, 'utf8')
def test_utf8_checkbox_is_saved_as_biz(self):
"""
Test that the value of the checkbox is saved as biz.
"""
# checked
self.switch_to_user(A_DIRECTOR_USER_INFO)
bizSurveyPage = BizNavPage(self.browser).visit().change_role(A_COMPANY, self.contract['Contract Name'],
self.course._course_key).click_survey()
bizSurveyPage.check_encoding_utf8(True).click_download_button()
bizSurveyPage = BizNavPage(self.browser).visit().change_role(A_COMPANY, self.contract['Contract Name'],
self.course._course_key).click_survey()
self.assertTrue(bizSurveyPage.is_encoding_utf8_selected())
# unchecked
bizSurveyPage.check_encoding_utf8(False).click_download_button()
bizSurveyPage = BizNavPage(self.browser).visit().change_role(A_COMPANY, self.contract['Contract Name'],
self.course._course_key).click_survey()
self.assertFalse(bizSurveyPage.is_encoding_utf8_selected())
def test_utf8_checkbox_is_saved_as_instructor(self):
"""
Test that the value of the checkbox is saved as instructor.
"""
# checked
self.switch_to_user(SUPER_USER_INFO)
instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course._course_key).visit()
survey_page_section = instructor_dashboard_page.select_survey()
survey_page_section.check_encoding_utf8(True).click_download_button()
instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course._course_key).visit()
survey_page_section = instructor_dashboard_page.select_survey()
self.assertTrue(survey_page_section.is_encoding_utf8_selected())
# unchecked
survey_page_section.check_encoding_utf8(False).click_download_button()
instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course._course_key).visit()
survey_page_section = instructor_dashboard_page.select_survey()
self.assertFalse(survey_page_section.is_encoding_utf8_selected())
def _answer_survey(self, login_user, answer):
"""
Answer survey
"""
# Register invitation as manager of A company
self.switch_to_user(login_user)
self.register_invitation(self.contract['Invitation Code'], ['Marketing'])
        # Visit survey page
CoursewarePage(self.browser, self.course._course_key).visit()
CourseNavPage(self.browser).go_to_section('Test Section', 'Test Subsection')
survey_page = SurveyPage(self.browser).wait_for_page()
self.assertTrue(survey_page.is_submit_button_enabled())
# Submit
survey_page.fill_item('Q1', answer[0])
survey_page.fill_item('Q2', answer[1])
survey_page.fill_item('Q3', answer[2])
survey_page.fill_item('Q4', answer[3])
survey_page.submit()
# Verify message
self.assertIn(u"ご回答ありがとうございました。", survey_page.wait_for_messages())
self.assertFalse(survey_page.is_submit_button_enabled())
def _verify_csv_answers(self, expect_data, encoding):
"""
Verify csv file.
"""
# Get csv file. (Content_type is 'text/tab-separated-values')
tmp_file = max(
[os.path.join(DOWNLOAD_DIR, f) for f in os.listdir(DOWNLOAD_DIR) if f.count('.tsv')],
key=os.path.getctime)
csv_file = os.path.join(os.environ.get('SELENIUM_DRIVER_LOG_DIR', ''), self._testMethodName + '.csv')
shutil.move(os.path.join(DOWNLOAD_DIR, tmp_file), csv_file)
# Read csv
with codecs.open(csv_file, encoding=encoding) as f:
reader = csv.DictReader([row.encode('utf8') for row in f], delimiter="\t")
csv_data = [[row.get('Q1').decode('utf8'), row.get('Q2').decode('utf8'), row.get('Q3').decode('utf8'),
row.get('Q4').decode('utf8'), row.get('User Name').decode('utf8')] for row in reader]
self.assertEqual(csv_data, expect_data)
@attr('shard_ga_biz_3')
class LoginCodeEnabledBizSurveyTest(WebAppTest, GaccoBizTestMixin):
"""
Tests that the login code enabled survey functionality of biz works
"""
def setUp(self):
super(LoginCodeEnabledBizSurveyTest, self).setUp()
# Install course
self.course = CourseFixture('plat', self._testMethodName, 'biz_test_run', 'Biz Course ' + self._testMethodName)
self.course.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('html', 'Test HTML', data=load_data_str('ga_survey.html'))
)
)
)
).install()
# Register a contract for A company
self.switch_to_user(PLATFORMER_USER_INFO)
self.contract = self.create_contract(BizContractPage(self.browser).visit(), 'PF', '2016/01/01',
'2100/01/01', contractor_organization=A_COMPANY,
detail_info=[self.course._course_key])
# Make contract auth
self.django_admin_page = DjangoAdminPage(self.browser)
self.new_url_code = get_random_string(8)
self.switch_to_user(SUPER_USER_INFO)
django_admin_add_page = self.django_admin_page.visit().click_add('ga_contract', 'contractauth')
django_admin_list_page = django_admin_add_page.input({
'contract': self.contract['Contract Name'],
'url_code': self.new_url_code,
'send_mail': True,
}).save()
# Register user as director
self.new_director = self.register_user(course_id=self.course._course_key)
        self.grant(PLATFORMER_USER_INFO, A_COMPANY_NAME, 'director', self.new_director)
# Make acom employees
acom_employees = [self.new_user_info for _ in range(2)]
for acom_employee in acom_employees:
acom_employee['login_code'] = 'logincode_' + get_random_string(8)
self.switch_to_user(self.new_director)
biz_register_students_page = BizNavPage(self.browser).visit().change_role(A_COMPANY, self.contract['Contract Name'],
self.course._course_key).click_register_students().click_tab_one_register_student()
biz_register_students_page.input_one_user_info_auth(acom_employees[0]).click_one_register_button().click_popup_yes()
biz_register_students_page.input_one_user_info_auth(acom_employees[1]).click_one_register_button().click_popup_yes()
biz_register_students_page.wait_for_message(
u'Began the processing of Student Member Register.Execution status, please check from the task history.'
)
# Change login user and answer survey
self.answers = [
['1', '2', 'yes', u'意見 by {}_{}'.format(self._testMethodName, acom_employees[0]['username']),
acom_employees[0]['username']],
['2', '1', 'no', u'意見 by {}_{}'.format(self._testMethodName, acom_employees[1]['username']),
acom_employees[1]['username']]
]
self._answer_survey(acom_employees[0], self.answers[0])
self._answer_survey(acom_employees[1], self.answers[1])
# Make expect datas of survey csv
self.expect_datas_as_director = [
[acom_employees[0]['login_code'], '1', '2', 'yes', u'意見 by {}_{}'.format(self._testMethodName, acom_employees[0]['username']),
acom_employees[0]['username']],
[acom_employees[1]['login_code'], '2', '1', 'no', u'意見 by {}_{}'.format(self._testMethodName, acom_employees[1]['username']),
acom_employees[1]['username']]
]
self.expect_datas_as_staff = [
['1', '2', 'yes', u'意見 by {}_{}'.format(self._testMethodName, acom_employees[0]['username']),
acom_employees[0]['username']],
['2', '1', 'no', u'意見 by {}_{}'.format(self._testMethodName, acom_employees[1]['username']),
acom_employees[1]['username']]
]
def test_survey_as_director(self):
"""
Tests that director of contractor can download survey.
"""
self.switch_to_user(A_DIRECTOR_USER_INFO)
BizNavPage(self.browser).visit().change_role(A_COMPANY, self.contract['Contract Name'], self.course._course_key)\
.click_survey().check_encoding_utf8(False).click_download_button()
self._verify_csv_answers(self.expect_datas_as_director, True, 'utf16')
def test_survey_as_director_utf8(self):
"""
Tests that director of contractor can download survey.
"""
self.switch_to_user(A_DIRECTOR_USER_INFO)
BizNavPage(self.browser).visit().change_role(A_COMPANY, self.contract['Contract Name'], self.course._course_key)\
.click_survey().check_encoding_utf8(True).click_download_button()
self._verify_csv_answers(self.expect_datas_as_director, True, 'utf8')
def test_survey_as_staff(self):
"""
        Tests that staff of platformer can download survey.
"""
self.switch_to_user(SUPER_USER_INFO)
instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course._course_key).visit()
survey_page_section = instructor_dashboard_page.select_survey()
survey_page_section.check_encoding_utf8(False).click_download_button()
self._verify_csv_answers(self.expect_datas_as_staff, False, 'utf16')
def test_survey_as_staff_utf8(self):
"""
        Tests that staff of platformer can download survey.
"""
self.switch_to_user(SUPER_USER_INFO)
instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course._course_key).visit()
survey_page_section = instructor_dashboard_page.select_survey()
survey_page_section.check_encoding_utf8(True).click_download_button()
self._verify_csv_answers(self.expect_datas_as_staff, False, 'utf8')
def _answer_survey(self, login_user, answer):
"""
Answer survey
"""
# Register invitation as manager of A company
self.switch_to_user(login_user)
self.register_invitation(self.contract['Invitation Code'], ['Marketing'])
        # Visit survey page
CoursewarePage(self.browser, self.course._course_key).visit()
CourseNavPage(self.browser).go_to_section('Test Section', 'Test Subsection')
survey_page = SurveyPage(self.browser).wait_for_page()
self.assertTrue(survey_page.is_submit_button_enabled())
# Submit
survey_page.fill_item('Q1', answer[0])
survey_page.fill_item('Q2', answer[1])
survey_page.fill_item('Q3', answer[2])
survey_page.fill_item('Q4', answer[3])
survey_page.submit()
# Verify message
self.assertIn(u"ご回答ありがとうございました。", survey_page.wait_for_messages())
self.assertFalse(survey_page.is_submit_button_enabled())
def _verify_csv_answers(self, expect_data, enable_login_code_check, encoding):
"""
Verify csv file.
"""
# Get csv file. (Content_type is 'text/tab-separated-values')
tmp_file = max(
[os.path.join(DOWNLOAD_DIR, f) for f in os.listdir(DOWNLOAD_DIR) if f.count('.tsv')],
key=os.path.getctime)
csv_file = os.path.join(os.environ.get('SELENIUM_DRIVER_LOG_DIR', ''), self._testMethodName + '.csv')
shutil.move(os.path.join(DOWNLOAD_DIR, tmp_file), csv_file)
# Read csv
with codecs.open(csv_file, encoding=encoding) as f:
reader = csv.DictReader([row.encode('utf8') for row in f], delimiter="\t")
if enable_login_code_check:
csv_data = [[row.get('Login Code').decode('utf8'), row.get('Q1').decode('utf8'), row.get('Q2').decode('utf8'),
row.get('Q3').decode('utf8'), row.get('Q4').decode('utf8'), row.get('User Name').decode('utf8')
] for row in reader]
else:
csv_data = [[row.get('Q1').decode('utf8'), row.get('Q2').decode('utf8'),
row.get('Q3').decode('utf8'), row.get('Q4').decode('utf8'), row.get('User Name').decode('utf8')
] for row in reader]
self.assertEqual(csv_data, expect_data)
| nttks/edx-platform | common/test/acceptance/tests/biz/test_ga_survey.py | Python | agpl-3.0 | 17,522 | ["VisIt"] | 2abe184210a74e8a8973c825ca491977487225944eb1ab6d9f95b04addd5f555 |
#==============================================================================
#
# Program: ParaView
# Module: annotation.py
#
# Copyright (c) Kitware, Inc.
# All rights reserved.
# See Copyright.txt or http://www.paraview.org/HTML/Copyright.html for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
#==============================================================================
r"""
This module is used by vtkPythonAnnotationFilter.
"""
try:
import numpy as np
except ImportError:
raise RuntimeError, "'numpy' module is not found. numpy is needed for "\
"this functionality to work. Please install numpy and try again."
from paraview import calculator
from vtk import vtkDataObject
from vtk.numpy_interface import dataset_adapter as dsa
def _get_ns(self, do, association):
if association == vtkDataObject.FIELD:
# For FieldData, it gets tricky. In general, one would think we are going
# to look at field data in inputDO directly -- same for composite datasets.
        # However, ExodusIIReader likes to put field data on leaf nodes instead.
# So we also check leaf nodes, if the FieldData on the root is empty.
# We explicitly call dsa.DataObject.GetFieldData to ensure that
# when dealing with composite datasets, we get the FieldData on the
# vtkCompositeDataSet itself, not in the leaf nodes.
fieldData = dsa.DataObject.GetFieldData(do)
if len(fieldData.keys()) == 0:
# if this is a composite dataset, use field data from the first block with some
# field data.
if isinstance(do, dsa.CompositeDataSet):
for dataset in do:
fieldData = dataset.GetFieldData()
if (not fieldData is None) and (len(fieldData.keys()) > 0): break
else:
fieldData = do.GetAttributes(association)
arrays = calculator.get_arrays(fieldData)
ns = {}
ns["input"] = do
if self.GetDataTimeValid():
ns["time_value"] = self.GetDataTime()
ns["t_value"] = ns["time_value"]
if self.GetNumberOfTimeSteps() > 0:
ns["time_steps"] = [self.GetTimeStep(x) for x in xrange(self.GetNumberOfTimeSteps())]
ns["t_steps"] = ns["time_steps"]
if self.GetTimeRangeValid():
ns["time_range"] = self.GetTimeRange()
ns["t_range"] = ns["time_range"]
if self.GetDataTimeValid() and self.GetNumberOfTimeSteps() > 0:
try:
ns["time_index"] = ns["time_steps"].index(ns["time_value"])
ns["t_index"] = ns["time_index"]
except ValueError: pass
ns.update(arrays)
return ns
def execute(self):
"""Called by vtkPythonAnnotationFilter."""
expression = self.GetExpression()
inputDO = self.GetCurrentInputDataObject()
if not expression or not inputDO:
return True
inputs = [dsa.WrapDataObject(inputDO)]
association = self.GetArrayAssociation()
ns = _get_ns(self, inputs[0], association)
try:
result = calculator.compute(inputs, expression, ns=ns)
except:
from sys import stderr
print >> stderr, "Failed to evaluate expression '%s'. "\
"The following exception stack should provide additional "\
"developer specific information. This typically implies a malformed "\
"expression. Verify that the expression is valid.\n\n" \
"Variables in current scope are %s \n" % (expression, ns.keys())
raise
self.SetComputedAnnotationValue("%s" % result)
return True
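# Illustrative note, not from the original source: the expression evaluated
# above is the one the user types into ParaView's Python Annotation filter,
# for example '"time: %g" % time_value' or '"max T: %g" % max(Temperature)',
# where the array and time-step names come from the namespace built by
# _get_ns() above.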
def execute_on_global_data(self):
"""Called by vtkAnnotateGlobalDataFilter."""
inputDO = self.GetCurrentInputDataObject()
if not inputDO:
return True
inputs = [dsa.WrapDataObject(inputDO)]
association = self.GetArrayAssociation()
ns = _get_ns(self, inputs[0], association)
    if not ns.has_key(self.GetFieldArrayName()):
        # stderr is not imported at module scope, so import it here before use
        from sys import stderr
        print >> stderr, "Failed to locate global array '%s'." % self.GetFieldArrayName()
        raise RuntimeError, "Failed to locate global array"
array = ns[self.GetFieldArrayName()]
chosen_element = array
try:
# if the array has as many elements as the timesteps, pick the element
# matching the current timestep.
if self.GetNumberOfTimeSteps() > 0 and \
array.shape[0] == self.GetNumberOfTimeSteps():
chosen_element = array[ns["time_index"]]
# if the array has as many elements as the `mode_shape_range`, pick the
# element matching the `mode_shape` (BUG #0015322).
elif ns.has_key("mode_shape") and ns.has_key("mode_shape_range") and \
ns["mode_shape_range"].shape[1] == 2 and \
array.shape[0] == (ns["mode_shape_range"].GetValue(1) - ns["mode_shape_range"].GetValue(0) + 1):
chosen_element = array[ns["mode_shape"].GetValue(0) - ns["mode_shape_range"].GetValue(0)]
elif array.shape[0] == 1:
# for single element arrays, just extract the value.
# This avoids the extra () when converting to string
# (see BUG #15321).
chosen_element = array[0]
except AttributeError: pass
try:
# hack for string array.
if chosen_element.IsA("vtkStringArray"):
chosen_element = chosen_element.GetValue(0)
except: pass
expression = self.GetPrefix() if self.GetPrefix() else ""
expression += str(chosen_element)
expression += self.GetPostfix() if self.GetPostfix() else ""
self.SetComputedAnnotationValue(expression)
return True
def execute_on_attribute_data(self, evaluate_locally):
"""Called by vtkAnnotateAttributeDataFilter."""
inputDO = self.GetCurrentInputDataObject()
if not inputDO:
return True
inputs = [dsa.WrapDataObject(inputDO)]
association = self.GetArrayAssociation()
ns = _get_ns(self, inputs[0], association)
    if not ns.has_key(self.GetArrayName()):
        # stderr is not imported at module scope, so import it here before use
        from sys import stderr
        print >> stderr, "Failed to locate array '%s'." % self.GetArrayName()
        raise RuntimeError, "Failed to locate array"
if not evaluate_locally:
return True
array = ns[self.GetArrayName()]
chosen_element = array[self.GetElementId()]
expression = self.GetPrefix() if self.GetPrefix() else ""
expression += str(chosen_element)
self.SetComputedAnnotationValue(expression)
return True
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/Wrapping/Python/paraview/annotation.py | Python | gpl-3.0 | 6,555 | ["ParaView", "VTK"] | 56dd0cb05fbd68ddc785c76df2fc656a6d854bbfb1708592ae554916bda6c4de |
from ase import Atoms
from ase.units import Bohr
from gpaw import GPAW
from gpaw.test import equal
a = 7.5 * Bohr
n = 16
atoms = Atoms('He', [(0.0, 0.0, 0.0)], cell=(a, a, a), pbc=True)
calc = GPAW(gpts=(n, n, n), nbands=1, xc='PBE')
atoms.set_calculator(calc)
e1 = atoms.get_potential_energy()
niter1 = calc.get_number_of_iterations()
e1ref = calc.get_reference_energy()
de12 = calc.get_xc_difference('revPBE')
calc.set(xc='revPBE')
e2 = atoms.get_potential_energy()
niter2 = calc.get_number_of_iterations()
e2ref = calc.get_reference_energy()
de21 = calc.get_xc_difference('PBE')
print e1ref + e1 + de12 - (e2ref + e2)
print e1ref + e1 - (e2ref + e2 + de21)
print de12, de21
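# Non-self-consistent consistency check: the PBE total energy plus the
# revPBE-PBE XC difference should match the self-consistent revPBE total
# energy (and vice versa) within the tolerances below.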
equal(e1ref + e1 + de12, e2ref + e2, 8e-4)
equal(e1ref + e1, e2ref + e2 + de21, 3e-3)
calc.write('revPBE.gpw')
de21b = GPAW('revPBE.gpw').get_xc_difference('PBE')
equal(de21, de21b, 9e-8)
energy_tolerance = 0.000005
niter_tolerance = 0
equal(e1, -0.07904951, energy_tolerance)
equal(niter1, 16, niter_tolerance)
equal(e2, -0.08147563, energy_tolerance)
equal(niter2, 11, niter_tolerance)
| ajylee/gpaw-rtxs | gpaw/test/nonselfconsistent.py | Python | gpl-3.0 | 1,069 | ["ASE", "GPAW"] | 5a7cbf284ef810fd322658d54b2e3dafc5f45cfe9166dd6e7a3dbf8e5da479a7 |
import sys
from ase.build import molecule, fcc111, add_adsorbate
from ase.optimize import QuasiNewton
from ase.constraints import FixAtoms
from ase.calculators.emt import EMT
from ase.vibrations import Vibrations
sys.path.append("../..")
from __init__ import AnharmonicModes
slab = fcc111('Au', size=(2, 2, 2), vacuum=4.0)
H = molecule('H')
add_adsorbate(slab, H, 3.0, 'bridge')
constraint = FixAtoms(mask=[a.symbol == 'Au' for a in slab])
slab.set_constraint(constraint)
slab.set_calculator(EMT())
dyn = QuasiNewton(slab)
dyn.run(fmax=0.01)
# Running vibrational analysis
vib = Vibrations(slab, indices=[8])
vib.run()
vib.summary()
# Here the number of initial sampling points is increased so that the
# potential energy surface of the translation comes out looking good.
# With the default value of 5 (not setting n_initial), the potential
# energy surface is still well fitted and the thermodynamical properties
# are converged.
# This is therefore only for illustration.
AM = AnharmonicModes(
vibrations_object=vib,
pre_names='an_mode_relax_',
settings={
'plot_mode': True,
'n_initial': 10,
})
# Translation by moving from top position on 4 to 6
AM.define_translation(
from_atom_to_atom=[4, 6],
relax_axis=[0, 0, 1])
AM.clean()
AM.inspect_anmodes() # creates trajectory file
AM.run()
AM.pre_summary()
AM.summary()
# Delete all the generated files
# vib.clean()
# AM.clean()
| keldLundgaard/ase-anharmonics | examples/translate_on_surface/translate_H_on_Au_z-relax__EMT.py | Python | lgpl-2.1 | 1,454 | ["ASE"] | ccd769749322371375d15a3df513635718dd060da9e5ccd311d63f6c007844ea |
# Copyright 2014, Brian Coca <bcoca@ansible.com>
# Copyright 2017, Ken Celenza <ken@networktocode.com>
# Copyright 2017, Jason Edelman <jason@networktocode.com>
# Copyright 2017, Ansible Project
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import itertools
import math
from jinja2.filters import environmentfilter
from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError
from ansible.module_utils.common.text import formatters
from ansible.module_utils.six import binary_type, text_type
from ansible.module_utils.six.moves import zip, zip_longest
from ansible.module_utils.common._collections_compat import Hashable, Mapping, Iterable
from ansible.module_utils._text import to_native, to_text
from ansible.utils.display import Display
try:
from jinja2.filters import do_unique
HAS_UNIQUE = True
except ImportError:
HAS_UNIQUE = False
try:
from jinja2.filters import do_max, do_min
HAS_MIN_MAX = True
except ImportError:
HAS_MIN_MAX = False
display = Display()
@environmentfilter
# Use case_sensitive=None as a sentinel value, so we raise an error only when
# explicitly set and cannot be handle (by Jinja2 w/o 'unique' or fallback version)
def unique(environment, a, case_sensitive=None, attribute=None):
def _do_fail(e):
if case_sensitive is False or attribute:
raise AnsibleFilterError("Jinja2's unique filter failed and we cannot fall back to Ansible's version "
"as it does not support the parameters supplied", orig_exc=e)
error = e = None
try:
if HAS_UNIQUE:
c = list(do_unique(environment, a, case_sensitive=bool(case_sensitive), attribute=attribute))
except TypeError as e:
error = e
_do_fail(e)
except Exception as e:
error = e
_do_fail(e)
display.warning('Falling back to Ansible unique filter as Jinja2 one failed: %s' % to_text(e))
if not HAS_UNIQUE or error:
# handle Jinja2 specific attributes when using Ansible's version
if case_sensitive is False or attribute:
raise AnsibleFilterError("Ansible's unique filter does not support case_sensitive=False nor attribute parameters, "
"you need a newer version of Jinja2 that provides their version of the filter.")
c = []
for x in a:
if x not in c:
c.append(x)
return c
@environmentfilter
def intersect(environment, a, b):
if isinstance(a, Hashable) and isinstance(b, Hashable):
c = set(a) & set(b)
else:
c = unique(environment, [x for x in a if x in b], True)
return c
@environmentfilter
def difference(environment, a, b):
if isinstance(a, Hashable) and isinstance(b, Hashable):
c = set(a) - set(b)
else:
c = unique(environment, [x for x in a if x not in b], True)
return c
@environmentfilter
def symmetric_difference(environment, a, b):
if isinstance(a, Hashable) and isinstance(b, Hashable):
c = set(a) ^ set(b)
else:
isect = intersect(environment, a, b)
c = [x for x in union(environment, a, b) if x not in isect]
return c
@environmentfilter
def union(environment, a, b):
if isinstance(a, Hashable) and isinstance(b, Hashable):
c = set(a) | set(b)
else:
c = unique(environment, a + b, True)
return c
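# Illustrative template usage of the set-theory filters above (added for
# clarity, not part of the original file); note that for hashable inputs
# Python sets are used, so the ordering of the results is not guaranteed:
# {{ [1, 2, 3] | intersect([2, 3, 4]) }} -> [2, 3]
# {{ [1, 2, 3] | difference([2, 3, 4]) }} -> [1]
# {{ [1, 2, 3] | symmetric_difference([2, 3, 4]) }} -> [1, 4]
# {{ [1, 2, 3] | union([3, 4]) }} -> [1, 2, 3, 4]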
@environmentfilter
def min(environment, a, **kwargs):
if HAS_MIN_MAX:
return do_min(environment, a, **kwargs)
else:
if kwargs:
raise AnsibleFilterError("Ansible's min filter does not support any keyword arguments. "
"You need Jinja2 2.10 or later that provides their version of the filter.")
_min = __builtins__.get('min')
return _min(a)
@environmentfilter
def max(environment, a, **kwargs):
if HAS_MIN_MAX:
return do_max(environment, a, **kwargs)
else:
if kwargs:
raise AnsibleFilterError("Ansible's max filter does not support any keyword arguments. "
"You need Jinja2 2.10 or later that provides their version of the filter.")
_max = __builtins__.get('max')
return _max(a)
def logarithm(x, base=math.e):
try:
if base == 10:
return math.log10(x)
else:
return math.log(x, base)
except TypeError as e:
raise AnsibleFilterTypeError('log() can only be used on numbers: %s' % to_native(e))
def power(x, y):
try:
return math.pow(x, y)
except TypeError as e:
raise AnsibleFilterTypeError('pow() can only be used on numbers: %s' % to_native(e))
def inversepower(x, base=2):
try:
if base == 2:
return math.sqrt(x)
else:
return math.pow(x, 1.0 / float(base))
except (ValueError, TypeError) as e:
raise AnsibleFilterTypeError('root() can only be used on numbers: %s' % to_native(e))
def human_readable(size, isbits=False, unit=None):
''' Return a human readable string '''
try:
return formatters.bytes_to_human(size, isbits, unit)
except TypeError as e:
raise AnsibleFilterTypeError("human_readable() failed on bad input: %s" % to_native(e))
except Exception:
raise AnsibleFilterError("human_readable() can't interpret following string: %s" % size)
def human_to_bytes(size, default_unit=None, isbits=False):
''' Return bytes count from a human readable string '''
try:
return formatters.human_to_bytes(size, default_unit, isbits)
except TypeError as e:
raise AnsibleFilterTypeError("human_to_bytes() failed on bad input: %s" % to_native(e))
except Exception:
raise AnsibleFilterError("human_to_bytes() can't interpret following string: %s" % size)
def rekey_on_member(data, key, duplicates='error'):
"""
Rekey a dict of dicts on another member
May also create a dict from a list of dicts.
duplicates can be one of ``error`` or ``overwrite`` to specify whether to error out if the key
value would be duplicated or to overwrite previous entries if that's the case.
"""
if duplicates not in ('error', 'overwrite'):
raise AnsibleFilterError("duplicates parameter to rekey_on_member has unknown value: {0}".format(duplicates))
new_obj = {}
# Ensure the positional args are defined - raise jinja2.exceptions.UndefinedError if not
bool(data) and bool(key)
if isinstance(data, Mapping):
iterate_over = data.values()
elif isinstance(data, Iterable) and not isinstance(data, (text_type, binary_type)):
iterate_over = data
else:
raise AnsibleFilterTypeError("Type is not a valid list, set, or dict")
for item in iterate_over:
if not isinstance(item, Mapping):
raise AnsibleFilterTypeError("List item is not a valid dict")
try:
key_elem = item[key]
except KeyError:
raise AnsibleFilterError("Key {0} was not found".format(key))
except TypeError as e:
raise AnsibleFilterTypeError(to_native(e))
except Exception as e:
raise AnsibleFilterError(to_native(e))
# Note: if new_obj[key_elem] exists it will always be a non-empty dict (it will at
# minimum contain {key: key_elem}
if new_obj.get(key_elem, None):
if duplicates == 'error':
raise AnsibleFilterError("Key {0} is not unique, cannot correctly turn into dict".format(key_elem))
elif duplicates == 'overwrite':
new_obj[key_elem] = item
else:
new_obj[key_elem] = item
return new_obj
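# Illustrative example, not part of the original file: given
#   users = [{"name": "alice", "uid": 1}, {"name": "bob", "uid": 2}]
# then '{{ users | rekey_on_member("name") }}' returns
#   {"alice": {"name": "alice", "uid": 1}, "bob": {"name": "bob", "uid": 2}}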
class FilterModule(object):
''' Ansible math jinja2 filters '''
def filters(self):
filters = {
# general math
'min': min,
'max': max,
# exponents and logarithms
'log': logarithm,
'pow': power,
'root': inversepower,
# set theory
'unique': unique,
'intersect': intersect,
'difference': difference,
'symmetric_difference': symmetric_difference,
'union': union,
# combinatorial
'product': itertools.product,
'permutations': itertools.permutations,
'combinations': itertools.combinations,
# computer theory
'human_readable': human_readable,
'human_to_bytes': human_to_bytes,
'rekey_on_member': rekey_on_member,
# zip
'zip': zip,
'zip_longest': zip_longest,
}
return filters
| srvg/ansible | lib/ansible/plugins/filter/mathstuff.py | Python | gpl-3.0 | 9,450 | [
"Brian"
] | 543de4da987f6ec211ad2dd4f19557e26b59c7041a8305f75900c67d054cd412 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from collections import namedtuple
from itertools import groupby
from numbers import Integral
import numpy as np
from sisl._internal import set_module
from sisl import Atom, Geometry, SuperCell
from ._common import geometry_define_nsc, geometry2uc
__all__ = ['fcc_slab', 'bcc_slab', 'rocksalt_slab']
def _layer2int(layer, periodicity):
"""Convert layer specification to integer"""
if layer is None:
return None
if isinstance(layer, str):
layer = "ABCDEF".index(layer.upper())
return layer % periodicity
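# Illustrative sketch (hypothetical helper, for documentation only): layer labels
# are mapped to indices modulo the stacking periodicity.
def _demo_layer2int():
    assert _layer2int('B', 3) == 1
    assert _layer2int(4, 3) == 1      # integers are wrapped by the periodicity
    assert _layer2int(None, 3) is None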
def _calc_info(start, end, layers, periodicity):
"""Determine offset index from start or end specification"""
if start is not None and end is not None:
raise ValueError("Only one of 'start' or 'end' may be supplied")
Info = namedtuple("Info", ["layers", "nlayers", "offset", "periodicity"])
# First check valid input, start or end should conform or die
stacking = "ABCDEF"[:periodicity]
# convert to integers in range range(periodicity)
# However, if they are None, they will still be none
start = _layer2int(start, periodicity)
end = _layer2int(end, periodicity)
# First convert `layers` to integer, and possibly determine start/end
if layers is None:
# default to a single stacking
layers = periodicity
if isinstance(layers, Integral):
# convert to proper layers
nlayers = layers
# + 2 to allow rotating
layers = stacking * (nlayers // periodicity + 2)
if start is None and end is None:
# the following will figure it out
layers = layers[:nlayers]
elif start is None:
# end is not none
layers = layers[end+1:] + layers[:end+1]
layers = layers[-nlayers:]
elif end is None:
# start is not none
layers = layers[start:] + layers[:start]
layers = layers[:nlayers]
elif isinstance(layers, str):
nlayers = len(layers)
try:
# + 2 to allow rotating
(stacking * (nlayers // periodicity + 2)).index(layers)
except ValueError:
raise NotImplementedError(f"Stacking faults are not implemented, requested {layers} with stacking {stacking}")
if start is None and end is None:
# easy case, we just calculate one of them
start = _layer2int(layers[0], periodicity)
elif start is not None:
if _layer2int(layers[0], periodicity) != start:
raise ValueError(f"Passing both 'layers' and 'start' requires them to be conforming; found layers={layers} "
f"and start={'ABCDEF'[start]}")
elif end is not None:
if _layer2int(layers[-1], periodicity) != end:
raise ValueError(f"Passing both 'layers' and 'end' requires them to be conforming; found layers={layers} "
f"and end={'ABCDEF'[end]}")
# a sanity check for the algorithm, should always hold!
if start is not None:
assert _layer2int(layers[0], periodicity) == start
if end is not None:
assert _layer2int(layers[-1], periodicity) == end
# Convert layers variable to the list of layers in integer space
layers = [_layer2int(l, periodicity) for l in layers]
return Info(layers, nlayers, -_layer2int(layers[0], periodicity), periodicity)
def _finish_slab(g, vacuum):
"""Move slab to the unit-cell and move it very slightly to
stick to the lower side of the unit-cell borders.
"""
g = geometry2uc(g).sort(lattice=[2, 1, 0])
if vacuum is not None:
geometry_define_nsc(g, [True, True, False])
g.cell[2, 2] = g.xyz[:, 2].max() + vacuum
else:
geometry_define_nsc(g, [True, True, True])
return g
def _convert_miller(miller):
"""Convert miller specification to 3-tuple"""
if isinstance(miller, int):
miller = str(miller)
if isinstance(miller, str):
miller = [int(i) for i in miller]
if isinstance(miller, list):
miller = tuple(miller)
if len(miller) != 3:
raise ValueError(f"Invalid Miller indices, must have length 3")
return miller
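# Illustrative sketch (hypothetical helper, for documentation only): the Miller
# index may be given as an int, a str or a sequence and is normalised to a 3-tuple.
def _demo_convert_miller():
    assert _convert_miller(100) == (1, 0, 0)
    assert _convert_miller("111") == (1, 1, 1)
    assert _convert_miller([1, 1, 0]) == (1, 1, 0)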
def _slab_with_vacuum(func, *args, **kwargs):
"""Function to wrap `func` with vacuum in between """
layers = kwargs.pop("layers")
if layers is None or isinstance(layers, Integral):
return None
def is_vacuum(layer):
""" A vacuum is defined by one of these variables:
- None
- ' '
- 0
"""
if layer is None:
return True
if isinstance(layer, str):
return layer == ' '
if isinstance(layer, Integral):
return layer == 0
return False
# we are dealing either with a list of ints or str
if isinstance(layers, str):
nvacuums = layers.count(' ')
if nvacuums == 0:
return None
        if layers.count('  ') > 0:
raise ValueError("Denoting several vacuum layers next to each other is not supported. "
"Please pass 'vacuum' as an array instead.")
# determine number of slabs
nslabs = len(layers.strip().split())
else:
# this must be a list of ints, fill in none between ints
def are_layers(a, b):
a_layer = not is_vacuum(a)
b_layer = not is_vacuum(b)
return a_layer and b_layer
# convert list correctly
layers = [[p, None] if are_layers(p, n) else [p]
for p, n in zip(layers[:-1], layers[1:])] + [[layers[-1]]]
layers = [l for ls in layers for l in ls]
nvacuums = sum([1 if is_vacuum(l) else 0 for l in layers])
nslabs = sum([0 if is_vacuum(l) else 1 for l in layers])
# Now we need to ensure that `start` and `end` are the same
# length as nslabs
def ensure_length(var, nslabs, name):
if var is None:
return [None] * nslabs
if isinstance(var, (Integral, str)):
return [var] * nslabs
if len(var) > nslabs:
raise ValueError(f"Specification of {name} has too many elements compared to the "
f"number of slabs {nslabs}, please reduce length from {len(var)}.")
# it must be an array of some sorts
out = [None] * nslabs
out[:len(var)] = var[:]
if len(var) < len(out):
# a list requires a list on the rhs
for i in range(len(var), len(out)):
out[i] = var[-1]
return out
start = ensure_length(kwargs.pop("start"), nslabs, "start")
end = ensure_length(kwargs.pop("end"), nslabs, "end")
vacuum = np.asarray(kwargs.pop("vacuum"))
vacuums = np.full(nvacuums, 0.)
if vacuum.ndim == 0:
vacuums[:] = vacuum
else:
vacuums[:len(vacuum)] = vacuum
vacuums[len(vacuum):] = vacuum[-1]
vacuums = vacuums.tolist()
# We are now sure that there is a vacuum!
def iter_func(key, layer):
if key == 0:
return None
# layer is an iterator, convert to list
layer = list(layer)
if isinstance(layer[0], str):
layer = ''.join(layer)
elif len(layer) > 1:
raise ValueError(f"Grouper returned long list {layer}")
else:
layer = layer[0]
if is_vacuum(layer):
return None
return layer
# group stuff
layers = [
iter_func(key, group)
for key, group in groupby(layers,
# group by vacuum positions and not vacuum positions
lambda l: 0 if is_vacuum(l) else 1)
]
# Now we need to loop and create the things
reduce_nsc_c = layers[0] is None or layers[-1] is None
ivacuum = 0
islab = 0
if layers[0] is None:
layers.pop(0) # vacuum specification
out = func(*args,
layers=layers.pop(0),
start=start.pop(0),
end=end.pop(0),
vacuum=None, **kwargs)
# add vacuum
vacuum = SuperCell([0, 0, vacuums.pop(0)])
out = out.add(vacuum, offset=(0, 0, vacuum.cell[2, 2]))
ivacuum += 1
islab += 1
else:
out = func(*args,
layers=layers.pop(0),
start=start.pop(0),
end=end.pop(0),
vacuum=None, **kwargs)
islab += 1
while len(layers) > 0:
layer = layers.pop(0)
if layer is None:
dx = out.cell[2, 2] - out.xyz[:, 2].max()
# this ensures the vacuum is exactly vacuums[iv]
vacuum = SuperCell([0, 0, vacuums.pop(0) - dx])
ivacuum += 1
out = out.add(vacuum)
else:
geom = func(*args,
layers=layer,
start=start.pop(0),
end=end.pop(0),
vacuum=None, **kwargs)
out = out.append(geom, 2)
islab += 1
assert islab == nslabs, "Error in determining correct slab counts"
assert ivacuum == nvacuums, "Error in determining correct vacuum counts"
if reduce_nsc_c:
out.set_nsc(c=1)
return out
@set_module("sisl.geom")
def fcc_slab(alat, atoms, miller, layers=None, vacuum=20., *, orthogonal=False, start=None, end=None):
r""" Construction of a surface slab from a face-centered cubic (FCC) crystal
The slab layers are stacked along the :math:`z`-axis. The default stacking is the first
layer as an A-layer, defined as the plane containing an atom at :math:`(x,y)=(0,0)`.
Parameters
----------
alat : float
lattice constant of the fcc crystal
atoms : Atom
the atom that the crystal consists of
miller : int or str or (3,)
Miller indices of the surface facet
layers : int or str or array_like of ints, optional
Number of layers in the slab or explicit layer specification.
An array like can either use ints for layer size, or str's as layer specification.
An empty character `' '` will be denoted as a vacuum slot, see examples.
Currently the layers cannot have stacking faults.
See examples for details.
vacuum : float or array_like, optional
distance added to the third lattice vector to separate
the slab from its periodic images. If this is None, the slab will be a fully
periodic geometry but with the slab layers. Useful for appending geometries together.
        If an array, `layers` should be a str and the array should be no longer
        than the number of vacuum slots (spaces) in `layers`. If shorter, the
        last item will be repeated (like `zip_longest`).
orthogonal : bool, optional
if True returns an orthogonal lattice
start : int or string, optional
sets the first layer in the slab. Only one of `start` or `end` must be specified.
Discouraged to pass if `layers` is a str.
end : int or string, optional
sets the last layer in the slab. Only one of `start` or `end` must be specified.
Discouraged to pass if `layers` is a str.
Examples
--------
111 surface, starting with the A layer
>>> fcc_slab(alat, atoms, "111", start=0)
111 surface, starting with the B layer
>>> fcc_slab(alat, atoms, "111", start=1)
111 surface, ending with the B layer
>>> fcc_slab(alat, atoms, "111", end='B')
111 surface, with explicit layers in a given order
>>> fcc_slab(alat, atoms, "111", layers='BCABCA')
111 surface, with (1 Ang vacuum)BCA(2 Ang vacuum)ABC(3 Ang vacuum)
>>> fcc_slab(alat, atoms, "111", layers=' BCA ABC ', vacuum=(1, 2, 3))
111 surface, with (20 Ang vacuum)BCA
>>> fcc_slab(alat, atoms, "111", layers=' BCA', vacuum=20)
111 surface, with (2 Ang vacuum)BCA(1 Ang vacuum)ABC(1 Ang vacuum)
The last item in `vacuum` gets repeated.
>>> fcc_slab(alat, atoms, "111", layers=' BCA ABC ', vacuum=(2, 1))
111 periodic structure with ABC(20 Ang vacuum)BC
The unit cell parameters will be periodic in this case, and it will not be
a slab.
>>> fcc_slab(alat, atoms, "111", layers='ABC BC', vacuum=20.)
111 surface in an orthogonal (4x5) cell, maintaining the atom ordering
according to `lattice=[2, 1, 0]`:
>>> fcc_slab(alat, atoms, "111", orthogonal=True).repeat(5, axis=1).repeat(4, axis=0)
111 surface with number specifications of layers together with start
Between each number an implicit vacuum is inserted, only the first and last
are required if vacuum surrounding the slab is needed. The following two calls
are equivalent.
Structure: (10 Ang vacuum)(ABC)(1 Ang vacuum)(BCABC)(2 Ang vacuum)(CAB)
>>> fcc_slab(alat, atoms, "111", layers=(' ', 3, 5, 3), start=(0, 1, 2), vacuum=(10, 1, 2))
>>> fcc_slab(alat, atoms, "111", layers=' ABC BCABC CAB', vacuum=(10, 1, 2))
Raises
------
NotImplementedError
In case the Miller index has not been implemented or a stacking fault is
introduced in `layers`.
See Also
--------
fcc : Fully periodic equivalent of this slab structure
bcc_slab : Slab in BCC structure
rocksalt_slab : Slab in rocksalt/halite structure
"""
geom = _slab_with_vacuum(fcc_slab, alat, atoms, miller,
vacuum=vacuum, orthogonal=orthogonal,
layers=layers,
start=start, end=end)
if geom is not None:
return geom
miller = _convert_miller(miller)
if miller == (1, 0, 0):
info = _calc_info(start, end, layers, 2)
sc = SuperCell(np.array([0.5 ** 0.5, 0.5 ** 0.5, 0.5]) * alat)
g = Geometry([0, 0, 0], atoms=atoms, sc=sc)
g = g.tile(info.nlayers, 2)
# slide AB layers relative to each other
B = (info.offset + 1) % 2
g.xyz[B::2] += (sc.cell[0] + sc.cell[1]) / 2
elif miller == (1, 1, 0):
info = _calc_info(start, end, layers, 2)
sc = SuperCell(np.array([1., 0.5, 0.125]) ** 0.5 * alat)
g = Geometry([0, 0, 0], atoms=atoms, sc=sc)
g = g.tile(info.nlayers, 2)
# slide AB layers relative to each other
B = (info.offset + 1) % 2
g.xyz[B::2] += (sc.cell[0] + sc.cell[1]) / 2
elif miller == (1, 1, 1):
info = _calc_info(start, end, layers, 3)
if orthogonal:
sc = SuperCell(np.array([0.5, 4 * 0.375, 1 / 3]) ** 0.5 * alat)
g = Geometry(np.array([[0, 0, 0],
[0.125, 0.375, 0]]) ** 0.5 * alat,
atoms=atoms, sc=sc)
g = g.tile(info.nlayers, 2)
# slide ABC layers relative to each other
B = 2 * (info.offset + 1) % 6
C = 2 * (info.offset + 2) % 6
vec = (3 * sc.cell[0] + sc.cell[1]) / 6
g.xyz[B::6] += vec
g.xyz[B+1::6] += vec
g.xyz[C::6] += 2 * vec
g.xyz[C+1::6] += 2 * vec
else:
sc = SuperCell(np.array([[0.5, 0, 0],
[0.125, 0.375, 0],
[0, 0, 1 / 3]]) ** 0.5 * alat)
g = Geometry([0, 0, 0], atoms=atoms, sc=sc)
g = g.tile(info.nlayers, 2)
# slide ABC layers relative to each other
B = (info.offset + 1) % 3
C = (info.offset + 2) % 3
vec = (sc.cell[0] + sc.cell[1]) / 3
g.xyz[B::3] += vec
g.xyz[C::3] += 2 * vec
else:
raise NotImplementedError(f"fcc_slab: miller={miller} is not implemented")
g = _finish_slab(g, vacuum)
return g
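# Illustrative usage sketch (not from sisl itself; the element and lattice
# constant are made-up example values): a 6-layer Au FCC(111) slab with a
# 20 Å vacuum, mirroring the docstring examples above.
def _demo_fcc_slab():
    return fcc_slab(4.08, Atom('Au'), "111", layers=6, vacuum=20.)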
def bcc_slab(alat, atoms, miller, layers=None, vacuum=20., *, orthogonal=False, start=None, end=None):
r""" Construction of a surface slab from a body-centered cubic (BCC) crystal
The slab layers are stacked along the :math:`z`-axis. The default stacking is the first
layer as an A-layer, defined as the plane containing an atom at :math:`(x,y)=(0,0)`.
Parameters
----------
alat : float
        lattice constant of the bcc crystal
atoms : Atom
the atom that the crystal consists of
miller : int or str or (3,)
Miller indices of the surface facet
layers : int or str or array_like of ints, optional
Number of layers in the slab or explicit layer specification.
An array like can either use ints for layer size, or str's as layer specification.
An empty character `' '` will be denoted as a vacuum slot, see examples.
Currently the layers cannot have stacking faults.
See examples for details.
vacuum : float or array_like, optional
distance added to the third lattice vector to separate
the slab from its periodic images. If this is None, the slab will be a fully
periodic geometry but with the slab layers. Useful for appending geometries together.
        If an array, `layers` should be a str and the array should be no longer
        than the number of vacuum slots (spaces) in `layers`. If shorter, the
        last item will be repeated (like `zip_longest`).
orthogonal : bool, optional
if True returns an orthogonal lattice
start : int or string, optional
sets the first layer in the slab. Only one of `start` or `end` must be specified.
Discouraged to pass if `layers` is a str.
end : int or string, optional
sets the last layer in the slab. Only one of `start` or `end` must be specified.
Discouraged to pass if `layers` is a str.
Examples
--------
Please see `fcc_slab` for examples, they are equivalent to this method.
Raises
------
NotImplementedError
In case the Miller index has not been implemented or a stacking fault is
introduced in `layers`.
See Also
--------
bcc : Fully periodic equivalent of this slab structure
fcc_slab : Slab in FCC structure
rocksalt_slab : Slab in rocksalt/halite structure
"""
geom = _slab_with_vacuum(bcc_slab, alat, atoms, miller,
vacuum=vacuum, orthogonal=orthogonal,
layers=layers,
start=start, end=end)
if geom is not None:
return geom
miller = _convert_miller(miller)
if miller == (1, 0, 0):
info = _calc_info(start, end, layers, 2)
sc = SuperCell(np.array([1, 1, 0.5]) * alat)
g = Geometry([0, 0, 0], atoms=atoms, sc=sc)
g = g.tile(info.nlayers, 2)
# slide AB layers relative to each other
B = (info.offset + 1) % 2
g.xyz[B::2] += (sc.cell[0] + sc.cell[1]) / 2
elif miller == (1, 1, 0):
info = _calc_info(start, end, layers, 2)
if orthogonal:
sc = SuperCell(np.array([1, 2, 0.5]) ** 0.5 * alat)
g = Geometry(np.array([[0, 0, 0],
[0.5, 0.5 ** 0.5, 0]]) * alat,
atoms=atoms, sc=sc)
g = g.tile(info.nlayers, 2)
# slide ABC layers relative to each other
B = 2 * (info.offset + 1) % 4
vec = sc.cell[1] / 2
g.xyz[B::4] += vec
g.xyz[B+1::4] += vec
else:
sc = SuperCell(np.array([[1, 0, 0],
[0.5, 0.5 ** 0.5, 0],
[0, 0, 0.5 ** 0.5]]) * alat)
g = Geometry([0, 0, 0], atoms=atoms, sc=sc)
g = g.tile(info.nlayers, 2)
# slide AB layers relative to each other
B = (info.offset + 1) % 2
g.xyz[B::2] += sc.cell[0] / 2
elif miller == (1, 1, 1):
info = _calc_info(start, end, layers, 3)
if orthogonal:
sc = SuperCell(np.array([2, 4 * 1.5, 1 / 12]) ** 0.5 * alat)
g = Geometry(np.array([[0, 0, 0],
[0.5, 1.5, 0]]) ** 0.5 * alat,
atoms=atoms, sc=sc)
g = g.tile(info.nlayers, 2)
# slide ABC layers relative to each other
B = 2 * (info.offset + 1) % 6
C = 2 * (info.offset + 2) % 6
vec = (sc.cell[0] + sc.cell[1]) / 3
for i in range(2):
g.xyz[B+i::6] += vec
g.xyz[C+i::6] += 2 * vec
else:
sc = SuperCell(np.array([[2, 0, 0],
[0.5, 1.5, 0],
[0, 0, 1 / 12]]) ** 0.5 * alat)
g = Geometry([0, 0, 0], atoms=atoms, sc=sc)
g = g.tile(info.nlayers, 2)
# slide ABC layers relative to each other
B = (info.offset + 1) % 3
C = (info.offset + 2) % 3
vec = (sc.cell[0] + sc.cell[1]) / 3
g.xyz[B::3] += vec
g.xyz[C::3] += 2 * vec
else:
raise NotImplementedError(f"bcc_slab: miller={miller} is not implemented")
g = _finish_slab(g, vacuum)
return g
def rocksalt_slab(alat, atoms, miller, layers=None, vacuum=20., *, orthogonal=False, start=None, end=None):
r""" Construction of a surface slab from a two-element rock-salt crystal
This structure is formed by two interlocked fcc crystals for each of the two elements.
The slab layers are stacked along the :math:`z`-axis. The default stacking is the first
layer as an A-layer, defined as the plane containing the first atom in the atoms list
at :math:`(x,y)=(0,0)`.
This is equivalent to the NaCl crystal structure (halite).
Parameters
----------
alat : float
lattice constant of the rock-salt crystal
atoms : list
a list of two atoms that the crystal consist of
miller : int or str or (3,)
Miller indices of the surface facet
layers : int or str or array_like of ints, optional
Number of layers in the slab or explicit layer specification.
An array like can either use ints for layer size, or str's as layer specification.
An empty character `' '` will be denoted as a vacuum slot, see examples.
Currently the layers cannot have stacking faults.
See examples for details.
vacuum : float or array_like, optional
distance added to the third lattice vector to separate
the slab from its periodic images. If this is None, the slab will be a fully
periodic geometry but with the slab layers. Useful for appending geometries together.
        If an array, `layers` should be a str and the array should be no longer
        than the number of vacuum slots (spaces) in `layers`. If shorter, the
        last item will be repeated (like `zip_longest`).
orthogonal : bool, optional
if True returns an orthogonal lattice
start : int or string, optional
sets the first layer in the slab. Only one of `start` or `end` must be specified.
Discouraged to pass if `layers` is a str.
end : int or string, optional
sets the last layer in the slab. Only one of `start` or `end` must be specified.
Discouraged to pass if `layers` is a str.
Examples
--------
NaCl(100) slab, starting with A-layer
>>> rocksalt_slab(5.64, ['Na', 'Cl'], 100)
6-layer NaCl(100) slab, ending with A-layer
>>> rocksalt_slab(5.64, ['Na', 'Cl'], 100, layers=6, end='A')
    4-layer NaCl(100) slab, starting with a Cl A-layer and with a vacuum
    gap of 20 Å on both sides of the slab
>>> rocksalt_slab(5.64, ['Cl', 'Na'], 100, layers=' ABAB ')
For more examples see `fcc_slab`, the vacuum displacements are directly
translateable to this function.
Raises
------
NotImplementedError
In case the Miller index has not been implemented or a stacking fault is
introduced in `layers`.
See Also
--------
rocksalt : Basic structure of this one
fcc_slab : Slab in FCC structure (this slab is a combination of fcc slab structures)
bcc_slab : Slab in BCC structure
"""
geom = _slab_with_vacuum(rocksalt_slab, alat, atoms, miller,
vacuum=vacuum, orthogonal=orthogonal,
layers=layers,
start=start, end=end)
if geom is not None:
return geom
if isinstance(atoms, str):
atoms = [atoms, atoms]
if len(atoms) != 2:
raise ValueError(f"Invalid list of atoms, must have length 2")
miller = _convert_miller(miller)
g1 = fcc_slab(alat, atoms[0], miller, layers=layers, vacuum=None, orthogonal=orthogonal, start=start, end=end)
g2 = fcc_slab(alat, atoms[1], miller, layers=layers, vacuum=None, orthogonal=orthogonal, start=start, end=end)
if miller == (1, 0, 0):
g2 = g2.move(np.array([0.5, 0.5, 0]) ** 0.5 * alat / 2)
elif miller == (1, 1, 0):
g2 = g2.move(np.array([1, 0, 0]) * alat / 2)
elif miller == (1, 1, 1):
g2 = g2.move(np.array([0, 2 / 3, 1 / 3]) ** 0.5 * alat / 2)
else:
raise NotImplementedError(f"rocksalt_slab: miller={miller} is not implemented")
g = g1.add(g2)
g = _finish_slab(g, vacuum)
return g
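# Illustrative usage sketch (hypothetical helper mirroring the docstring example
# above): a 6-layer NaCl(100) slab ending with an A-layer.
def _demo_rocksalt_slab():
    return rocksalt_slab(5.64, ['Na', 'Cl'], 100, layers=6, end='A')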
| zerothi/sisl | sisl/geom/surfaces.py | Python | mpl-2.0 | 25,395 | [
"CRYSTAL"
] | b3cf63e64f569c916083f59b051e8d0fb746140c108eb02eb0cee6e7d16a0529 |
######################################################################
##
## Copyright 2010-2011 Ondrej Certik <ondrej@certik.cz>
## Copyright 2010-2011 Mateusz Paprocki <mattpap@gmail.com>
## Copyright 2011 Christian Iversen <ci@sikkerhed.org>
##
## Permission is hereby granted, free of charge, to any person
## obtaining a copy of this software and associated documentation
## files (the "Software"), to deal in the Software without
## restriction, including without limitation the rights to use,
## copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the
## Software is furnished to do so, subject to the following
## conditions:
##
## The above copyright notice and this permission notice shall be
## included in all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
## OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
## NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
## HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
## WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
## OTHER DEALINGS IN THE SOFTWARE.
##
######################################################################
import pyjaco.compiler
import ast
from pyjaco.compiler import JSError
class Compiler(pyjaco.compiler.BaseCompiler):
unary_op = {
'Invert' : '~',
'Not' : '!',
'UAdd' : '+',
'USub' : '-',
}
bool_op = {
'And' : '&&',
'Or' : '||',
}
binary_op = {
'Add' : '+',
'Sub' : '-',
'Mult' : '*',
'Div' : '/',
'Mod' : '%',
'LShift' : '<<',
'RShift' : '>>',
'BitOr' : '|',
'BitXor' : '^',
'BitAnd' : '&',
}
comparison_op = {
'Eq' : "==",
'NotEq' : "!=",
'Lt' : "<",
'LtE' : "<=",
'Gt' : ">",
'GtE' : ">=",
'Is' : "===",
'IsNot' : "is not", # Not implemented yet
}
def __init__(self, opts):
super(Compiler, self).__init__(opts)
self.name_map = self.name_map.copy()
self.name_map.update({"True": "true", "False": "false", "None": "null"})
self.opts = opts
def get_bool_op(self, node):
return self.bool_op[node.op.__class__.__name__]
def get_unary_op(self, node):
return self.unary_op[node.op.__class__.__name__]
def get_binary_op(self, node):
return self.binary_op[node.op.__class__.__name__]
def get_comparison_op(self, node):
return self.comparison_op[node.__class__.__name__]
def visit_Name(self, node):
name = self.name_map.get(node.id, node.id)
if (name in self.builtin) and not (name in self._scope):
name = "__builtins__." + name
return name
def visit_Global(self, node):
self._scope.extend(node.names)
return []
def visit_FunctionDef(self, node):
raise JSError("Javascript compiler does not support function definitions")
def visit_ClassDef(self, node):
raise JSError("Javascript compiler does not support class definitions")
def visit_Delete(self, node):
return ["delete %s;" % ", ".join([self.visit(x) for x in node.targets])]
def visit_AssignSimple(self, left, right):
target = left
value = right
if isinstance(target, (ast.Tuple, ast.List)):
part = self.alloc_var()
js = ["var %s = %s;" % (part, value)]
for i, target in enumerate(target.elts):
var = self.visit(target)
declare = ""
if isinstance(target, ast.Name):
if not (var in self._scope):
self._scope.append(var)
declare = "var "
js.append("%s%s = %s[%d];" % (declare, var, part, i))
elif isinstance(target, ast.Subscript) and isinstance(target.slice, ast.Index):
# found index assignment
if isinstance(target.slice, ast.Str):
i = self.visit(target.slice)
else:
i = '"%s"' % self.visit(target.slice)
js = ["%s[%s] = %s;" % (self.visit(target.value), self.visit(target.slice), value)]
elif isinstance(target, ast.Subscript) and isinstance(target.slice, ast.Slice):
raise JSError("Javascript does not support slice assignments")
else:
var = self.visit(target)
if isinstance(target, ast.Name):
if not (var in self._scope):
self._scope.append(var)
declare = "var "
else:
declare = ""
js = ["%s%s = %s;" % (declare, var, value)]
elif isinstance(target, ast.Attribute):
js = ["%s.%s = %s;" % (self.visit(target.value), str(target.attr), value)]
else:
raise JSError("Unsupported assignment type")
return js
def visit_AugAssign(self, node):
target = self.visit(node.target)
value = self.visit(node.value)
if isinstance(node.op, ast.Pow):
return ["%s = Math.pow(%s, %s);" % (target, target, value)]
if isinstance(node.op, ast.FloorDiv):
return ["%s = Math.floor((%s)/(%s));" % (target, target, value)]
return ["%s %s= %s;" % (target, self.get_binary_op(node), value)]
def visit_For(self, node):
if not isinstance(node.target, ast.Name):
raise JSError("argument decomposition in 'for' loop is not supported")
js = []
for_target = self.visit(node.target)
for_iter = self.visit(node.iter)
iter_dummy = self.alloc_var()
orelse_dummy = self.alloc_var()
exc_dummy = self.alloc_var()
if isinstance(node.iter, ast.Call) and isinstance(node.iter.func, ast.Name) and node.iter.func.id == "range" and not node.orelse:
counter = self.visit(node.target)
end_var = self.alloc_var()
assert(len(node.iter.args) in (1,2,3))
if len(node.iter.args) == 1:
start = "0"
end = self.visit(node.iter.args[0])
step = "1"
elif len(node.iter.args) == 2:
start = self.visit(node.iter.args[0])
end = self.visit(node.iter.args[1])
step = "1"
else:
start = self.visit(node.iter.args[0])
end = self.visit(node.iter.args[1])
step = self.visit(node.iter.args[2])
js.append("for (%s = %s; %s < %s; %s += %s) {" % (counter, start, counter, end, counter, step))
for stmt in node.body:
js.extend(self.indent(self.visit(stmt)))
js.append("}")
return js
js.append("var %s = iter(%s);" % (iter_dummy, for_iter))
js.append("var %s = false;" % orelse_dummy)
js.append("while (1) {")
js.append(" var %s;" % for_target)
js.append(" try {")
js.append(" %s = %s.PY$next();" % (for_target, iter_dummy))
js.append(" } catch (%s) {" % exc_dummy)
js.append(" if (__builtins__.isinstance(%s, __builtins__.StopIteration)) {" % exc_dummy)
js.append(" %s = true;" % orelse_dummy)
js.append(" break;")
js.append(" } else {")
js.append(" throw %s;" % exc_dummy)
js.append(" }")
js.append(" }")
for stmt in node.body:
js.extend(self.indent(self.visit(stmt)))
js.append("}")
if node.orelse:
js.append("if (%s) {" % orelse_dummy)
for stmt in node.orelse:
js.extend(self.indent(self.visit(stmt)))
js.append("}")
return js
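    # Illustrative translation sketch (an assumption based only on the string
    # templates above, not on pyjaco's full output): a loop such as
    #     for i in range(3): do(i)
    # takes the fast path and is emitted roughly as
    #     for (i = 0; i < 3; i += 1) { ... }
    # while any other iterable goes through iter()/PY$next() with an explicit
    # StopIteration check, as built by the generic path above.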
def visit_While(self, node):
js = []
if not node.orelse:
js.append("while (%s) {" % self.visit(node.test))
else:
orelse_dummy = self.alloc_var()
js.append("var %s = false;" % orelse_dummy)
js.append("while (1) {")
js.append(" if (!(%s)) {" % self.visit(node.test))
js.append(" %s = true;" % orelse_dummy)
js.append(" break;")
js.append(" }")
for stmt in node.body:
js.extend(self.indent(self.visit(stmt)))
js.append("}")
if node.orelse:
js.append("if (%s) {" % orelse_dummy)
for stmt in node.orelse:
js.extend(self.indent(self.visit(stmt)))
js.append("}")
return js
def visit_If(self, node):
js = ["if (%s) {" % self.visit(node.test)]
for stmt in node.body:
js.extend(self.indent(self.visit(stmt)))
if node.orelse:
js.append("} else {")
for stmt in node.orelse:
js.extend(self.indent(self.visit(stmt)))
return js + ["}"]
def _visit_With(self, node):
pass
def _visit_Raise(self, node):
pass
def _visit_TryExcept(self, node):
pass
def _visit_TryFinally(self, node):
pass
def _visit_Import(self, node):
pass
def _visit_ImportFrom(self, node):
pass
def visit_Lambda(self, node):
return "\n function(%s) {%s}" % (self.visit(node.args), self.visit(node.body))
def visit_BoolOp(self, node):
return self.get_bool_op(node).join([ "(%s)" % self.visit(val) for val in node.values ])
def visit_UnaryOp(self, node):
return "%s(%s)" % (self.get_unary_op(node), self.visit(node.operand))
def visit_BinOp(self, node):
if isinstance(node.op, ast.Mod) and isinstance(node.left, ast.Str):
left = self.visit(node.left)
if isinstance(node.right, (ast.Tuple, ast.List)):
right = self.visit(node.right)
return "sprintf(str(%s), tuple(%s))" % (left, right)
else:
right = self.visit(node.right)
return "sprintf(str(%s), str(%s))" % (left, right)
left = self.visit(node.left)
right = self.visit(node.right)
if isinstance(node.op, ast.Pow):
return "Math.pow(%s, %s)" % (left, right)
if isinstance(node.op, ast.FloorDiv):
return "Math.floor((%s)/(%s))" % (left, right)
return "(%s) %s (%s)" % (left, self.get_binary_op(node), right)
def visit_Compare(self, node):
assert len(node.ops) == 1
assert len(node.comparators) == 1
op = node.ops[0]
comp = node.comparators[0]
if isinstance(op, ast.In):
return "%s.__contains__(%s)" % (self.visit(comp), self.visit(node.left))
elif isinstance(op, ast.NotIn):
return "!(%s.__contains__(%s))" % (self.visit(comp), self.visit(node.left))
elif isinstance(op, ast.Eq):
return "(%s) === (%s)" % (self.visit(node.left), self.visit(comp))
elif isinstance(op, ast.NotEq):
return "(%s) !== (%s)" % (self.visit(node.left), self.visit(comp))
else:
return "%s %s %s" % (self.visit(node.left), self.get_comparison_op(op), self.visit(comp))
def visit_Num(self, node):
return str(node.n)
def visit_Str(self, node):
        # Uses the Python builtin repr() of a string and strips the string-type
        # prefix from it. This keeps the output valid Javascript, even when the
        # source uses literals like b"\\x00" or u"\\u0000".
return '"%s"' % repr(node.s).lstrip("urb")[1:-1]
def visit_Call(self, node):
func = self.visit(node.func)
if node.keywords:
keywords = []
for kw in node.keywords:
keywords.append("%s: %s" % (kw.arg, self.visit(kw.value)))
keywords = "{" + ", ".join(keywords) + "}"
js_args = ", ".join([ self.visit(arg) for arg in node.args ])
return "%s.args([%s], %s)" % (func, js_args,
keywords)
else:
if node.starargs is not None:
raise JSError("star arguments are not supported")
if node.kwargs is not None:
raise JSError("keyword arguments are not supported")
js_args = ", ".join([ self.visit(arg) for arg in node.args ])
return "%s(%s)" % (func, js_args)
def visit_Raise(self, node):
assert node.inst is None
assert node.tback is None
return ["throw %s;" % self.visit(node.type)]
def visit_Attribute(self, node):
return "%s.%s" % (self.visit(node.value), node.attr)
def visit_Tuple(self, node):
els = [self.visit(e) for e in node.elts]
return "[%s]" % (", ".join(els))
def visit_Dict(self, node):
els = []
for k, v in zip(node.keys, node.values):
els.append("%s: %s" % (self.visit(k), self.visit(v)))
return "{%s}" % (", ".join(els))
def visit_List(self, node):
els = [self.visit(e) for e in node.elts]
return "[%s]" % (", ".join(els))
def visit_Slice(self, node):
if node.step:
raise JSError("Javascript does not support slicing in steps")
if node.lower and node.upper:
return ".slice(%s, %s)" % (self.visit(node.lower), self.visit(node.upper))
if node.lower:
return "[%s]" % (self.visit(node.lower))
if node.upper:
return ".slice(0, %s)" % (self.visit(node.upper))
raise NotImplementedError("Slice")
def visit_Subscript(self, node):
# print node.value, node.slice
if isinstance(node.slice, ast.Index):
return "%s[%s]" % (self.visit(node.value), self.visit(node.slice))
else:
return "%s%s" % (self.visit(node.value), self.visit(node.slice))
def visit_Index(self, node):
return self.visit(node.value)
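# Illustrative sketch (hypothetical helper, for documentation only): the operator
# tables are plain class attributes, so the Python-to-Javascript operator mapping
# can be inspected without instantiating the compiler.
def _demo_operator_tables():
    assert Compiler.binary_op['BitXor'] == '^'
    assert Compiler.unary_op['Not'] == '!'
    assert Compiler.comparison_op['Is'] == '==='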
| buchuki/pyjaco | pyjaco/compiler/javascript.py | Python | mit | 14,252 | [
"VisIt"
] | 19da70e6327407482af986fc1d58c2a494397d302c3e7c2723d60167b7122064 |
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking."""
import copy
import six
import uuid
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from nova.compute.monitors import base as monitor_base
from nova.compute import resource_tracker
from nova.compute import resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
from nova.objects import pci_device_pool
from nova import rpc
from nova import test
from nova.tests.unit.pci import fakes as pci_fakes
from nova.virt import driver
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_MEMORY_OVERHEAD = 1
FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)
FAKE_VIRT_NUMA_TOPOLOGY = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([1, 2]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([3, 4]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([]))])
FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = objects.NUMATopologyLimits(
cpu_allocation_ratio=2, ram_allocation_ratio=2)
ROOT_GB = 5
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
FAKE_VIRT_STATS = {'virt_stat': 10}
FAKE_VIRT_STATS_COERCED = {'virt_stat': '10'}
FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
RESOURCE_NAMES = ['vcpu']
CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
"""Pretend version of a lame virt driver."""
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
# no support for getting resource usage info
return {}
class FakeVirtDriver(driver.ComputeDriver):
def __init__(self, pci_support=False, stats=None,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
super(FakeVirtDriver, self).__init__(None)
self.memory_mb = FAKE_VIRT_MEMORY_MB
self.local_gb = FAKE_VIRT_LOCAL_GB
self.vcpus = FAKE_VIRT_VCPUS
self.numa_topology = numa_topology
self.memory_mb_used = 0
self.local_gb_used = 0
self.pci_support = pci_support
self.pci_devices = [
{
'label': 'label_8086_0443',
'dev_type': fields.PciDeviceType.SRIOV_VF,
'compute_node_id': 1,
'address': '0000:00:01.1',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0443',
'dev_type': fields.PciDeviceType.SRIOV_VF,
'compute_node_id': 1,
'address': '0000:00:01.2',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0443',
'dev_type': fields.PciDeviceType.SRIOV_PF,
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_0123',
'dev_type': 'type-PCI',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0123',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1
},
{
'label': 'label_8086_7891',
'dev_type': fields.PciDeviceType.SRIOV_VF,
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '7891',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': None
},
] if self.pci_support else []
self.pci_stats = [
{
'count': 2,
'vendor_id': '8086',
'product_id': '0443',
'numa_node': 1
},
{
'count': 1,
'vendor_id': '8086',
'product_id': '7891',
'numa_node': None
},
] if self.pci_support else []
if stats is not None:
self.stats = stats
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
'local_gb': self.local_gb,
'vcpus_used': 0,
'memory_mb_used': self.memory_mb_used,
'local_gb_used': self.local_gb_used,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': 'fakehost',
'cpu_info': '',
'numa_topology': (
self.numa_topology._to_json() if self.numa_topology else None),
}
if self.pci_support:
d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
if hasattr(self, 'stats'):
d['stats'] = self.stats
return d
def estimate_instance_overhead(self, instance_info):
instance_info['memory_mb'] # make sure memory value is present
overhead = {
'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
}
return overhead # just return a constant value for testing
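# Illustrative sketch (hypothetical helper, for documentation only): what the fake
# driver above reports before any claims have been made.
def _demo_fake_virt_driver():
    drv = FakeVirtDriver()
    resources = drv.get_available_resource('fakenode')
    assert resources['memory_mb'] == FAKE_VIRT_MEMORY_MB
    assert resources['vcpus'] == FAKE_VIRT_VCPUS
    assert drv.estimate_instance_overhead(
        {'memory_mb': 5}) == {'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD}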
class BaseTestCase(test.TestCase):
@mock.patch('stevedore.enabled.EnabledExtensionManager')
def setUp(self, _mock_ext_mgr):
super(BaseTestCase, self).setUp()
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
self.context = context.get_admin_context()
self.flags(pci_passthrough_whitelist=[
'{"vendor_id": "8086", "product_id": "0443"}',
'{"vendor_id": "8086", "product_id": "7891"}'])
self.flags(use_local=True, group='conductor')
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self._instances = {}
self._instance_types = {}
self.stubs.Set(objects.InstanceList, 'get_by_host_and_node',
self._fake_instance_get_by_host_and_node)
self.stubs.Set(self.conductor.db,
'flavor_get', self._fake_flavor_get)
self.host = 'fakehost'
self.compute = self._create_compute_node()
self.updated = False
self.deleted = False
self.update_call_count = 0
def _create_compute_node(self, values=None):
# This creates a db representation of a compute_node.
compute = {
"id": 1,
"service_id": 1,
"host": "fakehost",
"vcpus": 1,
"memory_mb": 1,
"local_gb": 1,
"vcpus_used": 1,
"memory_mb_used": 1,
"local_gb_used": 1,
"free_ram_mb": 1,
"free_disk_gb": 1,
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
"numa_topology": None,
"stats": '{"num_instances": "1"}',
"hypervisor_hostname": "fakenode",
'hypervisor_version': 1,
'hypervisor_type': 'fake-hyp',
'disk_available_least': None,
'host_ip': None,
'metrics': None,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'cpu_allocation_ratio': None,
'ram_allocation_ratio': None,
}
if values:
compute.update(values)
return compute
def _create_compute_node_obj(self, context):
# Use the db representation of a compute node returned
# by _create_compute_node() to create an equivalent compute
# node object.
compute = self._create_compute_node()
compute_obj = objects.ComputeNode()
compute_obj = objects.ComputeNode._from_db_object(
context, compute_obj, compute)
return compute_obj
def _create_service(self, host="fakehost", compute=None):
if compute:
compute = [compute]
service = {
"id": 1,
"host": host,
"binary": "nova-compute",
"topic": "compute",
"compute_node": compute,
"report_count": 0,
'disabled': False,
'disabled_reason': None,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'last_seen_up': None,
'forced_down': False,
'version': 0,
}
return service
def _fake_instance_obj(self, stash=True, flavor=None, **kwargs):
# Default to an instance ready to resize to or from the same
# instance_type
flavor = flavor or self._fake_flavor_create()
if not isinstance(flavor, objects.Flavor):
flavor = objects.Flavor(**flavor)
instance_uuid = str(uuid.uuid1())
instance = objects.Instance(context=self.context, uuid=instance_uuid,
flavor=flavor)
instance.update({
'vm_state': vm_states.RESIZED,
'task_state': None,
'ephemeral_key_uuid': None,
'os_type': 'Linux',
'project_id': '123456',
'host': None,
'node': None,
'instance_type_id': flavor['id'],
'memory_mb': flavor['memory_mb'],
'vcpus': flavor['vcpus'],
'root_gb': flavor['root_gb'],
'ephemeral_gb': flavor['ephemeral_gb'],
'launched_on': None,
'system_metadata': {},
'availability_zone': None,
'vm_mode': None,
'reservation_id': None,
'display_name': None,
'default_swap_device': None,
'power_state': None,
'access_ip_v6': None,
'access_ip_v4': None,
'key_name': None,
'updated_at': None,
'cell_name': None,
'locked': None,
'locked_by': None,
'launch_index': None,
'architecture': None,
'auto_disk_config': None,
'terminated_at': None,
'ramdisk_id': None,
'user_data': None,
'cleaned': None,
'deleted_at': None,
'id': 333,
'disable_terminate': None,
'hostname': None,
'display_description': None,
'key_data': None,
'deleted': None,
'default_ephemeral_device': None,
'progress': None,
'launched_at': None,
'config_drive': None,
'kernel_id': None,
'user_id': None,
'shutdown_terminate': None,
'created_at': None,
'image_ref': None,
'root_device_name': None,
})
if stash:
instance.old_flavor = flavor
instance.new_flavor = flavor
instance.numa_topology = kwargs.pop('numa_topology', None)
instance.update(kwargs)
self._instances[instance_uuid] = instance
return instance
def _fake_flavor_create(self, **kwargs):
instance_type = {
'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'disabled': False,
'is_public': True,
'name': 'fakeitype',
'memory_mb': FAKE_VIRT_MEMORY_MB,
'vcpus': FAKE_VIRT_VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'swap': 0,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'flavorid': 'fakeflavor',
'extra_specs': {},
}
instance_type.update(**kwargs)
instance_type = objects.Flavor(**instance_type)
id_ = instance_type['id']
self._instance_types[id_] = instance_type
return instance_type
def _fake_instance_get_by_host_and_node(self, context, host, nodename,
expected_attrs=None):
return objects.InstanceList(
objects=[i for i in self._instances.values() if i['host'] == host])
def _fake_flavor_get(self, ctxt, id_):
return self._instance_types[id_]
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _driver(self):
return FakeVirtDriver()
def _tracker(self, host=None):
if host is None:
host = self.host
node = "fakenode"
driver = self._driver()
tracker = resource_tracker.ResourceTracker(host, driver, node)
tracker.compute_node = self._create_compute_node_obj(self.context)
tracker.ext_resources_handler = \
resources.ResourceHandler(RESOURCE_NAMES, True)
return tracker
class UnsupportedDriverTestCase(BaseTestCase):
"""Resource tracking should be disabled when the virt driver doesn't
support it.
"""
def setUp(self):
super(UnsupportedDriverTestCase, self).setUp()
self.tracker = self._tracker()
# seed tracker with data:
self.tracker.update_available_resource(self.context)
def _driver(self):
return UnsupportedVirtDriver()
def test_disabled(self):
# disabled = no compute node stats
self.assertTrue(self.tracker.disabled)
self.assertIsNone(self.tracker.compute_node)
def test_disabled_claim(self):
# basic claim:
instance = self._fake_instance_obj()
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_claim(self):
# instance variation:
instance = self._fake_instance_obj()
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
@mock.patch('nova.objects.Instance.save')
def test_disabled_instance_context_claim(self, mock_save):
# instance context manager variation:
instance = self._fake_instance_obj()
self.tracker.instance_claim(self.context, instance)
with self.tracker.instance_claim(self.context, instance) as claim:
self.assertEqual(0, claim.memory_mb)
def test_disabled_updated_usage(self):
instance = self._fake_instance_obj(host='fakehost', memory_mb=5,
root_gb=10)
self.tracker.update_usage(self.context, instance)
def test_disabled_resize_claim(self):
instance = self._fake_instance_obj()
instance_type = self._fake_flavor_create()
claim = self.tracker.resize_claim(self.context, instance,
instance_type)
self.assertEqual(0, claim.memory_mb)
self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
self.assertEqual(instance_type['id'],
claim.migration['new_instance_type_id'])
def test_disabled_resize_context_claim(self):
instance = self._fake_instance_obj()
instance_type = self._fake_flavor_create()
with self.tracker.resize_claim(self.context, instance, instance_type) \
as claim:
self.assertEqual(0, claim.memory_mb)
class MissingComputeNodeTestCase(BaseTestCase):
def setUp(self):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stubs.Set(db, 'compute_node_create',
self._fake_create_compute_node)
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def _fake_create_compute_node(self, context, values):
self.created = True
return self._create_compute_node(values)
def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
return service
def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
# return no compute node
raise exception.ComputeHostNotFound(host=host)
def test_create_compute_node(self):
self.tracker.compute_node = None
self.tracker.update_available_resource(self.context)
self.assertTrue(self.created)
def test_enabled(self):
self.tracker.update_available_resource(self.context)
self.assertFalse(self.tracker.disabled)
class BaseTrackerTestCase(BaseTestCase):
def setUp(self):
# setup plumbing for a working resource tracker with required
# database models and a compatible compute driver:
super(BaseTrackerTestCase, self).setUp()
self.tracker = self._tracker()
self._migrations = {}
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stubs.Set(db, 'compute_node_update',
self._fake_compute_node_update)
self.stubs.Set(db, 'compute_node_delete',
self._fake_compute_node_delete)
self.stubs.Set(db, 'migration_update',
self._fake_migration_update)
self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
self._fake_migration_get_in_progress_by_host_and_node)
# Note that this must be called before the call to _init_tracker()
patcher = pci_fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self._init_tracker()
self.limits = self._limits()
def _fake_service_get_by_compute_host(self, ctx, host):
self.service = self._create_service(host, compute=self.compute)
return self.service
def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
self.compute = self._create_compute_node()
return self.compute
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _fake_compute_node_delete(self, ctx, compute_node_id):
self.deleted = True
self.compute.update({'deleted': 1})
return self.compute
def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
node):
status = ['confirmed', 'reverted', 'error']
migrations = []
for migration in self._migrations.values():
migration = obj_base.obj_to_primitive(migration)
if migration['status'] in status:
continue
uuid = migration['instance_uuid']
migration['instance'] = self._instances[uuid]
migrations.append(migration)
return migrations
def _fake_migration_update(self, ctxt, migration_id, values):
# cheat and assume there's only 1 migration present
migration = self._migrations.values()[0]
migration.update(values)
return migration
def _init_tracker(self):
self.tracker.update_available_resource(self.context)
def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
disk_gb=FAKE_VIRT_LOCAL_GB,
vcpus=FAKE_VIRT_VCPUS,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD):
"""Create limits dictionary used for oversubscribing resources."""
return {
'memory_mb': memory_mb,
'disk_gb': disk_gb,
'vcpu': vcpus,
'numa_topology': numa_topology,
}
def assertEqualNUMAHostTopology(self, expected, got):
attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
if None in (expected, got):
if expected != got:
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
else:
return
if len(expected) != len(got):
raise AssertionError("Topologies don't match due to different "
"number of cells. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
for exp_cell, got_cell in zip(expected.cells, got.cells):
for attr in attrs:
if getattr(exp_cell, attr) != getattr(got_cell, attr):
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
def assertEqualPciDevicePool(self, expected, observed):
self.assertEqual(expected.product_id, observed.product_id)
self.assertEqual(expected.vendor_id, observed.vendor_id)
self.assertEqual(expected.tags, observed.tags)
self.assertEqual(expected.count, observed.count)
def assertEqualPciDevicePoolList(self, expected, observed):
ex_objs = expected.objects
ob_objs = observed.objects
self.assertEqual(len(ex_objs), len(ob_objs))
for i in range(len(ex_objs)):
self.assertEqualPciDevicePool(ex_objs[i], ob_objs[i])
def _assert(self, value, field, tracker=None):
if tracker is None:
tracker = self.tracker
if field not in tracker.compute_node:
raise test.TestingException(
"'%(field)s' not in compute node." % {'field': field})
x = tracker.compute_node[field]
if field == 'numa_topology':
self.assertEqualNUMAHostTopology(
value, objects.NUMATopology.obj_from_db_obj(x))
else:
self.assertEqual(value, x)
class TrackerTestCase(BaseTrackerTestCase):
def test_free_ram_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.memory_mb - driver.memory_mb_used
self.assertEqual(mem_free, self.tracker.compute_node.free_ram_mb)
def test_free_disk_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.local_gb - driver.local_gb_used
self.assertEqual(mem_free, self.tracker.compute_node.free_disk_gb)
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node.current_workload)
expected = pci_device_pool.from_pci_stats(driver.pci_stats)
self.assertEqual(expected,
self.tracker.compute_node.pci_device_pools)
def test_set_instance_host_and_node(self):
inst = objects.Instance()
with mock.patch.object(inst, 'save') as mock_save:
self.tracker._set_instance_host_and_node(self.context, inst)
mock_save.assert_called_once_with()
self.assertEqual(self.tracker.host, inst.host)
self.assertEqual(self.tracker.nodename, inst.node)
self.assertEqual(self.tracker.host, inst.launched_on)
class SchedulerClientTrackerTestCase(BaseTrackerTestCase):
def setUp(self):
super(SchedulerClientTrackerTestCase, self).setUp()
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def test_update_resource(self):
        # NOTE(pmurray): we are not doing a full pass through the resource
        # tracker's update path, so it is safest to do two updates and look for
        # differences rather than to rely on the initial state being the same
        # as an update
urs_mock = self.tracker.scheduler_client.update_resource_stats
self.tracker._update(self.context)
urs_mock.reset_mock()
# change a compute node value to simulate a change
self.tracker.compute_node.local_gb_used += 1
self.tracker._update(self.context)
urs_mock.assert_called_once_with(self.tracker.compute_node)
def test_no_update_resource(self):
        # NOTE(pmurray): we are not doing a full pass through the resource
        # tracker's update path, so it is safest to do two updates and look for
        # differences rather than to rely on the initial state being the same
        # as an update
self.tracker._update(self.context)
update = self.tracker.scheduler_client.update_resource_stats
update.reset_mock()
self.tracker._update(self.context)
self.assertFalse(update.called, "update_resource_stats should not be "
"called when there is no change")
class TrackerPciStatsTestCase(BaseTrackerTestCase):
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node.current_workload)
expected_pools = pci_device_pool.from_pci_stats(driver.pci_stats)
observed_pools = self.tracker.compute_node.pci_device_pools
self.assertEqualPciDevicePoolList(expected_pools, observed_pools)
def _driver(self):
return FakeVirtDriver(pci_support=True)
class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
def test_set_empty_ext_resources(self):
resources = self._create_compute_node_obj(self.context)
del resources.stats
self.tracker._write_ext_resources(resources)
self.assertEqual({}, resources.stats)
def test_set_extra_resources(self):
def fake_write_resources(resources):
resources['stats']['resA'] = '123'
resources['stats']['resB'] = 12
self.stubs.Set(self.tracker.ext_resources_handler,
'write_resources',
fake_write_resources)
resources = self._create_compute_node_obj(self.context)
del resources.stats
self.tracker._write_ext_resources(resources)
expected = {"resA": "123", "resB": "12"}
self.assertEqual(sorted(expected),
sorted(resources.stats))
class InstanceClaimTestCase(BaseTrackerTestCase):
def _instance_topology(self, mem):
mem = mem * 1024
return objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=0, cpuset=set([1]), memory=mem),
objects.InstanceNUMACell(
id=1, cpuset=set([3]), memory=mem)])
def _claim_topology(self, mem, cpus=1):
if self.tracker.driver.numa_topology is None:
return None
mem = mem * 1024
return objects.NUMATopology(
cells=[objects.NUMACell(
id=0, cpuset=set([1, 2]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([])),
objects.NUMACell(
id=1, cpuset=set([3, 4]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([]))])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_instance_claim_with_oversubscription(self, mock_get):
memory_mb = FAKE_VIRT_MEMORY_MB * 2
root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
vcpus = FAKE_VIRT_VCPUS * 2
claim_topology = self._claim_topology(3)
instance_topology = self._instance_topology(3)
limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
'disk_gb': root_gb * 2,
'vcpu': vcpus,
'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD}
instance = self._fake_instance_obj(memory_mb=memory_mb,
root_gb=root_gb, ephemeral_gb=ephemeral_gb,
numa_topology=instance_topology)
with mock.patch.object(instance, 'save'):
self.tracker.instance_claim(self.context, instance, limits)
self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(root_gb * 2,
self.tracker.compute_node.local_gb_used)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_additive_claims(self, mock_save, mock_get):
self.limits['vcpu'] = 2
claim_topology = self._claim_topology(2, cpus=2)
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=1, ephemeral_gb=0)
instance_topology = self._instance_topology(1)
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
self.tracker.compute_node.memory_mb_used)
self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
self.tracker.compute_node.local_gb_used)
self.assertEqual(2 * flavor['vcpus'],
self.tracker.compute_node.vcpus_used)
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_context_claim_with_exception(self, mock_save, mock_get):
instance = self._fake_instance_obj(memory_mb=1, root_gb=1,
ephemeral_gb=1)
try:
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
raise test.TestingException()
except test.TestingException:
pass
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
self.assertEqual(0, self.compute['memory_mb_used'])
self.assertEqual(0, self.compute['local_gb_used'])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_instance_context_claim(self, mock_get_all, mock_save, mock_get):
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=2, ephemeral_gb=3)
claim_topology = self._claim_topology(1)
instance_topology = self._instance_topology(1)
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node.local_gb_used)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
# after exiting claim context, build is marked as finished. usage
# totals should be same:
mock_get_all.return_value = [instance]
self.tracker.update_available_resource(self.context)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node.local_gb_used)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_load_stats_for_instance(self, mock_get):
instance = self._fake_instance_obj(task_state=task_states.SCHEDULING)
with mock.patch.object(instance, 'save'):
with self.tracker.instance_claim(self.context, instance):
pass
self.assertEqual(1, self.tracker.compute_node.current_workload)
instance['vm_state'] = vm_states.ACTIVE
instance['task_state'] = None
instance['host'] = 'fakehost'
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node.current_workload)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_cpu_stats(self, mock_save, mock_get):
limits = {'disk_gb': 100, 'memory_mb': 100}
self.assertEqual(0, self.tracker.compute_node.vcpus_used)
vcpus = 1
instance = self._fake_instance_obj(vcpus=vcpus)
# should not do anything until a claim is made:
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node.vcpus_used)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
# instance state can change without modifying vcpus in use:
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
add_vcpus = 10
vcpus += add_vcpus
instance = self._fake_instance_obj(vcpus=add_vcpus)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
instance['vm_state'] = vm_states.DELETED
self.tracker.update_usage(self.context, instance)
vcpus -= add_vcpus
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
def test_skip_deleted_instances(self):
# ensure that the audit process skips instances that have vm_state
# DELETED, but the DB record is not yet deleted.
self._fake_instance_obj(vm_state=vm_states.DELETED, host=self.host)
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_deleted_instances_with_migrations(self, mock_migration_list):
migration = objects.Migration(context=self.context,
migration_type='resize',
instance_uuid='invalid')
mock_migration_list.return_value = [migration]
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_instances_with_live_migrations(self, mock_migration_list):
instance = self._fake_instance_obj()
migration = objects.Migration(context=self.context,
migration_type='live-migration',
instance_uuid=instance.uuid)
mock_migration_list.return_value = [migration]
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
@mock.patch('nova.compute.claims.Claim')
@mock.patch('nova.objects.Instance.save')
def test_claim_saves_numa_topology(self, mock_save, mock_claim):
def fake_save():
self.assertEqual(set(['numa_topology', 'host', 'node',
'launched_on']),
inst.obj_what_changed())
mock_save.side_effect = fake_save
inst = objects.Instance(host=None, node=None, memory_mb=1024)
inst.obj_reset_changes()
numa = objects.InstanceNUMATopology()
claim = mock.MagicMock()
claim.claimed_numa_topology = numa
mock_claim.return_value = claim
with mock.patch.object(self.tracker, '_update_usage_from_instance'):
self.tracker.instance_claim(self.context, inst)
mock_save.assert_called_once_with()
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_sets_instance_host_and_node(self, mock_get):
instance = self._fake_instance_obj()
self.assertIsNone(instance['host'])
self.assertIsNone(instance['launched_on'])
self.assertIsNone(instance['node'])
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertNotEqual(0, claim.memory_mb)
self.assertEqual('fakehost', instance['host'])
self.assertEqual('fakehost', instance['launched_on'])
self.assertEqual('fakenode', instance['node'])
class _MoveClaimTestCase(BaseTrackerTestCase):
def setUp(self):
super(_MoveClaimTestCase, self).setUp()
self.instance = self._fake_instance_obj()
self.instance_type = self._fake_flavor_create()
self.claim_method = self.tracker._move_claim
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_abort(self, mock_get):
try:
with self.claim_method(self.context, self.instance,
self.instance_type, limits=self.limits):
raise test.TestingException("abort")
except test.TestingException:
pass
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self.assertEqual(0, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_additive_claims(self, mock_get):
limits = self._limits(
2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
2 * FAKE_VIRT_LOCAL_GB,
2 * FAKE_VIRT_VCPUS)
self.claim_method(
self.context, self.instance, self.instance_type, limits=limits)
instance2 = self._fake_instance_obj()
self.claim_method(
self.context, instance2, self.instance_type, limits=limits)
self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_revert(self, mock_get):
self.claim_method(
self.context, self.instance, self.instance_type,
image_meta={}, limits=self.limits)
self.tracker.drop_move_claim(self.context, self.instance)
self.assertEqual(0, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_move_type_not_tracked(self, mock_get):
self.claim_method(self.context, self.instance,
self.instance_type, limits=self.limits, move_type="evacuation")
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self.assertEqual(0, len(self.tracker.tracked_migrations))
@mock.patch.object(objects.Migration, 'save')
def test_existing_migration(self, save_mock):
migration = objects.Migration(self.context,
instance_uuid=self.instance.uuid,
status='accepted',
migration_type='evacuation')
self.claim_method(self.context, self.instance, self.instance_type,
migration=migration)
self.assertEqual(self.tracker.host, migration.dest_compute)
self.assertEqual(self.tracker.nodename, migration.dest_node)
self.assertEqual("pre-migrating", migration.status)
self.assertEqual(0, len(self.tracker.tracked_migrations))
save_mock.assert_called_once_with()
class ResizeClaimTestCase(_MoveClaimTestCase):
def setUp(self):
super(ResizeClaimTestCase, self).setUp()
self.claim_method = self.tracker.resize_claim
def test_move_type_not_tracked(self):
self.skipTest("Resize_claim does already sets the move_type.")
def test_existing_migration(self):
self.skipTest("Resize_claim does not support having existing "
"migration record.")
class OrphanTestCase(BaseTrackerTestCase):
def _driver(self):
class OrphanVirtDriver(FakeVirtDriver):
def get_per_instance_usage(self):
return {
'1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '1-2-3-4-5'},
'2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '2-3-4-5-6'},
}
return OrphanVirtDriver()
def test_usage(self):
self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
def test_find(self):
# create one legit instance and verify the 2 orphans remain
self._fake_instance_obj()
orphans = self.tracker._find_orphaned_instances()
self.assertEqual(2, len(orphans))
class ComputeMonitorTestCase(BaseTestCase):
def setUp(self):
super(ComputeMonitorTestCase, self).setUp()
self.tracker = self._tracker()
self.node_name = 'nodename'
self.user_id = 'fake'
self.project_id = 'fake'
self.info = {}
self.context = context.RequestContext(self.user_id,
self.project_id)
def test_get_host_metrics_none(self):
self.tracker.monitors = []
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertEqual(len(metrics), 0)
@mock.patch.object(resource_tracker.LOG, 'warning')
def test_get_host_metrics_exception(self, mock_LOG_warning):
monitor = mock.MagicMock()
monitor.add_metrics_to_list.side_effect = Exception
self.tracker.monitors = [monitor]
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_LOG_warning.assert_called_once_with(
u'Cannot get the metrics from %s.', mock.ANY)
self.assertEqual(0, len(metrics))
def test_get_host_metrics(self):
class FakeCPUMonitor(monitor_base.MonitorBase):
NOW_TS = timeutils.utcnow()
def __init__(self, *args):
super(FakeCPUMonitor, self).__init__(*args)
self.source = 'FakeCPUMonitor'
def get_metric_names(self):
return set(["cpu.frequency"])
def get_metric(self, name):
return 100, self.NOW_TS
self.tracker.monitors = [FakeCPUMonitor(None)]
mock_notifier = mock.Mock()
with mock.patch.object(rpc, 'get_notifier',
return_value=mock_notifier) as mock_get:
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_get.assert_called_once_with(service='compute',
host=self.node_name)
expected_metrics = [
{
'timestamp': timeutils.strtime(
FakeCPUMonitor.NOW_TS),
'name': 'cpu.frequency',
'value': 100,
'source': 'FakeCPUMonitor'
},
]
payload = {
'metrics': expected_metrics,
'host': self.tracker.host,
'host_ip': CONF.my_ip,
'nodename': self.node_name
}
mock_notifier.info.assert_called_once_with(
self.context, 'compute.metrics.update', payload)
self.assertEqual(metrics, expected_metrics)
class TrackerPeriodicTestCase(BaseTrackerTestCase):
def test_periodic_status_update(self):
# verify update called on instantiation
self.assertEqual(1, self.update_call_count)
# verify update not called if no change to resources
self.tracker.update_available_resource(self.context)
self.assertEqual(1, self.update_call_count)
# verify update is called when resources change
driver = self.tracker.driver
driver.memory_mb += 1
self.tracker.update_available_resource(self.context)
self.assertEqual(2, self.update_call_count)
def test_update_available_resource_calls_locked_inner(self):
@mock.patch.object(self.tracker, 'driver')
@mock.patch.object(self.tracker,
'_update_available_resource')
@mock.patch.object(self.tracker, '_verify_resources')
@mock.patch.object(self.tracker, '_report_hypervisor_resource_view')
def _test(mock_rhrv, mock_vr, mock_uar, mock_driver):
resources = {'there is someone in my head': 'but it\'s not me'}
mock_driver.get_available_resource.return_value = resources
self.tracker.update_available_resource(self.context)
mock_uar.assert_called_once_with(self.context, resources)
_test()
class StatsDictTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a dictionary.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS)
def test_virt_stats(self):
# start with virt driver stats
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
# adding an instance should keep virt driver stats
self._fake_instance_obj(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
# compute node stats are coerced to strings
expected_stats = copy.deepcopy(FAKE_VIRT_STATS_COERCED)
for k, v in self.tracker.stats.iteritems():
expected_stats[k] = six.text_type(v)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
class StatsJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a json string.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS_JSON)
def test_virt_stats(self):
# start with virt driver stats
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
# adding an instance should keep virt driver stats
# and add rt stats
self._fake_instance_obj(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
# compute node stats are coerced to strings
expected_stats = copy.deepcopy(FAKE_VIRT_STATS_COERCED)
for k, v in self.tracker.stats.iteritems():
expected_stats[k] = six.text_type(v)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
class StatsInvalidJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
    stats as a string that is not valid JSON.
"""
def _driver(self):
return FakeVirtDriver(stats='this is not json')
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for string that does not parse as json
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
class StatsInvalidTypeTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats=10)
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for incorrect stats value type
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
| isyippee/nova | nova/tests/unit/compute/test_resource_tracker.py | Python | apache-2.0 | 55,312 | ["exciting"] | 2ead65a9f3beb73d887f997f8baa70407a284e12603175633ae04f0ba0ce1593 |
"""
PRMS Vegetation Change Modeling API
Date: Feb 25 2016
"""
import datetime
import json
import math
import netCDF4
import os
from flask import jsonify, request, Response
from flask import current_app as app
from urllib import urlretrieve
from uuid import uuid4
from . import api
from ..models import Scenario, Hydrograph, Inputs, Outputs
from util import get_veg_map_by_hru, model_run_name
@api.route('/api/scenarios/<scenario_id>', methods=['GET', 'DELETE'])
def scenario_by_id(scenario_id):
"""
Look up or delete a scenario by its id
"""
if request.method == 'GET':
scenario = Scenario.objects(id=scenario_id).first()
if scenario:
return jsonify(scenario=scenario.to_json())
else:
return Response(
json.dumps(
{'message': 'no scenario id found! ' +
'currently the scenario id must be 1 or 0!'}
), 400, mimetype='application/json'
)
if request.method == 'DELETE':
scenario = Scenario.objects(id=scenario_id).first()
if scenario:
try:
scenario.delete()
return jsonify(
message='scenario with id ' + scenario_id + ' removed!'
)
except:
return Response(
json.dumps(
{'message': 'error deleting scenario ' + scenario_id}
), 400, mimetype='application/json'
)
else:
return Response(
json.dumps(
{'message': 'scenario_id' + scenario_id + 'not found'}
), 400, mimetype='application/json'
)
@api.route('/api/scenarios/finished_modelruns')
def display_modelruns():
temp_list = model_run_name(
auth_host=app.config['AUTH_HOST'],
model_host=app.config['MODEL_HOST'],
app_username=app.config['APP_USERNAME'],
app_password=app.config['APP_PASSWORD']
)
return temp_list
@api.route('/api/scenarios', methods=['GET', 'POST'])
def scenarios():
"""
    Handle GET and POST requests: list all finished scenarios, or submit a
    new scenario, respectively.
"""
if request.method == 'GET':
scenarios = Scenario.objects
        # when DEBUG is enabled, seed the dev database with three demo
        # scenarios if fewer than three exist
if app.config['DEBUG'] and len(scenarios) < 3:
for loop_counter in range(3):
_init_dev_db(app.config['BASE_PARAMETER_NC'], loop_counter)
scenarios = Scenario.objects
return jsonify(scenarios=scenarios)
else:
BASE_PARAMETER_NC = app.config['BASE_PARAMETER_NC']
# assemble parts of a new scenario record
vegmap_json = request.json['veg_map_by_hru']
name = request.json['name']
time_received = datetime.datetime.now()
new_scenario = Scenario(
name=name,
time_received=time_received,
)
scenario_runner = new_scenario.initialize_runner(BASE_PARAMETER_NC)
# using vegmap sent by client, update coverage type variables
scenario_runner.update_cov_type(vegmap_json['bare_ground'], 0)
scenario_runner.update_cov_type(vegmap_json['grasses'], 1)
scenario_runner.update_cov_type(vegmap_json['shrubs'], 2)
scenario_runner.update_cov_type(vegmap_json['trees'], 3)
scenario_runner.update_cov_type(vegmap_json['conifers'], 4)
# commit changes to coverage type in preparation for scenario run
scenario_runner.finalize_run()
# now that changes to scenario_file are committed, we update Scenario
new_scenario.veg_map_by_hru =\
get_veg_map_by_hru(scenario_runner.scenario_file)
new_scenario.save()
modelserver_run = scenario_runner.run(
auth_host=app.config['AUTH_HOST'],
model_host=app.config['MODEL_HOST'],
app_username=app.config['APP_USERNAME'],
app_password=app.config['APP_PASSWORD']
)
time_finished = datetime.datetime.now()
new_scenario.time_finished = time_finished
# extract URLs pointing to input/output resources stored on modelserver
resources = modelserver_run.resources
control =\
filter(lambda x: 'control' == x.resource_type, resources
).pop().resource_url
parameter =\
filter(lambda x: 'param' == x.resource_type, resources
).pop().resource_url
data =\
filter(lambda x: 'data' == x.resource_type, resources
).pop().resource_url
inputs = Inputs(control=control, parameter=parameter, data=data)
new_scenario.inputs = inputs
statsvar =\
filter(lambda x: 'statsvar' == x.resource_type, resources
).pop().resource_url
outputs = Outputs(statsvar=statsvar)
new_scenario.outputs = outputs
# extract hydrograph from the statsvar file and add to Scenario
if not os.path.isdir('.tmp'):
os.mkdir('.tmp')
tmp_statsvar = os.path.join('.tmp', 'statsvar-' + str(uuid4()))
urlretrieve(statsvar, tmp_statsvar)
d = netCDF4.Dataset(tmp_statsvar, 'r')
cfs = d['basin_cfs_1'][:]
t = d.variables['time']
# need to subtract 1...bug in generation of statsvar b/c t starts at 1
dates = netCDF4.num2date(t[:] - 1, t.units)
hydrograph = Hydrograph(time_array=dates, streamflow_array=cfs)
new_scenario.hydrograph = hydrograph
new_scenario.save()
# clean up temporary statsvar netCDF
d.close()
os.remove(tmp_statsvar)
return jsonify(scenario=new_scenario.to_json())
@api.route('/api/base-veg-map', methods=['GET'])
def hru_veg_json():
    """Generate the base vegetation map JSON from the base parameter netCDF file."""
    if request.method == 'GET':
BASE_PARAMETER_NC = app.config['BASE_PARAMETER_NC']
return jsonify(
**json.loads(get_veg_map_by_hru(BASE_PARAMETER_NC).to_json())
)
def _init_dev_db(BASE_PARAMETER_NC, scenario_num=0):
"""
"""
name = 'Demo development scenario ' + str(scenario_num)
time_received = datetime.datetime.now()
updated_veg_map_by_hru = get_veg_map_by_hru(BASE_PARAMETER_NC)
time_finished = datetime.datetime.now()
inputs = Inputs()
outputs = Outputs()
# create two water years of fake data starting from 1 Oct 2010
begin_date = datetime.datetime(2010, 10, 1, 0)
time_array = [begin_date + datetime.timedelta(days=x) for x in
range(365*2)]
# use simple exponentials as the prototype data
x = range(365)
streamflow_array = [
pow(math.e, -pow(((i - 200.0 + 50*scenario_num)/100.0), 2))
for i in x
]
hydrograph = Hydrograph(
time_array=time_array,
streamflow_array=streamflow_array+streamflow_array
)
new_scenario = Scenario(
name=name,
time_received=time_received,
time_finished=time_finished,
veg_map_by_hru=updated_veg_map_by_hru,
inputs=inputs,
outputs=outputs,
hydrograph=hydrograph
)
new_scenario.save()
| VirtualWatershed/prms-vegetation-scenarios | app/api/views.py | Python | bsd-3-clause | 7,327 | ["NetCDF"] | f11c209226263e1890773ed93297b1fddd2e7622b1ce8a49e2d28748c1ae1fc3 |
from __future__ import print_function
import matplotlib
import numpy as np
import copy
import re
import warnings
from astropy import log
from astropy import units as u
from astropy.extern.six.moves import xrange
from astropy.extern.six import string_types
from ..config import mycfg
from ..config import ConfigDescriptor as cfgdec
from . import units
from . import models
from ..specwarnings import warn
from . import interactive
from . import history
from . import widgets
class Registry(object):
"""
This class is a simple wrapper to prevent fitter properties from being globals
"""
def __init__(self):
self.npars = {}
self.multifitters = {}
#to delete
self.peakbgfitters = {}
self.fitkeys = {}
self.associatedkeys = {}
self._interactive_help_message_root = """
'?' will print this help message again. The keys denoted by surrounding / / are
mnemonics.
1. Left-click or hit 'p' (/p/ick) with the cursor over the plot at both of the
two desired X-values to select a fitting range. You can e/x/clude parts of the
spectrum by hitting 'x' at two positions.
2. Then /m/iddle-click or hit 'm' twice to select (/m/ark) a peak and width -
the first mark should be on the peak of the line, the second should be at the
approximate half-max point on the curve.
3. When you're done, right-click or hit 'd' to perform the fit and disconnect
the mouse and keyboard (/d/isconnect because you're /d/one). Any time before
you're /d/one, you can select a different fitter (see below).
To /c/ancel or /c/lear all connections, press 'c'
'?' : get help (this message)
'c' : cancel / clear
'p','1' : pick / selection region for fitting
'm','2' : mark / identify a peak
'd','3' : done / do the fit, then disconnect the fitter
'i' : individual components / show each fitted component
You can select different fitters to use with the interactive fitting routine.
The default is gaussian ('g'), all options are listed below:
"""
self._make_interactive_help_message()
def __copy__(self):
# http://stackoverflow.com/questions/1500718/what-is-the-right-way-to-override-the-copy-deepcopy-operations-on-an-object-in-p
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
return result
def add_fitter(self, name, function, npars, override=False, key=None,
multisingle=None):
'''
Register a fitter function.
Parameters
----------
name: string
The fit function name.
function: function
The fitter function. Single-fitters should take npars + 1 input
parameters, where the +1 is for a 0th order baseline fit. They
should accept an X-axis and data and standard fitting-function
inputs (see, e.g., gaussfitter). Multi-fitters should take N *
npars, but should also operate on X-axis and data arguments.
npars: int
How many parameters does the function being fit accept?
Other Parameters
----------------
override: True | False
Whether to override any existing type if already present.
key: char
Key to select the fitter in interactive mode
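        Examples
        --------
        A minimal sketch (``my_fitter`` is a stand-in for any model instance
        exposing the fitter interface described above)::

            registry = Registry()
            registry.add_fitter('mymodel', my_fitter, 3, key='y')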
'''
if multisingle is not None:
warn("The 'multisingle' keyword is no longer required.",
DeprecationWarning)
if not name in self.peakbgfitters or override:
self.peakbgfitters[name] = function
if not name in self.multifitters or override:
self.multifitters[name] = function
if key is not None:
self.fitkeys[key] = name
self._make_interactive_help_message()
self.npars[name] = npars
self.associated_keys = dict(zip(self.fitkeys.values(),self.fitkeys.keys()))
def _make_interactive_help_message(self):
"""
Generate the interactive help message from the fitkeys
"""
self.interactive_help_message = (
self._interactive_help_message_root +
"\n" +
"\n".join(["'%s' - select fitter %s" % (key,name) for key,name in self.fitkeys.items()]) +
"\n" # trailing \n so that users' input is on a fresh line
)
# Declare default registry built in for all spectra
default_Registry = Registry()
default_Registry.add_fitter('ammonia',models.ammonia_model(),6,key='a')
default_Registry.add_fitter('cold_ammonia',models.ammonia.cold_ammonia_model(),6)
default_Registry.add_fitter('ammonia_tau',models.ammonia_model_vtau(),6)
# not implemented default_Registry.add_fitter(Registry,'ammonia',models.ammonia_model( ),6, ,key='A')
default_Registry.add_fitter('formaldehyde',models.formaldehyde_fitter,3,key='F') # CAN'T USE f! reserved for fitting
# don't override default_Registry.add_fitter('formaldehyde',models.formaldehyde_vheight_fitter,3)
default_Registry.add_fitter('gaussian',models.gaussian_fitter(),3,key='g')
default_Registry.add_fitter('vheightgaussian',models.gaussian_vheight_fitter(),4)
default_Registry.add_fitter('voigt',models.voigt_fitter(),4,key='v')
default_Registry.add_fitter('lorentzian',models.lorentzian_fitter(),3,key='L')
#default_Registry.add_fitter('hill5',models.hill5infall.hill5_fitter,5)
#default_Registry.add_fitter('hcn',models.hcn.hcn_vtau_fitter,4)
class Specfit(interactive.Interactive):
def __init__(self, Spectrum, Registry=None):
super(Specfit, self).__init__(Spectrum,
interactive_help_message=Registry.interactive_help_message)
self.model = None
self.parinfo = None
self.modelpars = None
self.modelerrs = None
self.modelplot = []
self.modelcomponents = None
self._plotted_components = []
self.npeaks = 0
#self.nclicks_b1 = 0
#self.nclicks_b2 = 0
#self.xmin = 0
#self.xmax = Spectrum.data.shape[0]
self.button2action = self.guesspeakwidth
self.guesses = []
self.click = 0
self.fitkwargs = {}
self.auto = False
self.fitleg=None
self.residuals=None
self.setfitspec()
self.fittype = 'gaussian'
self.measurements = None
self.vheight=False # vheight must be a boolean, can't be none
self._component_kwargs = {}
self.Registry = Registry
self.autoannotate = mycfg['autoannotate']
self.EQW_plots = []
#self.seterrspec()
@cfgdec
def __call__(self, interactive=False, usemoments=True,
clear_all_connections=True, debug=False, guesses='moments',
parinfo=None, save=True, annotate=None, show_components=None,
use_lmfit=False, verbose=True, clear=True,
reset_selection=True,
fit_plotted_area=True, use_window_limits=None, vheight=None,
exclude=None, **kwargs):
"""
Fit model functions to a spectrum
Parameters
----------
interactive : boolean
The plotter window will go into interactive mode. See
self.interactive_help_message for details on how to use the
interactive fitter.
fittype : str
[passed to fitting codes; defaults to gaussian]
The model to use. Model must be registered in self.Registry.
gaussian, lorentzian, and voigt profiles are registered by default
guesses : list or 'moments'
A list of guesses. Guesses must have length = n*number of parameters
in model. Guesses are *required* for multifit fits (there is no
automated guessing for most models)
EXAMPLE: for single-fit gaussian
guesses = [height,amplitude,center,width]
for multi-fit gaussian, it is
[amplitude, center, width]
You can also pass the keyword string 'moments' to have the moments
be used to automatically determine the guesses for a *single* peak
parinfo : `pyspeckit.spectrum.parinfo.ParinfoList`
            An alternative way to specify guesses. Supersedes guesses.
use_lmfit : boolean
If lmfit-py (https://github.com/newville/lmfit-py) is installed, you
can use it instead of the pure-python (but slow) mpfit.
reset_selection : boolean
Override any selections previously made using `fit_plotted_area` or
other keywords?
fit_plotted_area : boolean
If no other limits are specified, the plotter's xmin/xmax will be
used to define the fit region. Only respects the x-axis limits,
not the y-axis limits.
use_window_limits : boolean
If ``fit_plotted_area==True`` and no other limits are specified,
will use the displayed window area (as set by the zoom tools) as
the fitting range. Only respects the x-axis limits, not the y-axis
limits.
exclude : None or list
Passed to selectregion; specifies regions to exclude in xarr units
Plotter-related Parameters
--------------------------
annotate : None or boolean
If None, will use the default stored in self.annotate, otherwise
overwrites. Annotations will appear on the plot if a plotter
exists.
show_components : boolean
Show the individual components of a multi-component fit (defaults
to blue)
clear : boolean
Clear previous fitter plots before overplotting the fit?
Advanced Parameters
-------------------
clear_all_connections : boolean
Clear all of the interactive connections from a previous interactive
session (e.g., a baseline fitting session) before continuing?
usemoments : boolean
Use the moments of the spectrum as input guesses. Only works
for gaussian and gaussian-like models. Only works for single-fit
mode (not multifit)
DEPRECATED
debug : boolean
Print debug statements?
save : boolean
Save best fits in the FITS header as keywords? ONLY IMPLEMENTED
FOR GAUSSIANS
verbose : boolean
Print out extra stuff
vheight : None or boolean
if None, defaults to self.vheight, otherwise overrides
Determines whether a 0th order baseline will be fit along with the
line
"""
if clear:
self.clear()
if reset_selection:
self.selectregion(verbose=verbose, debug=debug,
fit_plotted_area=fit_plotted_area,
exclude=exclude,
use_window_limits=use_window_limits, **kwargs)
for arg in ['xmin','xmax','xtype','reset']:
if arg in kwargs:
kwargs.pop(arg)
if 'multifit' in kwargs:
kwargs.pop('multifit')
log.warning("The multifit keyword is no longer required. All fits "
"allow for multiple components.", DeprecationWarning)
if 'guess' in kwargs:
if guesses is None:
guesses = kwargs.pop('guess')
log.warning("The keyword 'guess' is nonstandard; please use 'guesses'")
else:
raise ValueError("Received keywords 'guess' and 'guesses'. "
"Please only use 'guesses'")
self.npeaks = 0
self.fitkwargs = kwargs
log.debug("Additional keyword arguments passed to fitter are: {0}"
.format(kwargs))
if interactive:
if self.Spectrum.plotter.axis is None:
raise Exception("Interactive fitting requires a plotter.")
# reset button count & guesses on every __call__
self.nclicks_b1 = 0
self.nclicks_b2 = 0
self.guesses = []
self.start_interactive(clear_all_connections=clear_all_connections,
reset_selection=True,
debug=debug, **kwargs)
elif (self.fittype in self.Registry.multifitters
or guesses is not None
or parinfo is not None):
if guesses is None and parinfo is None:
raise ValueError("You must input guesses when using multifit."
" Also, baseline (continuum fit) first!")
elif parinfo is not None:
self.guesses = parinfo.values
self.parinfo = parinfo
self.multifit(show_components=show_components, verbose=verbose,
debug=debug, use_lmfit=use_lmfit,
annotate=annotate, parinfo=parinfo,
guesses=None, **kwargs)
elif guesses is not None:
if isinstance(guesses, tuple):
guesses = list(guesses)
self.guesses = guesses
self.multifit(show_components=show_components, verbose=verbose,
debug=debug, use_lmfit=use_lmfit,
guesses=guesses, annotate=annotate, **kwargs)
else:
raise ValueError("Guess and parinfo were somehow invalid.")
else:
raise ValueError("Can't fit with given fittype {0}:"
" it is not Registered as a fitter.".format(self.fittype))
if save:
self.savefit()
def EQW(self, plot=False, plotcolor='g', fitted=True, continuum=None,
components=False, annotate=False, alpha=0.5, loc='lower left',
xmin=None, xmax=None, xunits='pixel', continuum_as_baseline=False,
midpt_location='plot-center'):
"""
Returns the equivalent width (integral of "baseline" or "continuum"
minus the spectrum) over the selected range
(the selected range defaults to self.xmin:self.xmax, so it may include
multiple lines!)
Parameters
----------
plot : bool
Plots a box indicating the EQW if plot==True (i.e., it will have a
width equal to the equivalent width, and a height equal to the
measured continuum)
fitted : bool
Use the fitted model? If false, uses the data
continuum : None or float
Can specify a fixed continuum with this keyword, otherwise will use
the fitted baseline. WARNING: continuum=0 will still "work", but
will give numerically invalid results. Similarly, a negative continuum
will work, but will yield results with questionable physical meaning.
continuum_as_baseline : bool
Replace the baseline with the specified continuum when computing
the absorption depth of the line
components : bool
If your fit is multi-component, will attempt to acquire centroids
for each component and print out individual EQWs
xmin : float
xmax : float
The range over which to compute the EQW
xunits : str
The units of xmin/xmax
midpt_location : 'fitted', 'plot-center'
If 'plot' is set, this determines where the EQW will be drawn. It
can be the fitted centroid or the plot-center, i.e. (xmin+xmax)/2
Returns
-------
Equivalent Width, or widths if components=True
"""
        if continuum is None:
# if continuum is specified, don't bother with checks
if np.median(self.Spectrum.baseline.basespec) == 0:
raise ValueError("Baseline / continuum is zero: equivalent width is undefined.")
elif np.median(self.Spectrum.baseline.basespec) < 0:
if mycfg.WARN: warn( "WARNING: Baseline / continuum is negative: equivalent width is poorly defined." )
# determine range to use
if xmin is None:
xmin = self.xmin #self.Spectrum.xarr.x_to_pix(self.xmin)
else:
xmin = self.Spectrum.xarr.x_to_pix(xmin, xval_units=xunits)
if xmax is None:
xmax = self.xmax #self.Spectrum.xarr.x_to_pix(self.xmax)
else:
xmax = self.Spectrum.xarr.x_to_pix(xmax, xval_units=xunits)
dx = np.abs(self.Spectrum.xarr[xmin:xmax].cdelt(approx=True).value)
log.debug("xmin={0} xmax={1} dx={2} continuum={3}"
.format(xmin, xmax, dx, continuum))
if components:
centroids = self.fitter.analytic_centroids()
integrals = self.fitter.component_integrals(self.Spectrum.xarr[xmin:xmax],dx=dx)
eqw = []
for cen,integ in zip(centroids,integrals):
center_pix = self.Spectrum.xarr.x_to_pix(cen)
if continuum is None:
continuum = self.Spectrum.baseline.basespec[center_pix]
elif continuum_as_baseline:
integrals[-1] += -(self.Spectrum.baseline.basespec[xmin:xmax] - continuum).sum() * dx
eqw.append(-integ / continuum)
if plot:
plot = False
if mycfg.WARN:
warn("Cannot plot multiple Equivalent Widths")
elif fitted:
model = self.get_model(self.Spectrum.xarr[xmin:xmax],
add_baseline=False)
# EQW is positive for absorption lines
# fitted components are assume to be continuum-subtracted
integral = (-model).sum() * dx
if continuum is None:
# centroid in data units
# (may fail if model has pos + neg values)
center = (model*self.Spectrum.xarr[xmin:xmax]).sum()/model.sum()
center_pix = self.Spectrum.xarr.x_to_pix(center)
continuum = self.Spectrum.baseline.basespec[center_pix]
elif continuum_as_baseline:
integral += -(self.Spectrum.baseline.basespec[xmin:xmax] - continuum).sum() * dx
eqw = integral / continuum
else:
if continuum_as_baseline:
diffspec = (continuum - self.Spectrum.data)
elif self.Spectrum.baseline.subtracted is False:
diffspec = (self.Spectrum.baseline.basespec - self.Spectrum.data)
else:
diffspec = -self.Spectrum.data
sumofspec = diffspec[xmin:xmax].sum() * dx
if continuum is None:
continuum = np.median(self.Spectrum.baseline.basespec)
eqw = sumofspec / continuum
if plot and self.Spectrum.plotter.axis:
if midpt_location == 'plot-center':
midpt_pixel = int(np.round((xmin+xmax)/2.0))
midpt = self.Spectrum.xarr[midpt_pixel].value
elif midpt_location == 'fitted':
try:
shifts = [self.Spectrum.specfit.parinfo[x].value
for x in self.Spectrum.specfit.parinfo.keys()
if 'SHIFT' in x]
except AttributeError:
raise AttributeError("Can only specify midpt_location="
"fitted if there is a SHIFT parameter"
"for the fitted model")
# We choose to display the eqw fit at the center of the fitted
# line set, closest to the passed window.
# Note that this has the potential to show a eqw "rectangle"
# centered on a fitted line other than the one measured for the
# eqw call, if there are more than one fitted lines within the
# window.
midpt_pixel = int((xmin+xmax)/2)
midval = self.Spectrum.xarr[midpt_pixel].value
midpt_index = np.argmin(np.abs(shifts-midval))
midpt = shifts[midpt_index]
midpt_pixel = self.Spectrum.xarr.x_to_pix(midpt)
else:
raise ValueError("midpt_location must be 'plot-center' or "
"fitted")
if continuum_as_baseline:
midpt_level = continuum
else:
midpt_level = self.Spectrum.baseline.basespec[midpt_pixel]
log.debug("EQW plotting: midpt={0}, midpt_pixel={1}, "
"midpt_level={2}, eqw={3}".format(midpt, midpt_pixel,
midpt_level, eqw))
self.EQW_plots.append(self.Spectrum.plotter.axis.fill_between(
[midpt-eqw/2.0,midpt+eqw/2.0], [0,0],
[midpt_level,midpt_level], color=plotcolor, alpha=alpha,
label='EQW: %0.3g' % eqw))
if annotate:
self.Spectrum.plotter.axis.legend(
[(matplotlib.collections.CircleCollection([0],facecolors=[plotcolor],edgecolors=[plotcolor]))],
[('EQW: %0.3g' % eqw)],
markerscale=0.01, borderpad=0.1, handlelength=0.1,
handletextpad=0.1, loc=loc)
if self.Spectrum.plotter.autorefresh:
self.Spectrum.plotter.refresh()
if hasattr(self.Spectrum,'header'):
history.write_history(self.Spectrum.header, "EQW for %s: %s" %
(self.fittype,eqw))
return eqw
def register_fitter(self,*args,**kwargs):
"""
Register a model fitter
"""
self.Registry.add_fitter(*args,**kwargs)
register_fitter.__doc__ += Registry.add_fitter.__doc__
def seterrspec(self, usestd=None, useresiduals=True):
"""
Simple wrapper function to set the error spectrum; will either use the
input spectrum or determine the error using the RMS of the residuals,
depending on whether the residuals exist.
"""
if (self.Spectrum.error is not None) and not usestd:
if (self.Spectrum.error == 0).all():
if self.residuals is not None and useresiduals:
residuals_std = self.residuals.std()
if residuals_std == 0:
self.errspec = np.ones(self.spectofit.shape[0])
warnings.warn("Residuals have 0 standard deviation. "
"That's probably too good to be true.")
else:
self.errspec = np.ones(self.spectofit.shape[0]) * residuals_std
elif type(self.Spectrum.error) is np.ma.masked_array:
# force errspec to be a non-masked array of ones
self.errspec = self.Spectrum.error.data + 1
else:
self.errspec = self.Spectrum.error + 1
else:
# this is the default behavior if spectrum.error is set
self.errspec = self.Spectrum.error.copy()
elif self.residuals is not None and useresiduals:
self.errspec = np.ones(self.spectofit.shape[0]) * self.residuals.std()
else:
self.errspec = np.ones(self.spectofit.shape[0]) * self.spectofit.std()
def setfitspec(self):
"""
Set the spectrum that will be fit. This is primarily to remove NANs
from consideration: if you simply remove the data from both the X-axis
and the Y-axis, it will not be considered for the fit, and a linear
X-axis is not needed for fitting.
However, it may be possible to do this using masked arrays instead of
setting errors to be 1e10....
"""
if self.Spectrum.data.sum() is np.ma.masked:
self.spectofit = np.zeros_like(self.Spectrum.data)
self.errspec = np.zeros_like(self.Spectrum.data)
self._valid = False
return
# see https://github.com/numpy/numpy/issues/3474
self.spectofit = np.ma.copy(self.Spectrum.data)
if hasattr(self.Spectrum.data, 'mask') and hasattr(self.spectofit,
'mask'):
assert np.all(self.Spectrum.data.mask == self.spectofit.mask)
self._valid = True
if hasattr(self.Spectrum,'baseline'):
if ((not self.Spectrum.baseline.subtracted and
self.Spectrum.baseline.basespec is not None and
len(self.spectofit) == len(self.Spectrum.baseline.basespec))):
self.spectofit -= self.Spectrum.baseline.basespec
OKmask = (self.spectofit==self.spectofit)
with warnings.catch_warnings():
# catch a specific np1.7 futurewarning relating to masks
warnings.simplefilter("ignore")
self.spectofit[~OKmask] = 0
self.seterrspec()
self.errspec[~OKmask] = 1e10
if self.includemask is not None and (self.includemask.shape == self.errspec.shape):
self.errspec[~self.includemask] = 1e10*self.errspec.max()
@property
def mask(self):
""" Mask: True means "exclude" """
if ((hasattr(self.spectofit, 'mask') and
self.spectofit.shape==self.spectofit.mask.shape)):
mask = self.spectofit.mask
else:
mask = np.zeros_like(self.spectofit, dtype='bool')
return mask
@property
def mask_sliced(self):
""" Sliced (subset) Mask: True means "exclude" """
return self.mask[self.xmin:self.xmax]
def multifit(self, fittype=None, renormalize='auto', annotate=None,
show_components=None, verbose=True, color=None,
guesses=None, parinfo=None, reset_fitspec=True,
use_window_limits=None, use_lmfit=False, plot=True, **kwargs):
"""
Fit multiple gaussians (or other profiles)
Parameters
----------
fittype : str
            What function will be fit? fittype must have been registered in the
            peakbgfitters dict. Uses default ('gaussian') if not specified
renormalize : 'auto' or bool
if 'auto' or True, will attempt to rescale small data (<1e-9) to be
closer to 1 (scales by the median) so that the fit converges better
parinfo : `~parinfo` structure
            Guess structure; supersedes ``guesses``
guesses : list or 'moments'
Either a list of guesses matching the number of parameters * the
number of peaks for the model, or 'moments' to fit a single
spectrum with the moments as guesses
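        Examples
        --------
        A hypothetical two-component gaussian fit (three parameters per
        component, ordered [amplitude, center, width])::

            sp.specfit.multifit(fittype='gaussian',
                                guesses=[1.0, 0.0, 1.0, 0.5, 5.0, 2.0])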
"""
if reset_fitspec:
self.setfitspec()
if not self._valid:
raise ValueError("Data are invalid; cannot be fit.")
#if self.fitkwargs.has_key('negamp'): self.fitkwargs.pop('negamp') # We now do this in gaussfitter.py
if fittype is not None:
self.fittype = fittype
bad_kws = ['fittype','plot']
for kw in bad_kws:
if kw in self.fitkwargs:
del self.fitkwargs[kw]
if guesses is not None and parinfo is not None:
raise ValueError("Both guesses and parinfo were specified, "
"but only one of these is allowed.")
if guesses is None:
if parinfo is not None:
guesses = list(parinfo.values)
else:
guesses = list(self.guesses)
elif isinstance(guesses, string_types) and guesses in ('moment', 'moments'):
guesses = self.moments(vheight=False, **kwargs)
else:
guesses = list(guesses) # needs to be mutable, but needs to be a copy!!
if len(guesses) < self.Registry.npars[self.fittype]:
raise ValueError("Too few parameters input. Need at least %i for %s models" % (self.Registry.npars[self.fittype],self.fittype))
self.npeaks = len(guesses)/self.Registry.npars[self.fittype]
self.fitter = self.Registry.multifitters[self.fittype]
self.vheight = False
if self.fitter.vheight:
# Need to reset the parinfo if vheight has previously been set,
# otherwise npars will disagree, which causes problems if
# renormalization happens
self.fitter.vheight = False
self.fitter.npeaks = self.npeaks
self.fitter._make_parinfo(npeaks=self.npeaks)
# add kwargs to fitkwargs
self.fitkwargs.update(kwargs)
if 'renormalize' in self.fitkwargs:
del self.fitkwargs['renormalize']
# if parinfo was specified, we use it and ignore guesses otherwise, we
# make a parinfo so we can test 'scaleable' below
if parinfo is not None:
pinf_for_scaling = parinfo
else:
pinf_for_scaling, _ = self.fitter._make_parinfo(parvalues=guesses,
npeaks=self.npeaks,
**self.fitkwargs)
scalefactor = 1.0
if renormalize in ('auto',True):
datarange = np.nanmax(self.spectofit[self.xmin:self.xmax]) - np.nanmin(self.spectofit[self.xmin:self.xmax])
if abs(datarange) < 1e-9:
scalefactor = np.nanmedian(np.abs(self.spectofit))
if not np.isfinite(scalefactor):
raise ValueError("non-finite scalefactor = {0} encountered.".format(scalefactor))
elif scalefactor == 0:
raise ValueError("scalefactor = {0} encountered, which will result "
"in divide-by-zero errors".format(scalefactor))
log.info("Renormalizing data by factor %e to improve fitting procedure"
% scalefactor)
self.spectofit /= scalefactor
self.errspec /= scalefactor
# this error should be unreachable, but is included as a sanity check
if self.fitter.npeaks * self.fitter.npars != len(pinf_for_scaling):
raise ValueError("Length of parinfo doesn't agree with "
" npeaks * npars = {0}"
.format(self.fitter.npeaks *
self.fitter.npars))
if len(guesses) != len(pinf_for_scaling):
raise ValueError("Length of parinfo doens't match length of guesses")
# zip guesses with parinfo: truncates parinfo if len(parinfo) > len(guesses)
# actually not sure how/when/if this should happen; this might be a bad hack
# revisit with tests!!
for jj,(guess,par) in enumerate(zip(guesses,pinf_for_scaling)):
if par.scaleable:
guesses[jj] /= scalefactor
# if parinfo was passed in, this will change it
# if it was not, it will change only the placeholder
                        # (because we are passing by reference above)
par.value /= scalefactor
par.limits = [lim / scalefactor for lim in par.limits]
log.debug("Rescaled guesses to {0}".format(guesses))
# all fit data must be float64, otherwise the optimizers may take steps
# less than the precision of the data and get stuck
xtofit = self.Spectrum.xarr[self.xmin:self.xmax][~self.mask_sliced].astype('float64')
spectofit = self.spectofit[self.xmin:self.xmax][~self.mask_sliced].astype('float64')
err = self.errspec[self.xmin:self.xmax][~self.mask_sliced].astype('float64')
if np.all(err == 0):
raise ValueError("Errors are all zero. This should not occur and "
"is a bug. (if you set the errors to all zero, "
"they should be overridden and set to 1)")
if parinfo is not None:
self._validate_parinfo(parinfo, mode='fix')
else:
pinf, _ = self.fitter._make_parinfo(parvalues=guesses,
npeaks=self.npeaks,
**self.fitkwargs)
new_guesses = self._validate_parinfo(pinf, 'guesses')
if any((x!=y) for x,y in zip(guesses, new_guesses)):
warn("Guesses have been changed from {0} to {1}"
.format(guesses, new_guesses))
guesses = new_guesses
mpp,model,mpperr,chi2 = self.fitter(xtofit, spectofit, err=err,
npeaks=self.npeaks,
parinfo=parinfo, # the user MUST be allowed to override parinfo.
params=guesses,
use_lmfit=use_lmfit,
**self.fitkwargs)
any_out_of_range = self._validate_parinfo(self.fitter.parinfo, mode='check')
if any(any_out_of_range):
warn("The fitter returned values that are outside the "
"parameter limits. DEBUG INFO: {0}".format(any_out_of_range))
self.spectofit *= scalefactor
self.errspec *= scalefactor
if hasattr(self.fitter.mp,'status'):
self.mpfit_status = models.mpfit_messages[self.fitter.mp.status]
if model is None:
raise ValueError("Model was not set by fitter. Examine your fitter.")
self.chi2 = chi2
self.model = model * scalefactor
self.parinfo = self.fitter.parinfo
self.dof = (self.includemask.sum() - self.mask.sum() - self.npeaks *
self.Registry.npars[self.fittype] +
np.sum(self.parinfo.fixed))
# rescale any scaleable parameters
for par in self.parinfo:
if par.scaleable:
par.value *= scalefactor
if par.error is not None:
par.error *= scalefactor
if par.limits is not None:
par.limits = [lim*scalefactor for lim in par.limits]
self.modelpars = self.parinfo.values
self.modelerrs = self.parinfo.errors
self.residuals = spectofit - self.model
if self.Spectrum.plotter.axis is not None and plot:
if color is not None:
kwargs.update({'composite_fit_color':color})
self.plot_fit(annotate=annotate,
show_components=show_components,
use_window_limits=use_window_limits,
**kwargs)
# Re-organize modelerrs so that any parameters that are tied to others inherit the errors of the params they are tied to
if 'tied' in self.fitkwargs:
for ii, element in enumerate(self.fitkwargs['tied']):
if not element.strip():
continue
if '[' in element and ']' in element:
i1 = element.index('[') + 1
i2 = element.index(']')
loc = int(element[i1:i2])
else: # assume lmfit version
varnames = re.compile('([a-zA-Z][a-zA-Z_0-9]*)').search(element).groups()
if not varnames:
continue
elif len(varnames) > 1:
warn("The 'tied' parameter {0} is not simple enough for error propagation".format(element))
continue
else:
varname = varnames[0]
loc = self.parinfo.names.index(varname)
self.modelerrs[ii] = self.modelerrs[loc]
# make sure the full model is populated
self._full_model()
self.history_fitpars()
def refit(self, use_lmfit=False):
""" Redo a fit using the current parinfo as input """
return self.multifit(parinfo=self.parinfo, use_lmfit=use_lmfit,
reset_fitspec=False)
def history_fitpars(self):
if hasattr(self.Spectrum,'header'):
history.write_history(self.Spectrum.header, "SPECFIT: Fitted "
"profile of type %s" % (self.fittype))
history.write_history(self.Spectrum.header, "Chi^2: %g DOF: %i" %
(self.chi2, self.dof))
for par in self.parinfo:
history.write_history(self.Spectrum.header, str(par))
def peakbgfit(self, usemoments=True, annotate=None, vheight=True, height=0,
negamp=None, fittype=None, renormalize='auto', color=None,
use_lmfit=False, show_components=None, debug=False,
use_window_limits=True, guesses=None,
nsigcut_moments=None, plot=True, parinfo=None, **kwargs):
"""
Fit a single peak (plus a background)
Parameters
----------
usemoments : bool
The initial guess will be set by the fitter's 'moments' function
(this overrides 'guesses')
annotate : bool
Make a legend?
vheight : bool
Fit a (constant) background as well as a peak?
height : float
initial guess for background
negamp : bool
If True, assumes amplitude is negative. If False, assumes positive. If
None, can be either.
        fittype : str
            What function will be fit? fittype must have been registered in the
            peakbgfitters dict
renormalize : 'auto' or bool
if 'auto' or True, will attempt to rescale small data (<1e-9) to be
closer to 1 (scales by the median) so that the fit converges better
nsigcut_moments : bool
pass to moment guesser; can do a sigma cut for moment guessing
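        Examples
        --------
        A hypothetical single-peak fit with a constant background, using the
        spectrum moments as initial guesses::

            sp.specfit.peakbgfit(usemoments=True, vheight=True)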
"""
self.npeaks = 1
self.auto = True
self.setfitspec()
if fittype is not None:
self.fittype=fittype
NP = self.Registry.peakbgfitters[self.fittype].default_npars
if guesses is not None:
log.debug("Using user-specified guesses.")
self.guesses = guesses
if len(guesses) != NP + vheight:
raise ValueError("Invalid guesses specified for single-fitter."
"Expected {0}, got {1}. Perhaps you should "
"use the multifitter (multifit=True)?"
.format(NP+vheight, len(guesses)))
elif usemoments: # this can be done within gaussfit but I want to save them
# use this INDEPENDENT of fittype for now (voigt and gauss get same guesses)
log.debug("Using moment-based guesses.")
moments_f = self.Registry.peakbgfitters[self.fittype].moments
self.guesses = moments_f(self.Spectrum.xarr[self.xmin:self.xmax],
self.spectofit[self.xmin:self.xmax],
vheight=vheight,
negamp=negamp,
nsigcut=nsigcut_moments,
**kwargs)
else:
if negamp:
self.guesses = [height,-1,0,1]
else:
self.guesses = [height,1,0,1]
# If we're fitting anything but a simple Gaussian, we need the length
# of guesses to be right so we pad with appended zeros
# BUT, if the guesses from the moments have the right number of
# parameters, we don't need to do this.
if NP > len(self.guesses):
for ii in xrange(len(self.guesses),NP):
self.guesses += [0.0]
self.fitter = self.Registry.peakbgfitters[self.fittype]
log.debug("n(guesses): %s Guesses: %s vheight: %s " %
(len(self.guesses),self.guesses,vheight))
scalefactor = 1.0
if renormalize in ('auto',True):
datarange = self.spectofit[self.xmin:self.xmax].max() - self.spectofit[self.xmin:self.xmax].min()
if abs(datarange) < 1e-9:
scalefactor = np.median(np.abs(self.spectofit))
log.info("Renormalizing data by factor %e to improve fitting procedure"
% scalefactor)
self.spectofit /= scalefactor
self.errspec /= scalefactor
self.guesses[0] /= scalefactor
if vheight: self.guesses[1] /= scalefactor
log.debug("Guesses before fit: {0}".format(self.guesses))
if 'debug' in self.fitkwargs:
debug = self.fitkwargs['debug']
del self.fitkwargs['debug']
mpp,model,mpperr,chi2 = self.fitter(
self.Spectrum.xarr[self.xmin:self.xmax],
self.spectofit[self.xmin:self.xmax],
err=self.errspec[self.xmin:self.xmax],
vheight=vheight,
params=self.guesses,
parinfo=parinfo,
debug=debug,
use_lmfit=use_lmfit,
**self.fitkwargs)
log.debug("1. Guesses, fits after: {0}, {1}".format(self.guesses, mpp))
self.spectofit *= scalefactor
self.errspec *= scalefactor
if hasattr(self.fitter.mp,'status'):
self.mpfit_status = models.mpfit_messages[self.fitter.mp.status]
self.parinfo = self.fitter.parinfo
if model is None:
raise ValueError("Model was not set by fitter. Examine your fitter.")
self.chi2 = chi2
self.dof = self.includemask.sum()-self.npeaks*self.Registry.npars[self.fittype]-vheight+np.sum(self.parinfo.fixed)
self.vheight=vheight
if vheight:
self.Spectrum.baseline.order = 0
self.Spectrum.baseline.baselinepars = [mpp[0]*scalefactor] # first item in list form
self.Spectrum.baseline.basespec = self.Spectrum.data*0 + mpp[0]*scalefactor
self.model = model*scalefactor - mpp[0]*scalefactor
# I removed this recently for some reason, but more code depends on it being in place
# Need to figure out *WHY* anything would want an extra parameter
if len(mpp) == self.fitter.npars+1:
mpp = mpp[1:]
else: self.model = model*scalefactor
self.residuals = self.spectofit[self.xmin:self.xmax] - self.model*scalefactor
self.modelpars = mpp
self.modelerrs = mpperr
# rescale any scaleable parameters
for par in self.parinfo:
if par.scaleable:
par.value = par.value * scalefactor
par.error = par.error * scalefactor
if self.Spectrum.plotter.axis is not None and plot:
if color is not None:
kwargs.update({'composite_fit_color':color})
self.plot_fit(annotate=annotate,
use_window_limits=use_window_limits,
show_components=show_components,
**kwargs)
# make sure the full model is populated
self._full_model(debug=debug)
log.debug("2. Guesses, fits after vheight removal: {0},{1}"
.format(self.guesses, mpp))
self.history_fitpars()
def _full_model(self, debug=False, **kwargs):
"""
Compute the model for the whole spectrum
"""
self.fullmodel = self.get_full_model(debug=debug,**kwargs)
self.fullresiduals = self.Spectrum.data - self.fullmodel
def get_full_model(self, debug=False,**kwargs):
""" compute the model over the full axis """
return self.get_model(self.Spectrum.xarr, debug=debug,**kwargs)
def get_model(self, xarr, pars=None, debug=False, add_baseline=None):
""" Compute the model over a given axis """
if pars is None:
return self.get_model_frompars(xarr=xarr, pars=self.parinfo,
add_baseline=add_baseline, debug=debug)
else:
return self.get_model_frompars(xarr=xarr, pars=pars,
add_baseline=add_baseline, debug=debug)
def get_model_frompars(self, xarr, pars, debug=False, add_baseline=None):
""" Compute the model over a given axis """
if ((add_baseline is None and (self.Spectrum.baseline.subtracted or self.vheight))
or add_baseline is False):
return self.fitter.n_modelfunc(pars,**self.fitter.modelfunc_kwargs)(xarr)
else:
return (self.fitter.n_modelfunc(pars,
**self.fitter.modelfunc_kwargs)(xarr)
+ self.Spectrum.baseline.get_model(np.arange(xarr.size)))
def plot_model(self, pars, offset=0.0, annotate=False, clear=False, **kwargs):
"""
Plot a model from specified input parameters
(see plot_fit for kwarg specification)
annotate is set to "false" because arbitrary annotations are not yet implemented
"""
# really, plot_fit should be thin on top of plot_model, but that's
# not how I wrote it, so it will have to wait for a refactor
if clear: self.clear()
return self.plot_fit(pars=pars, offset=offset, annotate=False, **kwargs)
#def assess_npeaks(self):
# """
# Attempt to determine whether any of the peaks are unnecessary
# """
# if self.npeaks <= 1:
# return
# npars = self.fitter.npars
# perpeakpars = [self.parinfo.values[ii*npars:(ii+1)*npars] for ii in
# range(self.npeaks)]
# parsets = [((x[0][0],x[1][0]),x[0][1]+x[1][1]) for x in
# itertools.combinations(perpeakpars, self.npeaks-1)]
# parsets = [x
# for y in itertools.combinations(perpeakpars, self.npeaks-1)
# for x in y]
# chi2_without = [(self.spectofit[self.xmin:self.xmax] -
# self.get_model_frompars(self.xarr, self.pars[ii*npars:
def plot_fit(self, xarr=None, annotate=None, show_components=None,
composite_fit_color='red', lw=0.5,
composite_lw=0.75, pars=None, offset=None,
use_window_limits=None, show_hyperfine_components=None,
plotkwargs={}, **kwargs):
"""
Plot the fit. Must have fitted something before calling this!
It will be automatically called whenever a spectrum is fit (assuming an
axis for plotting exists)
kwargs are passed to the fitter's components attribute
Parameters
----------
xarr : None
If none, will use the spectrum's xarr. Otherwise, plot the
specified xarr. This is useful if you want to plot a well-sampled
model when the input spectrum is undersampled
annotate : None or bool
Annotate the plot? If not specified, defaults to self.autoannotate
show_components : None or bool
show_hyperfine_components : None or bool
Show the individual gaussian components overlaid on the composite fit
use_window_limits : None or bool
If False, will reset the window to include the whole spectrum. If
True, leaves the window as is. Defaults to self.use_window_limits
if None.
pars : parinfo
A parinfo structure or list of model parameters. If none, uses
best-fit
offset : None or float
Y-offset. If none, uses the default self.Spectrum.plotter offset, otherwise,
uses the specified float.
"""
#if self.Spectrum.baseline.subtracted is False and self.Spectrum.baseline.basespec is not None:
# # don't display baseline if it's included in the fit
# plot_offset = self.Spectrum.plotter.offset+(self.Spectrum.baseline.basespec * (~self.vheight))
#else:
if offset is None:
plot_offset = self.Spectrum.plotter.offset
else:
plot_offset = offset
if xarr is None:
xarr = self.Spectrum.xarr
if pars is not None:
model = self.get_model_frompars(xarr, pars)
else:
self._full_model()
model = self.fullmodel
self.modelplot += self.Spectrum.plotter.axis.plot(xarr,
model + plot_offset,
color=composite_fit_color,
linewidth=lw,
**plotkwargs)
# Plot components
if show_components or show_hyperfine_components:
self.plot_components(xarr=xarr,
show_hyperfine_components=show_hyperfine_components,
pars=pars, plotkwargs=plotkwargs)
uwl = use_window_limits if use_window_limits is not None else self.use_window_limits
# plotter kwargs are kwargs for the Spectrum.plotter,
# whereas plotkwargs are for the matplotlib plot command
plotterkwargs = {}
plotterkwargs.update(self.Spectrum.plotter.plotkwargs)
plotterkwargs['use_window_limits'] = uwl
self.Spectrum.plotter.reset_limits(**plotterkwargs)
if self.Spectrum.plotter.autorefresh:
self.Spectrum.plotter.refresh()
if annotate or ((annotate is None) and self.autoannotate):
self.annotate()
if self.vheight: self.Spectrum.baseline.annotate()
def plot_components(self, xarr=None, show_hyperfine_components=None,
component_yoffset=0.0, component_lw=0.75, pars=None,
component_fit_color='blue', component_kwargs={},
add_baseline=False, plotkwargs={}, **kwargs):
"""
Overplot the individual components of a fit
Parameters
----------
xarr : None
If none, will use the spectrum's xarr. Otherwise, plot the
specified xarr. This is useful if you want to plot a well-sampled
model when the input spectrum is undersampled
show_hyperfine_components : None | bool
Keyword argument to pass to component codes; determines whether to return
individual (e.g., hyperfine) components of a composite model
component_yoffset : float
Vertical (y-direction) offset to add to the components when plotting
component_lw : float
Line width of component lines
component_fit_color : color
Color of component lines
component_kwargs : dict
Keyword arguments to pass to the fitter.components method
add_baseline : bool
Add the fit to the components before plotting. Makes sense to use
if self.Spectrum.baseline.subtracted == False
pars : parinfo
A parinfo structure or list of model parameters. If none, uses
best-fit
"""
plot_offset = self.Spectrum.plotter.offset
if xarr is None:
xarr = self.Spectrum.xarr
if show_hyperfine_components is not None:
component_kwargs['return_hyperfine_components'] = show_hyperfine_components
self._component_kwargs = component_kwargs
if pars is None:
pars = self.modelpars
self.modelcomponents = self.fitter.components(xarr, pars, **component_kwargs)
yoffset = plot_offset + component_yoffset
if add_baseline:
yoffset += self.Spectrum.baseline.basespec
for data in self.modelcomponents:
# can have multidimensional components
if len(data.shape) > 1:
for d in data:
self._plotted_components += self.Spectrum.plotter.axis.plot(xarr,
d + yoffset,
color=component_fit_color, linewidth=component_lw, **plotkwargs)
else:
self._plotted_components += self.Spectrum.plotter.axis.plot(xarr,
data + yoffset,
color=component_fit_color, linewidth=component_lw, **plotkwargs)
def fullsizemodel(self):
"""
If the model was fit to a sub-region of the spectrum, expand it (with
zeros wherever the model was not defined) to fill the spectrum.
Examples
--------
>>> noise = np.random.randn(100)
>>> xarr = np.linspace(-50,50,100)
>>> signal = np.exp(-(xarr-5)**2/(2*3.**2))
>>> sp = pyspeckit.Spectrum(data=noise + signal, xarr=xarr, xarrkwargs={'units':'km/s'})
>>> sp.specfit(xmin=-25,xmax=25)
>>> sp.specfit.model.shape
(48,)
>>> sp.specfit.fullsizemodel()
>>> sp.specfit.model.shape
(100,)
"""
if self.model.shape != self.Spectrum.data.shape:
temp = np.zeros(self.Spectrum.data.shape)
temp[self.xmin:self.xmax] = self.model
self.model = temp
self.residuals = self.spectofit - self.model
self.selectregion(reset=True)
def plotresiduals(self, fig=2, axis=None, clear=True, color='k',
linewidth=0.5, drawstyle='steps-mid', yoffset=0.0,
label=True, pars=None, zeroline=None,
set_limits=True, **kwargs):
"""
Plot residuals of the fit. Specify a figure or
axis; defaults to figure(2).
Parameters
----------
fig : int
Figure number. Overridden by axis
axis : axis
The axis to plot on
pars : None or parlist
If set, the residuals will be computed for the input parameters
zeroline : bool or None
Plot the "zero" line through the center of the residuals. If None,
defaults to "True if yoffset!=0, False otherwise"
kwargs are passed to matplotlib plot
"""
self._full_model(pars=pars)
if axis is None:
if isinstance(fig,int):
fig=matplotlib.pyplot.figure(fig)
self.residualaxis = matplotlib.pyplot.gca()
if clear:
self.residualaxis.clear()
else:
self.residualaxis = axis
if clear:
self.residualaxis.clear()
self.residualplot = self.residualaxis.plot(self.Spectrum.xarr,
self.fullresiduals+yoffset,
drawstyle=drawstyle,
linewidth=linewidth,
color=color, **kwargs)
if zeroline or (zeroline is None and yoffset != 0):
self.residualplot += self.residualaxis.plot(self.Spectrum.xarr,
(np.zeros_like(self.Spectrum.xarr.value)+yoffset),
linestyle='--',
color='k',
alpha=0.5)
if set_limits:
if ((self.Spectrum.plotter.xmin is not None) and
(self.Spectrum.plotter.xmax is not None)):
self.residualaxis.set_xlim(self.Spectrum.plotter.xmin.value,
self.Spectrum.plotter.xmax.value)
if ((self.Spectrum.plotter.ymin is not None) and
(self.Spectrum.plotter.ymax is not None)):
self.residualaxis.set_ylim(self.Spectrum.plotter.ymin,
self.Spectrum.plotter.ymax)
if label:
self.residualaxis.set_xlabel(self.Spectrum.plotter.xlabel)
self.residualaxis.set_ylabel(self.Spectrum.plotter.ylabel)
self.residualaxis.set_title("Residuals")
if self.Spectrum.plotter.autorefresh:
self.residualaxis.figure.canvas.draw()
def annotate(self,loc='upper right',labelspacing=0.25, markerscale=0.01,
borderpad=0.1, handlelength=0.1, handletextpad=0.1,
fontsize=10,
frameon=False, chi2=None, optimal_chi2_kwargs={}, **kwargs):
"""
Add a legend to the plot showing the fitted parameters
_clearlegend() will remove the legend
chi2 : {True or 'reduced' or 'optimal' or 'allthree'}
kwargs passed to legend
"""
self._clearlegend()
pl = matplotlib.collections.CircleCollection([0],edgecolors=['k'])
if hasattr(self.fitter,'annotations'):
self._annotation_labels = self.fitter.annotations()
else:
raise Exception("Fitter %s has no annotations." % self.fitter)
#xtypename = units.unit_type_dict[self.Spectrum.xarr.xtype]
xcharconv = units.SmartCaseNoSpaceDict({u.Hz.physical_type:'\\nu',
u.m.physical_type:'\\lambda',
(u.km/u.s).physical_type:'v',
'pixels':'x',
u.dimensionless_unscaled:'x',
'dimensionless':'x',
})
try:
xchar = xcharconv[self.Spectrum.xarr.unit.physical_type]
except AttributeError:
unit_key = self.Spectrum.xarr.unit
xchar = xcharconv[u.Unit(unit_key).physical_type]
self._annotation_labels = [L.replace('x',xchar) if L[1]=='x' else L for
L in self._annotation_labels]
if chi2 is not None:
chi2n_label = '$\\chi^2/\\nu = %0.2g$' % (self.chi2/self.dof)
chi2opt_label = '$\\chi^2_o/\\nu = %0.2g$' % self.optimal_chi2(**optimal_chi2_kwargs)
chi2_label = '$\\chi^2 = %0.2g$' % self.chi2
if chi2 == 'allthree':
self._annotation_labels.append("\n".join([chi2n_label,
chi2_label,
chi2opt_label]))
elif chi2 == 'reduced':
self._annotation_labels.append(chi2n_label)
elif chi2 == 'optimal':
self._annotation_labels.append(chi2opt_label)
else:
self._annotation_labels.append(chi2_label)
if self.Spectrum.plotter.axis:
self.fitleg = self.Spectrum.plotter.axis.legend(
tuple([pl]*len(self._annotation_labels)),
self._annotation_labels, loc=loc, markerscale=markerscale,
borderpad=borderpad, handlelength=handlelength,
handletextpad=handletextpad, labelspacing=labelspacing,
frameon=frameon, fontsize=fontsize, **kwargs)
self.Spectrum.plotter.axis.add_artist(self.fitleg)
self.fitleg.draggable(True)
if self.Spectrum.plotter.autorefresh:
self.Spectrum.plotter.refresh()
def print_fit(self, print_baseline=True, **kwargs):
"""
Print the best-fit parameters to the command line
"""
if self.Spectrum.baseline.baselinepars is not None and print_baseline:
print("Baseline: " + " + ".join(["%12g x^%i" % (x,i) for i,x in enumerate(self.Spectrum.baseline.baselinepars[::-1])]))
for i,p in enumerate(self.parinfo):
print("%15s: %12g +/- %12g" % (p['parname'],p['value'],p['error']))
def clear(self, legend=True, components=True):
"""
Remove the fitted model from the plot
Also removes the legend by default
"""
if self.Spectrum.plotter.axis is not None:
for p in self.modelplot:
p.set_visible(False)
if legend:
self._clearlegend()
if components:
self._clearcomponents()
if self.Spectrum.plotter.autorefresh:
self.Spectrum.plotter.refresh()
# Empty the modelplot array to free memory
self.modelplot = []
# remove residuals from self if they're there.
if hasattr(self,'residualplot'):
for L in self.residualplot:
if L in self.Spectrum.plotter.axis.lines:
self.Spectrum.plotter.axis.lines.remove(L)
def _clearcomponents(self):
for pc in self._plotted_components:
pc.set_visible(False)
if pc in self.Spectrum.plotter.axis.lines:
self.Spectrum.plotter.axis.lines.remove(pc)
if self.Spectrum.plotter.autorefresh:
self.Spectrum.plotter.refresh()
# Empty the plotted components array to free memory
self._plotted_components = []
def _clearlegend(self):
"""
Remove the legend from the plot window
"""
axis = self.Spectrum.plotter.axis
if axis and axis.legend_ == self.fitleg:
axis.legend_ = None
if axis and self.fitleg is not None:
# don't remove fitleg unless it's in the current axis
# self.fitleg.set_visible(False)
if self.fitleg in axis.artists:
axis.artists.remove(self.fitleg)
if self.Spectrum.plotter.autorefresh:
self.Spectrum.plotter.refresh()
def savefit(self):
"""
Save the fit parameters from a Gaussian fit to the FITS header
.. todo::
THESE SHOULD BE WRITTEN FOR EACH TYPE OF MODEL TO BE FIT
"""
if self.modelpars is not None and hasattr(self.Spectrum,'header'):
for ii,p in enumerate(self.modelpars):
try:
if ii % 3 == 0:
self.Spectrum.header['AMP%1i' % (ii/3)] = (p,"Gaussian best fit amplitude #%i" % (ii/3))
elif ii % 3 == 1:
self.Spectrum.header['CEN%1i' % (ii/3)] = (p,"Gaussian best fit center #%i" % (ii/3))
elif ii % 3 == 2:
self.Spectrum.header['WID%1i' % (ii/3)] = (p,"Gaussian best fit width #%i" % (ii/3))
except ValueError as ex:
log.info("Failed to save fit to header: {0}".format(ex))
def downsample(self,factor):
"""
Downsample the model spectrum (and the spectofit spectra)
This should only be done when Spectrum.smooth is called
"""
if self.model is not None:
self.model = self.model[::factor]
if self.residuals is not None:
self.residuals = self.residuals[::factor]
self.spectofit = self.spectofit[::factor]
self.errspec = self.errspec[::factor]
self.includemask = self.includemask[::factor]
def crop(self,x1pix,x2pix):
"""
When spectrum.crop is called, this must be too
"""
if self.model is not None:
self.model = self.model[x1pix:x2pix]
if hasattr(self,'fullmodel'):
self.fullmodel = self.fullmodel[x1pix:x2pix]
self.includemask = self.includemask[x1pix:x2pix]
self.setfitspec()
def integral(self, analytic=False, direct=False, threshold='auto',
integration_limits=None, integration_limit_units='pixels',
return_error=False, **kwargs):
"""
Return the integral of the fitted spectrum
Parameters
----------
analytic : bool
Return the analytic integral of the fitted function?
.. WARNING:: This approach is only implemented for some models
.. todo:: Implement error propagation for this approach
direct : bool
Return the integral of the *spectrum* (as opposed to the *fit*)
over a range defined by the `integration_limits` if specified or
`threshold` otherwise
threshold : 'auto' or 'error' or float
Determines what data to be included in the integral based off of where
the model is greater than this number
If 'auto', the threshold will be set to peak_fraction * the peak
model value.
If 'error', uses the error spectrum as the threshold
See `self.get_model_xlimits` for details
integration_limits : None or 2-tuple
Manually specify the limits in `integration_limit_units` units
return_error : bool
Return the error on the integral if set.
The error computed by
sigma = sqrt(sum(sigma_i^2)) * dx
kwargs :
passed to `self.fitter.integral` if ``not(direct)``
Returns
-------
np.scalar or np.ndarray with the integral or integral & error
"""
if analytic:
return self.fitter.analytic_integral(modelpars=self.parinfo.values)
xmin,xmax = self.get_model_xlimits(units='pixels', threshold=threshold)
if integration_limits is None:
integration_limits = [xmin,xmax]
integration_limits = [
self.Spectrum.xarr.x_to_pix(x,xval_units=integration_limit_units)
for x in integration_limits]
if xmax - xmin > 1: # can only get cdelt if there's more than 1 pixel
dx = self.Spectrum.xarr[xmin:xmax].cdelt().value
else:
dx = None
if dx is None:
#dx = np.abs(np.concatenate([np.diff(self.Spectrum.xarr),[0]]))
#warn("Irregular X-axis. The last pixel is ignored.")
self.Spectrum.xarr.make_dxarr()
dx = self.Spectrum.xarr.dxarr.value
else:
# shouldn't shape be a 'property'?
dx = np.repeat(np.abs(dx), self.Spectrum.shape)
if direct:
integrand = self.Spectrum.data[xmin:xmax]
if not self.Spectrum.baseline.subtracted:
integrand -= self.Spectrum.baseline.basespec[xmin:xmax]
integ = (integrand * dx[xmin:xmax]).sum()
if return_error:
# compute error assuming a "known mean" (not a sample mean). If sample mean, multiply
# by sqrt(len(dx)/(len(dx)-1)) (which should be very near 1)
error = np.sqrt((dx[xmin:xmax] * self.Spectrum.error[xmin:xmax]**2).sum() / dx[xmin:xmax].sum())
return np.array([integ,error])
else:
return integ
#OK = np.abs( fullmodel ) > threshold
#integ = (self.spectofit[OK] * dx[OK]).sum()
#error = np.sqrt((self.errspec[OK]**2 * dx[OK]).sum()/dx[OK].sum())
else:
if not hasattr(self.fitter,'integral'):
raise AttributeError("The fitter %s does not have an integral implemented" % self.fittype)
# the model considered here must NOT include the baseline!
# if it does, you'll get the integral of the continuum
#fullmodel = self.get_full_model(add_baseline=False)
if self.Spectrum.xarr.cdelt() is not None:
dx = np.median(dx)
integ = self.fitter.integral(self.modelpars, dx=dx, **kwargs)
if return_error:
if mycfg.WARN:
warn("WARNING: The computation of the error "
"on the integral is not obviously "
"correct or robust... it's just a guess.")
OK = self.model_mask(threshold=threshold, add_baseline=False)
error = np.sqrt((self.errspec[OK]**2).sum()) * dx
#raise NotImplementedError("We haven't written up correct error estimation for integrals of fits")
else:
integ = 0
error = 0
warn("An analytic integal could not be computed because the X-axis is irregular. Try direct=True when integrating, or find a way to linearize the X-axis")
if return_error:
return integ,error
else:
return integ
def model_mask(self, **kwargs):
"""
Get a mask (boolean array) of the region where the fitted model is
significant
Parameters
----------
threshold : 'auto' or 'error' or float
The threshold to compare the model values to for selecting the mask
region.
* auto: uses `peak_fraction` times the model peak
* error: use the spectrum error
* float: any floating point number as an absolute threshold
peak_fraction : float
Parameter used if ``threshold=='auto'`` to determine fraction of
model peak to set threshold at
add_baseline : bool
Add the fitted baseline to the model before comparing to threshold?
Returns
-------
mask : `~numpy.ndarray`
A boolean mask array with the same size as the spectrum, set to
``True`` where the fitted model has values above a specified
threshold
"""
return self._compare_to_threshold(**kwargs)
def _compare_to_threshold(self, threshold='auto', peak_fraction=0.01,
add_baseline=False):
"""
Identify pixels that are above some threshold
"""
model = self.get_full_model(add_baseline=add_baseline)
# auto-set threshold from some fraction of the model peak
if threshold=='auto':
threshold = peak_fraction * np.abs(model).max()
elif threshold=='error':
threshold = self.errspec
OK = np.abs(model) > threshold
return OK
def get_model_xlimits(self, threshold='auto', peak_fraction=0.01,
add_baseline=False, units='pixels'):
"""
Return the x positions of the first and last points at which the model
is above some threshold
Parameters
----------
threshold : 'auto' or 'error' or float
If 'auto', the threshold will be set to peak_fraction * the peak
model value.
If 'error', uses the error spectrum as the threshold
peak_fraction : float
ignored unless threshold == 'auto'
add_baseline : bool
Include the baseline when computing whether the model is above the
threshold? default FALSE. Passed to get_full_model.
units : str
A valid unit type, e.g. 'pixels' or 'angstroms'
"""
OK = self._compare_to_threshold(threshold=threshold,
peak_fraction=peak_fraction,
add_baseline=add_baseline)
# find the first & last "True" values
xpixmin = OK.argmax()
xpixmax = len(OK) - OK[::-1].argmax() - 1
if units == 'pixels':
return [xpixmin,xpixmax]
else:
return self.Spectrum.xarr[[xpixmin,xpixmax]].as_unit(units)
def shift_pars(self, frame=None):
"""
Shift the velocity / wavelength / frequency of the fitted parameters
into a different frame
Right now this only takes care of redshift and only if redshift is defined.
It should be extended to do other things later
"""
for ii,pi in enumerate(self.parinfo):
for partype in ('shift','offset','velo'):
if partype in str.lower(pi['parname']):
if frame is not None:
self.modelpars[ii] = self.Spectrum.xarr.x_in_frame(self.modelpars[ii], frame)
def moments(self, fittype=None, **kwargs):
"""
Return the moments
see the :mod:`~pyspeckit.spectrum.moments` module
Parameters
----------
fittype : None or str
The registered fit type to use for moment computation
"""
if fittype is None:
fittype = self.fittype
return list(self.Registry.multifitters[fittype].moments(
self.Spectrum.xarr[self.xmin:self.xmax],
self.spectofit[self.xmin:self.xmax], **kwargs))
def button3action(self, event, debug=False, nwidths=1):
"""
Disconnect the interactiveness
Perform the fit (or die trying)
Hide the guesses
"""
self.Spectrum.plotter.figure.canvas.mpl_disconnect(self.click)
self.Spectrum.plotter.figure.canvas.mpl_disconnect(self.keyclick)
npars = 2+nwidths
if self.npeaks > 0:
log.info("{0} Guesses : {1} X channel range: {2}-{3}"
.format(len(self.guesses)/npars, self.guesses, self.xmin,
self.xmax))
if len(self.guesses) % npars == 0:
self.multifit(use_window_limits=True)
for p in self.button2plot + self.button1plot:
p.set_visible(False)
else:
log.error("Wrong # of parameters")
# disconnect interactive window (and more importantly, reconnect to
# original interactive cmds)
self.clear_all_connections()
def copy(self, parent=None, registry=None):
"""
Create a copy of the spectral fit - includes copies of the _full_model,
the registry, the fitter, parinfo, modelpars, modelerrs, model, npeaks
Parameters
----------
parent : `pyspeckit.classes.Spectrum`
A `~Spectrum` instance that is the parent of the specfit
instance. This needs to be specified at some point, but defaults
to None to prevent overwriting a previous plot.
"""
if registry is None:
if hasattr(parent, 'Registry'):
registry = parent.Registry
else:
# only make a copy if we're not already given a specific registry
# to inherit
registry = copy.deepcopy(self.Registry)
newspecfit = Specfit(parent, Registry=registry)
newspecfit.parinfo = copy.deepcopy(self.parinfo)
if newspecfit.parinfo is None:
newspecfit.modelpars = None
newspecfit.modelerrs = None
else:
newspecfit.modelpars = newspecfit.parinfo.values
newspecfit.modelerrs = newspecfit.parinfo.errors
newspecfit.includemask = self.includemask.copy()
newspecfit.model = copy.copy(self.model)
newspecfit.npeaks = self.npeaks
if hasattr(self,'fitter'):
newspecfit.fitter = copy.deepcopy(self.fitter)
newspecfit.fitter.parinfo = newspecfit.parinfo
if hasattr(self,'fullmodel'):
newspecfit._full_model()
# this is ridiculous, absurd, and infuriating...
newspecfit.button2action = newspecfit.guesspeakwidth
if parent is not None:
newspecfit.Spectrum.plotter = parent.plotter
else:
newspecfit.Spectrum.plotter = None
return newspecfit
def __copy__(self):
return self.copy(parent=self.Spectrum)
def add_sliders(self, parlimitdict=None, **kwargs):
"""
Add a Sliders window in a new figure appropriately titled
Parameters
----------
parlimitdict: dict
Each parameter needs to have displayed limits; these are set in
min-max pairs. If this is left empty, the widget will try to guess
at reasonable limits, but the guessing is not very sophisticated
yet.
.. todo:: Add a button in the navbar that makes this window pop up
http://stackoverflow.com/questions/4740988/add-new-navigate-modes-in-matplotlib
"""
if parlimitdict is None:
# try to create a reasonable parlimit dict
parlimitdict = {}
for param in self.parinfo:
if not param.parname in parlimitdict:
if any( (x in param['parname'].lower() for x in ('shift','xoff')) ):
lower, upper = (self.Spectrum.xarr[self.includemask].min().value,
self.Spectrum.xarr[self.includemask].max().value)
elif any( (x in param['parname'].lower() for x in ('width','fwhm')) ):
xvalrange = (self.Spectrum.xarr[self.includemask].max().value -
self.Spectrum.xarr[self.includemask].min().value)
lower,upper = (0,xvalrange)
elif any( (x in param['parname'].lower() for x in ('amp','peak','height')) ):
datarange = self.spectofit.max() - self.spectofit.min()
lower,upper = (param['value']-datarange, param['value']+datarange)
else:
lower = param['value'] * 0.1
upper = param['value'] * 10
# override guesses with limits
if param.limited[0]:
# nextafter -> next representable float
lower = np.nextafter(param.limits[0], param.limits[0]+1)
if param.limited[1]:
upper = np.nextafter(param.limits[1], param.limits[1]-1)
parlimitdict[param.parname] = (lower,upper)
if hasattr(self,'fitter'):
self.SliderWidget = widgets.FitterSliders(self,
self.Spectrum.plotter.figure,
npars=self.fitter.npars,
parlimitdict=parlimitdict,
**kwargs)
else:
log.error("Must have a fitter instantiated before creating sliders")
def optimal_chi2(self, reduced=True, threshold='error', **kwargs):
"""
Compute an "optimal" :math:`\chi^2` statistic, i.e. one in which only pixels in
which the model is statistically significant are included
Parameters
----------
reduced : bool
Return the reduced :math:`\chi^2`
threshold : 'auto' or 'error' or float
If 'auto', the threshold will be set to peak_fraction * the peak
model value, where peak_fraction is a kwarg passed to
get_model_xlimits reflecting the fraction of the model peak
to consider significant
If 'error', uses the error spectrum as the threshold
kwargs : dict
passed to :meth:`get_model_xlimits`
Returns
-------
chi2 : float
:math:`\chi^2` statistic or reduced :math:`\chi^2` statistic (:math:`\chi^2/n`)
.. math::
\chi^2 = \sum( (d_i - m_i)^2 / e_i^2 )
"""
modelmask = self._compare_to_threshold(threshold=threshold, **kwargs)
chi2 = np.sum((self.fullresiduals[modelmask]/self.errspec[modelmask])**2)
if reduced:
# vheight included here or not? assuming it should be...
dof = (modelmask.sum() -
self.fitter.npars - self.vheight +
np.sum(self.parinfo.fixed))
return chi2/dof
else:
return chi2
def get_pymc(self, **kwargs):
"""
Create a pymc MCMC sampler from the current fitter. Defaults to 'uninformative' priors
`kwargs` are passed to the fitter's get_pymc method, with parameters defined below.
Parameters
----------
data : np.ndarray
error : np.ndarray
use_fitted_values : bool
Each parameter with a measured error will have a prior defined by
the Normal distribution with sigma = par.error and mean = par.value
Examples
--------
>>> x = pyspeckit.units.SpectroscopicAxis(np.linspace(-10,10,50), unit='km/s')
>>> e = np.random.randn(50)
>>> d = np.exp(-np.asarray(x)**2/2.)*5 + e
>>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
>>> sp.specfit(fittype='gaussian')
>>> MCuninformed = sp.specfit.get_pymc()
>>> MCwithpriors = sp.specfit.get_pymc(use_fitted_values=True)
>>> MCuninformed.sample(1000)
>>> MCuninformed.stats()['AMPLITUDE0']
>>> # WARNING: This will fail because width cannot be set <0, but it may randomly reach that...
>>> # How do you define a likelihood distribution with a lower limit?!
>>> MCwithpriors.sample(1000)
>>> MCwithpriors.stats()['AMPLITUDE0']
"""
if hasattr(self.fitter,'get_pymc'):
return self.fitter.get_pymc(self.Spectrum.xarr, self.spectofit,
self.errspec, **kwargs)
else:
raise AttributeError("Fitter %r does not have pymc implemented." % self.fitter)
def get_emcee(self, nwalkers=None, **kwargs):
"""
Get an emcee walker ensemble for the data & model using the current model type
Parameters
----------
data : np.ndarray
error : np.ndarray
nwalkers : int
Number of walkers to use. Defaults to 2 * (fitter.npars * fitter.npeaks + fitter.vheight)
Examples
--------
>>> import pyspeckit
>>> x = pyspeckit.units.SpectroscopicAxis(np.linspace(-10,10,50), unit='km/s')
>>> e = np.random.randn(50)
>>> d = np.exp(-np.asarray(x)**2/2.)*5 + e
>>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
>>> sp.specfit(fittype='gaussian')
>>> emcee_ensemble = sp.specfit.get_emcee()
>>> p0 = emcee_ensemble.p0 * (np.random.randn(*emcee_ensemble.p0.shape) / 10. + 1.0)
>>> pos,logprob,state = emcee_ensemble.run_mcmc(p0,100)
"""
import emcee
if hasattr(self.fitter,'get_emcee_ensemblesampler'):
if nwalkers is None:
    nwalkers = (self.fitter.npars * self.fitter.npeaks + self.fitter.vheight) * 2
emc = self.fitter.get_emcee_ensemblesampler(self.Spectrum.xarr,
self.spectofit,
self.errspec, nwalkers)
emc.nwalkers = nwalkers
emc.p0 = np.array([self.parinfo.values] * emc.nwalkers)
return emc
def get_components(self, **kwargs):
"""
If a model has been fitted, return the components of the model
Parameters
----------
kwargs are passed to self.fitter.components
"""
if self.modelpars is not None:
self.modelcomponents = self.fitter.components(self.Spectrum.xarr,
self.modelpars, **kwargs)
return self.modelcomponents
def measure_approximate_fwhm(self, threshold='error', emission=True,
interpolate_factor=1, plot=False,
grow_threshold=2, **kwargs):
"""
Measure the FWHM of a fitted line
This procedure is designed for multi-component *blended* lines; if the
true FWHM is known (i.e., the line is well-represented by a single
gauss/voigt/lorentz profile), use that instead. Do not use this for
multiple independently peaked profiles.
This MUST be run AFTER a fit has been performed!
Parameters
----------
threshold : 'error' | float
The threshold above which the spectrum will be interpreted as part
of the line. This threshold is applied to the *model*. If it is
'error', self.Spectrum.error will be used.
emission : bool
Is the line absorption or emission?
interpolate_factor : integer
Magnification factor for determining sub-pixel FWHM. If used,
"zooms-in" by using linear interpolation within the line region
plot : bool
Overplot a line at the FWHM indicating the FWHM. kwargs
are passed to matplotlib.plot
grow_threshold : int
Minimum number of valid points. If the total # of points above the
threshold is <= to this number, it will be grown by 1 pixel on each side
Returns
-------
The approximated FWHM, if it can be computed
If there are <= 2 valid pixels, a fwhm cannot be computed
"""
if threshold == 'error':
threshold = self.Spectrum.error
if np.all(self.Spectrum.error==0):
threshold = 1e-3*self.Spectrum.data.max()
if self.Spectrum.baseline.subtracted is False:
data = self.Spectrum.data - self.Spectrum.baseline.basespec
else:
data = self.Spectrum.data * 1
model = self.get_full_model(add_baseline=False)
if np.count_nonzero(model) == 0:
raise ValueError("The model is all zeros. No FWHM can be "
"computed.")
# can modify inplace because data is a copy of self.Spectrum.data
if not emission:
data *= -1
model *= -1
line_region = model > threshold
if line_region.sum() == 0:
raise ValueError("No valid data included in FWHM computation")
if line_region.sum() <= grow_threshold:
line_region[line_region.argmax()-1:line_region.argmax()+1] = True
reverse_argmax = len(line_region) - line_region.argmax() - 1
line_region[reverse_argmax-1:reverse_argmax+1] = True
log.warning("Fewer than {0} pixels were identified as part of the fit."
" To enable statistical measurements, the range has been"
" expanded by 2 pixels including some regions below the"
" threshold.".format(grow_threshold))
# determine peak (because data is neg if absorption, always use max)
peak = data[line_region].max()
xarr = self.Spectrum.xarr[line_region]
xarr.make_dxarr()
cd = xarr.dxarr.min()
if interpolate_factor > 1:
newxarr = units.SpectroscopicAxis(np.arange(xarr.min().value-cd.value,
xarr.max().value+cd.value,
cd.value /
float(interpolate_factor)
),
unit=xarr.unit,
equivalencies=xarr.equivalencies
)
# load the metadata from xarr
# newxarr._update_from(xarr)
data = np.interp(newxarr,xarr,data[line_region])
xarr = newxarr
else:
data = data[line_region]
# need the peak location so we can find left/right half-max locations
peakloc = data.argmax()
hm_left = np.argmin(np.abs(data[:peakloc]-peak/2.))
hm_right = np.argmin(np.abs(data[peakloc:]-peak/2.)) + peakloc
deltax = xarr[hm_right]-xarr[hm_left]
if plot:
# for plotting, use a negative if absorption
sign = 1 if emission else -1
# shift with baseline if baseline is plotted
if not self.Spectrum.baseline.subtracted:
basespec = self.Spectrum.baseline.get_model(xarr)
yoffleft = self.Spectrum.plotter.offset + basespec[hm_left]
yoffright = self.Spectrum.plotter.offset + basespec[hm_right]
else:
yoffleft = yoffright = self.Spectrum.plotter.offset
log.debug("peak={2} yoffleft={0} yoffright={1}".format(yoffleft, yoffright, peak))
log.debug("hm_left={0} hm_right={1} xarr[hm_left]={2} xarr[hm_right]={3}".format(hm_left, hm_right, xarr[hm_left], xarr[hm_right]))
self.Spectrum.plotter.axis.plot([xarr[hm_right].value,
xarr[hm_left].value],
np.array([sign*peak/2.+yoffleft,
sign*peak/2.+yoffright]),
**kwargs)
self.Spectrum.plotter.refresh()
# debug print hm_left,hm_right,"FWHM: ",deltax
# debug self.Spectrum.plotter.axis.plot(xarr,data,color='magenta')
# debug self.Spectrum.plotter.refresh()
# debug raise TheDead
return deltax
def _validate_parinfo(self, parinfo, mode='fix'):
assert mode in ('fix','raise','check','guesses')
any_out_of_range = []
for param in parinfo:
if (param.limited[0] and (param.value < param.limits[0])):
if (np.allclose(param.value, param.limits[0])):
# nextafter -> next representable float
if mode in ('fix', 'guesses'):
warn("{0} is less than the lower limit {1}, but very close."
" Converting to {1}+ULP".format(param.value,
param.limits[0]))
param.value = np.nextafter(param.limits[0], param.limits[0]+1)
elif mode == 'raise':
raise ValueError("{0} is less than the lower limit {1}, but very close."
.format(param.value, param.limits[0]))
elif mode == 'check':
any_out_of_range.append("lt:close",)
else:
raise ValueError("{0} is less than the lower limit {1}"
.format(param.value, param.limits[0]))
elif mode == 'check':
any_out_of_range.append(False)
if (param.limited[1] and (param.value > param.limits[1])):
if (np.allclose(param.value, param.limits[1])):
if mode in ('fix', 'guesses'):
param.value = np.nextafter(param.limits[1], param.limits[1]-1)
warn("{0} is greater than the upper limit {1}, but very close."
" Converting to {1}-ULP".format(param.value,
param.limits[1]))
elif mode == 'raise':
raise ValueError("{0} is greater than the upper limit {1}, but very close."
.format(param.value, param.limits[1]))
elif mode == 'check':
any_out_of_range.append("gt:close")
else:
raise ValueError("{0} is greater than the upper limit {1}"
.format(param.value, param.limits[1]))
elif mode == 'check':
any_out_of_range.append(False)
if mode == 'guesses':
return parinfo.values
return any_out_of_range
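# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original pyspeckit module).
# A minimal, hedged example of how the Specfit machinery above is typically
# driven from a Spectrum object; the 'gaussian' fittype and the
# [amplitude, center, width] guesses follow the conventions shown in the
# docstrings above, but treat this as a sketch rather than canonical usage.
def _example_specfit_usage():
    import numpy as np
    import pyspeckit
    xarr = np.linspace(-50, 50, 200)
    signal = np.exp(-(xarr - 5)**2 / (2 * 3.0**2))
    noise = np.random.randn(200) * 0.1
    sp = pyspeckit.Spectrum(data=signal + noise, xarr=xarr,
                            error=np.ones(200) * 0.1,
                            xarrkwargs={'units': 'km/s'})
    sp.plotter()  # create a plot window so the fit can be overlaid
    # fit a single Gaussian and overlay/annotate the composite fit
    sp.specfit(fittype='gaussian', guesses=[1, 5, 3])
    sp.specfit.plot_fit(annotate=True)
    # direct integral over the region where the model is significant, with
    # the (approximate) error described in the integral() docstring above
    flux, flux_err = sp.specfit.integral(direct=True, return_error=True)
    return flux, flux_err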
| allisony/pyspeckit | pyspeckit/spectrum/fitters.py | Python | mit | 91,957 | [
"Gaussian"
] | 260d7ae411b91a15e9e83f6cc8254648d3292bf83b3b730d29fe70728bbfb92c |
# -*- coding: utf-8 -*-
#########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2013 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import numpy
import moose
useY = False
def makeModel():
# create container for model
moose.Neutral( 'model' )
compartment = moose.CubeMesh( '/model/compartment' )
compartment.volume = 1e-15
# the mesh is created automatically by the compartment
moose.element( '/model/compartment/mesh' )
# create molecules and reactions
# a <----> b
# b + 10c ---func---> d
a = moose.Pool( '/model/compartment/a' )
b = moose.Pool( '/model/compartment/b' )
c = moose.Pool( '/model/compartment/c' )
d = moose.BufPool( '/model/compartment/d' )
reac = moose.Reac( '/model/compartment/reac' )
func = moose.Function( '/model/compartment/d/func' )
func.numVars = 2
#func.x.num = 2
# connect them up for reactions
moose.connect( reac, 'sub', a, 'reac' )
moose.connect( reac, 'prd', b, 'reac' )
if useY:
moose.connect( func, 'requestOut', b, 'getN' )
moose.connect( func, 'requestOut', c, 'getN' )
else:
moose.connect( b, 'nOut', func.x[0], 'input' )
moose.connect( c, 'nOut', func.x[1], 'input' )
moose.connect( func, 'valueOut', d, 'setN' )
if useY:
func.expr = "y0 + 10*y1"
else:
func.expr = "x0 + 10*x1"
# connect them up to the compartment for volumes
#for x in ( a, b, c, cplx1, cplx2 ):
# moose.connect( x, 'mesh', mesh, 'mesh' )
# Assign parameters
a.concInit = 1
b.concInit = 0.5
c.concInit = 0.1
reac.Kf = 0.001
reac.Kb = 0.01
# Create the output tables
moose.Neutral( '/model/graphs' )
outputA = moose.Table2 ( '/model/graphs/concA' )
outputB = moose.Table2 ( '/model/graphs/concB' )
outputC = moose.Table2 ( '/model/graphs/concC' )
outputD = moose.Table2 ( '/model/graphs/concD' )
# connect up the tables
moose.connect( outputA, 'requestOut', a, 'getConc' );
moose.connect( outputB, 'requestOut', b, 'getConc' );
moose.connect( outputC, 'requestOut', c, 'getConc' );
moose.connect( outputD, 'requestOut', d, 'getConc' );
def test_func_change_expr():
makeModel()
ksolve = moose.Ksolve( '/model/compartment/ksolve' )
stoich = moose.Stoich( '/model/compartment/stoich' )
stoich.compartment = moose.element( '/model/compartment' )
stoich.ksolve = ksolve
stoich.path = "/model/compartment/##"
moose.reinit()
moose.start( 100.0 )
func = moose.element( '/model/compartment/d/func' )
if useY:
func.expr = "-y0 + 10*y1"
else:
func.expr = "-x0 + 10*x1"
moose.start( 100.0 )
b = moose.element('/model/compartment/b')
assert int(b.n) == int(106384558.57472235), b.n
xs = func.x
assert len(xs.value) == 2, (len(xs.value), xs.value)
assert (xs.value == [0, 0]).all(), xs.value
if __name__ == '__main__':
test_func_change_expr()
| dilawar/moose-core | tests/core/test_function_change_expr.py | Python | gpl-3.0 | 3,325 | [
"MOOSE"
] | 30da864222340dbefcd5ff5a98c97b1e7202053a4037920f7374b25a10c8f4ee |
########################################################################
# File : ComputingElementFactory.py
# Author : Stuart Paterson
########################################################################
""" The Computing Element Factory has one method that instantiates a given Computing Element
from the CEUnique ID specified in the JobAgent configuration section.
"""
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Resources.Computing.ComputingElement import getCEConfigDict
from DIRAC.Core.Utilities import ObjectLoader
__RCSID__ = "$Id$"
class ComputingElementFactory(object):
#############################################################################
def __init__(self, ceType=''):
""" Standard constructor
"""
self.ceType = ceType
self.log = gLogger.getSubLogger(self.ceType)
#############################################################################
def getCE(self, ceType='', ceName='', ceParametersDict={}):
"""This method returns the CE instance corresponding to the supplied
CEUniqueID. If no corresponding CE is available, this is indicated.
"""
self.log = gLogger.getSubLogger(ceType)
self.log.verbose('Creating CE of %s type with the name %s' % (ceType, ceName))
ceTypeLocal = ceType
if not ceTypeLocal:
ceTypeLocal = self.ceType
ceNameLocal = ceName
if not ceNameLocal:
ceNameLocal = self.ceType
ceConfigDict = getCEConfigDict(ceNameLocal)
self.log.verbose('CEConfigDict', ceConfigDict)
if 'CEType' in ceConfigDict:
ceTypeLocal = ceConfigDict['CEType']
if not ceTypeLocal:
error = 'Can not determine CE Type'
self.log.error(error)
return S_ERROR(error)
subClassName = "%sComputingElement" % (ceTypeLocal)
objectLoader = ObjectLoader.ObjectLoader()
result = objectLoader.loadObject('Resources.Computing.%s' % subClassName, subClassName)
if not result['OK']:
gLogger.error('Failed to load object', '%s: %s' % (subClassName, result['Message']))
return result
ceClass = result['Value']
try:
computingElement = ceClass(ceNameLocal)
# Always set the CEType parameter according to instantiated class
ceDict = {'CEType': ceTypeLocal}
if ceParametersDict:
ceDict.update(ceParametersDict)
computingElement.setParameters(ceDict)
except BaseException as x:
msg = 'ComputingElementFactory could not instantiate %s object: %s' % (subClassName, str(x))
self.log.exception()
self.log.warn(msg)
return S_ERROR(msg)
return S_OK(computingElement)
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
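# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original DIRAC module). A hedged
# example of driving the factory defined above; 'InProcess' is assumed to be
# an available CE type and the CE name / parameter below are placeholders.
def _exampleGetCE():
    factory = ComputingElementFactory()
    result = factory.getCE(ceType='InProcess', ceName='MyLocalCE',
                           ceParametersDict={'NumberOfProcessors': 1})
    if not result['OK']:
        gLogger.error('Could not create CE', result['Message'])
        return None
    # the returned object is a <Type>ComputingElement with parameters applied
    return result['Value']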
| andresailer/DIRAC | Resources/Computing/ComputingElementFactory.py | Python | gpl-3.0 | 2,678 | [
"DIRAC"
] | 02ce9360bc47d5085c9a2278e062f76ae928bf83a78986f844a0669a62ce5f41 |
""" Hiding calls to PYPIT used in arclines
Avoid double dependency if possible
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import pdb
from arclines import utils as al_utils
def find_peaks(censpec):
"""
Parameters
----------
censpec : ndarray
The extracted arc spectrum; either 1D, or 3D with the spectrum in censpec[:, 0]
Returns
-------
tampl, tcent, twid, w, detns
Gaussian amplitudes, centroids and widths of the fitted peaks, the indices
of detections passing the width/centroid sanity cuts, and the detection array
"""
fitp = 7 #slf._argflag['arc']['calibrate']['nfitpix']
if len(censpec.shape) == 3:
detns = censpec[:, 0].flatten()
else:
detns = censpec.copy()
xrng = np.arange(float(detns.size))
# Find all significant detections
pixt = np.where((detns > 0.0) &
(detns > np.roll(detns, 1)) & (detns >= np.roll(detns, -1)) &
(np.roll(detns, 1) > np.roll(detns, 2)) & (np.roll(detns, -1) > np.roll(detns, -2)) &
(np.roll(detns, 2) > np.roll(detns, 3)) & (np.roll(detns, -2) > np.roll(detns, -3)))[0]
tampl, tcent, twid = fit_arcspec(xrng, detns, pixt, fitp)
w = np.where((np.isnan(twid) == False) & (twid > 0.0) & (twid < 10.0/2.35) & (tcent > 0.0) & (tcent < xrng[-1]))
# Return
return tampl, tcent, twid, w, detns
def fit_arcspec(xarray, yarray, pixt, fitp):
# Setup the arrays with fit parameters
sz_p = pixt.size
sz_a = yarray.size
ampl, cent, widt = -1.0*np.ones(sz_p, dtype=np.float),\
-1.0*np.ones(sz_p, dtype=np.float),\
-1.0*np.ones(sz_p, dtype=np.float)
for p in range(sz_p):
pmin = pixt[p]-(fitp-1)//2
pmax = pixt[p]-(fitp-1)//2 + fitp
if pmin < 0:
pmin = 0
if pmax > sz_a:
pmax = sz_a
if pmin == pmax:
continue
if pixt[p]-pmin <= 1 or pmax-pixt[p] <= 1:
continue # Probably won't be a good solution
# Fit the gaussian
try:
popt = al_utils.func_fit(xarray[pmin:pmax], yarray[pmin:pmax], "gaussian", 3)
ampl[p] = popt[0]
cent[p] = popt[1]
widt[p] = popt[2]
except RuntimeError:
pass
return ampl, cent, widt
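# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). A hedged
# example: build a synthetic arc spectrum with a few Gaussian lines, run
# find_peaks(), and keep only the detections passing the sanity cuts in `w`.
def _example_find_peaks():
    x = np.arange(500, dtype=float)
    spec = np.zeros_like(x)
    for cen in (50., 200., 380.):
        spec += 100. * np.exp(-(x - cen)**2 / (2 * 2.0**2))
    tampl, tcent, twid, w, detns = find_peaks(spec)
    # centroids and amplitudes of the lines that survived the cuts
    return tcent[w], tampl[w]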
| PYPIT/arclines | arclines/pypit_utils.py | Python | bsd-3-clause | 2,208 | [
"Gaussian"
] | 52426af0c26e8ccd74d73a416acdbdd7ebc35e0407cb0f48159541fbe68cfc54 |
# coding: utf-8
# Copyright 2016 Thomas Schatz, Xuan-Nga Cao, Mathieu Bernard
#
# This file is part of abkhazia: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abkhazia is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with abkhazia. If not, see <http://www.gnu.org/licenses/>.
"""Data preparation for the revised Buckeye corpus"""
import collections
import joblib
import os
import re
import abkhazia.utils as utils
from abkhazia.corpus.prepare import AbstractPreparator
def _split_raw_utt(raw_utt):
split_re = ('<CUTOFF-.+?>|<EXCLUDE-.+?>|<HES-.+?>|<Hes-.+?>|'
'<EXT-.+?>|<EXt-.+?>|<EXT_.+?>|<LAUGH-.+?>|<LAUGH>|<ERROR.*?>')
return [u.strip() for u in re.split(split_re, raw_utt)]
_Word = collections.namedtuple('Word', 'word, time')
def _load_word(line):
"""Return a pair (word/time) for a line from a *.words_fold file"""
match = re.match(
r'\s*(.*)\s+(121|122)\s(.*);(.*); (.*); (.*)', line)
assert match, line
return _Word(word=match.group(3), time=float(match.group(1)))
def _parse_utterances(txt_file, phn_file, log):
segments = dict()
utt2spk = dict()
text = dict()
# log.info('loading %s', os.path.basename(txt_file))
# /path/to/.../s2202b.txt -> s2202b
speaker_id = os.path.splitext(os.path.basename(txt_file))[0]
# load the current files
txt_data = [l.strip() for l in open(txt_file, 'r') if l.strip()]
phn_data = [_load_word(l)
for l in open(phn_file, 'r').readlines()[9:]
if l.strip()]
# check we have the same number of words in txt and phn
if len(phn_data) != sum(len(l.split()) for l in txt_data if l):
raise AssertionError(
'{} and {} have a different word count, exiting'
.format(txt_file, phn_file))
# split raw utterances (lines) into cleaned subutterances
utterances = [
l for l in
sum([_split_raw_utt(line) for line in txt_data], []) if l]
phn_index = 0
for utt_index, utt_txt in enumerate(utterances, start=1):
utt_id = '{}-sent{}'.format(speaker_id, utt_index)
utt_words = utt_txt.split()
while utt_words[0] != phn_data[phn_index].word:
# self.log.warning('skipping %s', phn_data[phn_index].word)
phn_index += 1
tstart = phn_data[phn_index-1].time if phn_index else 0
phn_index += len(utt_words) - 1
assert phn_data[phn_index].word == utt_words[-1], \
'match error: {}\t{}'.format(
phn_data[phn_index].word, utt_words[-1])
tstop = phn_data[phn_index].time
phn_index += 1
segments[utt_id] = (speaker_id, tstart, tstop)
utt2spk[utt_id] = speaker_id
text[utt_id] = re.sub('{(B|E)_TRANS}', '', utt_txt)
return (segments, utt2spk, text)
class BuckeyePreparator(AbstractPreparator):
"""Convert the Buckeye corpus to the abkhazia format"""
name = 'buckeye'
url = 'http://buckeyecorpus.osu.edu'
audio_format = 'wav'
description = 'Buckeye Corpus of conversational speech'
long_description = '''
The Buckeye Corpus of conversational speech contains high-quality
recordings from 40 speakers in Columbus OH conversing freely with
an interviewer. The speech has been orthographically transcribed
and phonetically labeled. The audio and text files, together with
time-aligned phonetic labels, are stored in a format for use with
speech analysis software (Xwaves and Wavesurfer). Software for
searching the transcription files is currently being written. The
corpus is FREE for noncommercial uses.
This project is funded by the National Institute on Deafness and
other Communication Disorders and the Office of Research at Ohio
State University.'''
# IPA transcriptions for all phones in the Buckeye corpus. The
# following phones are never found in the transcriptions: set([u'own',
# u'ahn', u'ihn', u'ayn', u'NSN', u'eyn', u'oyn', u'ehn', u'iyn',
# u'B', u'E', u'uhn', u'aon', u'awn', u'uwn', u'aan', u'ern', u'aen'])
# Reason: we already collapsed them in the foldings_version
# 20th March 2017 update:
# Some tags are removed or mapped differently to keep
# coherence between different corpora:
# - {B_TRANS} and {E_TRANS} are removed since they only mark the
# beginning and end of the transcription
# - VOCNOISE and LAUGH are mapped to SPN (spoken noise)
# - NOISE and IVER are mapped to NSN (non spoken noise)
#
# June 2017 update :
# - The following folds are folded: em -> m, en -> n, eng -> ng
# and el -> l
phones = {
'aa': u'ɑː',
'ae': u'æ',
'ah': u'ʌ',
'ao': u'ɔː',
'aw': u'aʊ',
'ay': u'aɪ',
'eh': u'ɛ',
'er': u'ɝ',
'ey': u'eɪ',
'iy': u'iː',
'ih': u'ɪ',
'oy': u'ɔɪ',
'ow': u'oʊ',
'uh': u'ʊ',
'uw': u'uː',
'jh': u'ʤ',
'ch': u'ʧ',
'b': u'b',
'd': u'd',
'g': u'g',
'p': u'p',
't': u't',
'k': u'k',
'dx': u'ɾ',
's': u's',
'sh': u'ʃ',
'z': u'z',
'zh': u'ʒ',
'f': u'f',
'th': u'θ',
'v': u'v',
'dh': u'ð',
'm': u'm',
'n': u'n',
'ng': u'ŋ',
'l': u'l',
'r': u'r',
'w': u'w',
'y': u'j',
'hh': u'h',
'tq': u'ʔ',
'CUTOFF': u'CUTOFF',
'ERROR': u'ERROR',
'EXCLUDE': u'EXCLUDE',
'UNKNOWN_WW': u'UNKNOWN_WW',
'UNKNOWN': u'UNKNOWN',
'HESITATION_TAG': u'HESITATION_TAG',
'LENGTH_TAG': u'LENGTH_TAG',
}
silences = [u"NSN"] # SPN and SIL will be added automatically
variants = [] # could use lexical stress variants...
def __init__(self, input_dir, log=utils.logger.null_logger(),
copy_wavs=False, njobs=4):
super(BuckeyePreparator, self).__init__(input_dir, log=log)
self.copy_wavs = copy_wavs
self.segments = dict()
self.text = dict()
self.utt2spk = dict()
# the input text and lexicon files we will parse
txt_files = self._list_files('.txt', exclude=['readme'])
phn_files = [f.replace('.txt', '.words_fold') for f in txt_files]
# for each pair of text/lexicon files, update the
# segments/text/utt2spk dictionaries
res = joblib.Parallel(
n_jobs=njobs, verbose=0, backend="threading")(
joblib.delayed(_parse_utterances)
(txt_file, phn_file, self.log)
for txt_file, phn_file in zip(txt_files, phn_files))
for s, u, t in res:
self.segments.update(s)
self.utt2spk.update(u)
self.text.update(t)
def _list_files(self, ext, exclude=None, abspath=False, realpath=False):
files = utils.list_files_with_extension(
self.input_dir, ext, abspath=abspath, realpath=realpath)
if exclude is not None:
files = [f for f in files for e in exclude if e not in f]
return files
def list_audio_files(self):
return self._list_files('.wav', abspath=True, realpath=True)
def make_segment(self):
return self.segments
def make_speaker(self):
return self.utt2spk
def make_transcription(self):
t = self.text
t['s2202b-sent29'] = t['s2202b-sent29'].replace("p's", "p")
return t
def make_lexicon(self):
"""Build the buckeye lexicon from the *.words_fold files"""
lexicon = dict()
no_lexicon = set()
files = self._list_files('.words_fold')
for line in (l for f in files for l in open(f, 'r')):
match = re.match(
r'\s\s+(.*)\s+(121|122)\s(.*);(.*); (.*); (.*)', line)
if match:
word = match.group(3)
phones = match.group(5)
# merge phones together
phones = phones.replace('em', 'm')
phones = phones.replace('el', 'l')
phones = phones.replace('en', 'n')
phones = phones.replace('eng', 'ng')
phones = phones.replace('nx', 'dx')
# replace VOCNOISE/VOCNOISE_WW/LAUGH by SPN
phones = phones.replace('UNKNOWN_WW', 'SPN')
phones = phones.replace('UNKNOWN', 'SPN')
phones = phones.replace('VOCNOISE_WW', 'SPN')
phones = phones.replace('VOCNOISE', 'SPN')
phones = phones.replace('LAUGH', 'SPN')
# replace IVER/NOISE/NOISE_WW by NSN
phones = phones.replace('NOISE_WW', 'NSN')
phones = phones.replace('NOISE', 'NSN')
phones = phones.replace('IVER', 'NSN')
# add the word to lexicon
if phones:
# TODO Here we can check if (and when) we have
# several transcriptions per word (alternate
# pronunciations) and choose the most FREQUENT
# one. Here we are keeping only the most RECENT.
lexicon[word] = phones
else:
no_lexicon.add(word)
# detect the words with no transcription
really_no_lexicon = [t for t in no_lexicon if t not in lexicon]
if really_no_lexicon:
self.log.debug(
'following words have no transcription in lexicon: {}'
.format(really_no_lexicon))
# retain only the words present in the text
corpus_words = set(w for u in self.text.values() for w in u.split())
return {k: v for k, v in lexicon.items() if k in corpus_words}
# TODO The following code is dedicated to manual alignments. It should
# be more integrated with abkhazia (maybe have a
# BuckeyeAlignedPreparator child class?). See also
# abkhazia/egs/triphones_buckeye.py for an example of how to use the
# GetAlignment class
#
# For now the preparator works on word alignments to extract segments
# (utterance boundaries). But there are many small differences between the
# word-level and phone-level alignments in Buckeye; about 1/3 of utterances
# are concerned.
class GetAlignment(object):
"""Extract Buckeye manual phone alignments at utterance level"""
def __init__(self, buckeye_dir):
self.alignment = {}
self.buckeye_dir = buckeye_dir
def __call__(self, record, tstart, tstop):
"""Return phones alignment for a given record interval"""
if record not in self.alignment:
self._load_record(record)
return list(self._yield_utt(record, tstart, tstop))
def _load_record(self, record):
"""Init self.alignment with a given record, load the file"""
record_file = os.path.join(
self.buckeye_dir, record[:3], record, record + '.phones_fold')
self.alignment[record] = [a for a in self._yield_file(record_file)]
def _yield_file(self, record_file):
"""Yield (tstart, tstop, phone) from a phones alignment file"""
tstart = 0.0
for line in (
l[2:] for l in open(record_file, 'r') if l.startswith(' ')):
tstop, _, phone = line.split()
yield float(tstart), float(tstop), phone
tstart = tstop
def _yield_utt(self, record, tstart, tstop):
"""Yield (tstart, tstop, phone) for a given record interval"""
for begin, end, phone in self.alignment[record]:
if end >= tstop:
yield begin, end, phone
break
if begin >= tstart:
yield begin, end, phone
def validate_phone_alignment(corpus, alignment, log=utils.logger.get_log()):
"""Return True if the phone alignment is coherent with the corpus
Return False on any other case, send a log message for all
suspicious alignments.
"""
error_utts = set()
# check all utterances one by one
for utt in corpus.utts():
# corpus side
_, utt_tstart, utt_tstop = corpus.segments[utt]
# alignment side
ali_tstart = alignment[utt][0][0]
ali_tstop = alignment[utt][-1][1]
# validation
if utt_tstart != ali_tstart:
error_utts.add(utt)
log.warn(
'%s tstart error in corpus and alignment (%s != %s)',
utt, utt_tstart, ali_tstart)
if utt_tstop != ali_tstop:
error_utts.add(utt)
log.warn(
'%s : tstop error in corpus and alignment: %s != %s',
utt, utt_tstop, ali_tstop)
if error_utts:
log.error(
'timestamps are not valid for %s/%s utterances',
len(error_utts), len(corpus.utts()))
return False
log.info('alignment is valid for all utterances')
return True
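# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original preparator). A hedged example
# of what _load_word() extracts from a single *.words_fold line; the line
# below is made up to illustrate the expected Buckeye format
# (end time, color code, word; citation form; realized phones; POS tag).
def _example_load_word():
    line = '  12.345678  121 okay; ow k ey; ow k ey; NN'
    word = _load_word(line)
    # -> ('okay', 12.345678)
    return word.word, word.time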
| bootphon/abkhazia | abkhazia/corpus/prepare/buckeye_preparator.py | Python | gpl-3.0 | 13,370 | [
"COLUMBUS"
] | 80a7238b4ad94e6748a738b409ed85281f8383bf38202e5db22ab5edd4137198 |
# -*- coding: utf-8 -*-
"""Mocks for PyBEL testing."""
import itertools as itt
import os
from unittest import mock
from .constants import bel_dir_path, belanno_dir_path, belns_dir_path
from .utils import get_uri_name
__all__ = [
"MockResponse",
"MockSession",
"mock_bel_resources",
]
_responses = [
("go.belns", os.path.join(belns_dir_path, "go-names.belns")),
(
"hgnc-human-genes-20170725.belns",
os.path.join(belns_dir_path, "hgnc-names.belns"),
),
("chebi-20170725.belns", os.path.join(belns_dir_path, "chebi-names.belns")),
(
"species-taxonomy-id-20170511.belanno",
os.path.join(belanno_dir_path, "species-taxonomy-id.belanno"),
),
(
"confidence-1.0.0.belanno",
os.path.join(belanno_dir_path, "confidence-1.0.0.belanno"),
),
]
class MockResponse:
"""See http://stackoverflow.com/questions/15753390/python-mock-requests-and-the-response."""
def __init__(self, url_to_mock: str):
"""Build a mock for the requests Response object."""
_r = [
(".belns", os.path.join(belns_dir_path, get_uri_name(url_to_mock))),
(".belanno", os.path.join(belanno_dir_path, get_uri_name(url_to_mock))),
(".bel", os.path.join(bel_dir_path, get_uri_name(url_to_mock))),
]
self.path = None
for suffix, path in itt.chain(_responses, _r):
if url_to_mock.endswith(suffix):
self.path = path
break
if self.path is None:
raise ValueError("missing file")
if not os.path.exists(self.path):
raise ValueError("file doesn't exist: {}".format(self.path))
def iter_lines(self):
"""Iterate the lines of the mock file."""
with open(self.path, "rb") as file:
yield from file
def raise_for_status(self):
"""Mock raising an error, by not doing anything at all."""
class MockSession:
"""Patches the session object so requests can be redirected through the filesystem without rewriting BEL files."""
def mount(self, prefix, adapter):
"""Mock mounting an adapter by not doing anything."""
@staticmethod
def get(url: str):
"""Mock getting a URL by returning a mock response."""
return MockResponse(url)
def close(self):
"""Mock closing a connection by not doing anything."""
mock_bel_resources = mock.patch("bel_resources.utils.requests.Session", side_effect=MockSession)
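# Hedged usage sketch (the test below is an assumption, not part of this module):
# applied as a decorator or context manager, the patcher makes any code that goes
# through ``bel_resources.utils.requests.Session`` read the bundled test files
# instead of hitting the network, e.g.
#     @mock_bel_resources
#     def test_parse(self, mock_session):
#         ...  # code that would otherwise download .belns/.belanno resources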
| pybel/pybel | src/pybel/testing/mocks.py | Python | mit | 2,504 | [
"Pybel"
] | dbbc39a21bc6ba164f289e361d13d05609d03e136da0e25238cbd178764722b7 |
# coding: utf-8
# # Functional connectivity
# Read the data path and define the result path, create a list of atlas names, then extract time series and compute network connectivity per participant.
#
# All done on YNiC server.
# pandas version >= 0.17.0
# # Matching behavioural and FC data
import csv
import glob
import os
import pickle
import sys
import numpy as np
import pandas as pd
from src.utils import imputedata
'''
load data
'''
FC_subj = np.load('./data/interim/data_cross_corr_Yeo17_preprocessed_pptID.npy')
PATHS_FC = glob.glob('./data/interim/data_*_preprocessed.npy')
PATHS_ROI = glob.glob('./data/interim/data_*_ROI.npy')
def load_csv_pd(path, header_row_n):
df = pd.read_csv(path, header=header_row_n ,na_values= ' ')
return df.sort_values(by=['Anonymized ID'])
def select_data(df):
    """Keep the first row of each participant that also has FC data."""
    prev_subj = None
    df['include'] = 0
    for idx, row in df.iterrows():
        cur_subj = row['Anonymized ID']
        if cur_subj != prev_subj and row['Anonymized ID'] in FC_subj:
            # first occurrence of a participant with FC data: mark for inclusion
            df.set_value(idx, 'include', 1)
        elif cur_subj == prev_subj and row['Anonymized ID'] in FC_subj:
            # repeated row for an already included participant: skip
            pass
        else:
            # participant without FC data: skip
            pass
        prev_subj = cur_subj
    return df.query('include == 1')
# get Assesment csv path
PATHS = ['./data/interim//NKI_MRIQ_Age_merged.csv', './data/interim/NKI_MotionParameters.csv'] + \
sorted(glob.glob('./data/raw/CognitiveTasks/*.csv')) + \
['./data/raw/Questionnaires/8100_BDI-II_20161025.csv',
'./data/raw/Questionnaires/8100_STAI_20161025.csv',
'./data/raw/Questionnaires/8100_UPPS-P_20161025.csv',
'./data/raw/Questionnaires/8100_Demos_20161025.csv']
# load variable name txt
task_names = []
var_names = []
with open('./data/raw/selected_CognitiveTasks_labels.txt', 'rb') as f:
for line in f:
task_names.append(line.split()[0])
var_names.append(line.split()[1])
'''
filter data by missing meaures
'''
frame_df = []
print 'Number of participants with RS scan and task'
for i, path in enumerate(PATHS):
task_name = path.split('/')[-1].split('.csv')[0].split('_')[1]
if task_name in task_names:
# for anything other than MRIQ and Motion
df = load_csv_pd(path, header_row_n=1)
df = select_data(df) # select participant
df = df.set_index('Anonymized ID') # set ID as index for concatenation
if task_name == 'Demos':
df = df[~df.index.duplicated(keep='first')]
# select the varables to save
var_name = [var_names[i] for i, name in enumerate(task_names) if name == task_name]
df = df[var_name]
df = df.apply(pd.to_numeric, errors='coerce') # convert data to numerical values; if error, return nan
frame_df.append(df)
else:
df = load_csv_pd(path, header_row_n=0)
df = select_data(df) # select participant
df = df.set_index('Anonymized ID')
frame_df.append(df.iloc[:, :-1])
print '{:55}:{:5}'.format(path.split('/')[-1], df.shape[0])
# filter by age
df_cog_measure = pd.concat(frame_df, axis=1).query('55 >= AGE >= 18')
print '='
print '{:55}:{:5}'.format('Number of included participants between age 18 - 55', df_cog_measure.shape[0])
# drop cases with more than 5 missings - listwise
null_cases_per_subj = np.sum(pd.isnull(df_cog_measure.iloc[:, 3:]).values, axis=1)
excludeIdx = np.where(null_cases_per_subj>5)
df_cog_measure = df_cog_measure.drop(df_cog_measure.index[excludeIdx])
print '='
print '{:55}:{:5}'.format('Number of participants selected', df_cog_measure.shape[0])
# next use the ID information to find the appropriate FC data
dict_data = {}
for cur in PATHS_FC:
data_FC = np.load(cur)
data_FC_include = []
set_lab = cur.split('/')[-1].split('_')[3]
for i, ID in enumerate(FC_subj):
if ID in list(df_cog_measure.index):
data_FC_include.append(data_FC[i, :])
data_FC_include = np.array(data_FC_include)
print '{:10}:{:5}'.format(set_lab, data_FC_include.shape[0])
dict_data['FC_' + set_lab] = data_FC_include
# drop useless info
df_cog_measure = df_cog_measure.drop(['SubjectType', 'Visit'], axis=1)
print 'participants: ', df_cog_measure.shape[0]
'''
Preprocess data
'''
# impute outliers and missing values with mean, transform to z score
all_data = imputedata(df_cog_measure.values, 'mean')
# save every numerical variables in z score aside from age
dict_data['Age'] = np.reshape(df_cog_measure.values[:, 0], newshape=(df_cog_measure.values[:, 0].shape[0], 1))
dict_data['MRIQ'] = all_data[:, 1:32]
dict_data['Motion_power'] = np.reshape(all_data[:, 32], newshape=(all_data[:, 32].shape[0], 1))
dict_data['Motion_Jenkinson'] = np.reshape(all_data[:, 33], newshape=(all_data[:, 33].shape[0], 1))
dict_data['CognitiveMeasures'] = all_data[:, 34:-1]
dict_data['CognitiveMeasures_labels'] = list(df_cog_measure.columns)[34:-1]
dict_data['Gender'] = np.reshape(df_cog_measure.values[:, -1], newshape=(df_cog_measure.values[:, -1].shape[0], 1))
dict_data['IDs'] = list(df_cog_measure.index)
# load labels and save
MRIQ_labels = []
with open('./references/8100_MRIQ_QuestionKeys.csv', 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
MRIQ_labels.append(row[-1])
dict_data['MRIQ_labels'] = MRIQ_labels
# FC data ROI labes
for p in PATHS_ROI:
ROIlabs = list(np.load(p))
dat_name = p.split('_')[3] + '_ROIs'
dict_data[dat_name] = ROIlabs
# save all cognitive measures for later use. Need to be preprocessed
with open('./data/dict_SCCA_data_prepro_node-node.pkl', 'wb') as handle:
pickle.dump(dict_data, handle, protocol=pickle.HIGHEST_PROTOCOL)
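# Sketch for downstream use (not in the original notebook): the saved dictionary
# can be reloaded later with
#     with open('./data/dict_SCCA_data_prepro_node-node.pkl', 'rb') as handle:
#         dict_data = pickle.load(handle)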
| htwangtw/Patterns-of-Thought | notebooks/1.3-DataSelection.py | Python | mit | 5,632 | [
"VisIt"
] | a9150413bdb43241a8f12aa27f2c4514737dd1c19e3771a1427a303eed268b9d |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 15 12:05:40 2016
@author: sjjoo
"""
import sys
import mne
import matplotlib.pyplot as plt
from mne.utils import run_subprocess, logger
import os
from os import path as op
import copy
import shutil
import numpy as np
from numpy.random import randn
from scipy import stats as stats
import time
from functools import partial
from mne import set_config
set_config('MNE_MEMMAP_MIN_SIZE', '1M')
set_config('MNE_CACHE_DIR', '.tmp')
mne.set_config('MNE_USE_CUDA', 'true')
this_env = copy.copy(os.environ)
#fs_dir = '/mnt/diskArray/projects/freesurfer'
fs_dir = '/mnt/diskArray/projects/avg_fsurfer'
this_env['SUBJECTS_DIR'] = fs_dir
#this_env['FREESURFER_HOME'] = '/usr/local/freesurfer'
raw_dir = '/mnt/scratch/NLR_MEG4'
os.chdir(raw_dir)
subs = ['NLR_102_RS','NLR_103_AC','NLR_105_BB','NLR_110_HH','NLR_127_AM',
'NLR_130_RW','NLR_132_WP','NLR_133_ML','NLR_145_AC','NLR_150_MG',
'NLR_151_RD','NLR_152_TC','NLR_160_EK','NLR_161_AK','NLR_163_LF',
'NLR_164_SF','NLR_170_GM','NLR_172_TH','NLR_174_HS','NLR_179_GM',
'NLR_180_ZD','NLR_187_NB','NLR_201_GS','NLR_203_AM',
'NLR_204_AM','NLR_205_AC','NLR_206_LM','NLR_207_AH','NLR_211_LB',
'NLR_GB310','NLR_KB218','NLR_JB423','NLR_GB267','NLR_JB420',
'NLR_HB275','NLR_197_BK','NLR_GB355','NLR_GB387','NLR_HB205',
'NLR_IB217','NLR_IB319','NLR_JB227','NLR_JB486','NLR_KB396',
'NLR_IB357']
#for n, s in enumerate(subs):
# run_subprocess(['mne', 'watershed_bem', '--subject', subs[n],'--overwrite'], env=this_env)
# mne.bem.make_watershed_bem(subject = subs[n],subjects_dir=fs_dir,overwrite=True,preflood=20, show=True)
"""USE above code
mri_watershed -h 3 -useSRAS -surf /mnt/diskArray/projects/avg_fsurfer/NLR_205_AC/bem/watershed/NLR_205_AC /mnt/diskArray/projects/avg_fsurfer/NLR_205_AC/mri/T1.mgz /mnt/diskArray/projects/avg_fsurfer/NLR_205_AC/bem/watershed/ws
"""
"""
Run head_surf.m
"""
# Let's take a look...
#for n, s in enumerate(subs):
# mne.viz.plot_bem(subject=subs[n],subjects_dir=fs_dir,brain_surfaces='white', orientation='coronal')
#for n, s in enumerate(subs):
## os.chdir(os.path.join(fs_dir,subs[n],'bem'))
# run_subprocess(['mne', 'make_scalp_surfaces', '--subject', subs[n],
# '--overwrite','--no-decimate']) # Disable medium and sparse decimations (dense only)
# # otherwise, it gives errors
""" Co-register...
mne.gui.coregistration(tabbed=False,subject=subs[45],subjects_dir=fs_dir)
# Recommended way is to use mne coreg from terminal
"""
# Session 1
# subs are synced up with session1 folder names...
#
session1 = ['102_rs160618','103_ac150609','105_bb150713','110_hh160608','127_am151022',
'130_rw151221','132_wp160919','133_ml151124','145_ac160621','150_mg160606',
'151_rd160620','152_tc160422','160_ek160627','161_ak160627','163_lf160707',
'164_sf160707','170_gm160613','172_th160614','174_hs160620','179_gm160701',
'180_zd160621','187_nb161017','201_gs150818','203_am150831',
'204_am150829','205_ac151123','206_lm151119','207_ah160608','211_lb160617',
'nlr_gb310170614','nlr_kb218170619','nlr_jb423170620','nlr_gb267170620','nlr_jb420170621',
'nlr_hb275170622','197_bk170622','nlr_gb355170606','nlr_gb387170608','nlr_hb205170825',
'nlr_ib217170831','nlr_ib319170825','nlr_jb227170811','nlr_jb486170803','nlr_kb396170808',
'nlr_ib357170912']
#subs = ['NLR_205_AC','NLR_206_LM',
# 'NLR_207_AH','NLR_210_SB','NLR_211_LB'
# ]
#session1 = ['205_ac151208','205_ac160202',
# '206_lm151119',
# '206_lm160113','207_ah160608','207_ah160809','210_sb160822','211_lb160617','211_lb160823'
# ]
#n_subjects = len(subs)
"""
Forward model...
"""
#sourceFlag = np.ones((n_subjects,1))
#%%
#for n, s in enumerate(session1):
# os.chdir(os.path.join(raw_dir,session1[n]))
#
# if s[0:3] == 'nlr':
# subject = s[0:9].upper()
# else:
# subject = 'NLR_' + s[0:6].upper()
#
# os.chdir('inverse')
# fn = 'All_40-sss_eq_'+session1[n]+'-ave.fif'
# evoked = mne.read_evokeds(fn, condition=0,
# baseline=(None,0), kind='average', proj=True)
#
# info = evoked.info
#
# if os.path.isdir('../forward'):
# os.chdir('../forward')
## else:
## temp_src = '/mnt/scratch/NLR_MEG2/' + session1[n] + '/forward'
## temp_dest = '/mnt/scratch/NLR_MEG3/' + session1[n] + '/forward'
## shutil.copytree(temp_src, temp_dest)
# trans = session1[n] + '-trans.fif'
## Take a look at the sensors
# mne.viz.plot_trans(info, trans, subject=subs[n], dig=True,
# meg_sensors=True, subjects_dir=fs_dir)
#%%
#n = 0
#os.chdir(os.path.join(raw_dir,session1[n]))
#os.chdir('raw_fif')
#pos = mne.chpi.read_head_pos('102_rs160618_1_raw.pos')
#mne.viz.plot_head_positions(pos, mode='traces')
#%%
for n, s in enumerate(session1):
os.chdir(os.path.join(raw_dir,session1[n]))
if s[0:3] == 'nlr':
subject = s[0:9].upper()
else:
subject = 'NLR_' + s[0:6].upper()
os.chdir('inverse')
fn = 'All_40-sss_eq_'+session1[n]+'-ave.fif'
evoked = mne.read_evokeds(fn, condition=0,
baseline=(None,0), kind='average', proj=True)
info = evoked.info
if os.path.isdir('../forward'):
os.chdir('../forward')
else:
temp_src = '/mnt/scratch/NLR_MEG2/' + session1[n] + '/forward'
temp_dest = '/mnt/scratch/NLR_MEG3/' + session1[n] + '/forward'
shutil.copytree(temp_src, temp_dest)
trans = session1[n] + '-trans.fif'
# Take a look at the sensors
# mne.viz.plot_trans(info, trans, subject=subs[n], dig=True,
# meg_sensors=True, subjects_dir=fs_dir)
### Read source space
# spacing='oct6' #'ico5' # 10242 * 2
fn2 = subject + '-' + 'ico-5' + '-src.fif' # ico-5
if s == '205_ac151123' or s == '205_ac160202' or s == 'nlr_jb227170811': # NLR_205 has too small head for ico-5
fn2 = subject + '-' + 'oct-6' + '-src.fif'
os.chdir(os.path.join(fs_dir,subject,'bem'))
src = mne.read_source_spaces(fn2)
os.chdir(os.path.join(raw_dir,session1[n]))
os.chdir('forward')
#import numpy as np # noqa
#from mayavi import mlab # noqa
#from surfer import Brain # noqa
#
#brain = Brain('sample', 'lh', 'inflated', subjects_dir=subjects_dir)
#surf = brain._geo
#
#vertidx = np.where(src[0]['inuse'])[0]
#
#mlab.points3d(surf.x[vertidx], surf.y[vertidx],
# surf.z[vertidx], color=(1, 1, 0), scale_factor=1.5)
# Create BEM model
conductivity = (0.3,) # for single layer
#conductivity = (0.3, 0.006, 0.3) # for three layers
model = mne.make_bem_model(subject=subject, ico=5, # 5=20484, 4=5120
conductivity=conductivity,
subjects_dir=fs_dir)
bem = mne.make_bem_solution(model)
fn = session1[n] + '-bem-sol.fif'
mne.write_bem_solution(fn,bem)
# Now create forward model
fwd = mne.make_forward_solution(info, trans=trans, src=src, bem=bem,
meg=True, eeg=False, mindist=3.0, n_jobs=18)
fwd = mne.convert_forward_solution(fwd, surf_ori=True, force_fixed=True, copy=True)
fn = session1[n] + '-sss-fwd.fif'
mne.write_forward_solution(fn,fwd,overwrite=True)
#Inverse here
# os.chdir('../covariance')
# fn = session1[n] + '-40-sss-cov.fif'
# cov = mne.read_cov(fn)
#
# os.chdir('../inverse')
# # Free: loose = 1; Loose: loose = 0.2
# inv = mne.minimum_norm.make_inverse_operator(info, fwd, cov, loose=0., depth=0.8, use_cps=True)
#
# fn = session1[n] + '-fixed-depth8-inv.fif'
# mne.minimum_norm.write_inverse_operator(fn,inv)
| yeatmanlab/BrainTools | projects/NLR_MEG/forward_session1.py | Python | bsd-3-clause | 7,954 | [
"Mayavi"
] | d015c024aeebb37436cf01de5a32f99ea662a6a9fd9baab30710401b2c1724db |
#
# co_co_type_of_buffer_unique.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from pynestml.cocos.co_co import CoCo
from pynestml.meta_model.ast_input_line import ASTInputLine
from pynestml.meta_model.ast_neuron import ASTNeuron
from pynestml.utils.logger import Logger, LoggingLevel
from pynestml.utils.messages import Messages
from pynestml.visitors.ast_visitor import ASTVisitor
class CoCoTypeOfBufferUnique(CoCo):
"""
    This coco ensures that each spike buffer uses each of the modifiers 'inhibitory' and 'excitatory' at most once.
Allowed:
spike <- inhibitory spike
Not allowed:
spike <- inhibitory inhibitory spike
"""
name = 'type of buffer valid'
description = 'TODO'
def __init__(self):
self.neuronName = None
def check_co_co(self, node):
"""
Ensures the coco for the handed over neuron.
:param node: a single neuron instance.
:type node: ASTNeuron
"""
self.neuronName = node.get_name()
node.accept(TypeOfBufferUniqueVisitor())
class TypeOfBufferUniqueVisitor(ASTVisitor):
"""
This visitor ensures that all buffers are specified uniquely by keywords.
"""
def visit_input_line(self, node):
"""
Checks the coco on the current node.
:param node: a single input line.
:type node: ASTInputLine
"""
if node.is_spike():
if node.has_input_types() and len(node.get_input_types()) > 1:
inh = 0
ext = 0
for typ in node.get_input_types():
if typ.is_excitatory:
ext += 1
if typ.is_inhibitory:
inh += 1
if inh > 1:
code, message = Messages.get_multiple_keywords('inhibitory')
Logger.log_message(error_position=node.get_source_position(), code=code, message=message,
log_level=LoggingLevel.ERROR)
if ext > 1:
code, message = Messages.get_multiple_keywords('excitatory')
Logger.log_message(error_position=node.get_source_position(), code=code, message=message,
log_level=LoggingLevel.ERROR)
return
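# Hedged usage sketch (the invocation below is an assumption, not shown in this
# file): the coco is normally run by the coco manager on a parsed model, roughly
#     CoCoTypeOfBufferUnique().check_co_co(neuron)
# where ``neuron`` is the ASTNeuron produced by the NESTML front end; violations
# are reported through the Logger rather than raised.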
| kperun/nestml | pynestml/cocos/co_co_type_of_buffer_unique.py | Python | gpl-2.0 | 2,956 | [
"NEURON"
] | d452820abc2d218dc41f21892590c20b5830e6f5432c2c891098f579eb07e13a |
import iotkit
import sys
import time
import math
# Authentication/Account info
username = "bbaltz@yahoo.com"
password = "Passw0rd"
hostname = "dashboard.us.enableiot.com"
account_name = "Brian Baltz"
# Device/component info
device_id = "foobie3"
device_name = device_id + "-Device"
component_name = "response"
# make connection to IOTKit dashboard
comm = iotkit.connect(username, password, hostname)
acct = iotkit.account(comm, account_name)
device_info = {
"deviceId": str(device_id),
"gatewayId": str(device_id),
"name": device_name,
"tags": ["US", "California", "San Francisco"],
# if the device will be static, use this
# to remember where you put it
#"loc": [37.783944, -122.401289, 17],
"attributes": {
"vendor": "intel",
"platform": "x86",
"os": "linux"
}
}
try:
device = iotkit.device(comm, acct, device_id, device_info)
except NameError:
print "Error creating device"
sys.exit(1)
component = iotkit.component(device)
component.create_component("response.v1.0", component_name)
data = iotkit.data(device)
print "Submitting data..."
# generate data and submit
t0 = 0
for i in range(1,5):
x = (time.time() - t0) * 2.0 * 3.14 / 60.0
value = math.sin(x)
print x, value
now = int(time.time() * 1000)
data.save_data(component, now, value)
time.sleep(1)
t1 = time.time()
#retrieve data series
search = {
"from": 0,
#"to": time1,
"targetFilter": {
"deviceList": [device.device_id]
},
"metrics": [
{
"id": component.component_id,
"op": "none"
}
]#,
#"queryMeasureLocation": True
}
print "Reading data..."
data.get_data(component, search, None)
| bbaltz505/iotkit-libpy | getdata2.py | Python | bsd-3-clause | 1,792 | [
"Brian"
] | 6b8fe06219168881ad7b2ec2668669b12b71bad431601eb1e29608c792af5784 |
# (C) 2015 - Jaguar Land Rover.
#
# Mozilla Public License 2.0
#
# Python dbus service that faces SOTA interface
# of Software Loading manager.
import gtk
import dbus
import dbus.service
from dbus.mainloop.glib import DBusGMainLoop
import getopt
import sys
import time
import swm
import traceback
# Default command line arguments
update_id='media_player_1_2_3'
description='Media Player Update'
signature='d2889ee6bc1fe1f3d7c5cdaca78384776bf437b7c6ca4db0e2c8b1d22cdb8f4e'
update_file=''
active=True
class SOTAClientService(dbus.service.Object):
def __init__(self, image_file, signature):
# Store where we have the image file
self.image_file = image_file
# Store signature
self.signature = signature
# Define our own bus name
bus_name = dbus.service.BusName('org.genivi.SotaClient', bus=dbus.SessionBus())
# Define our own object on the sota_client bus
dbus.service.Object.__init__(self, bus_name, '/org/genivi/SotaClient')
@dbus.service.method('org.genivi.SotaClient',
async_callbacks=('send_reply', 'send_error'))
def initiateDownload(self,
update_id,
send_reply,
send_error):
global target
global command
global size
global description
global vendor
global path
print "Got initiateDownload"
print " ID: {}".format(update_id)
print "---"
# Send back an immediate reply since DBUS
# doesn't like python dbus-invoked methods to do
# their own calls (nested calls).
#
send_reply(True)
# Simulate download
print "Downloading"
for i in xrange(1,10):
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(0.1)
print
print "Done."
swm.dbus_method('org.genivi.SoftwareLoadingManager', 'downloadComplete', self.image_file, self.signature)
return None
@dbus.service.method('org.genivi.SotaClient')
def updateReport(self,
update_id,
results):
global active
print "Update report"
print " ID: {}".format(update_id)
for result in results:
print " operation_id: {}".format(result['id'])
print " code: {}".format(result['result_code'])
print " text: {}".format(result['result_text'])
print " ---"
print "---"
active = False
return None
def usage():
print "Usage:", sys.argv[0], "-u update_id -i image_file -d description \\"
print " -s signature [-c]"
print
print " -u update_id Pacakage id string. Default: 'media_player_1_2_3'"
print " -i image_file Path to update squashfs image."
print " -s signature RSA encrypted sha256um of image_file."
print " -c Request user confirmation."
print " -d description Description of update."
print
print "Example:", sys.argv[0],"-u boot_loader_2.10.9\\"
print " -i boot_loader.img \\"
print " -s 2889ee...4db0ed22cdb8f4e -c"
sys.exit(255)
try:
opts, args= getopt.getopt(sys.argv[1:], "u:d:i:s:c")
except getopt.GetoptError:
print "Could not parse arguments."
usage()
image_file = None
request_confirmation = False
for o, a in opts:
if o == "-u":
update_id = a
elif o == "-d":
description = a
elif o == "-i":
image_file = a
elif o == "-s":
signature = a
elif o == "-c":
request_confirmation = True
else:
print "Unknown option: {}".format(o)
usage()
if not image_file:
print
print "No -i image_file provided."
print
usage()
# Can we open the confirmation file?
try:
image_desc = open(image_file, "r")
except IOError as e:
print "Could not open {} for reading: {}".format(image_file, e)
sys.exit(255)
image_desc.close()
print "Will simulate downloaded update:"
print "Update ID: {}".format(update_id)
print "Description: {}".format(description)
print "Image file: {}".format(image_file)
print "User Confirmation: {}".format(request_confirmation)
try:
DBusGMainLoop(set_as_default=True)
sota_svc = SOTAClientService(image_file, signature)
# USE CASE
#
    # This sota_client will send an updateAvailable() call to the
    # software loading manager (SLM).
    #
    # If requested, the SLM will pop an operation confirmation dialog on the HMI.
    #
    # If confirmed, the SLM will make an initiateDownload() callback to
    # this sota_client.
    #
    # The sota_client will, on simulated download completion, make a
    # downloadComplete() call to the SLM to indicate that the update is
# ready to be processed.
#
# The SLM will mount the provided image file as a loopback file system
# and execute its update_manifest.json file. Each software operation in
# the manifest file will be fanned out to its correct target (PackMgr,
# ML, PartMgr)
#
# Once the update has been processed by SLM, an update operation
# report will be sent back to SC and HMI.
#
swm.dbus_method('org.genivi.SoftwareLoadingManager', 'updateAvailable',
update_id, description, signature, request_confirmation)
active = True
    # Active will be set to false by updateReport()
while active:
gtk.main_iteration()
except Exception as e:
print "Exception: {}".format(e)
traceback.print_exc()
| magnusfeuer/genivi_software_management | sota_client/sota_client.py | Python | mpl-2.0 | 5,746 | [
"Jaguar"
] | f8c3b7f7cf111f56ee56e94fe55ade7dbf4b25e454b409fde94e0b166604b150 |
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see starthinker/scripts for possible source):
# - Command: "python starthinker_ui/manage.py airflow"
#
###########################################################################
'''
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer ( recommended ):
From Release: pip install starthinker
From Open Source: pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory ( if pushing local code changes ):
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
If any recipe task has "auth" set to "user" add user credentials:
1. Ensure an RECIPE['setup']['auth']['user'] = [User Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_user", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/deploy_commandline.md#optional-setup-user-credentials
--------------------------------------------------------------
If any recipe task has "auth" set to "service" add service credentials:
1. Ensure an RECIPE['setup']['auth']['service'] = [Service Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_service", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md
--------------------------------------------------------------
CM360 Report Emailed To BigQuery
Pulls a CM Report from a gMail powered email account into BigQuery.
- The person executing this recipe must be the recipient of the email.
- Schedule a CM report to be sent to .
- Or set up a redirect rule to forward a report you already receive.
- The report must be sent as an attachment.
- Ensure this recipe runs after the report is email daily.
- Give a regular expression to match the email subject.
- Configure the destination in BigQuery to write the data.
--------------------------------------------------------------
This StarThinker DAG can be extended with any additional tasks from the following sources:
- https://google.github.io/starthinker/
- https://github.com/google/starthinker/tree/master/dags
'''
from starthinker.airflow.factory import DAG_Factory
INPUTS = {
'auth_read':'user', # Credentials used for reading data.
'email':'', # Email address report was sent to.
'subject':'.*', # Regular expression to match subject. Double escape backslashes.
'dataset':'', # Existing dataset in BigQuery.
'table':'', # Name of table to be written to.
'is_incremental_load':False, # Append report data to table based on date column, de-duplicates.
}
RECIPE = {
'tasks':[
{
'email':{
'auth':{'field':{'name':'auth_read','kind':'authentication','order':1,'default':'user','description':'Credentials used for reading data.'}},
'read':{
'from':'noreply-cm@google.com',
'to':{'field':{'name':'email','kind':'string','order':1,'default':'','description':'Email address report was sent to.'}},
'subject':{'field':{'name':'subject','kind':'string','order':2,'default':'.*','description':'Regular expression to match subject. Double escape backslashes.'}},
'attachment':'.*'
},
'write':{
'bigquery':{
'dataset':{'field':{'name':'dataset','kind':'string','order':3,'default':'','description':'Existing dataset in BigQuery.'}},
'table':{'field':{'name':'table','kind':'string','order':4,'default':'','description':'Name of table to be written to.'}},
'header':True,
'is_incremental_load':{'field':{'name':'is_incremental_load','kind':'boolean','order':6,'default':False,'description':'Append report data to table based on date column, de-duplicates.'}}
}
}
}
}
]
}
dag_maker = DAG_Factory('email_cm_to_bigquery', RECIPE, INPUTS)
dag = dag_maker.generate()
if __name__ == "__main__":
dag_maker.print_commandline()
| google/starthinker | dags/email_cm_to_bigquery_dag.py | Python | apache-2.0 | 5,356 | [
"VisIt"
] | a6956c17113ac2a294c881a0f2ca1785dbfcec284aa0d2695448546dfb210471 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
"""Generate list of functionals in documentation."""
from cStringIO import StringIO
import json
import os
from horton.meanfield.cext import ULibXCWrapper
from common import write_if_changed
def main():
"""Main program."""
# Load dependency information -> libxc version
with open('../dependencies.json') as f:
dependencies = json.load(f)
# Order does not matter here. Just make it easy to look things up
dependencies = dict((d['name'], d) for d in dependencies)
libxc_version = dependencies['libxc']['version_ci']
# find the qaworkdir
qaworkdir = os.getenv('QAWORKDIR')
if qaworkdir is None:
qaworkdir = '../qaworkdir'
# find all the functional keys by processing funcs_key.c
keys = []
with open('%s/cached/libxc-%s/funcs_key.c' % (qaworkdir, libxc_version)) as f:
for line in f:
if line.startswith('{'):
words = line.strip()[1:-3].split(',')
key = words[0][1:-1]
if len(key) > 0:
keys.append(key)
# sort the functions
splitkeys = []
for key in keys:
words = key.split('_')
if words[0] == 'hyb':
prefix = '_'.join(words[:2])
mid = words[2]
suffix = '_'.join(words[3:])
else:
prefix = words[0]
mid = words[1]
suffix = '_'.join(words[2:])
splitkeys.append((prefix, mid, suffix))
splitkeys.sort(cmp=cmp_splitkey)
keys = []
for splitkey in splitkeys:
splitkey = [part for part in splitkey if len(part) > 0]
keys.append('_'.join(splitkey))
# make a rst table of all functionals
s = StringIO()
print >> s, '.. _ref_functionals:'
print >> s
print >> s, 'LibXC Functionals'
print >> s, '#################'
print >> s
print >> s, 'The following functionals are available in HORTON through `LibXC'
print >> s, '<http://www.tddft.org/programs/octopus/wiki/index.php/Libxc>`_ %s.' % \
libxc_version
print >> s, '[marques2012]_'
print >> s
for key in keys:
try:
w = ULibXCWrapper(key)
print >> s, '**{}**: {}'.format(key, w.name)
print >> s
for ref, doi, _biblio in w.refs:
print >> s, ' | {}'.format(ref),
if len(doi) > 0:
print >> s, ' https://doi.org/{}'.format(doi)
else:
print >> s
print >> s
except ValueError:
# A bug in libxc ...
print 'FAILED to load functional', key
write_if_changed('tech_ref_functionals.rst', s.getvalue())
def cmp_prefix(prefix1, prefix2):
"""Compare order of two LibXC functional prefixes."""
l = ['lda', 'gga', 'hyb_gga', 'mgga', 'hyb_mgga']
pos1 = l.index(prefix1)
pos2 = l.index(prefix2)
return cmp(pos1, pos2)
def cmp_middle(middle1, middle2):
"""Compare the middle part of a LibXC functional key."""
l = ['k', 'x', 'c', 'xc']
pos1 = l.index(middle1)
pos2 = l.index(middle2)
return cmp(pos1, pos2)
def cmp_splitkey(sk1, sk2):
"""Compare LibXC functional keys (splitted)."""
result = cmp_prefix(sk1[0], sk2[0])
if result == 0:
result = cmp_middle(sk1[1], sk2[1])
if result == 0:
result = cmp(sk1[2], sk2[2])
return result
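# Illustrative note (the values are assumptions, chosen only to show the order):
#     cmp_splitkey(('lda', 'x', ''), ('gga', 'c', 'pbe')) < 0
# i.e. all LDA keys sort before GGA keys, and within a prefix the kinetic (k),
# exchange (x), correlation (c) and combined (xc) functionals come in that order.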
if __name__ == '__main__':
main()
| tovrstra/horton | doc/update_functionals.py | Python | gpl-3.0 | 4,252 | [
"Octopus"
] | 78deaecf49635893e915dae8147b64f76e785265945c450dbc3af0f3e3078718 |
import os
import pymc
import pymbar
import dipoles
import numpy as np
import pandas as pd
import simtk.openmm.app as app
import simtk.openmm as mm
import simtk.unit as u
import mdtraj as md
traj = md.load("./monopole.pdb")
out_dir = os.path.join(os.getenv("HOME"), "dat", "monopoles-symmetric")
q0 = pymc.Uniform("q0", 0.0, 1.0, value=0.0, observed=True)
sigma0 = pymc.Uniform("sigma0", 0.1, 0.6)
#sigma1 = pymc.Uniform("sigma1", 0.1, 0.6)
sigma1 = 1.0 * sigma0
epsilon0 = pymc.Uniform("epsilon0", 0.0, 2.0)
#epsilon1 = pymc.Uniform("epsilon1", 0.0, 2.0)
epsilon1 = 1.0 * epsilon0
model = pymc.Model([q0, sigma0, epsilon0, sigma1, epsilon1])
#temperatures = [280 * u.kelvin, 290 * u.kelvin, 300 * u.kelvin, 310 * u.kelvin, 320 * u.kelvin]
temperatures = [280 * u.kelvin, 300 * u.kelvin, 320 * u.kelvin]
pressure = 1.0 * u.atmospheres
model.draw_from_prior()
for temperature in temperatures:
monopole = dipoles.Monopole(1000, q0=q0.value, sigma0=sigma0.value, epsilon0=epsilon0.value, sigma1=sigma1.value, epsilon1=epsilon1.value)
traj = monopole.build_box()
print(monopole)
try:
values, mu, sigma = dipoles.simulate_density(monopole, temperature, pressure, out_dir)
except Exception as e:
print(e)
| kyleabeauchamp/DBayes | dbayes/simulate/simulate_monopoles.py | Python | gpl-2.0 | 1,243 | [
"MDTraj",
"OpenMM"
] | a019618e0aa94d718e89cc30c1aa8ed48bba8ba8c1b5e85ebbae866db3323968 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
print (sys.version)
import time
import numpy as np
my_dir = os.path.abspath(os.path.dirname(__file__))
# --------------------------------------------------------- #
# functions #
# --------------------------------------------------------- #
my_library = os.path.expanduser('~/.pylib')
sys.path.append(my_library)
# mpl_moving_average
# mpl_forcequench
# mpl_worm
from plot.SETTINGS import *
# --------------------------------------------------------- #
# Start matplotlib (1/4) #
# --------------------------------------------------------- #
import matplotlib
# default - Qt5Agg
# print matplotlib.rcsetup.all_backends
# matplotlib.use('GTKAgg')
# matplotlib.use('TkAgg')
print 'backend:',matplotlib.get_backend()
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
fig = plt.figure(0)
gs = GridSpec(1,1)
ax1 = plt.subplot(gs[0,:])
# ax2 = plt.subplot(gs[1,:-1])
ax = [ax1]
# --------------------------------------------------------- #
# Import Data! (2/4) #
# --------------------------------------------------------- #
result_type = 'emol' # sop | sopnucleo | gsop | namd
plot_type = 'contacts' # fe | tension | rmsd | rdf
# --------------------------------------------------------- #
# mpl_myargs_begin #
# --------------------------------------------------------- #
import argparse
def parse_arguments():
''' Parse script's arguments.
'''
parser = argparse.ArgumentParser()
parser.add_argument("-o","--option",help="select None,publish,show")
parser.add_argument("-d","--dataname",help="data name: run280, 76n")
args = vars(parser.parse_args())
return args
args = parse_arguments()
option = args['option']
data_name = args['dataname']
# --------------------------------------------------------- #
# Import Data! (3/4) #
# --------------------------------------------------------- #
data = np.loadtxt('emol_contacts_EX1.dat')
print data.shape
mtdata = np.reshape(data,(data.shape[0],156,data.shape[1]/156))
print mtdata.shape
# for f in mtdata.shape[0]:
# Slice:
# alphas = mtdata[::,::,0]
# betas = mtdata[::,::,1]
# for a in range(alphas.shape[1]):
# adata = alphas[::,a]
# plt.plot(adata)
# save_fig(my_dir,0,'fig','%s_%s_%s_alpha' % (result_type,plot_type,data_name),option)
# plt.clf()
# for a in range(betas.shape[1]):
# adata = betas[::,a]
# plt.plot(adata)
# save_fig(my_dir,0,'fig','%s_%s_%s_beta' % (result_type,plot_type,data_name),option)
# plt.clf()
# internals = mtdata[::,::,2]
# for a in range(internals.shape[1]):
# adata = internals[::,a]
# plt.plot(adata)
# save_fig(my_dir,0,'fig','%s_%s_%s_internals' % (result_type,plot_type,data_name),option)
# plt.clf()
def plot_contacts(tup):
mdata = mtdata[::,::,tup[1]]
for a in range(mdata.shape[1]):
plt.plot(mdata[::,a])
save_fig(my_dir,0,'fig','%s_%s_%s_%s' % (result_type,plot_type,data_name,tup[0]),option)
plt.clf()
lst = [("alpha",0),("beta",1),("intra",2),("a-east",3),
("a-west",4),("a-south",5),("b-east",6),("b-west",7),("b-north",8)]
for l in lst:
plot_contacts(l)
# be = mtdata[::,::,1]
# for a in range(alphas.shape[1]):
# adata = mtdata[::,a]
# plt.plot(adata)
# save_fig(my_dir,0,'fig','%s_%s_%s_alpha' % (result_type,plot_type,data_name),option)
# plt.clf()
# --------------------------------------------------------- #
# Make final adjustments: (4/4) #
# mpl - available expansions #
# --------------------------------------------------------- #
# mpl_rc
# mpl_font
# mpl_label
# mpl_xy
# mpl_ticks
# mpl_tick
# mpl_minorticks
# mpl_legend
# combined_name = '%s_%s_%s' % (result_type, plot_type, data_name)
# save_fig
# mpl_myargs_end
| dmerz75/emolsion | test/plot_emol_contacts.py | Python | mit | 4,042 | [
"NAMD"
] | e3208ef190798a66985c52ce6f68fe5bceb2463e6f27872a21bb122397fd4ed3 |
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
)
class GediDigitalIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://video\.
(?:
(?:
(?:espresso\.)?repubblica
|lastampa
|ilsecoloxix
)|
(?:
iltirreno
|messaggeroveneto
|ilpiccolo
|gazzettadimantova
|mattinopadova
|laprovinciapavese
|tribunatreviso
|nuovavenezia
|gazzettadimodena
|lanuovaferrara
|corrierealpi
|lasentinella
)\.gelocal
)\.it(?:/[^/]+){2,3}?/(?P<id>\d+)(?:[/?&#]|$)'''
_TESTS = [{
'url': 'https://video.lastampa.it/politica/il-paradosso-delle-regionali-la-lega-vince-ma-sembra-aver-perso/121559/121683',
'md5': '84658d7fb9e55a6e57ecc77b73137494',
'info_dict': {
'id': '121559',
'ext': 'mp4',
'title': 'Il paradosso delle Regionali: ecco perché la Lega vince ma sembra aver perso',
'description': 'md5:de7f4d6eaaaf36c153b599b10f8ce7ca',
'thumbnail': r're:^https://www\.repstatic\.it/video/photo/.+?-thumb-full-.+?\.jpg$',
'duration': 125,
},
}, {
'url': 'https://video.espresso.repubblica.it/embed/tutti-i-video/01-ted-villa/14772/14870&width=640&height=360',
'only_matching': True,
}, {
'url': 'https://video.repubblica.it/motori/record-della-pista-a-spa-francorchamps-la-pagani-huayra-roadster-bc-stupisce/367415/367963',
'only_matching': True,
}, {
'url': 'https://video.ilsecoloxix.it/sport/cassani-e-i-brividi-azzurri-ai-mondiali-di-imola-qui-mi-sono-innamorato-del-ciclismo-da-ragazzino-incredibile-tornarci-da-ct/66184/66267',
'only_matching': True,
}, {
'url': 'https://video.iltirreno.gelocal.it/sport/dentro-la-notizia-ferrari-cosa-succede-a-maranello/141059/142723',
'only_matching': True,
}, {
'url': 'https://video.messaggeroveneto.gelocal.it/locale/maria-giovanna-elmi-covid-vaccino/138155/139268',
'only_matching': True,
}, {
'url': 'https://video.ilpiccolo.gelocal.it/dossier/big-john/dinosauro-big-john-al-via-le-visite-guidate-a-trieste/135226/135751',
'only_matching': True,
}, {
'url': 'https://video.gazzettadimantova.gelocal.it/locale/dal-ponte-visconteo-di-valeggio-l-and-8217sos-dei-ristoratori-aprire-anche-a-cena/137310/137818',
'only_matching': True,
}, {
'url': 'https://video.mattinopadova.gelocal.it/dossier/coronavirus-in-veneto/covid-a-vo-un-anno-dopo-un-cuore-tricolore-per-non-dimenticare/138402/138964',
'only_matching': True,
}, {
'url': 'https://video.laprovinciapavese.gelocal.it/locale/mede-zona-rossa-via-alle-vaccinazioni-per-gli-over-80/137545/138120',
'only_matching': True,
}, {
'url': 'https://video.tribunatreviso.gelocal.it/dossier/coronavirus-in-veneto/ecco-le-prima-vaccinazioni-di-massa-nella-marca/134485/135024',
'only_matching': True,
}, {
'url': 'https://video.nuovavenezia.gelocal.it/locale/camion-troppo-alto-per-il-ponte-ferroviario-perde-il-carico/135734/136266',
'only_matching': True,
}, {
'url': 'https://video.gazzettadimodena.gelocal.it/locale/modena-scoperta-la-proteina-che-predice-il-livello-di-gravita-del-covid/139109/139796',
'only_matching': True,
}, {
'url': 'https://video.lanuovaferrara.gelocal.it/locale/due-bombole-di-gpl-aperte-e-abbandonate-i-vigili-bruciano-il-gas/134391/134957',
'only_matching': True,
}, {
'url': 'https://video.corrierealpi.gelocal.it/dossier/cortina-2021-i-mondiali-di-sci-alpino/mondiali-di-sci-il-timelapse-sulla-splendida-olympia/133760/134331',
'only_matching': True,
}, {
'url': 'https://video.lasentinella.gelocal.it/locale/vestigne-centra-un-auto-e-si-ribalta/138931/139466',
'only_matching': True,
}, {
'url': 'https://video.espresso.repubblica.it/tutti-i-video/01-ted-villa/14772',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_meta(
['twitter:title', 'og:title'], webpage, fatal=True)
player_data = re.findall(
r"PlayerFactory\.setParam\('(?P<type>format|param)',\s*'(?P<name>[^']+)',\s*'(?P<val>[^']+)'\);",
webpage)
formats = []
duration = thumb = None
for t, n, v in player_data:
if t == 'format':
if n in ('video-hds-vod-ec', 'video-hls-vod-ec', 'video-viralize', 'video-youtube-pfp'):
continue
elif n.endswith('-vod-ak'):
formats.extend(self._extract_akamai_formats(
v, video_id, {'http': 'media.gedidigital.it'}))
else:
ext = determine_ext(v)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
v, video_id, 'mp4', 'm3u8_native', m3u8_id=n, fatal=False))
continue
f = {
'format_id': n,
'url': v,
}
if ext == 'mp3':
abr = int_or_none(self._search_regex(
r'-mp3-audio-(\d+)', v, 'abr', default=None))
f.update({
'abr': abr,
'tbr': abr,
'vcodec': 'none'
})
else:
mobj = re.match(r'^video-rrtv-(\d+)(?:-(\d+))?$', n)
if mobj:
f.update({
'height': int(mobj.group(1)),
'vbr': int_or_none(mobj.group(2)),
})
if not f.get('vbr'):
f['vbr'] = int_or_none(self._search_regex(
r'-video-rrtv-(\d+)', v, 'abr', default=None))
formats.append(f)
elif t == 'param':
if n in ['image_full', 'image']:
thumb = v
elif n == 'videoDuration':
duration = int_or_none(v)
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': self._html_search_meta(
['twitter:description', 'og:description', 'description'], webpage),
'thumbnail': thumb or self._og_search_thumbnail(webpage),
'formats': formats,
'duration': duration,
}
| rg3/youtube-dl | youtube_dl/extractor/gedidigital.py | Python | unlicense | 7,100 | [
"ESPResSo"
] | 650445a194af9b6294b89e14c1a6588dfe6dca50e24dde84fea4baeae236192d |
'''
Contains a number of functions for generating virtual spectra from a protein
file, makes use of biopython for protein file parsing.
'''
import re
import itertools
import copy
import numpy as np
def coarsen(inArray, tol=0.5):
'''Bins ion mass predictions to integers within the closest 0.5.'''
return (np.around(inArray/tol))*tol*10
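# Worked example (illustrative value, an assumption): coarsen(np.array([123.26]))
# bins the mass to the nearest 0.5 (123.5) and scales by 10, giving array([1235.]).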
def returnIons(peptideValues, adjustment=0):
'''Returns numpy array of ion masses from AA mass array.'''
return (np.cumsum(peptideValues))+adjustment
def returnPostTranslationMods(peptideDict, mods):
'''Takes peptide and finds all modifications that can be applied to it.'''
output = {}
for i in mods:
index = peptideDict["peptide"].find(i[0])
while index >= 0:
index += len(i[0])
output[index] = i[1]
index = peptideDict["peptide"].find(i[0], index)
return output
def genModdedPeptide(peptideDict, conf, mods):
'''Modifies peptide entry to incorporate modifications.'''
output = copy.deepcopy(peptideDict)
adjust = []
for i in xrange(len(output["orderedMasses"])):
if i in mods:
adjust.append(mods[i])
else:
adjust.append(0)
output["orderedMasses"] += np.array(adjust)
pepName = list(output["peptide"])
for mod in mods:
pepName.insert(mod, str("[%i]" % (mods[mod])))
output["peptide"] = "".join(pepName)
output["mass"] = np.sum(output["orderedMasses"]) + conf["other_constants"]["Mass+"]
return output
def refine(peptideDict, conf):
'''Generates modified peptide entries for a given peptide,
for second pass search.'''
modifications = conf["variable_ptms"].items()
validM = returnPostTranslationMods(peptideDict, modifications)
if len(validM) > 0:
combos = []
for length in xrange(conf["search_options"]["ptm_number"]):
combos.append([{j: validM[j] for j in i}
for i in itertools.combinations(validM, length)])
for subset in combos:
for combo in subset:
yield genModdedPeptide(peptideDict, conf, combo)
else:
yield peptideDict
def returnPeptideDict(peptide, proteins, conf):
'''Returns dictionary of peptide characteristics'''
output = {"peptide":peptide, "proteins":proteins}
output["orderedMasses"] = np.array([conf["AAMassRef"][acid]
for acid in cleanPeptide(peptide)])
output["mass"] = np.sum(output["orderedMasses"]) + conf["other_constants"]["Mass+"]
return output
def iSilSpectra(protein, regEx, conf):
'''Splits a protein entry into a series of peptide strings for processing.'''
output = []
mProtein = str("n%sc" %(protein.seq))
peptides = regEx.sub('\n', str(mProtein)).split()
for x in xrange(conf["search_options"]["maximum_missed_cleavages"]+1):
mid = ["".join(peptides[i:i+(x+1)]) for i in xrange(len(peptides)-x)]
for i in mid:
if len(i) >= conf["search_options"]["min_peptide_length"]:
output.append((i, protein.name))
return output
def cleanPeptide(peptide):
'''Removes sentinel values from a peptide string before mass analysis.'''
if peptide[0] == "n":
peptide = peptide[1:]
if peptide[len(peptide)-1] == "c":
peptide = peptide[:(len(peptide)-1)]
return peptide
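# Worked example (illustrative value, an assumption): cleanPeptide('nMKVc') -> 'MKV';
# the lowercase 'n'/'c' sentinels added around digested peptides are stripped
# before mass analysis, leaving the amino acid sequence untouched.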
def proteinPreprocessing(proteins, conf):
'''Removes proteins with unsupported characters, also generates decoy proteins.'''
output = []
useDecoy = bool(conf["search_options"]["include_decoy"])
append = output.append
for protein in proteins:
if "X" in protein.seq:
continue
elif "B" in protein.seq or "Z" in protein.seq:
continue
else:
append(protein)
if useDecoy:
decoy = copy.deepcopy(protein)
decoy.seq = protein.seq[::-1]
decoy.name = str("DECOY_%s" % (protein.name))
append(decoy)
return output
def peptideDatabaseParse():
try:
from Bio import SeqIO
return SeqIO.parse
except ImportError:
import fastaParse
return fastaParse.fastaParser
def returnPeptides(conf):
'''
Generates dictionary of unique peptide entries from a given reference sequence
dataset, returns dictionary of mass-sorted peptides, with each key holding all
peptides with the same dalton mass. Also implements protein processing from confs.
'''
protFile = conf["data"]["reference_sequences"]
proteinFile = open(protFile, "rb")
protparser = peptideDatabaseParse()
proteins = protparser(proteinFile, conf["data"]["sequence_format"])
peptideDB = {}
print "[+]Digesting proteins in Silico."
matchingRegex = re.compile(r"(?<=[KR])(?=[^P])")
for iProtein in proteinPreprocessing(proteins, conf):
peptides = iSilSpectra(iProtein, matchingRegex, conf)
for i in peptides:
try:
peptideDB[i[0]].append(i[1])
except KeyError:
peptideDB[i[0]] = [i[1]]
print "[+]Generating peptide spectra."
peptideList = [returnPeptideDict(key, peptideDB[key], conf) for key in peptideDB]
print "[+]Sorting peptides."
outHash = {}
for i in sorted(peptideList, key=lambda entry: entry["mass"]):
if int(i["mass"]) in outHash:
outHash[int(i["mass"])].append(i)
else:
outHash[int(i["mass"])] = [i]
return outHash
| DeclanCrew/LC_MS_MS_Search | PeptideDB.py | Python | gpl-3.0 | 5,522 | [
"Biopython",
"Dalton"
] | 0a41b4c2eaee25813372fe437e9b543de71a2847ba5f10a12d163c60f957260f |
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Convert SVN based DEPS into .DEPS.git for use with NewGit."""
import optparse
import os
import sys
import deps_utils
import git_tools
def SplitScmUrl(url):
"""Given a repository, return a set containing the URL and the revision."""
url_split = url.split('@')
scm_url = url_split[0]
scm_rev = 'HEAD'
if len(url_split) == 2:
scm_rev = url_split[1]
return (scm_url, scm_rev)
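# Worked example (illustrative URL, an assumption):
#   SplitScmUrl('http://src.chromium.org/svn/trunk/src@12345')
#   returns ('http://src.chromium.org/svn/trunk/src', '12345'),
# and a URL without an @revision comes back with the default 'HEAD'.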
def SvnRevToGitHash(svn_rev, git_url, repos_path, workspace, dep_path,
git_host):
"""Convert a SVN revision to a Git commit id."""
git_repo = None
if git_url.startswith(git_host):
git_repo = git_url.replace(git_host, '')
else:
raise Exception('Unknown git server')
if repos_path is None and workspace is None:
# We're running without a repository directory (i.e. no -r option).
# We cannot actually find the commit id, but this mode is useful
# just for testing the URL mappings. Produce an output file that
# can't actually be used, but can be eyeballed for correct URLs.
return 'xxx-r%s' % svn_rev
if repos_path:
git_repo_path = os.path.join(repos_path, git_repo)
mirror = True
else:
git_repo_path = os.path.join(workspace, dep_path)
mirror = False
if not os.path.exists(git_repo_path):
git_tools.Clone(git_url, git_repo_path, mirror)
else:
git_tools.Fetch(git_repo_path, mirror)
return git_tools.Search(git_repo_path, svn_rev, mirror)
def ConvertDepsToGit(deps, repos, workspace, deps_type, deps_vars,
svn_deps_vars, verify):
"""Convert a 'deps' section in a DEPS file from SVN to Git."""
new_deps = {}
bad_git_urls = set([])
try:
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
svn_to_git = __import__('svn_to_git_%s' % deps_type)
except ImportError:
raise Exception('invalid DEPS type')
# Pull in any DEPS overrides from svn_to_git.
deps_overrides = {}
if hasattr(svn_to_git, 'DEPS_OVERRIDES'):
deps_overrides.update(svn_to_git.DEPS_OVERRIDES)
for dep in deps:
if not deps[dep]: # dep is 'None' and emitted to exclude the dep
new_deps[dep] = None
continue
# Get the URL and the revision/hash for this dependency.
dep_url, dep_rev = SplitScmUrl(deps[dep])
path = dep
git_url = dep_url
if not dep_url.endswith('.git'):
# Convert this SVN URL to a Git URL.
path, git_url = svn_to_git.SvnUrlToGitUrl(dep, dep_url)
if not path or not git_url:
# We skip this path, this must not be required with Git.
continue
if verify:
print >>sys.stderr, 'checking ' + git_url + '...',
if git_tools.Ping(git_url):
print >>sys.stderr, ' success'
else:
print >>sys.stderr, ' failure'
bad_git_urls.update([git_url])
# Get the Git hash based off the SVN rev.
git_hash = ''
if dep_rev != 'HEAD':
if dep in deps_overrides:
# Transfer any required variables over from SVN DEPS.
if not deps_overrides[dep] in svn_deps_vars:
raise Exception('Missing DEPS variable: %s' % deps_overrides[dep])
deps_vars[deps_overrides[dep]] = (
'@' + svn_deps_vars[deps_overrides[dep]].lstrip('@'))
# Tag this variable as needing a transform by Varify() later.
git_hash = '%s_%s' % (deps_utils.VARIFY_MARKER_TAG_PREFIX,
deps_overrides[dep])
else:
# Pass-through the hash for Git repositories. Resolve the hash for
# subversion repositories.
if dep_url.endswith('.git'):
git_hash = '@%s' % dep_rev
else:
git_hash = '@%s' % SvnRevToGitHash(dep_rev, git_url, repos, workspace,
path, svn_to_git.GIT_HOST)
# If this is webkit, we need to add the var for the hash.
if dep == 'src/third_party/WebKit/Source':
deps_vars['webkit_rev'] = git_hash
git_hash = 'VAR_WEBKIT_REV'
# Add this Git dep to the new deps.
new_deps[path] = '%s%s' % (git_url, git_hash)
return new_deps, bad_git_urls
def main():
parser = optparse.OptionParser()
parser.add_option('-d', '--deps', default='DEPS',
help='path to the DEPS file to convert')
parser.add_option('-o', '--out',
help='path to the converted DEPS file (default: stdout)')
parser.add_option('-t', '--type', default='public',
help='type of DEPS file (public, etc)')
parser.add_option('-r', '--repos',
help='path to the directory holding all the Git repos')
parser.add_option('-w', '--workspace', metavar='PATH',
help='top level of a git-based gclient checkout')
parser.add_option('--verify', action='store_true',
help='ping each Git repo to make sure it exists')
options = parser.parse_args()[0]
# Get the content of the DEPS file.
deps_content = deps_utils.GetDepsContent(options.deps)
(deps, deps_os, include_rules, skip_child_includes, hooks,
svn_deps_vars) = deps_content
# Create a var containing the Git and Webkit URL, this will make it easy for
# people to use a mirror instead.
git_url = 'http://git.chromium.org'
deps_vars = {
'git_url': git_url,
'webkit_url': git_url + '/external/WebKit_trimmed.git'
}
# Convert the DEPS file to Git.
deps, baddeps = ConvertDepsToGit(deps, options.repos, options.workspace,
options.type, deps_vars, svn_deps_vars,
options.verify)
for os_dep in deps_os:
deps_os[os_dep], os_bad_deps = ConvertDepsToGit(deps_os[os_dep],
options.repos, options.workspace,
options.type, deps_vars, svn_deps_vars,
options.verify)
baddeps = baddeps.union(os_bad_deps)
if baddeps:
print >>sys.stderr, ('\nUnable to resolve the following repositories. '
'Please make sure\nthat any svn URLs have a git mirror associated with '
'them.\nTo see the exact error, run `git ls-remote [repository]` where'
'\n[repository] is the URL ending in .git (strip off the @revision\n'
'number.) For more information, visit http://code.google.com\n'
'/p/chromium/wiki/UsingNewGit#Adding_new_repositories_to_DEPS.\n')
for dep in baddeps:
print >>sys.stderr, ' ' + dep
return 2
else:
if options.verify:
print >>sys.stderr, ('\nAll referenced repositories were successfully '
'resolved.')
return 0
# Write the DEPS file to disk.
deps_utils.WriteDeps(options.out, deps_vars, deps, deps_os, include_rules,
skip_child_includes, hooks)
return 0
if '__main__' == __name__:
sys.exit(main())
| leighpauls/k2cro4 | tools/deps2git/deps2git.py | Python | bsd-3-clause | 7,006 | [
"VisIt"
] | 2281c5a03d3ac86b988b616d64afc842b4838b52ee3ddd5b0fe13938f64fd825 |
# -*- coding: utf-8 -*-
import abc
import datetime
import math
import random
import pygame.display
import pygame.image
import pygame.mask
import pygame.sprite
import engine
class BaseKiller(pygame.sprite.Sprite):
def touch_player(self, game):
game.status = game.LOSE
game.lives -= 1
class BaseFantom(BaseKiller):
def __init__(self, x, y, speed, image_to_left, image_to_right):
super(BaseFantom, self).__init__()
self._image_to_left = pygame.image.load(image_to_left).convert_alpha()
self._image_to_right = pygame.image.load(image_to_right).convert_alpha()
self.image = self._image_to_right
self.rect = self.image.get_rect().move(x, y)
self.mask = pygame.mask.from_surface(self.image)
self._speed = speed
self._screen = pygame.display.get_surface()
def update(self):
self.rect = self.rect.move(self._speed)
if self.rect.left < 0 or self.rect.right > self._screen.get_width():
self._speed[0] *= -1
if self.rect.top < 0 or self.rect.bottom > self._screen.get_height():
self._speed[1] *= -1
if self._speed[0] > 0:
self.image = self._image_to_right
else:
self.image = self._image_to_left
class SlowFantom(BaseFantom):
def __init__(self, x, y):
super(SlowFantom, self).__init__(
x, y,
[1, 1],
engine.image_path("slow-fantom-to-left.png"),
engine.image_path("slow-fantom-to-right.png"))
class FastFantom(BaseFantom):
def __init__(self, x, y):
super(FastFantom, self).__init__(
x, y,
[2, 2],
engine.image_path("fast-fantom-to-left.png"),
engine.image_path("fast-fantom-to-right.png"))
class Octopus(pygame.sprite.Sprite):
def __init__(self, x, y, level):
super(Octopus, self).__init__()
self.image = pygame.image.load(engine.image_path("octopus.png")).convert_alpha()
self.rect = self.image.get_rect().move(x, y)
self.mask = pygame.mask.from_surface(self.image)
self.level = level
self.target = random.choice((self.level.red_player, self.level.blue_player))
self._screen = pygame.display.get_surface()
def update(self):
MOVE = 1
if self.rect.x < self.target.rect.x and self.rect.right < self._screen.get_width():
self.rect.x += MOVE
elif self.rect.x > self.target.rect.x:
self.rect.x -= MOVE
if self.rect.y < self.target.rect.y and not self._is_on_floor():
self.rect.y += MOVE
elif self.rect.y > self.target.rect.y:
self.rect.y -= MOVE
def _is_on_floor(self):
return self.rect.bottom >= self._screen.get_height()
def touch_player(self, game):
if pygame.sprite.collide_mask(self, self.target):
self.create_ink()
self.kill()
def create_ink(self):
ink = Ink(self.rect.x, self.rect.y)
self.level.ink_sprites.add(ink)
self.level.all_sprites.add(ink)
class Ink(pygame.sprite.Sprite):
IMAGE_QTY = 9
def __init__(self, center_x, center_y):
super(Ink, self).__init__()
self.image_step = 0
self.creation_datetime = datetime.datetime.now()
self.image = self._set_image()
x = center_x - self.image.get_width() / 2
y = center_y - self.image.get_height() / 2
self.rect = self.image.get_rect().move(x, y)
self.mask = pygame.mask.from_surface(self.image)
self._screen = pygame.display.get_surface()
def update(self):
_seconds = 10 + 3 * self.image_step
delay = datetime.timedelta(seconds=_seconds)
if datetime.datetime.now() - self.creation_datetime > delay:
if self.image_step < self.IMAGE_QTY - 1:
self.image_step += 1
self.image = self._set_image()
else:
self.kill()
def _image_filename(self):
return "ink-%d.png" % self.image_step
def _set_image(self):
return pygame.image.load(
engine.image_path(self._image_filename())).convert_alpha()
class BaseBird(BaseKiller):
X_OFFSET = 50
X_SPEED = 3
def __init__(self, y, to_right=True):
super(BaseBird, self).__init__()
self._screen = pygame.display.get_surface()
self._to_right = to_right
self._wings_up = True
self.image = self._set_image()
x_start = -self.X_OFFSET if to_right else self._screen.get_width() + self.X_OFFSET
self.rect = self.image.get_rect().move(x_start, y)
self.mask = pygame.mask.from_surface(self.image)
self._x_speed = self.X_SPEED if to_right else -self.X_SPEED
self._last_flap = datetime.datetime.now()
@abc.abstractmethod
def update(self):
y = 5 * math.cos(self.rect.x * 2)
speed = [self._x_speed, y]
self.rect = self.rect.move(speed)
self.flap()
def flap(self):
delay = datetime.timedelta(milliseconds=300)
if datetime.datetime.now() - self._last_flap > delay:
self._wings_up = not self._wings_up
self._last_flap = datetime.datetime.now()
self.image = self._set_image()
def _set_image(self):
image = "bird-0.png" if self._wings_up else "bird-1.png"
surface = pygame.image.load(
engine.image_path(image)).convert_alpha()
return surface if self._to_right else pygame.transform.flip(surface, True, False)
class LeftBird(BaseBird):
def __init__(self, y):
super(LeftBird, self).__init__(y, True)
def update(self):
super(LeftBird, self).update()
if self.rect.left > self._screen.get_width():
self.kill()
class RightBird(BaseBird):
def __init__(self, y):
super(RightBird, self).__init__(y, False)
def update(self):
super(RightBird, self).update()
if self.rect.right < 0:
self.kill()
| sblondon/jumpjump | src/ennemies.py | Python | gpl-3.0 | 6,070 | [
"Octopus"
] | fb33bee24326dfa7bea690cfa0a9993b9fbad2d248700bcffb4e673662ff41c8 |
#!/usr/bin/env python
# Common constants and methods for the Tn-seq pipeline
#
# Copyright (c) 2014 University of Washington
#------------------------------------------------------------------------------
# imports
#------------------------------------------------------------------------------
import os.path
import subprocess
import re
#------------------------------------------------------------------------------
# constants
#------------------------------------------------------------------------------
IDX_RATIO = .5
BWA = "/usr/bin/bwa"
BWA_SEED_DIFF = 2
BWA_PCT_MISSING = .035
BOWTIE = "/usr/bin/bowtie"
BOWTIE_BUILD = "/usr/bin/bowtie-build"
ALN_EXTENSION = ".sai"
SAM_EXTENSION = ".sam"
SUM_EXTENSION = "_sum.txt"
HASH_EXTENSION = ".index.log"
CHASTE_SUFFIX = "_ch"
TNEND_SUFFIX = "_iPass"
TRIM_SUFFIX = "_trim"
MERGE_SUFFIX = "_mg"
NORM_SUFFIX = "_norm"
NORM_FACTOR = 10000000
WORKING_DIR = "work"
SCRIPTS_PATH = "."
ALL_SUFFIX = "_all"
Q0_SUFFIX = "_q0"
ANNO_SUFFIX = "_Annot"
TAB_SUFFIX = "_HitsToTab"
READSCOMP = "reads_cmp.txt"
OUTFILE_ANNO = "AnnotatedHits.txt"
OUTFILE_TAB = "HitsPerGene.txt"
#------------------------------------------------------------------------------
# methods
#------------------------------------------------------------------------------
# Add a suffix to a filename before the file extension
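# e.g. add_suffix("reads.fastq", TRIM_SUFFIX) -> "reads_trim.fastq"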
def add_suffix(filename, suffix):
parts = os.path.splitext(filename)
new_name = parts[0] + suffix + parts[1]
return new_name
# Check and run a command or exit with an error
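# e.g. run_cmd([BWA, "index", "reference.fasta"])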
def run_cmd(cmd):
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError as e:
badcmd = " ".join(e.cmd)
print "Error running command: " + badcmd
exit(1)
# Check and run a command or exit with an error, piping stdout to the specified file
def run_cmd_file_out(cmd, stdout_fh):
try:
subprocess.check_call(cmd, stdout=stdout_fh)
except subprocess.CalledProcessError as e:
badcmd = " ".join(e.cmd)
print "Error running command: " + badcmd
exit(1)
# Replicon names are in the fasta headers - read the fasta and return a dict mapping replicon number to name
def read_replicon_names(fasta):
replicons = dict()
repl_num = 0
with open(fasta, "r") as fh:
for line in fh:
if line.startswith(">"):
repl_long = line[1:-1]
repl = repl_long.split(" ", 1)[0]
replicons[repl_num] = repl
repl_num += 1
return replicons
# Read annotation data from one or more .ptt files and return a hash
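# Each .ptt data row is expected to be tab-delimited as:
#   location  strand  length  PID  gene  synonym  code  COG  product
# e.g. "190..255  +  21  1234567  thrL  b0001  -  -  thr operon leader peptide" (values illustrative)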
def read_annotations(annofile_list, replicon_list):
annotations = dict()
count = 0
for filenum, annofile in enumerate(annofile_list):
replicon = replicon_list[filenum]
annotations[replicon] = dict()
with open(annofile, "r") as fh:
for line in fh:
mobj = re.match("^(\d+)\.\.(\d+)", line)
if mobj:
startpos = mobj.group(1)
endpos = mobj.group(2)
(loc, strand, length, pid, gene, synonym, code, cog, product) = line.rstrip().split('\t')
                    annotations[replicon][pid] = dict()
annotations[replicon][pid]['locus_tag'] = synonym
annotations[replicon][pid]['startpos'] = int(startpos)
annotations[replicon][pid]['endpos'] = int(endpos)
annotations[replicon][pid]['strand'] = strand
annotations[replicon][pid]['length'] = length
annotations[replicon][pid]['info'] = '\t'.join([gene, code, cog, product])
count += 1
print "read " + str(count) + " annotations for " + str(len(annotations.keys())) + " replicon(s)"
return annotations
| elijweiss/Tn-seq | python/common.py | Python | mit | 3,799 | [
"BWA",
"Bowtie"
] | 2bcac3b9cedd7c238262dc79919ac0bd465f6b57cd809b9fc2946a2f0856b637 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2005, 2006 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
""" Person role wizards definition """
from kiwi.python import Settable
from kiwi.datatypes import ValidationError
from stoqlib.api import api
from stoqlib.domain.person import Person
from stoqlib.gui.base.wizards import (WizardEditorStep, BaseWizard,
BaseWizardStep)
from stoqlib.gui.base.dialogs import run_dialog
from stoqlib.gui.editors.personeditor import BranchEditor, UserEditor
from stoqlib.gui.templates.persontemplate import BasePersonRoleEditor
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
#
# Wizard Steps
#
class RoleEditorStep(BaseWizardStep):
gladefile = 'HolderTemplate'
def __init__(self, wizard, store, previous, role_type, person=None,
document=None):
BaseWizardStep.__init__(self, store, wizard, previous=previous)
self.role_editor = self.wizard.role_editor(self.store,
person=person,
role_type=role_type,
parent=self.wizard,
document=document)
self.wizard.set_editor(self.role_editor)
self.person_slave = self.role_editor.get_person_slave()
self.person_slave.get_toplevel().reparent(self.place_holder)
def post_init(self):
refresh_method = self.wizard.refresh_next
self.person_slave.register_validate_function(refresh_method)
self.person_slave.force_validation()
def previous_step(self):
# We don't want to create duplicate person objects when switching
# steps.
self.store.rollback(close=False)
return BaseWizardStep.previous_step(self)
def has_next_step(self):
return False
class PersonRoleTypeStep(WizardEditorStep):
gladefile = 'PersonRoleTypeStep'
model_type = Settable
def __init__(self, wizard, store):
WizardEditorStep.__init__(self, store, wizard)
self._setup_widgets()
def _setup_widgets(self):
self.document_l10n = api.get_l10n_field('person_document')
self.person_document.set_mask(self.document_l10n.entry_mask)
self.person_document.set_width_chars(17)
self.document_label.set_text(self.document_l10n.label)
# Just adding some labels
label = _('What kind of %s are you adding?')
role_editor = self.wizard.role_editor
if role_editor == BranchEditor or role_editor == UserEditor:
self.company_check.set_sensitive(False)
self.individual_check.set_sensitive(False)
if role_editor == UserEditor:
self.individual_check.set_active(True)
else:
label = _('Adding a %s')
self.company_check.set_active(True)
role_name = self.wizard.get_role_name().lower()
self.person_role_label.set_text(label % role_name)
self.person_role_label.set_size('large')
self.person_role_label.set_bold(True)
self.register_validate_function(self.wizard.refresh_next)
#
# WizardStep hooks
#
def create_model(self, store):
return Settable(document=u'')
def setup_proxies(self):
self.add_proxy(self.model, ['person_document'])
def next_step(self):
if self.individual_check.get_active():
role_type = Person.ROLE_INDIVIDUAL
else:
role_type = Person.ROLE_COMPANY
# If someone wants to register with an empty document
if self.person_document.is_empty():
return RoleEditorStep(self.wizard, self.store, self, role_type)
person = Person.get_by_document(self.store, self.model.person_document)
return RoleEditorStep(self.wizard, self.store, self, role_type, person,
document=self.model.person_document)
def has_previous_step(self):
return False
# Callbacks
def on_person_document__activate(self, entry):
self.wizard.go_to_next()
def on_person_document__validate(self, entry, value):
# FIXME: There is a bug in kiwi that this method gets called when
# setting the mask.
if not self.person_document.mask:
return
# This will allow the user to use an empty value to this field
if self.person_document.is_empty():
return
if not self.document_l10n.validate(value):
return ValidationError(_('%s is not valid.') %
(self.document_l10n.label,))
def on_individual_check__toggled(self, *args):
"""
        Change document labels based on the check button.
        Updates the document_label (proxy widget) with the right document type
        (CPF or CNPJ) expected in the person_document entry, and also changes
        the mask of person_document when necessary.
"""
if self.individual_check.get_active():
self.document_l10n = api.get_l10n_field('person_document')
self.document_label.set_text(self.document_l10n.label + ':')
            # Change the entry size (in chars) to accommodate the cpf
self.person_document.set_width_chars(17)
else:
self.document_l10n = api.get_l10n_field('company_document')
self.document_label.set_text(self.document_l10n.label + ':')
            # Change the entry size (in chars) to accommodate the cnpj
self.person_document.set_width_chars(21)
self.person_document.set_mask(self.document_l10n.entry_mask)
#
# Main wizard
#
class PersonRoleWizard(BaseWizard):
size = (650, 450)
def __init__(self, store, role_editor):
if not issubclass(role_editor, BasePersonRoleEditor):
raise TypeError('Editor %s must be BasePersonRoleEditor '
'instance' % role_editor)
self.role_editor = role_editor
BaseWizard.__init__(self, store,
self.get_first_step(store),
title=self.get_role_title())
if role_editor.help_section:
self.set_help_section(role_editor.help_section)
def get_first_step(self, store):
return PersonRoleTypeStep(self, store)
def get_role_name(self):
if not self.role_editor.model_name:
raise ValueError('Editor %s must define a model_name attribute '
% self.role_editor)
return self.role_editor.model_name
def get_role_title(self):
if not self.role_editor.title:
raise ValueError('Editor %s must define a title attribute '
% self.role_editor)
return self.role_editor.title
def set_editor(self, editor):
self.editor = editor
#
# WizardStep hooks
#
def finish(self):
if not self.editor.confirm():
return
self.retval = self.editor.model
self.close()
def run_person_role_dialog(role_editor, parent, store, model=None,
**editor_kwargs):
if not model:
editor_kwargs.pop('visual_mode', None)
return run_dialog(PersonRoleWizard, parent, store, role_editor,
**editor_kwargs)
return run_dialog(role_editor, parent, store, model, **editor_kwargs)
| andrebellafronte/stoq | stoqlib/gui/wizards/personwizard.py | Python | gpl-2.0 | 8,302 | [
"VisIt"
] | baf4e28361709430a97cffc9e9e6ae381b6811419662b8910f1a619577123778 |
import numpy as np
import pandas as pd
import mdtraj as md
import nmrpystar
from sklearn.externals.joblib import Memory
from .simulation_parameters import CS_CACHE_PATH
CHEMICAL_SHIFT_MODEL = "shiftx2"
FIRST_FRAMES = None
amino_acids = ["R","H", "K", "D", "E", "S", "T", "N", "Q", "C", "G", "A", "I", "L", "M", "F", "W", "Y", "V"]
memory = Memory(cachedir=CS_CACHE_PATH, verbose=0)
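# Join each multi-index tuple into a single "_"-separated identifier,
# e.g. ("sysA", "CS", 2, "CA") -> "sysA_CS_2_CA" (names illustrative).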
def multi_index_to_str(multi_index):
return ["_".join([str(a) for a in ind]) for ind in multi_index]
class Analyzer(object):
def __init__(self, identifier, data_filename):
self.identifier = identifier
self.data_filename = data_filename
@memory.cache
def chemical_shift_function(traj, identifier, model):
prediction = md.compute_chemical_shifts(traj, model=model)
return prediction
class ChemicalShiftAnalyzer(Analyzer):
@staticmethod
def find_assigned_shifts(parsed):
for key, val in parsed.value.saves.items():
if "Assigned_chem_shift_list.Sf_category" in val.datums:
if val.datums["Assigned_chem_shift_list.Sf_category"] == "assigned_chemical_shifts":
return val.loops[1]
@staticmethod
def old_find_assigned_shifts(parsed):
if "assigned_chemical_shifts" in parsed.value.saves:
q = parsed.value.saves["assigned_chemical_shifts"].loops[1]
print parsed.value.saves["assigned_chemical_shifts"].datums["Assigned_chem_shift_list.Sf_category"]
elif "assigned_chem_shift_list_1" in parsed.value.saves:
q = parsed.value.saves["assigned_chem_shift_list_1"].loops[1]
print parsed.value.saves["assigned_chem_shift_list_1"].datums["Assigned_chem_shift_list.Sf_category"]
else:
            raise KeyError("Can't find chemical shift assignments in BMRB file")
def analyze(self, traj):
prediction = chemical_shift_function(traj, self.identifier, CHEMICAL_SHIFT_MODEL)
if FIRST_FRAMES is not None:
prediction = prediction.iloc[:, 0:FIRST_FRAMES]
prediction = prediction.mean(1).reset_index() # Average over time dimensions and turn into dataframe
top, bonds = traj.top.to_dataframe()
prediction.rename(columns={0:"value"}, inplace=True) # Give a name to the colum with the actual values.
prediction["expt"] = "CS"
prediction["system"] = self.identifier
multi_index = prediction.set_index(["system", "expt", "resSeq", "name"]).index
prediction["identifier"] = multi_index_to_str(multi_index)
prediction = prediction.set_index("identifier")
sigma_dict = pd.Series({"N":2.0862, "CA":0.7743, "CB":0.8583, "C":0.8699, "H":0.3783, "HA":0.1967}) # From http://www.shiftx2.ca/performance.html
prediction["sigma"] = sigma_dict[prediction.name].values
prediction.rename(columns={"name":"atom"}, inplace=True) # Use a more descriptive name for the chemical shift atom name
resSeq_to_AA = top.groupby("resSeq").first().resName
prediction["AA"] = resSeq_to_AA[prediction.resSeq].values
return prediction
def load_expt(self):
parsed = nmrpystar.parse(open(self.data_filename).read())
print(parsed.status)
q = ChemicalShiftAnalyzer.find_assigned_shifts(parsed)
x = pd.DataFrame(q.rows, columns=q.keys)
x = x[["Atom_chem_shift.Seq_ID", "Atom_chem_shift.Atom_ID", "Atom_chem_shift.Val"]]
x.rename(columns={"Atom_chem_shift.Seq_ID":"resSeq", "Atom_chem_shift.Atom_ID":"name", "Atom_chem_shift.Val":"value"}, inplace=True)
# Need to make dtypes match to do eventual comparison.
x["resSeq"] = x["resSeq"].astype('int')
x["value"] = x["value"].astype('float')
x["expt"] = "CS"
x["system"] = self.identifier
expt = x.set_index(["system", "expt", "resSeq", "name"]).value
expt = pd.Series(expt.values, multi_index_to_str(expt.index), name="value")
return expt
class ScalarCouplingAnalyzer(Analyzer):
def analyze(self, traj):
top, bonds = traj.top.to_dataframe()
ind, values = md.compute_J3_HN_HA(traj)
prediction = pd.DataFrame({"value":values.mean(0)})
prediction["resSeq"] = top.ix[ind[:, -1]].resSeq.values # Set the residue numbers to the last (fourth) atom in the dihedral
if top.ix[0].resName == "ACE":
prediction["resSeq"] -= 1 # HARDCODED Hack to account for the ACE residue!!!!!!!!!! Fix me later!
prediction["AA"] = top.ix[ind[:, -1]].resName.values
prediction["expt"] = "3JHNHA"
prediction["system"] = self.identifier
prediction["sigma"] = 0.36
multi_index = prediction.set_index(["system", "expt", "resSeq"]).index
prediction["identifier"] = multi_index_to_str(multi_index)
prediction = prediction.set_index("identifier")
return prediction
def load_expt(self):
parsed = nmrpystar.parse(open(self.data_filename).read())
print(parsed.status)
q = parsed.value.saves["coupling_constant_list_1"].loops[1]
x = pd.DataFrame(q.rows, columns=q.keys)
x = x[["Coupling_constant.Seq_ID_1", "Coupling_constant.Val", "Coupling_constant.Val_err"]]
x.rename(columns={"Coupling_constant.Seq_ID_1":"resSeq", "Coupling_constant.Val":"value", "Coupling_constant.Val_err":"err"}, inplace=True)
# Need to make dtypes match to do eventual comparison.
x["resSeq"] = x["resSeq"].astype('int')
x["value"] = x["value"].astype('float')
x["expt"] = "3JHNHA"
x["system"] = self.identifier
expt = x.set_index(["system", "expt", "resSeq"]).value
expt = pd.Series(expt.values, multi_index_to_str(expt.index))
return expt
class BuzzScalarCouplingAnalyzer(ScalarCouplingAnalyzer):
def load_expt(self):
expt = pd.read_csv(self.data_filename, index_col=0)
aa = self.identifier.split("_")[1]
expt.ix["H"] = 7.76 # We're using the pH 2.9 result for HIS because that will allow us to simulate fully protonated HIS
# Rather than need to do a constant pH simulation near the midpoint of the HIS titration curve.
expt = expt.ix[[aa]]
expt["value"] = expt["coupling"]
expt["resSeq"] = 1
expt["system"] = self.identifier
expt["expt"] = "3JHNHA"
expt = expt.set_index(["system", "expt", "resSeq"]).value
expt = pd.Series(expt.values, multi_index_to_str(expt.index))
return expt
class OhScalarCouplingAnalyzer(ScalarCouplingAnalyzer):
def load_expt(self):
# To DO: FIX HARDCODED PATH!!!
larger = pd.read_csv("/home/kyleb/src/choderalab/ForcefieldData/nmr/ace_x_y_nh2/data/larger_couplings.csv")
smaller = pd.read_csv("/home/kyleb/src/choderalab/ForcefieldData/nmr/ace_x_y_nh2/data/smaller_couplings.csv")
expt = []
for aa in amino_acids:
value = smaller.ix["G"][aa]
xyz = ["G%s" % aa, 1, value] # Using indices 1 and 2 here: {Ace:0, X:1, Y:2, NH2:3}
expt.append(xyz)
value = larger.ix["G"][aa]
xyz = ["G%s" % aa, 2, value]
expt.append(xyz)
value = larger.ix[aa]["G"]
xyz = ["%sG" % aa, 1, value]
expt.append(xyz)
value = smaller.ix[aa]["G"]
xyz = ["%sG" % aa, 2, value]
expt.append(xyz)
expt = pd.DataFrame(expt, columns=["seq", "resSeq", "value"])
seq = self.identifier.split("_")[1]
expt = expt[expt.seq == seq]
expt["system"] = self.identifier
expt["expt"] = "3JHNHA"
expt = expt.set_index(["system", "expt", "resSeq"]).value
expt = expt.drop_duplicates()
expt = pd.Series(expt.values, multi_index_to_str(expt.index))
return expt
def accumulate_experiments(analyzers_dict):
data = []
for key, analyzers in analyzers_dict.items():
data.append(pd.concat([analyzer.load_expt() for analyzer in analyzers]))
return pd.concat(data)
| choderalab/TrustButVerify | trustbutverify/analyzers.py | Python | gpl-2.0 | 8,341 | [
"MDTraj"
] | 755082b5f0bf1dbb2ea923bad39a3e3610776111e86a8f93ca85d8703d461528 |
"""KTEQ-FM GENIUS API FUNCTIONS.
This module contains all of the Genius API calls for the TeqBot project.
These api calls are all located centrally within this module for convenience.
All API calls will be built in this module, and corresponding wapper functions
will be created for each of these calls for TeqBot to use. The Genius API
is used to look up song lyric information on the Genius lyrics website. This
allows for DJs to get an updated lyrics page for each song they log while
doing their set, as well as provide a preliminary search for profanity for
each song.
Please visit https://docs.genius.com/ for more information on how the
Genius API works.
Example:
    $ python genius.py "<SONG_NAME>" "<ARTIST_NAME>" "<GENIUS_TOKEN>(optional)"
Running this module from the command line with a valid Genius API token,
    a song name, and an artist name will search for the queried song on
    Genius and return a report on the song lyrics.
Todo:
* Add additional tests
.. _TeqBot GitHub Repository:
https://github.com/kteq-fm/kteq-teqbot
.. _KTEQ-FM Website:
http://www.kteq.org/
"""
import sys
import requests
import os
from bs4 import BeautifulSoup
from nltk.stem.lancaster import LancasterStemmer
from difflib import SequenceMatcher
GENIUS_URL = "https://api.genius.com"
SONG_HAS_SWEARS = 0
SONG_SWEAR_FREE = 1
SONG_NOT_FOUND = 2
def load_auth(token=None):
"""Convert Genius Token into required format.
This function simply reformats a genius token if needed.
Args:
token (str): unformatted token
Returns:
(dict): Formatted token
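    Example:
        Using a placeholder token string:
        >>> import genius
        >>> genius.load_auth("abc123")
        {'Authorization': 'Bearer abc123'}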
"""
if token is None:
auth = 'Bearer ' + os.environ.get('GENIUS_TOKEN')
else:
auth = 'Bearer ' + token
return { 'Authorization' : auth }
def similarity(a, b):
"""Calculate similarity between two strings.
This function will compare two strings and determine how close they are
to one another. This will allow for imprecise queries for song artists and
song names, such as mispellings or other slight differences.
Args:
a (str): string being compared to b
b (str): string being compared to a
Returns:
(double): similarity between a and b (1.0 indicates identical)
Example:
>>> import genius
>>> genius.similarity("apples","oranges")
0.46153846153846156
>>> genius.similarity("apples","Appels")
0.6666666666666666
>>> genius.similarity("apples","apples")
1.0
>>> genius.similarity("Kendrick Lamar","Kendrick")
0.7272727272727273
"""
return SequenceMatcher(None, a, b).ratio()
def load_profanity(filename):
"""Load a profanity list from a file.
creates a list to compare words to in order to determine profanity.
A more robust profanity filter can be built by adding words to the file
loaded, or by using different/multiple files.
Args:
filename (str): file containing swear words, one per line
Returns:
(list): list containing swear words
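    Example:
        Assuming a text file with one swear word per line (the project ships
        one as ``profanity.txt``):
            bad_words = load_profanity("profanity.txt")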
"""
with open(filename) as f:
return [ word.strip() for word in f.readlines() ]
def clean_test_01(lyrics, bad_words=None):
"""Check if lyrics are clean (TEST #1).
given a string containing the song lyrics, determines if the song contains
any profanity. This test uses the web API for http://wdylike.appspot.com/
which gives a very simple boolean value of True if the song has profanity
or False if the song is clean.
Issues with this Test:
This test is not very reliable for various reasons. This simply gives a
yes or no response without returning a list of suspect words. Furthermore,
the test uses very loosely defined regular expressions, allowing for several
false positives. An example would be the query
http://wdylike.appspot.com/?q=massive, which would return a True for
explicit content because "massive" contains the word "ass" in it.
Args:
lyrics (str): song lyrics
bad_words (list): list of bad words (ignored for this test)
Returns:
(int): value indicating:
SONG_HAS_SWEARS if song has profanity.
SONG_SWEAR_FREE if song is clean.
SONG_NOT_FOUND if failure to reach server.
(list): empty list. Here to match structure of other
profanity tests, which actually
"""
# URL is the true or false checker for profanity
url = "http://www.wdylike.appspot.com"
params = {'q': lyrics}
# GET request, using the lyrics of song
response = requests.get(url, params=params)
test = None
# Determine if song is clean, has swears, or other
if 'true' in response.text :
test = SONG_HAS_SWEARS
elif 'false' in response.text :
test = SONG_SWEAR_FREE
else :
test = SONG_NOT_FOUND
return [test, [] ]
def clean_test_02(lyrics, bad_words):
"""Check if lyrics are clean (TEST #2).
given a string containing the song lyrics, determines if the song contains
any profanity. This test uses a profanity list loaded in from a file to
determine if songs are profane.
Issues with this Test:
This test will only catch words that have been added to a profanity file,
so if a swear word is not present in this file, it will not be checked.
Although this test uses lemmatization to reduce missed swears, this
test will fail to find compound swear words, or swear words embedded into
other words unless those compound words are added to the profanity file.
An example would be the word "unf*ckable", which contains "f*ck" in the
middle of it. This test does not use regular expressions so it might miss
words like this. However, it would match the word "f*cking" as "f*ck" due
to the lemmatizer.
Args:
lyrics (str): song lyrics
bad_words (list): list of bad words
Returns:
(int): value indicating:
SONG_HAS_SWEARS if song has profanity.
SONG_SWEAR_FREE if song is clean.
SONG_NOT_FOUND if failure to reach server.
(list): list containing swear words in order of appearance in the
song, based on lyrics provided.
"""
tokens = lyrics.split()
bad_found = []
st = LancasterStemmer()
test = None
for word in tokens:
w = word.strip('!,.?').lower()
if st.stem(w) in bad_words:
bad_found.append(w)
if len(bad_found) > 0:
test = SONG_HAS_SWEARS
else:
test = SONG_SWEAR_FREE
return [test, bad_found ]
def get_lyrics(auth, api_path):
"""Find the Lyrics of a given song.
given an api path for a specific song, return the lyrics from genius.
Args:
auth (str): Genius API token
api_path (str): path to song API
Returns:
(str): string containing song lyrics
"""
# URL Is combination of genius API URL and the api path for a song
url = GENIUS_URL + api_path
# GET request
response = requests.get(url, headers=auth)
# Get json version
json = response.json()
path = json["response"]["song"]["path"]
# Scrape using soup
url = "http://genius.com" + path
lyric_page = requests.get(url)
html = BeautifulSoup(lyric_page.text, "html.parser")
# Clean script tags
[h.extract() for h in html('script')]
# Return lyrics, these are tagged nicely in Genius
lyrics = html.find("div", class_="lyrics").get_text()
return lyrics
def run_tests(lyrics,bad_words):
"""Run all existing profanity tests and return results.
Args:
lyrics (str): Song Lyrics
bad_words (str): loaded in list of bad words
Returns:
(list): list containing reports from each test
"""
# Add new clean tests here
test_list = [ clean_test_01,
clean_test_02 ]
res = []
for test in test_list:
res.append( test(lyrics,bad_words) )
return res
def evaluate_tests(results):
"""Convert Test Results into a readable report.
Given a list of results, converts into a readable report message.
Args:
results (str): unformatted results listings for each profanity test
Returns:
(str): Generated report
"""
i = 1
msg = ""
swears = ""
code = ""
    for test in results:
        swears = ""  # reset so a passing test does not repeat the previous failure's swear list
        code = test_code(test[0],i)
if test[0] == SONG_HAS_SWEARS:
if i > 1:
swears = " Song Contains: " + ", ".join(test[1])
else:
swears = " Song May Contain Swears, Check other Tests"
#print(code)
msg += code + swears + "\n"
i += 1
return msg
def test_code(code, number):
"""Convert each test code into a readable message.
Given a code value and a test number, generate a readable report
synopsis for each test. This synopsis will simply state whether a song
passed or failed a given test.
Args:
code (int): Code value corresponding to a test result
number (int): Test Number
Returns:
(str): Generated synopsis for given test
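    Example:
        >>> import genius
        >>> genius.test_code(genius.SONG_SWEAR_FREE, 2)
        'PASS Profanity Test #2'
        >>> genius.test_code(genius.SONG_HAS_SWEARS, 1)
        'FAIL Profanity Test #1'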
"""
if code == SONG_HAS_SWEARS:
return "FAIL Profanity Test #" + str(number)
elif code == SONG_SWEAR_FREE:
return "PASS Profanity Test #" + str(number)
else:
return "Song Lyrics Not Found"
def generate_report(song,artist,lyrics,result):
"""Generate Final Lyrics Report.
Combining all previous reporting features, this will generate a
report containing the song name and artist, as well as
the results from each test and the lyrics for the song.
Args:
song (str): Song Name
artist (str): Song Artist
lyrics (str): Song Lyrics
result (list): List of (Unevaluated) Results
Returns:
(str): Generated report for a song
"""
msg = ""
msg += "Song Name: " + song + "\n"
msg += "Song Artist: " + artist + "\n\n"
msg += evaluate_tests(result) + "\n\n"
msg += "Song Lyrics: "
msg += lyrics
return msg
def get_api_path(auth, song_title, song_artist):
"""Find a song using Genius API and return an api path to it.
Attempt to find a song on Genius using various API queries. If
a song is found on the genius site, the path to the song is returned.
This can be later used to return the song's lyrics.
simliarity tests can be adjusted to fine tune accuracy of finding
songs.
Args:
auth (str): Genuis API token
song_title (str): Song name
song_artist (str): Song artist
Returns:
(str): song's API path
"""
url = GENIUS_URL + "/search"
# First search: Search by song title
data = {'q': song_title}
response = requests.get(url, data=data, headers=auth)
# Get JSON Data
json = response.json()
# Info will contain data for a "hit"
info = None
song_api_path = None
# compare two strings
a = None
b = song_artist.lower()
for hit in json["response"]["hits"]:
a = hit["result"]["primary_artist"]["name"].lower()
if similarity(a,b) >= 0.7:
info = hit
break
if info:
song_api_path = info["result"]["api_path"]
else:
# Second search: Reversed, search by artist
data = {'q': song_artist}
response = requests.get(url, data=data, headers=auth)
json = response.json()
info = None
b = song_title.lower()
for hit in json["response"]["hits"]:
a = hit["result"]["title"].lower()
if similarity(a,b) >= 0.7:
info = hit
break
if info:
song_api_path = info["result"]["api_path"]
return song_api_path
def run(song,artist,bad_words,auth):
"""Run a report on a song, generating lyrics and potential swears.
Args:
song (str): Song Name
artist (str): Song Artist
bad_words (list): List of Bad Words
auth (str): Genuis API token
Returns:
(str) : Report containing found swears, and lyrics
(boolean): True if runs without finding swears, False if swears found
"""
api_path = get_api_path(auth, song, artist)
report = ""
lyrics = ""
if api_path is not None:
lyrics = get_lyrics(auth, api_path)
result = run_tests(lyrics, bad_words)
report = generate_report(song,artist,lyrics,result)
else:
report = "Song Lyrics Not Found"
return report, True
    # Flag the song only when more than one test reports profanity; as noted in
    # clean_test_01's docstring, a single failing test can be a false positive.
    if report.count("FAIL") > 1:
return report, False
else:
return report, True
def usage():
"""Print Usage Statement.
Print the usage statement for running genius.py standalone.
Returns:
msg (str): Usage Statement.
Example:
>>> import genius
>>> msg = genius.usage()
>>> msg
'<genius.py usage statement>'
"""
msg = "genius.py usage:\n"
msg = msg + "$ python genius.py \"<SONG_NAME>\" "
msg = msg + "\"<SONG_NAME>\" "
msg = msg + "\"<GENIUS_TOKEN>(optional)\" "
return msg
if __name__ == "__main__":
if(len(sys.argv) > 3):
auth = load_auth(sys.argv[3])
elif(len(sys.argv) > 2):
auth = load_auth()
else:
print(usage())
sys.exit()
song = sys.argv[1]
artist = sys.argv[2]
bad_words = load_profanity("../profanity.txt")
msg, status = run(song,artist,bad_words,auth)
print( msg, "Clean: ", status )
| KTEQ-FM/kteq-teqbot | teqbot/genius.py | Python | unlicense | 13,688 | [
"VisIt"
] | 717e25ea34ef33c68339481f996a98c79567adc92853d0531ca9bd51afaaca94 |
import unittest, time, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o2 as h2o
import h2o_cmd, h2o_import as h2i, h2o_jobs, h2o_glm
from h2o_test import verboseprint, dump_json, OutputObj
from tabulate import tabulate
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM_basic_1(self):
importFolderPath = "logreg"
csvFilename = "benign.csv"
hex_key = "benign.hex"
csvPathname = importFolderPath + "/" + csvFilename
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=hex_key, check_header=1,
timeoutSecs=180, doSummary=False)
pA = h2o_cmd.ParseObj(parseResult)
iA = h2o_cmd.InspectObj(pA.parse_key)
parse_key = pA.parse_key
numRows = iA.numRows
numCols = iA.numCols
labelList = iA.labelList
expected = []
allowedDelta = 0
# loop, to see if we get same centers
labelListUsed = list(labelList)
labelListUsed.remove('STR')
labelListUsed.remove('FNDX') # response removed also
numColsUsed = numCols - 2
for trial in range(1):
# family [u'gaussian', u'binomial', u'poisson', u'gamma', u'tweedie']
# link [u'family_default', u'identity', u'logit', u'log', u'inverse', u'tweedie']
# can we do classification with probabilities?
# are only lambda and alpha grid searchable?
# glm parameters:
# model_id Key<Model> False None []
# training_frame Key<Frame> False None []
# validation_frame Key<Frame> False None []
# ignored_columns string[] False None []
# drop_na20_cols boolean False False []
# score_each_iteration boolean False False []
# response_column VecSpecifier False None []
# balance_classes boolean False False []
# class_sampling_factors float[] False None []
# max_after_balance_size float False 5.0 []
# max_confusion_matrix_size int False 20 []
# max_hit_ratio_k int False 10 []
# family enum False gaussian [u'gaussian', u'binomial', u'poisson', u'gamma']
# solver enum False IRLSM [u'AUTO', u'IRLSM', u'L_BFGS']
# alpha double[] False None []
# lambda double[] False None []
# lambda_search boolean False False []
# lambda_min_ratio double False -1.0 []
# nlambdas int False -1 []
# standardize boolean False True []
# max_iterations int False -1 []
# beta_epsilon double False 0.0001 []
# link enum False family_default [u'family_default', u'identity', u'logit', u'log', u'inverse', u'tweedie']
# prior double False -1.0 []
# use_all_factor_levels boolean False False []
# beta_constraints Key<Frame> False None []
# max_active_predictors int False -1 []
parameters = {
'ignored_columns': '["STR"]',
'response_column': 'FNDX',
# FIX! when is this needed? redundant for binomial?
'balance_classes': False,
'max_after_balance_size': None,
'standardize': False,
'family': 'binomial',
'link': None,
'alpha': '[1e-4]',
'lambda': '[0.5]',
'prior1': None,
'lambda_search': None,
'nlambdas': None,
'lambda_min_ratio': None,
# 'use_all_factor_levels': False,
}
model_key = 'benign_glm.hex'
bmResult = h2o.n0.build_model(
algo='glm',
model_id=model_key,
training_frame=parse_key,
parameters=parameters,
timeoutSecs=10)
bm = OutputObj(bmResult, 'bm')
modelResult = h2o.n0.models(key=model_key)
model = OutputObj(modelResult['models'][0]['output'], 'model')
h2o_glm.simpleCheckGLM(self, model, parameters, labelList, labelListUsed)
cmmResult = h2o.n0.compute_model_metrics(model=model_key, frame=parse_key, timeoutSecs=60)
cmm = OutputObj(cmmResult, 'cmm')
mcms = OutputObj({'data': cmm.max_criteria_and_metric_scores.data}, 'mcms')
m1 = mcms.data[1:]
h0 = mcms.data[0]
print "\nmcms", tabulate(m1, headers=h0)
thms = OutputObj(cmm.thresholds_and_metric_scores, 'thms')
if 1==0:
cmms = OutputObj({'cm': cmm.confusion_matrices}, 'cmms')
print ""
for i,c in enumerate(cmms.cm):
print "\ncmms.cm[%s]" % i, tabulate(c)
print ""
mmResult = h2o.n0.model_metrics(model=model_key, frame=parse_key, timeoutSecs=60)
mm = OutputObj(mmResult['model_metrics'][0], 'mm')
prResult = h2o.n0.predict(model=model_key, frame=parse_key, timeoutSecs=60)
pr = OutputObj(prResult['model_metrics'][0]['predictions'], 'pr')
# h2o_cmd.runStoreView()
if __name__ == '__main__':
h2o.unit_main()
| YzPaul3/h2o-3 | py2/testdir_single_jvm/test_GLM_basic_1.py | Python | apache-2.0 | 5,516 | [
"Gaussian"
] | 989b33cbaa47887383bf628e3029f8bcd54d4332ba0510e29992b5f05485ce7b |
from pymongo import MongoClient
import argparse
parser = argparse.ArgumentParser(description="Create a MongoDB collection of orthologues from an existing collection of internal BLAST hits.")
parser.add_argument("--host",
type=str,
default='localhost',
help="Hostname for the MongoDB database (default=localhost)")
parser.add_argument("--port",
type=str,
default=27017,
help="Port where MongoDB is listening (default=27017)")
parser.add_argument("--database",
type=str,
default='symbiont',
help="Name of the database to store the data in (default=symbiont)")
parser.add_argument("--existing_collection",
type=str,
default='internal_blast_hits',
help="Name of the existing collection containing internal BLAST hits. (default=internal_blast_hits)")
parser.add_argument("--orthologues_collection",
type=str,
default='orthologues',
help="Name of the new collection of orthologues. (default=orthologues)")
args = parser.parse_args()
client = MongoClient(args.host, args.port)
db = client[args.database]
internal_blast_hits = db[args.existing_collection]
orthologues = db[args.orthologues_collection]
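# Documents in the existing hits collection are expected to carry at least the
# fields used by the queries below:
#   {"qid": ..., "sid": ..., "qgenome": ..., "sgenome": ..., "evalue": ..., "score": ...}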
internal_blast_hits_indices = internal_blast_hits.index_information().keys()
if 'evalue_score' not in internal_blast_hits_indices:
internal_blast_hits.create_index([("evalue",1), ("score", -1)], name='evalue_score')
if 'qid' not in internal_blast_hits_indices:
internal_blast_hits.create_index([("qid",1)], name='qid')
# get all distinct qids in the collection
qids = internal_blast_hits.distinct("qid")
print len(qids)
# for every qid in the collection, find the top hit (based on evalue, then score) for each genome for which there is a hit
i = 0
for qid in set(qids):
i = i+1
print(i)
# find distinct sgenomes for that qid
sgenomes = internal_blast_hits.distinct("sgenome", {'qid':qid})
for sgenome in sgenomes:
#check that this qid doesn't already have a BBH in this genome
if (orthologues.find({"sid":qid, "qgenome":sgenome}).limit(1).count() == 0):
# get best hit for that qid and that sgenome after sorting by evalue and then score
q_hit = internal_blast_hits.find({"qid":qid, "sgenome":sgenome}).sort([["evalue",1], ["score",-1]])
q_best_hit = q_hit[0]
# NEW: check that sgenome is not the same as qgenome - could possibly do this when creating internal_blast_hits instead? Although paralogues may be useful at some point.
if (q_best_hit["qgenome"] != q_best_hit["sgenome"] ):
# see if the sid in best hit has the qid as best hit in qid's genome
qgenome = internal_blast_hits.find_one({"qid":qid},{"qgenome":1})
s_hit = internal_blast_hits.find({"qid":q_best_hit["sid"], "sgenome":qgenome["qgenome"]}).sort([["evalue",1], ["score",-1]])
if (s_hit.count()>0):
s_best_hit = s_hit[0]
if (s_best_hit["sid"] == qid):
orthologues.insert_one({"qid": q_best_hit["qid"], "sid": q_best_hit["sid"], "qgenome": q_best_hit["qgenome"], "sgenome": q_best_hit["sgenome"]})
| peteashton/symbionts.org | tools/createOrthologuesCollection.py | Python | mit | 3,542 | [
"BLAST"
] | ffcfb072fb380ba2479e9aa40db41e08448532a9381027a030d5e360a2fad201 |
import copy
import time
import lb_loader
import numpy as np
import pandas as pd
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import integrators, testsystems, hmc_integrators
pd.set_option('display.width', 1000)
n_steps = 1000
temperature = 300. * u.kelvin
collision_rate = 1.0 / u.picoseconds
timestep = 1.0 * u.femtoseconds
testsystem = testsystems.DHFRExplicit()
system, positions = testsystem.system, testsystem.positions
equil_steps = 1000
positions, boxes = lb_loader.equilibrate(testsystem, temperature, timestep, steps=equil_steps, minimize=True, use_hmc=False)
hmc_integrators.guess_force_groups(system, nonbonded=1, fft=1, others=0)
groups = [(0, 2), (1, 1)]
idict = {
"verlet": mm.VerletIntegrator(timestep),
"langevin": mm.LangevinIntegrator(temperature, collision_rate, timestep),
"vv": integrators.VelocityVerletIntegrator(timestep),
"vvvr": integrators.VelocityVerletIntegrator(timestep),
"ghmc10": integrators.GHMCIntegrator(temperature=temperature, collision_rate=collision_rate, timestep=timestep, steps_per_hmc=10),
"ghmc20": integrators.GHMCIntegrator(temperature=temperature, collision_rate=collision_rate, timestep=timestep, steps_per_hmc=20),
"ghmcrespa20": integrators.GHMCRESPA(temperature, steps_per_hmc=20, timestep=timestep, collision_rate=collision_rate, groups=groups)
}
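# The GHMC-based integrators take steps_per_hmc inner steps per call to step(),
# so their outer step counts are divided by these factors to keep the total
# amount of work roughly comparable across integrators.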
factors = {"ghmc10":10, "ghmc20":20, "ghmcrespa20":20}
data = []
for name, integrator in idict.items():
context = mm.Context(system, integrator)
context.setPositions(positions)
context.setVelocitiesToTemperature(temperature)
integrator.step(1)
t0 = time.time()
cur_steps = n_steps / factors.get(name, 1)
integrator.step(cur_steps)
dt = time.time() - t0
ns_per_day = 0.002 / dt * 24 * 60 * 60
data.append(dict(name=name, dt=dt, ns_per_day=ns_per_day))
print(data[-1])
data = pd.DataFrame(data)
data.to_csv("./tables/raw_performance.csv")
| kyleabeauchamp/HMCNotes | code/obsolete/benchmark_raw.py | Python | gpl-2.0 | 1,915 | [
"OpenMM"
] | 7041e1d2842796bbead8ff63c21e065b1eef516bb9c521645efb08958752af05 |
import collections
import heapq
import traceback
import weakref
import numpy
import six
import chainer
from chainer import _backprop_utils
from chainer.backends import cuda
from chainer import configuration
from chainer import function_hook
from chainer.graph_optimizations.static_graph_utilities \
import static_forward_optimizations
from chainer.utils import type_check
from chainer import variable
class FunctionNode(object):
"""Function node of the computational graph.
FunctionNode is a class representing a node in a computational graph. The
node corresponds to an application of a differentiable function to input
variables.
When a differentiable function is applied to :class:`~chainer.Variable`
objects,
it creates an instance of FunctionNode implementation and calls its
:meth:`apply` method. The :meth:`apply` method basically does the following
three things.
1. Adding an edge from the function node to the variable node corresponding
to each input. The node of each input is extracted by
:attr:`Variable.node <chainer.Variable.node>`.
2. Computing the output arrays of the function.
3. Creating a :class:`~chainer.Variable` object for each output array and
adding an edge from the node of the variable to the function node.
The output variables are then returned.
.. admonition:: Example
Let ``x`` be an instance of :class:`~chainer.Variable` and ``f`` be an
instance of :class:`FunctionNode` taking only one argument.
Then the following code
>>> import numpy, chainer
>>> x = chainer.Variable(numpy.zeros(10))
>>> f = chainer.functions.math.identity.Identity()
>>> y = f.apply((x,))[0]
computes a new variable ``y`` and creates backward references. The
backward references are actually set as per the following diagram::
x.node <--- f <--- y.node
If an application of another function ``g`` occurs as
>>> g = chainer.functions.math.identity.Identity()
>>> z = g.apply((x,))[0]
then the graph grows with a branch::
|--- f <--- y.node
x.node <-+
|--- g <--- z.node
Note that the branching is correctly managed on backward computation,
i.e. the gradients from ``f`` and ``g`` are accumulated to the gradient
of ``x``.
Every function-node implementation should provide :meth:`forward` and
:meth:`backward`. Instead of overriding :meth:`forward`, one can also
implement :meth:`forward_cpu` and :meth:`forward_gpu` when the
implementations for CPU and GPU arrays are totally different.
Note that the input and output variables are inaccessible from
:meth:`backward` by default. If it needs accesses to these variables, the
:meth:`forward` method (or its CPU/GPU variants) has to call
:meth:`retain_inputs` and :meth:`retain_outputs` appropriately. The
retained input/output variables can be accessed from :meth:`backward` by
calling :meth:`get_retained_inputs` and :meth:`get_retained_outputs`.
.. note::
There are two types of differentiable functions in Chainer (since v3).
The first type is of a function using a subclass of
:class:`~chainer.Function`,
which is called *old-style differentiable function*. The second type is
of a function using a subclass of :class:`FunctionNode`, which is called
**new-style differentiable function**. There are several advantages on
using the new-style differentiable function.
- The new-style differentiable function supports *differentiable
backpropagation*. The backpropagated gradients computed through the
new-style differentiable functions themselves support further
backpropagations so that the automatic higher-order differentiation is
available.
- The backpropagation of the new-style differentiable function can be
more computationally efficient because the interface allows an
implementation to omit the computation of unneeded input gradients.
Note that the new-style differentiable function is the standard way of
defining a function node of the computational graph in Chainer; old-
style differentiable functions are implemented as wrappers of the new-
style differentiable functions.
Attributes:
~FunctionNode.inputs: A tuple of the input
:class:`~chainer.variable.VariableNode` objects.
~FunctionNode.outputs: A tuple of weak references to the output
:class:`~chainer.variable.VariableNode` objects.
~FunctionNode.rank (int): An ordinal following the topological order
of the computational graph.
~FunctionNode.stack: Stack trace retrieved at the forward computation.
The stack trace is available only in the debug mode.
.. versionadded:: 3.0.0
"""
inputs = None
outputs = None
rank = 0
stack = None
_input_indexes_to_retain = None
_output_indexes_to_retain = None
_retained_output_data = None
_local_function_hooks = None
_supports_static_optimizations = False
lazy_grad_sum = False
@property
def local_function_hooks(self):
"""Ordered dictionary of registered function hooks.
Contrary to ``chainer.thread_local.function_hooks``,
which registers its elements to all functions,
Function hooks in this property is specific to this function.
"""
if self._local_function_hooks is None:
self._local_function_hooks = collections.OrderedDict()
return self._local_function_hooks
@property
def _n_local_function_hooks(self):
return (0 if self._local_function_hooks is None
else len(self._local_function_hooks))
@property
def label(self):
"""Short text that represents the function.
The default implementation returns its type name.
Each function should override it to give more information.
"""
return self.__class__.__name__
@property
def output_data(self):
"""A tuple of the retained output arrays.
This property is mainly used by :class:`Function`. Users basically do
not have to use this property; use :meth:`get_retained_outputs`
instead.
"""
if self._retained_output_data is None:
raise RuntimeError('retained output data is gone')
out_data = [None] * len(self.outputs)
for index, data in six.moves.zip(self._output_indexes_to_retain,
self._retained_output_data):
out_data[index] = data
return tuple(out_data)
@property
def _impl_name(self):
return self.__class__.__name__
def __call__(self, *args, **kwargs):
if self.__class__.__module__.startswith('chainer.'):
msg = '''\
Chainer's built-in function class object ({}) which is derived from \
chainer.FunctionNode has been called as if it were a callable. \
Use FunctionNode.apply() method instead.
Furthermore, it's not recommended to use built-in function classes directly; \
use corresponding function aliases (those with snake_case name, such as \
F.convolution_nd) instead.\
'''.format(self.__class__.__name__)
else:
msg = '''\
A function class object ({}) which is derived from \
chainer.FunctionNode has been called as if it were a callable. \
Use apply() method instead.\
'''.format(self.__class__.__name__)
raise RuntimeError(msg)
def apply(self, inputs):
"""Computes output variables and grows the computational graph.
Basic behavior is expressed in the documentation of
:class:`FunctionNode`.
.. note::
If the :data:`~Variable.data` attribute of input variables exist on
a GPU device, that device is made current before calling
:meth:`forward`, so implementors do not need to take care of device
selection in most cases.
Args:
inputs: Tuple of input variables. Each element can be either
:class:`~chainer.Variable`, :class:`numpy.ndarray`,
or :class:`cupy.ndarray`. If the element is an ndarray, it is
automatically wrapped with :class:`~chainer.Variable`.
Returns:
A tuple of output :class:`~chainer.Variable` objects.
"""
input_vars = [chainer.as_variable(x) for x in inputs]
in_data = tuple([x.data for x in input_vars])
requires_grad = any([x.requires_grad for x in input_vars])
# Check for input array types
if not chainer.is_arrays_compatible(in_data):
raise TypeError(
'incompatible array types are mixed in the forward input '
'({}).\n'
'Actual: {}'.format(
self.label,
', '.join(str(type(x)) for x in in_data)))
is_debug = chainer.is_debug()
if is_debug:
# Keep stack trace for debug
self.stack = traceback.extract_stack()
if configuration.config.type_check:
self._check_data_type_forward(in_data)
hooks = chainer.get_function_hooks()
if self._n_local_function_hooks > 0:
hooks = collections.OrderedDict(hooks)
hooks.update(self.local_function_hooks)
hooks = hooks.values() # avoid six for performance
for hook in hooks:
hook.forward_preprocess(self, in_data)
# Forward propagation
with cuda.get_device_from_array(*in_data):
self._input_indexes_to_retain = None
self._output_indexes_to_retain = None
if chainer.config.schedule_func is not None:
outputs = static_forward_optimizations(self, in_data)
else:
outputs = self.forward(in_data)
# Check for output array types
if not isinstance(outputs, tuple):
raise TypeError(
'forward output must be a tuple ({})\n'
'Actual: {}'.format(self.label, type(outputs)))
if not chainer.is_arrays_compatible(outputs):
raise TypeError(
'incompatible array types are mixed in the forward output '
'({}).\n'
'Actual: {}'.format(
self.label,
', '.join(str(type(x)) for x in outputs)))
for hook in hooks:
hook.forward_postprocess(self, in_data)
# NaN check of output values
if is_debug:
if any(chainer.backend._contains_nan(out)
for out in outputs):
msg = ('NaN is detected on forward computation of '
'{}'.format(self.label))
raise RuntimeError(msg)
ret = tuple([variable.Variable(y, requires_grad=requires_grad)
for y in outputs])
if configuration.config.enable_backprop:
# Topological ordering
self.rank = max([x.rank for x in input_vars]) if input_vars else 0
# Add backward edges
for y in ret:
y.creator_node = self
self.inputs = tuple([x.node for x in input_vars])
# Add forward edges (must be weak references)
self.outputs = tuple([weakref.ref(y.node) for y in ret])
if self._input_indexes_to_retain is not None:
for index in self._input_indexes_to_retain:
input_vars[index].retain_data()
if self._output_indexes_to_retain is not None:
retained_data = []
for index in self._output_indexes_to_retain:
ret[index].retain_data()
retained_data.append(outputs[index])
self._retained_output_data = tuple(retained_data)
self.lazy_grad_sum = configuration.config.lazy_grad_sum
return ret
def _check_data_type_forward(self, in_data):
in_type = type_check.get_light_types(in_data)
try:
with type_check.light_mode:
self.check_type_forward(in_type)
return
except type_check.InvalidType:
# Ignore errors on first run
pass
in_type = type_check.get_types(in_data, 'in_types', False)
with type_check.get_function_check_context(self):
self.check_type_forward(in_type)
def check_type_forward(self, in_types):
"""Checks types of input data before forward propagation.
This method is called before :meth:`forward` and validates the types of
input variables using
:ref:`the type checking utilities <type-check-utils>`.
Args:
in_types (~chainer.utils.type_check.TypeInfoTuple): The type
information of input variables for :meth:`forward`.
"""
pass
def forward(self, inputs):
"""Computes the output arrays from the input arrays.
It delegates the procedure to :meth:`forward_cpu` or
:meth:`forward_gpu` by default. Which of them this method selects is
determined by the type of input arrays. Implementations of
:class:`FunctionNode` must implement either CPU/GPU methods or this
method.
Args:
inputs: Tuple of input array(s).
Returns:
Tuple of output array(s).
.. warning::
Implementations of :class:`FunctionNode` must take care that the
return value must be a tuple even if it returns only one array.
"""
assert len(inputs) > 0
if isinstance(inputs[0], cuda.ndarray):
return self.forward_gpu(inputs)
return self.forward_cpu(inputs)
def forward_cpu(self, inputs):
"""Computes the output arrays from the input NumPy arrays.
Args:
inputs: Tuple of input :class:`numpy.ndarray` objects.
Returns:
Tuple of output arrays. Each element can be NumPy or CuPy arrays.
.. warning::
Implementation of :class:`FunctionNode` must take care that the
return value must be a tuple even if it returns only one array.
"""
raise NotImplementedError
def forward_gpu(self, inputs):
"""Computes the output arrays from the input CuPy arrays.
Args:
inputs: Tuple of input :class:`cupy.ndarray` objects.
Returns:
Tuple of output arrays. Each element can be NumPy or CuPy arrays.
.. warning::
Implementation of :class:`FunctionNode` must take care that the
return value must be a tuple even if it returns only one array.
"""
raise NotImplementedError
def retain_inputs(self, indexes):
"""Lets specified input variable nodes keep data arrays.
By calling this method from :meth:`forward`, the function node can
specify which inputs are required for backprop. The input variables
with retained arrays can then be obtained by calling
:meth:`get_retained_inputs` from inside :meth:`backward`.
Unlike :class:`~chainer.Function`, the function node **DOES NOT** keep
input
arrays by default. If you want to keep some or all input arrays, do not
forget to call this method.
Note that **this method must not be called from the outside of**
:meth:`forward`.
Args:
indexes (iterable of int): Indexes of input variables that the
function will require for backprop.
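        .. admonition:: Example
            A minimal sketch of a new-style function that retains its first
            input for reuse in :meth:`backward`. The ``Square`` class below is
            illustrative only and is not part of Chainer:
            >>> import chainer
            >>> class Square(chainer.FunctionNode):
            ...     def forward(self, inputs):
            ...         x, = inputs
            ...         self.retain_inputs((0,))
            ...         return x * x,
            ...     def backward(self, indexes, grad_outputs):
            ...         x, = self.get_retained_inputs()
            ...         gy, = grad_outputs
            ...         return 2.0 * x * gy,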
"""
self._input_indexes_to_retain = indexes
def retain_outputs(self, indexes):
"""Lets specified output variable nodes keep data arrays.
By calling this method from :meth:`forward`, the function node can
specify which outputs are required for backprop. If this method is not
called, no output variables will be marked to keep their data array at
the point of returning from :meth:`apply`. The output variables with
retained arrays can then be obtained by calling
:meth:`get_retained_outputs` from inside :meth:`backward`.
.. note::
It is recommended to use this method if the function requires some
or all output arrays in backprop. The function can also use output
arrays just by keeping references to them directly, although it
might affect the performance of later function applications on the
output variables.
Note that **this method must not be called from the outside of**
:meth:`forward`.
Args:
indexes (iterable of int): Indexes of output variables that the
function will require for backprop.
"""
self._output_indexes_to_retain = indexes
def backward(self, target_input_indexes, grad_outputs):
"""Computes gradients w.r.t.\\ specified inputs given output gradients.
This method is used to compute one step of the backpropagation
corresponding to the forward computation of this function node.
Given the gradients w.r.t. output variables, this method computes the
gradients w.r.t. specified input variables. Note that this method does
not need to compute any input gradients not specified by
``target_input_indices``.
Unlike :meth:`Function.backward() <chainer.Function.backward>`,
gradients are given as :class:`~chainer.Variable` objects and this
method itself has to return input gradients as
:class:`~chainer.Variable` objects. It enables the function node to
return the input gradients with the full computational history, in
which case it supports *differentiable backpropagation* or
*higher-order differentiation*.
The default implementation returns ``None`` s, which means the
function is not differentiable.
Args:
target_input_indexes (tuple of int): Sorted indices of the input
variables w.r.t. which the gradients are required. It is
guaranteed that this tuple contains at least one element.
grad_outputs (tuple of :class:`~chainer.Variable`\\ s): Gradients
w.r.t. the output variables.
If the gradient w.r.t. an output variable is not
given, the corresponding element is ``None``.
Returns:
Tuple of variables that represent the gradients w.r.t. specified
input variables. The length of the tuple can be same as either
``len(target_input_indexes)`` or the number of inputs. In the
latter case, the elements not specified by ``target_input_indexes``
will be discarded.
.. seealso::
:meth:`backward_accumulate` provides an alternative interface that
allows you to implement the backward computation fused with the
gradient accumulation.
"""
return (None,) * len(target_input_indexes)
def backward_accumulate(self, target_input_indexes, grad_outputs,
grad_inputs):
"""Computes gradients w.r.t.\\ specified inputs and accumulates them.
This method provides a way to fuse the backward computation and the
gradient accumulations in the case that the multiple functions are
applied to the same variable.
Users have to override either of this method or :meth:`backward`.
It is often simpler to implement :meth:`backward` and is recommended
if you do not need to provide efficient gradient accumulation.
Args:
target_input_indexes (tuple of int): Sorted indices of the input
variables w.r.t. which the gradients are required. It is
guaranteed that this tuple contains at least one element.
grad_outputs (tuple of Variable): Gradients w.r.t. the output
variables. If the gradient w.r.t. an output variable is not
given, the corresponding element is ``None``.
grad_inputs (tuple of Variable): Gradients w.r.t. the input
variables specified by ``target_input_indexes``. These values
are computed by other computation paths. If there is no
gradient value existing for the variable, the corresponding
element is ``None``. See also the note below.
Returns:
Tuple of variables that represent the gradients w.r.t. specified
input variables. Unlike :meth:`backward`, the length of the tuple
**must** be same as that of ``target_input_indices``.
.. note::
Gradient variables in ``grad_outputs`` are distinct, even if a
variable is passed to multiple input arguments of the function.
This is an implementation-detail convention to avoid the
complication of correctly accumulating gradients in such a case.
Usually, only the first position of ``grad_inputs`` corresponding to
these input arguments may contain the gradient variable
corresponding to that input variable, and other entries are set to
``None``. This is not the case with the ``lazy_grad_sum`` feature.
This behavior might be changed in a future version.
"""
# If backward_accumulate is overridden, it should be equivalent to the
# following code, which is the default implementation based on backward().
# This default is not used once you override this method, so an overriding
# implementation does not have to call backward().
assert isinstance(target_input_indexes, tuple)
assert isinstance(grad_outputs, tuple)
assert isinstance(grad_inputs, tuple)
gxs = self.backward(target_input_indexes, grad_outputs)
len_gxs = len(gxs)
if len_gxs == len(self.inputs):
gxs = tuple([gxs[i] for i in target_input_indexes])
else:
assert len_gxs == len(target_input_indexes)
return tuple([gx if g_input is None else
g_input if gx is None else
gx + g_input
for gx, g_input in six.moves.zip(gxs, grad_inputs)])
def _get_error_message(self, message):
lines = [
message,
' function={} ({})'.format(self._impl_name, self.label)
]
if self.inputs:
for i, input in enumerate(self.inputs):
lines.append(
' input {}: shape={} dtype={}'.format(
i, input.shape, input.dtype))
if self.outputs:
for i, output_ref in enumerate(self.outputs):
output = output_ref()
if output is None:
lines.append(
' output {}: not available'.format(i))
else:
lines.append(
' output {}: shape={} dtype={}'.format(
i, output.shape, output.dtype))
return '\n'.join(lines)
def get_retained_inputs(self):
"""Returns a tuple of retained input variables.
This method is used to retrieve the input variables retained in
:meth:`forward`.
Returns:
A tuple of retained input variables, if available. Otherwise
return ``None``.
"""
if self._input_indexes_to_retain is None or self.inputs is None:
return
inputs = self.inputs
if self._input_indexes_to_retain is None:
raise ValueError(self._get_error_message(
'retain_inputs is not called in forward.'))
return tuple([inputs[index].get_variable()
for index in self._input_indexes_to_retain])
def get_retained_outputs(self):
"""Returns a tuple of retained output variables.
This method is used to retrieve the output variables retained in
:meth:`forward`.
Returns:
A tuple of retained output variables, if available. Otherwise
return ``None``.
.. note::
This method does a tricky thing to support the case of an output
node garbage-collected before this method is called; in this case,
this method creates a fresh variable node that acts as an output
node of the function node.
"""
if self._output_indexes_to_retain is None or self.outputs is None:
return
if self._retained_output_data is None:
raise ValueError(self._get_error_message(
'retain_outputs is not called in forward.'))
ret = []
outputs = self.outputs
new_outputs = list(outputs)
outputs_modified = False
for index, data in six.moves.zip(self._output_indexes_to_retain,
self._retained_output_data):
output = outputs[index]()
if output is None:
# The output node is garbage collected, so create a fresh
# Variable object.
output_var = variable.Variable(data)
output_var.creator_node = self
new_outputs[index] = weakref.ref(output_var)
outputs_modified = True
else:
output_var = output.get_variable()
ret.append(output_var)
if outputs_modified:
self.outputs = tuple(new_outputs)
return tuple(ret)
def unchain(self):
"""Purges in/out nodes and this function node itself from the graph."""
for y in self.outputs:
y_ref = y()
if y_ref is not None:
y_ref.unchain()
self.inputs = None
def add_hook(self, hook, name=None):
"""Registers a function hook.
Args:
hook (~chainer.FunctionHook): Function hook to be
registered.
name (str): Name of the function hook. The name must be unique
among function hooks registered to this function. If ``None``,
the default name of the function hook is used.
"""
if not isinstance(hook, function_hook.FunctionHook):
raise TypeError('Hook must be of type FunctionHook')
if name is None:
name = hook.name
hooks = self.local_function_hooks
if name in hooks:
raise KeyError('Hook %s already exists' % name)
hooks[name] = hook
hook.added(self)
def delete_hook(self, name):
"""Unregisters the function hook.
Args:
name (str): The name of the function hook to be unregistered.
"""
if name in self.local_function_hooks:
self.local_function_hooks[name].deleted(self)
del self.local_function_hooks[name]
else:
raise KeyError('Hook %s does not exist' % name)
def grad(outputs, inputs, grad_outputs=None, grad_inputs=None, set_grad=False,
retain_grad=False, enable_double_backprop=False, loss_scale=None):
"""Computes the gradient of output variables w.r.t.\\ the input variables.
This function implements the backpropagation algorithm. While
:meth:`Variable.backward` also implements backprop, this function selects
the smallest paths in the computational graph needed to compute the
gradients w.r.t. inputs. The error is backpropagated only through these
selected paths, which may reduce the overall computational cost.
This function also differs from :meth:`Variable.backward` in the way to
return the gradients; it directly returns the gradient variables as a list
instead of setting gradients to the :attr:`Variable.grad_var` attribute of
the original variable. This means users do not need to clear the gradient
w.r.t. each variable before computing the gradient using this function.
If the ``set_grad`` option is set to ``True``, the computed gradient is also
stored in the :attr:`Variable.grad_var` attribute of each variable, in
which case any original value of :attr:`Variable.grad_var` is overwritten
even if it had already been set.
Args:
outputs (tuple or list of :class:`~chainer.Variable`):
A sequence of output variables from which backprop starts.
inputs (tuple or list of :class:`~chainer.Variable`):
A sequence of input variables each of which this function computes
the gradient w.r.t.
grad_outputs (tuple or list of :class:`~chainer.Variable` or None):
A sequence of variables that gives the initial value of each output
gradient.
If an element is set to ``None``, an array filled with 1 is used.
If this argument itself is ``None``, it is treated as a sequence of
``None``\\ s.
grad_inputs (tuple or list of :class:`~chainer.Variable` or None):
A sequence of variables that gives the initial value of each input
gradient. The gradients computed by the backprop
algorithm are accumulated to them (not in-place). If an element
is set to ``None``, the gradient is not accumulated to this value.
If this argument itself is ``None``, it is treated as a sequence of
``None``\\ s.
set_grad (bool): If it is ``True``, the :attr:`Variable.grad_var`
attribute of each input variable is set to the corresponding
computed gradient variable.
retain_grad (bool): If it is ``True``, the gradients w.r.t. all the
intermediate variables are stored in the :attr:`Variable.grad_var`
attribute. In this case, the ``set_grad`` option is ignored.
enable_double_backprop (bool): If it is ``True``, the computed
gradients can be further backpropagated. Enabling it may increase
the memory consumption (and possibly the computational time) to
remember the intermediate gradient values for the second
backpropagation.
loss_scale (float): Loss scaling factor. Loss scaling is a useful
technique to mitigate the vanishing gradient issue that tends to
occur when a low precision data type like float16 is used during
training. If you set a loss scaling factor, gradients of loss
values are multiplied by the factor before backprop starts. The
factor is propagated to all gradients in the computational graph
along the backprop. The gradients of parameters are divided by the
factor just before the parameters are updated.
Returns:
A list of gradient variables w.r.t. the inputs.
"""
if not isinstance(outputs, (tuple, list)):
raise TypeError(
'outputs must be a tuple or a list, not {}.'.format(type(outputs)))
if not isinstance(inputs, (tuple, list)):
raise TypeError(
'inputs must be a tuple or a list, not {}.'.format(type(inputs)))
if not (grad_outputs is None or isinstance(grad_outputs, (tuple, list))):
raise TypeError(
'grad_outputs must be a tuple or a list or None, not {}.'.format(
type(grad_outputs)))
if not (grad_inputs is None or isinstance(grad_inputs, (tuple, list))):
raise TypeError(
'grad_inputs must be a tuple or a list or None, not {}.'.format(
type(grad_inputs)))
for v in outputs:
# Raise error here if v is created by Function.backward.
# In such case, we don't know exact inputs of the creator.
v.node._check_old_style_gradient()
# The implementation consists of three steps.
# 1. Backward enumeration: all the nodes reachable backward from the output
# nodes are enumerated. The forward direction links are collected in
# this step. Note that the variable nodes whose requires_grad is false
# are ignored and their creators are not searched.
candidate_funcs = [v.creator_node for v in outputs
if v.creator_node is not None]
visited_funcs = set()
forward_graph = collections.defaultdict(list)
while candidate_funcs:
func = candidate_funcs.pop()
if func in visited_funcs:
continue
visited_funcs.add(func)
for x in func.inputs:
# Raise error here if x is created by Function.backward.
# In such case, we don't know exact inputs of the creator.
x._check_old_style_gradient()
if not x.requires_grad:
continue
forward_graph[x].append(func)
creator = x.creator_node
if creator is not None and creator not in visited_funcs:
candidate_funcs.append(creator)
# 2. Forward enumeration: all the nodes in the subgraph reachable from the
# input nodes are enumerated. The extracted (sub-)subgraph is the union
# of all paths that backpropagation will visit.
candidate_vars = [x.node for x in inputs]
visited_funcs = set()
grad_required = set()
while candidate_vars:
x = candidate_vars.pop()
grad_required.add(x)
for func in forward_graph[x]:
if func in visited_funcs:
continue
visited_funcs.add(func)
for y_ref in func.outputs:
y = y_ref()
if y is not None and y in forward_graph:
candidate_vars.append(y)
# 3. Backpropagation: the backpropagation is executed along the
# (sub-)subgraph. It uses the topological order of the subgraph which is
# induced by the reversed order of function applications ("rank").
grads = _backprop_utils.GradTable()
# Initialize the gradient mapping.
if grad_outputs is None:
grad_outputs = (None,) * len(outputs)
for y, gy in zip(outputs, grad_outputs):
if gy is None:
with cuda.get_device_from_array(y.data) as device:
if device is cuda.DummyDevice:
gy_data = numpy.ones_like(y.data)
else:
gy_data = cuda.cupy.ones_like(y.data)
gy = variable.Variable(gy_data, requires_grad=False)
if loss_scale is not None:
gy.data *= loss_scale
grads[y.node] = gy
if grad_inputs is not None:
for x, gx in zip(inputs, grad_inputs):
if gx is not None:
grads[x.node] = gx
# Backprop implementation. It edits grads which will only contain the
# gradients w.r.t. the inputs.
with chainer.using_config('enable_backprop', enable_double_backprop):
ret_dict = _backprop(
outputs, inputs, grad_required, retain_grad, grads, loss_scale)
# Extract the gradients w.r.t. the inputs and return them.
ret = [ret_dict[x.node] for x in inputs]
if set_grad:
for x, gx in zip(inputs, ret):
x.grad_var = gx
return ret
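# Usage sketch for grad() (an illustrative addition, not part of the original
# module; the helper below is never called and the variable names are made up
# for the example).
def _grad_usage_example():
    import numpy as np
    x = variable.Variable(np.array([3.0], dtype=np.float32))
    y = x * x
    # First-order gradient; keep the graph so it can be differentiated again.
    gx, = grad([y], [x], enable_double_backprop=True)
    # gx.data is [6.] since dy/dx = 2 * x.
    gxx, = grad([gx], [x])
    # gxx.data is [2.], the second-order gradient.
    return gx, gxx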
def _backprop(outputs, inputs, grad_required, retain_grad, grads, loss_scale):
candidate_funcs, push_candidate, pop_candidate = _get_ordered_func_heap()
for y in outputs:
creator = y.creator_node
if creator is not None:
push_candidate(creator)
input_nodes = set(x.node for x in inputs)
ret_dict = {}
while candidate_funcs:
func = pop_candidate()
# Collect the gradients w.r.t. the outputs
ys = [y() for y in func.outputs] # access via weak ref
gys = tuple([grads.pop(y) for y in ys])
for node, gy in six.moves.zip(ys, gys):
if node is not None:
if node in input_nodes:
ret_dict[node] = gy
if retain_grad:
y = node.get_variable_or_none()
if y is not None:
y.grad_var = gy
y._loss_scale = loss_scale
# Collect the gradients w.r.t. the inputs
input_indexes = []
x_grads = collections.OrderedDict()
for i, x in enumerate(func.inputs):
if x not in grad_required:
continue
input_indexes.append(i)
if x not in x_grads:
x_grads[x] = grads.get_as_list(x)
if not input_indexes:
continue
input_indexes = tuple(input_indexes)
# Do backward
# Call pre-backward hooks
hooks = chainer.get_function_hooks()
if func._n_local_function_hooks != 0:
hooks = collections.OrderedDict(hooks)
hooks.update(func.local_function_hooks)
hooks = hooks.values() # avoid six for performance
in_data = tuple([x.data for x in func.inputs])
out_grad_data = tuple(
[None if g is None else g.data for g in gys])
with cuda.get_device_from_array(*in_data):
for hook in hooks:
hook.backward_preprocess(func, in_data, out_grad_data)
_backprop_utils.backprop_step(func, input_indexes, gys, x_grads)
# Call post-backward hooks
for hook in hooks:
hook.backward_postprocess(func, in_data, out_grad_data)
# Update grads
for node, g in x_grads.items():
if not g: # no gradient was accumulated for this node
continue
creator = node.creator_node
if creator is not None:
push_candidate(creator)
for x in input_nodes:
if x not in ret_dict:
ret_dict[x] = grads.pop(x)
return ret_dict
def _get_ordered_func_heap():
heap = []
visited_funcs = set()
def push_heap(func):
if func not in visited_funcs:
# Negate since heapq is min-heap
# The second element is used to make each item unique
ordered_func = -func.rank, len(visited_funcs), func
visited_funcs.add(func)
heapq.heappush(heap, ordered_func)
def pop_heap():
_, _, func = heapq.heappop(heap)
return func
return heap, push_heap, pop_heap
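# Note (added for clarity): the tuples pushed above are (-func.rank, i, func)
# and heapq keeps a min-heap, so pop_heap() always yields the pending function
# with the *largest* rank first. For example, pushing functions with ranks
# 1, 3 and 2 (in that order) pops them back as 3, 2, 1, which is the reverse
# topological order needed for backpropagation.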
| rezoo/chainer | chainer/function_node.py | Python | mit | 38,426 | [
"VisIt"
] | a68f1192c989e2f586274c303bb0881417c6ce5aeadfbb2c79e8e9f3a6f4052c |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import copy
import numpy
import unittest
from pyscf import lib
from pyscf import gto
from pyscf import scf
from pyscf.scf import jk
mol = gto.M(
verbose = 7,
output = '/dev/null',
atom = '''
O 0 0 0
H 0 -0.757 0.587
H 0 0.757 0.587''',
basis = '631g',
cart = True,
)
mf = scf.RHF(mol).run(conv_tol=1e-10)
def tearDownModule():
global mol, mf
del mol, mf
class KnownValues(unittest.TestCase):
def test_range_separated_Coulomb(self):
'''test range-separated Coulomb'''
with mol.with_range_coulomb(0.2):
dm = mf.make_rdm1()
vk0 = jk.get_jk(mol, dm, 'ijkl,jk->il', hermi=0)
vk1 = jk.get_jk(mol, (dm,dm), ['ijkl,jk->il','ijkl,li->kj'], hermi=1)
self.assertAlmostEqual(abs(vk1[0]-vk0).max(), 0, 9)
self.assertAlmostEqual(abs(vk1[1]-vk0).max(), 0, 9)
self.assertAlmostEqual(lib.finger(vk0), 0.87325708945599279, 9)
vk = scf.hf.get_jk(mol, dm)[1]
self.assertAlmostEqual(abs(vk-vk0).max(), 0, 12)
vk = scf.hf.get_jk(mol, dm)[1]
self.assertTrue(abs(vk-vk0).max() > 0.1)
def test_shls_slice(self):
dm = mf.make_rdm1()
ao_loc = mol.ao_loc_nr()
shls_slice = [0, 2, 1, 4, 2, 5, 0, 4]
locs = [ao_loc[i] for i in shls_slice]
i0, i1, j0, j1, k0, k1, l0, l1 = locs
vs = jk.get_jk(mol, (dm[j0:j1,k0:k1], dm[l0:l1,k0:k1]),
['ijkl,jk->il', 'ijkl,lk->ij'], hermi=0,
intor='int2e_ip1', shls_slice=shls_slice)
self.assertEqual(vs[0].shape, (3,2,6))
self.assertEqual(vs[1].shape, (3,2,5))
def test_shls_slice1(self):
mol = gto.M(atom='H 0 -.5 0; H 0 .5 0', basis='cc-pvdz')
nao = mol.nao_nr()
dm = numpy.random.random((nao,nao))
mol1 = gto.M(atom='He 2 0 0', basis='6-31g')
nao1 = mol1.nao_nr()
dm1 = numpy.random.random((nao1,nao1))
eri0 = gto.conc_mol(mol, mol1).intor('int2e_sph').reshape([nao+nao1]*4)
j1part = jk.get_jk((mol1,mol1,mol,mol), dm1[:1,:1], scripts='ijkl,ji->kl', intor='int2e',
shls_slice=(0,1,0,1,0,mol.nbas,0,mol.nbas))
j1ref = numpy.einsum('ijkl,ji->kl', eri0[nao:nao+1,nao:nao+1,:nao,:nao], dm1[:1,:1])
self.assertAlmostEqual(abs(j1part - j1ref).max(), 0, 12)
k1part = jk.get_jk((mol1,mol,mol,mol1), dm1[:,:1], scripts='ijkl,li->kj', intor='int2e',
shls_slice=(0,1,0,1,0,mol.nbas,0,mol1.nbas))
k1ref = numpy.einsum('ijkl,li->kj', eri0[nao:nao+1,:1,:nao,nao:], dm1[:,:1])
self.assertAlmostEqual(abs(k1part - k1ref).max(), 0, 12)
j1part = jk.get_jk(mol, dm[:1,1:2], scripts='ijkl,ji->kl', intor='int2e',
shls_slice=(1,2,0,1,0,mol.nbas,0,mol.nbas))
j1ref = numpy.einsum('ijkl,ji->kl', eri0[1:2,:1,:nao,:nao], dm[:1,1:2])
self.assertAlmostEqual(abs(j1part - j1ref).max(), 0, 12)
k1part = jk.get_jk(mol, dm[:,1:2], scripts='ijkl,li->kj', intor='int2e',
shls_slice=(1,2,0,1,0,mol.nbas,0,mol.nbas))
k1ref = numpy.einsum('ijkl,li->kj', eri0[:1,1:2,:nao,:nao], dm[:,1:2])
self.assertAlmostEqual(abs(k1part - k1ref).max(), 0, 12)
def test_mols(self):
pmol = copy.copy(mol)
mols = (mol, pmol, pmol, mol)
dm = mf.make_rdm1()
vj0 = jk.get_jk(mols, dm, 'ijkl,lk->ij')
vj1 = scf.hf.get_jk(mol, dm)[0]
self.assertAlmostEqual(abs(vj1-vj0).max(), 0, 9)
self.assertAlmostEqual(lib.finger(vj0), 28.36214139459754, 9)
def test_vk_s8(self):
mol = gto.M(atom='H 0 -.5 0; H 0 .5 0; H 1.1 0.2 0.2; H 0.6 0.5 0.4',
basis='cc-pvdz')
ao_loc = mol.ao_loc_nr()
eri0 = mol.intor('int2e')
nao = mol.nao
numpy.random.seed(1)
dm = numpy.random.random((nao,nao))
vk0 = numpy.einsum('ijkl,jk->il', eri0, dm)
self.assertAlmostEqual(abs(vk0-get_vk_s4(mol, dm)).max(), 0, 12)
self.assertAlmostEqual(abs(vk0-get_vk_s8(mol, dm)).max(), 0, 12)
shls_slice = (2,4,0,2,0,8,0,5)
i0,i1,j0,j1,k0,k1,l0,l1 = [ao_loc[x] for x in shls_slice]
vk0 = numpy.einsum('ijkl,jk->il', eri0[i0:i1,j0:j1,k0:k1,l0:l1], dm[j0:j1,k0:k1])
vk1 = numpy.einsum('ijkl,jl->ik', eri0[i0:i1,j0:j1,k0:k1,l0:l1], dm[j0:j1,l0:l1])
vk2 = numpy.einsum('ijkl,ik->jl', eri0[i0:i1,j0:j1,k0:k1,l0:l1], dm[i0:i1,k0:k1])
vk3 = numpy.einsum('ijkl,il->jk', eri0[i0:i1,j0:j1,k0:k1,l0:l1], dm[i0:i1,l0:l1])
vk = jk.get_jk(mol,
[dm[j0:j1,k0:k1], dm[j0:j1,l0:l1], dm[i0:i1,k0:k1], dm[i0:i1,l0:l1]],
scripts=['ijkl,jk->il', 'ijkl,jl->ik', 'ijkl,ik->jl', 'ijkl,il->jk'],
shls_slice=shls_slice)
self.assertAlmostEqual(abs(vk0-vk[0]).max(), 0, 12)
self.assertAlmostEqual(abs(vk1-vk[1]).max(), 0, 12)
self.assertAlmostEqual(abs(vk2-vk[2]).max(), 0, 12)
self.assertAlmostEqual(abs(vk3-vk[3]).max(), 0, 12)
def get_vk_s4(mol, dm):
ao_loc = mol.ao_loc_nr()
nao = ao_loc[-1]
vk = numpy.zeros((nao,nao))
bas_groups = list(lib.prange(0, mol.nbas, 3))
for ip, (ish0, ish1) in enumerate(bas_groups):
for jp, (jsh0, jsh1) in enumerate(bas_groups[:ip]):
for kp, (ksh0, ksh1) in enumerate(bas_groups):
for lp, (lsh0, lsh1) in enumerate(bas_groups[:kp]):
shls_slice = (ish0, ish1, jsh0, jsh1, ksh0, ksh1, lsh0, lsh1)
i0, i1, j0, j1, k0, k1, l0, l1 = [ao_loc[x] for x in shls_slice]
dms = [dm[j0:j1,k0:k1],
dm[i0:i1,k0:k1],
dm[j0:j1,l0:l1],
dm[i0:i1,l0:l1]]
scripts = ['ijkl,jk->il',
'ijkl,ik->jl',
'ijkl,jl->ik',
'ijkl,il->jk']
kparts = jk.get_jk(mol, dms, scripts, shls_slice=shls_slice)
vk[i0:i1,l0:l1] += kparts[0]
vk[j0:j1,l0:l1] += kparts[1]
vk[i0:i1,k0:k1] += kparts[2]
vk[j0:j1,k0:k1] += kparts[3]
lsh0, lsh1 = ksh0, ksh1
shls_slice = (ish0, ish1, jsh0, jsh1, ksh0, ksh1, lsh0, lsh1)
i0, i1, j0, j1, k0, k1, l0, l1 = [ao_loc[x] for x in shls_slice]
kparts = jk.get_jk(mol,
[dm[j0:j1,k0:k1], dm[i0:i1,k0:k1]],
scripts=['ijkl,jk->il', 'ijkl,ik->jl'],
shls_slice=shls_slice)
vk[i0:i1,l0:l1] += kparts[0]
vk[j0:j1,l0:l1] += kparts[1]
jsh0, jsh1 = ish0, ish1
for kp, (ksh0, ksh1) in enumerate(bas_groups):
for lp, (lsh0, lsh1) in enumerate(bas_groups[:kp]):
shls_slice = (ish0, ish1, jsh0, jsh1, ksh0, ksh1, lsh0, lsh1)
i0, i1, j0, j1, k0, k1, l0, l1 = [ao_loc[x] for x in shls_slice]
kparts = jk.get_jk(mol,
[dm[j0:j1,k0:k1], dm[j0:j1,l0:l1]],
scripts=['ijkl,jk->il', 'ijkl,jl->ik'],
shls_slice=shls_slice)
vk[i0:i1,l0:l1] += kparts[0]
vk[j0:j1,k0:k1] += kparts[1]
lsh0, lsh1 = ksh0, ksh1
shls_slice = (ish0, ish1, jsh0, jsh1, ksh0, ksh1, lsh0, lsh1)
i0, i1, j0, j1, k0, k1, l0, l1 = [ao_loc[x] for x in shls_slice]
kparts = jk.get_jk(mol,
[dm[j0:j1,k0:k1]],
scripts=['ijkl,jk->il'],
shls_slice=shls_slice)
vk[i0:i1,l0:l1] += kparts[0]
return vk
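# Illustrative dense reference (an addition for clarity, not part of the
# original test file; it is never called by the tests). It mirrors the einsum
# reference used in test_vk_s8 and is what the blocked helpers above and
# below are expected to reproduce.
def _vk_dense_reference(mol, dm):
    # Full (nao, nao, nao, nao) integral tensor, then a single contraction.
    eri = mol.intor('int2e')
    return numpy.einsum('ijkl,jk->il', eri, dm)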
def get_vk_s8(mol, dm):
ao_loc = mol.ao_loc_nr()
nao = ao_loc[-1]
vk = numpy.zeros((nao,nao))
bas_groups = list(lib.prange(0, mol.nbas, 3))
for ip, (ish0, ish1) in enumerate(bas_groups):
for jp, (jsh0, jsh1) in enumerate(bas_groups[:ip]):
for kp, (ksh0, ksh1) in enumerate(bas_groups[:ip]):
for lp, (lsh0, lsh1) in enumerate(bas_groups[:kp]):
shls_slice = (ish0, ish1, jsh0, jsh1, ksh0, ksh1, lsh0, lsh1)
i0, i1, j0, j1, k0, k1, l0, l1 = [ao_loc[x] for x in shls_slice]
dms = [dm[j0:j1,k0:k1],
dm[j0:j1,l0:l1],
dm[i0:i1,k0:k1],
dm[i0:i1,l0:l1],
dm[l0:l1,i0:i1],
dm[l0:l1,j0:j1],
dm[k0:k1,i0:i1],
dm[k0:k1,j0:j1]]
scripts = ['ijkl,jk->il',
'ijkl,jl->ik',
'ijkl,ik->jl',
'ijkl,il->jk',
'ijkl,li->kj',
'ijkl,lj->ki',
'ijkl,ki->lj',
'ijkl,kj->li']
kparts = jk.get_jk(mol, dms, scripts, shls_slice=shls_slice)
vk[i0:i1,l0:l1] += kparts[0]
vk[i0:i1,k0:k1] += kparts[1]
vk[j0:j1,l0:l1] += kparts[2]
vk[j0:j1,k0:k1] += kparts[3]
vk[k0:k1,j0:j1] += kparts[4]
vk[k0:k1,i0:i1] += kparts[5]
vk[l0:l1,j0:j1] += kparts[6]
vk[l0:l1,i0:i1] += kparts[7]
# ip > jp, ip > kp, kp == lp
lsh0, lsh1 = ksh0, ksh1
shls_slice = (ish0, ish1, jsh0, jsh1, ksh0, ksh1, lsh0, lsh1)
i0, i1, j0, j1, k0, k1, l0, l1 = [ao_loc[x] for x in shls_slice]
dms = [dm[j0:j1,k0:k1],
dm[i0:i1,k0:k1],
dm[k0:k1,j0:j1],
dm[k0:k1,i0:i1]]
scripts = ['ijkl,jk->il',
'ijkl,ik->jl',
'ijkl,kj->li',
'ijkl,ki->lj']
kparts = jk.get_jk(mol, dms, scripts, shls_slice=shls_slice)
vk[i0:i1,l0:l1] += kparts[0]
vk[j0:j1,l0:l1] += kparts[1]
vk[l0:l1,i0:i1] += kparts[2]
vk[l0:l1,j0:j1] += kparts[3]
# ip == kp and ip > jp and kp > lp
kp, ksh0, ksh1 = ip, ish0, ish1
for lp, (lsh0, lsh1) in enumerate(bas_groups[:kp]):
shls_slice = (ish0, ish1, jsh0, jsh1, ksh0, ksh1, lsh0, lsh1)
i0, i1, j0, j1, k0, k1, l0, l1 = [ao_loc[x] for x in shls_slice]
dms = [dm[j0:j1,k0:k1],
dm[i0:i1,k0:k1],
dm[j0:j1,l0:l1],
dm[i0:i1,l0:l1]]
scripts = ['ijkl,jk->il',
'ijkl,ik->jl',
'ijkl,jl->ik',
'ijkl,il->jk']
kparts = jk.get_jk(mol, dms, scripts, shls_slice=shls_slice)
vk[i0:i1,l0:l1] += kparts[0]
vk[j0:j1,l0:l1] += kparts[1]
vk[i0:i1,k0:k1] += kparts[2]
vk[j0:j1,k0:k1] += kparts[3]
# ip == jp and ip >= kp
jsh0, jsh1 = ish0, ish1
for kp, (ksh0, ksh1) in enumerate(bas_groups[:ip+1]):
for lp, (lsh0, lsh1) in enumerate(bas_groups[:kp]):
shls_slice = (ish0, ish1, jsh0, jsh1, ksh0, ksh1, lsh0, lsh1)
i0, i1, j0, j1, k0, k1, l0, l1 = [ao_loc[x] for x in shls_slice]
dms = [dm[j0:j1,k0:k1],
dm[j0:j1,l0:l1],
dm[k0:k1,j0:j1],
dm[l0:l1,j0:j1]]
scripts = ['ijkl,jk->il',
'ijkl,jl->ik',
'ijkl,kj->li',
'ijkl,lj->ki']
kparts = jk.get_jk(mol, dms, scripts, shls_slice=shls_slice)
vk[i0:i1,l0:l1] += kparts[0]
vk[i0:i1,k0:k1] += kparts[1]
vk[l0:l1,i0:i1] += kparts[2]
vk[k0:k1,i0:i1] += kparts[3]
# ip == jp and ip > kp and kp == lp
for kp, (ksh0, ksh1) in enumerate(bas_groups[:ip]):
lsh0, lsh1 = ksh0, ksh1
shls_slice = (ish0, ish1, jsh0, jsh1, ksh0, ksh1, lsh0, lsh1)
i0, i1, j0, j1, k0, k1, l0, l1 = [ao_loc[x] for x in shls_slice]
dms = [dm[j0:j1,k0:k1],
dm[l0:l1,i0:i1]]
scripts = ['ijkl,jk->il',
'ijkl,li->kj']
kparts = jk.get_jk(mol, dms, scripts, shls_slice=shls_slice)
vk[i0:i1,l0:l1] += kparts[0]
vk[k0:k1,j0:j1] += kparts[1]
# ip == jp == kp == lp
kp, ksh0, ksh1 = ip, ish0, ish1
lsh0, lsh1 = ksh0, ksh1
shls_slice = (ish0, ish1, jsh0, jsh1, ksh0, ksh1, lsh0, lsh1)
i0, i1, j0, j1, k0, k1, l0, l1 = [ao_loc[x] for x in shls_slice]
kparts = jk.get_jk(mol,
[dm[j0:j1,k0:k1]],
scripts=['ijkl,jk->il'],
shls_slice=shls_slice)
vk[i0:i1,l0:l1] += kparts[0]
return vk
if __name__ == "__main__":
print("Full Tests for rhf")
unittest.main()
| gkc1000/pyscf | pyscf/scf/test/test_jk.py | Python | apache-2.0 | 14,186 | [
"PySCF"
] | 486d3a7d9baeffff0f9222c209cbf6fd88ea567742a1eacccb47ed713cc14006 |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkMergeColumns(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkMergeColumns(), 'Processing.',
('vtkTable',), ('vtkTable',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
| nagyistoce/devide | modules/vtk_basic/vtkMergeColumns.py | Python | bsd-3-clause | 479 | [
"VTK"
] | 5dcd9fe9e0ebe91318be68ca3c3d89d8fe11d55f67cda4bef07a900337cb9287 |
# -*- coding: utf-8 -*-
""" Tests for student account views. """
import re
from unittest import skipUnless
from urllib import urlencode
import json
import mock
import ddt
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core import mail
from django.contrib import messages
from django.contrib.messages.middleware import MessageMiddleware
from django.test import TestCase
from django.test.utils import override_settings
from django.test.client import RequestFactory
from openedx.core.djangoapps.user_api.accounts.api import activate_account, create_account
from openedx.core.djangoapps.user_api.accounts import EMAIL_MAX_LENGTH
from openedx.core.lib.json_utils import EscapedEdxJSONEncoder
from student.tests.factories import UserFactory
from student_account.views import account_settings_context
from third_party_auth.tests.testutil import simulate_running_pipeline, ThirdPartyAuthTestMixin
from util.testing import UrlResetMixin
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
@ddt.ddt
class StudentAccountUpdateTest(UrlResetMixin, TestCase):
""" Tests for the student account views that update the user's account information. """
USERNAME = u"heisenberg"
ALTERNATE_USERNAME = u"walt"
OLD_PASSWORD = u"ḅḷüëṡḳÿ"
NEW_PASSWORD = u"🄱🄸🄶🄱🄻🅄🄴"
OLD_EMAIL = u"walter@graymattertech.com"
NEW_EMAIL = u"walt@savewalterwhite.com"
INVALID_ATTEMPTS = 100
INVALID_EMAILS = [
None,
u"",
u"a",
"no_domain",
"no+domain",
"@",
"@domain.com",
"test@no_extension",
# Long email -- subtract the length of the @domain
# except for one character (so we exceed the max length limit)
u"{user}@example.com".format(
user=(u'e' * (EMAIL_MAX_LENGTH - 11))
)
]
INVALID_KEY = u"123abc"
def setUp(self):
super(StudentAccountUpdateTest, self).setUp("student_account.urls")
# Create/activate a new account
activation_key = create_account(self.USERNAME, self.OLD_PASSWORD, self.OLD_EMAIL)
activate_account(activation_key)
# Login
result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
self.assertTrue(result)
@skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS')
def test_password_change(self):
# Request a password change while logged in, simulating
# use of the password reset link from the account page
response = self._change_password()
self.assertEqual(response.status_code, 200)
# Check that an email was sent
self.assertEqual(len(mail.outbox), 1)
# Retrieve the activation link from the email body
email_body = mail.outbox[0].body
result = re.search('(?P<url>https?://[^\s]+)', email_body)
self.assertIsNot(result, None)
activation_link = result.group('url')
# Visit the activation link
response = self.client.get(activation_link)
self.assertEqual(response.status_code, 200)
# Submit a new password and follow the redirect to the success page
response = self.client.post(
activation_link,
# These keys are from the form on the current password reset confirmation page.
{'new_password1': self.NEW_PASSWORD, 'new_password2': self.NEW_PASSWORD},
follow=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Your password has been set.")
# Log the user out to clear session data
self.client.logout()
# Verify that the new password can be used to log in
result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD)
self.assertTrue(result)
# Try reusing the activation link to change the password again
response = self.client.post(
activation_link,
{'new_password1': self.OLD_PASSWORD, 'new_password2': self.OLD_PASSWORD},
follow=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "The password reset link was invalid, possibly because the link has already been used.")
self.client.logout()
# Verify that the old password cannot be used to log in
result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
self.assertFalse(result)
# Verify that the new password continues to be valid
result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD)
self.assertTrue(result)
@ddt.data(True, False)
def test_password_change_logged_out(self, send_email):
# Log the user out
self.client.logout()
# Request a password change while logged out, simulating
# use of the password reset link from the login page
if send_email:
response = self._change_password(email=self.OLD_EMAIL)
self.assertEqual(response.status_code, 200)
else:
# Don't send an email in the POST data, simulating
# its (potentially accidental) omission in the POST
# data sent from the login page
response = self._change_password()
self.assertEqual(response.status_code, 400)
def test_password_change_inactive_user(self):
# Log out the user created during test setup
self.client.logout()
# Create a second user, but do not activate it
create_account(self.ALTERNATE_USERNAME, self.OLD_PASSWORD, self.NEW_EMAIL)
# Send the view the email address tied to the inactive user
response = self._change_password(email=self.NEW_EMAIL)
# Expect that the activation email is still sent,
# since the user may have lost the original activation email.
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 1)
def test_password_change_no_user(self):
# Log out the user created during test setup
self.client.logout()
# Send the view an email address not tied to any user
response = self._change_password(email=self.NEW_EMAIL)
self.assertEqual(response.status_code, 400)
def test_password_change_rate_limited(self):
# Log out the user created during test setup, to prevent the view from
# selecting the logged-in user's email address over the email provided
# in the POST data
self.client.logout()
# Make many consecutive bad requests in an attempt to trigger the rate limiter
for attempt in xrange(self.INVALID_ATTEMPTS):
self._change_password(email=self.NEW_EMAIL)
response = self._change_password(email=self.NEW_EMAIL)
self.assertEqual(response.status_code, 403)
@ddt.data(
('post', 'password_change_request', []),
)
@ddt.unpack
def test_require_http_method(self, correct_method, url_name, args):
wrong_methods = {'get', 'put', 'post', 'head', 'options', 'delete'} - {correct_method}
url = reverse(url_name, args=args)
for method in wrong_methods:
response = getattr(self.client, method)(url)
self.assertEqual(response.status_code, 405)
def _change_password(self, email=None):
"""Request to change the user's password. """
data = {}
if email:
data['email'] = email
return self.client.post(path=reverse('password_change_request'), data=data)
@ddt.ddt
class StudentAccountLoginAndRegistrationTest(ThirdPartyAuthTestMixin, UrlResetMixin, ModuleStoreTestCase):
""" Tests for the student account views that update the user's account information. """
USERNAME = "bob"
EMAIL = "bob@example.com"
PASSWORD = "password"
@mock.patch.dict(settings.FEATURES, {'EMBARGO': True})
def setUp(self):
super(StudentAccountLoginAndRegistrationTest, self).setUp('embargo')
# For these tests, two third party auth providers are enabled by default:
self.configure_google_provider(enabled=True)
self.configure_facebook_provider(enabled=True)
@ddt.data(
("signin_user", "login"),
("register_user", "register"),
)
@ddt.unpack
def test_login_and_registration_form(self, url_name, initial_mode):
response = self.client.get(reverse(url_name))
expected_data = '"initial_mode": "{mode}"'.format(mode=initial_mode)
self.assertContains(response, expected_data)
@ddt.data("signin_user", "register_user")
def test_login_and_registration_form_already_authenticated(self, url_name):
# Create/activate a new account and log in
activation_key = create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
activate_account(activation_key)
result = self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.assertTrue(result)
# Verify that we're redirected to the dashboard
response = self.client.get(reverse(url_name))
self.assertRedirects(response, reverse("dashboard"))
@ddt.data(
(False, "signin_user"),
(False, "register_user"),
(True, "signin_user"),
(True, "register_user"),
)
@ddt.unpack
def test_login_and_registration_form_signin_preserves_params(self, is_edx_domain, url_name):
params = [
('course_id', 'edX/DemoX/Demo_Course'),
('enrollment_action', 'enroll'),
]
# The response should have a "Sign In" button with the URL
# that preserves the querystring params
with mock.patch.dict(settings.FEATURES, {'IS_EDX_DOMAIN': is_edx_domain}):
response = self.client.get(reverse(url_name), params)
expected_url = '/login?{}'.format(self._finish_auth_url_param(params + [('next', '/dashboard')]))
self.assertContains(response, expected_url)
# Add additional parameters:
params = [
('course_id', 'edX/DemoX/Demo_Course'),
('enrollment_action', 'enroll'),
('course_mode', 'honor'),
('email_opt_in', 'true'),
('next', '/custom/final/destination')
]
# Verify that this parameter is also preserved
with mock.patch.dict(settings.FEATURES, {'IS_EDX_DOMAIN': is_edx_domain}):
response = self.client.get(reverse(url_name), params)
expected_url = '/login?{}'.format(self._finish_auth_url_param(params))
self.assertContains(response, expected_url)
@mock.patch.dict(settings.FEATURES, {"ENABLE_THIRD_PARTY_AUTH": False})
@ddt.data("signin_user", "register_user")
def test_third_party_auth_disabled(self, url_name):
response = self.client.get(reverse(url_name))
self._assert_third_party_auth_data(response, None, None, [])
@ddt.data(
("signin_user", None, None),
("register_user", None, None),
("signin_user", "google-oauth2", "Google"),
("register_user", "google-oauth2", "Google"),
("signin_user", "facebook", "Facebook"),
("register_user", "facebook", "Facebook"),
)
@ddt.unpack
def test_third_party_auth(self, url_name, current_backend, current_provider):
params = [
('course_id', 'course-v1:Org+Course+Run'),
('enrollment_action', 'enroll'),
('course_mode', 'honor'),
('email_opt_in', 'true'),
('next', '/custom/final/destination'),
]
# Simulate a running pipeline
if current_backend is not None:
pipeline_target = "student_account.views.third_party_auth.pipeline"
with simulate_running_pipeline(pipeline_target, current_backend):
response = self.client.get(reverse(url_name), params)
# Do NOT simulate a running pipeline
else:
response = self.client.get(reverse(url_name), params)
# This relies on the THIRD_PARTY_AUTH configuration in the test settings
expected_providers = [
{
"id": "oa2-facebook",
"name": "Facebook",
"iconClass": "fa-facebook",
"loginUrl": self._third_party_login_url("facebook", "login", params),
"registerUrl": self._third_party_login_url("facebook", "register", params)
},
{
"id": "oa2-google-oauth2",
"name": "Google",
"iconClass": "fa-google-plus",
"loginUrl": self._third_party_login_url("google-oauth2", "login", params),
"registerUrl": self._third_party_login_url("google-oauth2", "register", params)
}
]
self._assert_third_party_auth_data(response, current_backend, current_provider, expected_providers)
def test_hinted_login(self):
params = [("next", "/courses/something/?tpa_hint=oa2-google-oauth2")]
response = self.client.get(reverse('signin_user'), params)
self.assertContains(response, '"third_party_auth_hint": "oa2-google-oauth2"')
@override_settings(SITE_NAME=settings.MICROSITE_TEST_HOSTNAME)
def test_microsite_uses_old_login_page(self):
# Retrieve the login page from a microsite domain
# and verify that we're served the old page.
resp = self.client.get(
reverse("signin_user"),
HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
)
self.assertContains(resp, "Log into your Test Microsite Account")
self.assertContains(resp, "login-form")
def test_microsite_uses_old_register_page(self):
# Retrieve the register page from a microsite domain
# and verify that we're served the old page.
resp = self.client.get(
reverse("register_user"),
HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
)
self.assertContains(resp, "Register for Test Microsite")
self.assertContains(resp, "register-form")
def _assert_third_party_auth_data(self, response, current_backend, current_provider, providers):
"""Verify that third party auth info is rendered correctly in a DOM data attribute. """
finish_auth_url = None
if current_backend:
finish_auth_url = reverse("social:complete", kwargs={"backend": current_backend}) + "?"
auth_info = {
"currentProvider": current_provider,
"providers": providers,
"secondaryProviders": [],
"finishAuthUrl": finish_auth_url,
"errorMessage": None,
}
auth_info = json.dumps(auth_info, cls=EscapedEdxJSONEncoder)
expected_data = '"third_party_auth": {auth_info}'.format(
auth_info=auth_info
)
self.assertContains(response, expected_data)
def _third_party_login_url(self, backend_name, auth_entry, login_params):
"""Construct the login URL to start third party authentication. """
return u"{url}?auth_entry={auth_entry}&{param_str}".format(
url=reverse("social:begin", kwargs={"backend": backend_name}),
auth_entry=auth_entry,
param_str=self._finish_auth_url_param(login_params),
)
def _finish_auth_url_param(self, params):
"""
Make the next=... URL parameter that indicates where the user should go next.
>>> _finish_auth_url_param([('next', '/dashboard')])
'/account/finish_auth?next=%2Fdashboard'
"""
return urlencode({
'next': '/account/finish_auth?{}'.format(urlencode(params))
})
class AccountSettingsViewTest(ThirdPartyAuthTestMixin, TestCase):
""" Tests for the account settings view. """
USERNAME = 'student'
PASSWORD = 'password'
FIELDS = [
'country',
'gender',
'language',
'level_of_education',
'password',
'year_of_birth',
'preferred_language',
]
@mock.patch("django.conf.settings.MESSAGE_STORAGE", 'django.contrib.messages.storage.cookie.CookieStorage')
def setUp(self):
super(AccountSettingsViewTest, self).setUp()
self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.request = RequestFactory()
self.request.user = self.user
# For these tests, two third party auth providers are enabled by default:
self.configure_google_provider(enabled=True)
self.configure_facebook_provider(enabled=True)
# Python-social saves auth failure notifications in Django messages.
# See pipeline.get_duplicate_provider() for details.
self.request.COOKIES = {}
MessageMiddleware().process_request(self.request)
messages.error(self.request, 'Facebook is already in use.', extra_tags='Auth facebook')
def test_context(self):
context = account_settings_context(self.request)
user_accounts_api_url = reverse("accounts_api", kwargs={'username': self.user.username})
self.assertEqual(context['user_accounts_api_url'], user_accounts_api_url)
user_preferences_api_url = reverse('preferences_api', kwargs={'username': self.user.username})
self.assertEqual(context['user_preferences_api_url'], user_preferences_api_url)
for attribute in self.FIELDS:
self.assertIn(attribute, context['fields'])
self.assertEqual(
context['user_accounts_api_url'], reverse("accounts_api", kwargs={'username': self.user.username})
)
self.assertEqual(
context['user_preferences_api_url'], reverse('preferences_api', kwargs={'username': self.user.username})
)
self.assertEqual(context['duplicate_provider'], 'facebook')
self.assertEqual(context['auth']['providers'][0]['name'], 'Facebook')
self.assertEqual(context['auth']['providers'][1]['name'], 'Google')
def test_view(self):
view_path = reverse('account_settings')
response = self.client.get(path=view_path)
for attribute in self.FIELDS:
self.assertIn(attribute, response.content)
@override_settings(SITE_NAME=settings.MICROSITE_LOGISTRATION_HOSTNAME)
class MicrositeLogistrationTests(TestCase):
"""
Test to validate that microsites can display the logistration page
"""
def test_login_page(self):
"""
Make sure that we get the expected logistration page on our specialized
microsite
"""
resp = self.client.get(
reverse('signin_user'),
HTTP_HOST=settings.MICROSITE_LOGISTRATION_HOSTNAME
)
self.assertEqual(resp.status_code, 200)
self.assertIn('<div id="login-and-registration-container"', resp.content)
def test_registration_page(self):
"""
Make sure that we get the expected logistration page on our specialized
microsite
"""
resp = self.client.get(
reverse('register_user'),
HTTP_HOST=settings.MICROSITE_LOGISTRATION_HOSTNAME
)
self.assertEqual(resp.status_code, 200)
self.assertIn('<div id="login-and-registration-container"', resp.content)
@override_settings(SITE_NAME=settings.MICROSITE_TEST_HOSTNAME)
def test_no_override(self):
"""
Make sure we get the old style login/registration if we don't override
"""
resp = self.client.get(
reverse('signin_user'),
HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('<div id="login-and-registration-container"', resp.content)
resp = self.client.get(
reverse('register_user'),
HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('<div id="login-and-registration-container"', resp.content)
| rismalrv/edx-platform | lms/djangoapps/student_account/test/test_views.py | Python | agpl-3.0 | 20,128 | [
"VisIt"
] | 0f6257ff0837fb4bcfb5e6ca2f42cb075f003cfbe12170ecd4233950f120c905 |
# -*- coding: utf-8 -*-
"""
functions.py - Miscellaneous functions with no other home
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more information.
"""
from __future__ import division
import warnings
import numpy as np
import decimal, re
import ctypes
import sys, struct
from .python2_3 import asUnicode, basestring
from .Qt import QtGui, QtCore, QT_LIB
from . import getConfigOption, setConfigOptions
from . import debug, reload
from .metaarray import MetaArray
Colors = {
'b': QtGui.QColor(0,0,255,255),
'g': QtGui.QColor(0,255,0,255),
'r': QtGui.QColor(255,0,0,255),
'c': QtGui.QColor(0,255,255,255),
'm': QtGui.QColor(255,0,255,255),
'y': QtGui.QColor(255,255,0,255),
'k': QtGui.QColor(0,0,0,255),
'w': QtGui.QColor(255,255,255,255),
'd': QtGui.QColor(150,150,150,255),
'l': QtGui.QColor(200,200,200,255),
's': QtGui.QColor(100,100,150,255),
}
SI_PREFIXES = asUnicode('yzafpnµm kMGTPEZY')
SI_PREFIXES_ASCII = 'yzafpnum kMGTPEZY'
SI_PREFIX_EXPONENTS = dict([(SI_PREFIXES[i], (i-8)*3) for i in range(len(SI_PREFIXES))])
SI_PREFIX_EXPONENTS['u'] = -6
FLOAT_REGEX = re.compile(r'(?P<number>[+-]?((\d+(\.\d*)?)|(\d*\.\d+))([eE][+-]?\d+)?)\s*((?P<siPrefix>[u' + SI_PREFIXES + r']?)(?P<suffix>\w.*))?$')
INT_REGEX = re.compile(r'(?P<number>[+-]?\d+)\s*(?P<siPrefix>[u' + SI_PREFIXES + r']?)(?P<suffix>.*)$')
def siScale(x, minVal=1e-25, allowUnicode=True):
"""
Return the recommended scale factor and SI prefix string for x.
Example::
siScale(0.0001) # returns (1e6, 'μ')
# This indicates that the number 0.0001 is best represented as 0.0001 * 1e6 = 100 μUnits
"""
if isinstance(x, decimal.Decimal):
x = float(x)
try:
if np.isnan(x) or np.isinf(x):
return(1, '')
except:
print(x, type(x))
raise
if abs(x) < minVal:
m = 0
x = 0
else:
m = int(np.clip(np.floor(np.log(abs(x))/np.log(1000)), -9.0, 9.0))
if m == 0:
pref = ''
elif m < -8 or m > 8:
pref = 'e%d' % (m*3)
else:
if allowUnicode:
pref = SI_PREFIXES[m+8]
else:
pref = SI_PREFIXES_ASCII[m+8]
p = .001**m
return (p, pref)
def siFormat(x, precision=3, suffix='', space=True, error=None, minVal=1e-25, allowUnicode=True):
"""
Return the number x formatted in engineering notation with SI prefix.
Example::
siFormat(0.0001, suffix='V') # returns "100 μV"
"""
if space is True:
space = ' '
if space is False:
space = ''
(p, pref) = siScale(x, minVal, allowUnicode)
if not (len(pref) > 0 and pref[0] == 'e'):
pref = space + pref
if error is None:
fmt = "%." + str(precision) + "g%s%s"
return fmt % (x*p, pref, suffix)
else:
if allowUnicode:
plusminus = space + asUnicode("±") + space
else:
plusminus = " +/- "
fmt = "%." + str(precision) + "g%s%s%s%s"
return fmt % (x*p, pref, suffix, plusminus, siFormat(error, precision=precision, suffix=suffix, space=space, minVal=minVal))
def siParse(s, regex=FLOAT_REGEX, suffix=None):
"""Convert a value written in SI notation to a tuple (number, si_prefix, suffix).
Example::
siParse('100 μV") # returns ('100', 'μ', 'V')
"""
s = asUnicode(s)
s = s.strip()
if suffix is not None and len(suffix) > 0:
if s[-len(suffix):] != suffix:
raise ValueError("String '%s' does not have the expected suffix '%s'" % (s, suffix))
s = s[:-len(suffix)] + 'X' # add a fake suffix so the regex still picks up the si prefix
m = regex.match(s)
if m is None:
raise ValueError('Cannot parse number "%s"' % s)
try:
sip = m.group('siPrefix')
except IndexError:
sip = ''
if suffix is None:
try:
suf = m.group('suffix')
except IndexError:
suf = ''
else:
suf = suffix
return m.group('number'), '' if sip is None else sip, '' if suf is None else suf
def siEval(s, typ=float, regex=FLOAT_REGEX, suffix=None):
"""
Convert a value written in SI notation to its equivalent prefixless value.
Example::
siEval("100 μV") # returns 0.0001
"""
val, siprefix, suffix = siParse(s, regex, suffix=suffix)
v = typ(val)
return siApply(v, siprefix)
def siApply(val, siprefix):
"""
"""
n = SI_PREFIX_EXPONENTS[siprefix] if siprefix != '' else 0
if n > 0:
return val * 10**n
elif n < 0:
# this case makes it possible to use Decimal objects here
return val / 10**-n
else:
return val
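# Examples (added comment): siApply(2.0, 'k') returns 2000.0 and
# siApply(2.0, 'm') returns 0.002, since 'k' and 'm' map to exponents
# +3 and -3 in SI_PREFIX_EXPONENTS.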
class Color(QtGui.QColor):
def __init__(self, *args):
QtGui.QColor.__init__(self, mkColor(*args))
def glColor(self):
"""Return (r,g,b,a) normalized for use in opengl"""
return (self.red()/255., self.green()/255., self.blue()/255., self.alpha()/255.)
def __getitem__(self, ind):
return (self.red, self.green, self.blue, self.alpha)[ind]()
def mkColor(*args):
"""
Convenience function for constructing QColor from a variety of argument types. Accepted arguments are:
================ ================================================
'c' one of: r, g, b, c, m, y, k, w
R, G, B, [A] integers 0-255
(R, G, B, [A]) tuple of integers 0-255
float greyscale, 0.0-1.0
int see :func:`intColor() <pyqtgraph.intColor>`
(int, hues) see :func:`intColor() <pyqtgraph.intColor>`
"RGB" hexadecimal strings; may begin with '#'
"RGBA"
"RRGGBB"
"RRGGBBAA"
QColor QColor instance; makes a copy.
================ ================================================
"""
err = 'Not sure how to make a color from "%s"' % str(args)
if len(args) == 1:
if isinstance(args[0], basestring):
c = args[0]
if c[0] == '#':
c = c[1:]
if len(c) == 1:
try:
return Colors[c]
except KeyError:
raise ValueError('No color named "%s"' % c)
if len(c) == 3:
r = int(c[0]*2, 16)
g = int(c[1]*2, 16)
b = int(c[2]*2, 16)
a = 255
elif len(c) == 4:
r = int(c[0]*2, 16)
g = int(c[1]*2, 16)
b = int(c[2]*2, 16)
a = int(c[3]*2, 16)
elif len(c) == 6:
r = int(c[0:2], 16)
g = int(c[2:4], 16)
b = int(c[4:6], 16)
a = 255
elif len(c) == 8:
r = int(c[0:2], 16)
g = int(c[2:4], 16)
b = int(c[4:6], 16)
a = int(c[6:8], 16)
elif isinstance(args[0], QtGui.QColor):
return QtGui.QColor(args[0])
elif isinstance(args[0], float):
r = g = b = int(args[0] * 255)
a = 255
elif hasattr(args[0], '__len__'):
if len(args[0]) == 3:
(r, g, b) = args[0]
a = 255
elif len(args[0]) == 4:
(r, g, b, a) = args[0]
elif len(args[0]) == 2:
return intColor(*args[0])
else:
raise TypeError(err)
elif type(args[0]) == int:
return intColor(args[0])
else:
raise TypeError(err)
elif len(args) == 3:
(r, g, b) = args
a = 255
elif len(args) == 4:
(r, g, b, a) = args
else:
raise TypeError(err)
args = [r,g,b,a]
args = [0 if np.isnan(a) or np.isinf(a) else a for a in args]
args = list(map(int, args))
return QtGui.QColor(*args)
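# Illustrative examples for mkColor (added comments, not in the original
# source):
#
#     mkColor('r')              # single-letter named color
#     mkColor('#F80')           # "RGB" hex string (leading '#' is optional)
#     mkColor((255, 128, 0))    # (R, G, B) tuple; alpha defaults to 255
#     mkColor(0.5)              # float -> mid grey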
def mkBrush(*args, **kwds):
"""
| Convenience function for constructing Brush.
| This function always constructs a solid brush and accepts the same arguments as :func:`mkColor() <pyqtgraph.mkColor>`
| Calling mkBrush(None) returns an invisible brush.
"""
if 'color' in kwds:
color = kwds['color']
elif len(args) == 1:
arg = args[0]
if arg is None:
return QtGui.QBrush(QtCore.Qt.NoBrush)
elif isinstance(arg, QtGui.QBrush):
return QtGui.QBrush(arg)
else:
color = arg
elif len(args) > 1:
color = args
return QtGui.QBrush(mkColor(color))
def mkPen(*args, **kargs):
"""
Convenience function for constructing QPen.
Examples::
mkPen(color)
mkPen(color, width=2)
mkPen(cosmetic=False, width=4.5, color='r')
mkPen({'color': "FF0", 'width': 2})
mkPen(None) # (no pen)
In these examples, *color* may be replaced with any arguments accepted by :func:`mkColor() <pyqtgraph.mkColor>` """
color = kargs.get('color', None)
width = kargs.get('width', 1)
style = kargs.get('style', None)
dash = kargs.get('dash', None)
cosmetic = kargs.get('cosmetic', True)
hsv = kargs.get('hsv', None)
if len(args) == 1:
arg = args[0]
if isinstance(arg, dict):
return mkPen(**arg)
if isinstance(arg, QtGui.QPen):
return QtGui.QPen(arg) ## return a copy of this pen
elif arg is None:
style = QtCore.Qt.NoPen
else:
color = arg
if len(args) > 1:
color = args
if color is None:
color = mkColor('l')
if hsv is not None:
color = hsvColor(*hsv)
else:
color = mkColor(color)
pen = QtGui.QPen(QtGui.QBrush(color), width)
pen.setCosmetic(cosmetic)
if style is not None:
pen.setStyle(style)
if dash is not None:
pen.setDashPattern(dash)
return pen
def hsvColor(hue, sat=1.0, val=1.0, alpha=1.0):
"""Generate a QColor from HSVa values. (all arguments are float 0.0-1.0)"""
c = QtGui.QColor()
c.setHsvF(hue, sat, val, alpha)
return c
def colorTuple(c):
"""Return a tuple (R,G,B,A) from a QColor"""
return (c.red(), c.green(), c.blue(), c.alpha())
def colorStr(c):
"""Generate a hex string code from a QColor"""
return ('%02x'*4) % colorTuple(c)
def intColor(index, hues=9, values=1, maxValue=255, minValue=150, maxHue=360, minHue=0, sat=255, alpha=255):
"""
Creates a QColor from a single index. Useful for stepping through a predefined list of colors.
The argument *index* determines which color from the set will be returned. All other arguments determine what the set of predefined colors will be.
Colors are chosen by cycling across hues while varying the value (brightness).
By default, this selects from a list of 9 hues."""
hues = int(hues)
values = int(values)
ind = int(index) % (hues * values)
indh = ind % hues
indv = ind // hues
if values > 1:
v = minValue + indv * ((maxValue-minValue) / (values-1))
else:
v = maxValue
h = minHue + (indh * (maxHue-minHue)) / hues
c = QtGui.QColor()
c.setHsv(h, sat, v)
c.setAlpha(alpha)
return c
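# Illustrative note (added comment): with the defaults hues=9 and values=1,
# intColor(0) .. intColor(8) step through nine evenly spaced hues at full
# brightness, and intColor(9) wraps around to the same hue as intColor(0).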
def glColor(*args, **kargs):
"""
Convert a color to OpenGL color format (r,g,b,a) floats 0.0-1.0
Accepts same arguments as :func:`mkColor <pyqtgraph.mkColor>`.
"""
c = mkColor(*args, **kargs)
return (c.red()/255., c.green()/255., c.blue()/255., c.alpha()/255.)
def makeArrowPath(headLen=20, tipAngle=20, tailLen=20, tailWidth=3, baseAngle=0, headWidth=None):
"""
Construct a path outlining an arrow with the given dimensions.
The arrow points in the -x direction with tip positioned at 0,0.
If *tipAngle* is supplied (in degrees), it overrides *headWidth*.
If *tailLen* is None, no tail will be drawn.
"""
if tipAngle is not None:
headWidth = headLen * np.tan(tipAngle * 0.5 * np.pi/180.)
path = QtGui.QPainterPath()
path.moveTo(0,0)
path.lineTo(headLen, -headWidth)
if tailLen is None:
innerY = headLen - headWidth * np.tan(baseAngle*np.pi/180.)
path.lineTo(innerY, 0)
else:
tailWidth *= 0.5
innerY = headLen - (headWidth-tailWidth) * np.tan(baseAngle*np.pi/180.)
path.lineTo(innerY, -tailWidth)
path.lineTo(headLen + tailLen, -tailWidth)
path.lineTo(headLen + tailLen, tailWidth)
path.lineTo(innerY, tailWidth)
path.lineTo(headLen, headWidth)
path.lineTo(0,0)
return path
def eq(a, b):
"""The great missing equivalence function: Guaranteed evaluation to a single bool value.
This function has some important differences from the == operator:
1. Returns True if a IS b, even if a==b still evaluates to False, such as with nan values.
2. Tests for equivalence using ==, but silently ignores some common exceptions that can occur
(AtrtibuteError, ValueError).
3. When comparing arrays, returns False if the array shapes are not the same.
4. When comparing arrays of the same shape, returns True only if all elements are equal (whereas
the == operator would return a boolean array).
"""
if a is b:
return True
# Avoid comparing large arrays against scalars; this is expensive and we know it should return False.
aIsArr = isinstance(a, (np.ndarray, MetaArray))
bIsArr = isinstance(b, (np.ndarray, MetaArray))
if (aIsArr or bIsArr) and type(a) != type(b):
return False
# If both inputs are arrays, we can speed up comparison if shapes / dtypes don't match
# NOTE: arrays of dissimilar type should be considered unequal even if they are numerically
# equal because they may behave differently when computed on.
if aIsArr and bIsArr and (a.shape != b.shape or a.dtype != b.dtype):
return False
# Test for equivalence.
# If the test raises a recognized exception, then return False
try:
try:
# Sometimes running catch_warnings(module=np) generates AttributeError ???
catcher = warnings.catch_warnings(module=np) # ignore numpy futurewarning (numpy v. 1.10)
catcher.__enter__()
except Exception:
catcher = None
e = a==b
except (ValueError, AttributeError):
return False
except:
print('failed to evaluate equivalence for:')
print(" a:", str(type(a)), str(a))
print(" b:", str(type(b)), str(b))
raise
finally:
if catcher is not None:
catcher.__exit__(None, None, None)
t = type(e)
if t is bool:
return e
elif t is np.bool_:
return bool(e)
elif isinstance(e, np.ndarray) or (hasattr(e, 'implements') and e.implements('MetaArray')):
try: ## disaster: if a is an empty array and b is not, then e.all() is True
if a.shape != b.shape:
return False
except:
return False
if (hasattr(e, 'implements') and e.implements('MetaArray')):
return e.asarray().all()
else:
return e.all()
else:
raise Exception("== operator returned type %s" % str(type(e)))
def affineSliceCoords(shape, origin, vectors, axes):
"""Return the array of coordinates used to sample data arrays in affineSlice().
"""
# sanity check
if len(shape) != len(vectors):
raise Exception("shape and vectors must have same length.")
if len(origin) != len(axes):
raise Exception("origin and axes must have same length.")
for v in vectors:
if len(v) != len(axes):
raise Exception("each vector must be same length as axes.")
shape = list(map(np.ceil, shape))
## make sure vectors are arrays
if not isinstance(vectors, np.ndarray):
vectors = np.array(vectors)
if not isinstance(origin, np.ndarray):
origin = np.array(origin)
origin.shape = (len(axes),) + (1,)*len(shape)
## Build array of sample locations.
grid = np.mgrid[tuple([slice(0,x) for x in shape])] ## mesh grid of indexes
x = (grid[np.newaxis,...] * vectors.transpose()[(Ellipsis,) + (np.newaxis,)*len(shape)]).sum(axis=1) ## magic
x += origin
return x
def affineSlice(data, shape, origin, vectors, axes, order=1, returnCoords=False, **kargs):
"""
Take a slice of any orientation through an array. This is useful for extracting sections of multi-dimensional arrays
such as MRI images for viewing as 1D or 2D data.
The slicing axes are arbitrary; they do not need to be orthogonal to the original data or even to each other. It is
possible to use this function to extract arbitrary linear, rectangular, or parallelepiped shapes from within larger
datasets. The original data is interpolated onto a new array of coordinates using interpolateArray if order < 2,
or scipy.ndimage.map_coordinates otherwise.
For a graphical interface to this function, see :func:`ROI.getArrayRegion <pyqtgraph.ROI.getArrayRegion>`
============== ====================================================================================================
**Arguments:**
*data* (ndarray) the original dataset
*shape* the shape of the slice to take (Note the return value may have more dimensions than len(shape))
*origin* the location in the original dataset that will become the origin of the sliced data.
*vectors* list of unit vectors which point in the direction of the slice axes. Each vector must have the same
length as *axes*. If the vectors are not unit length, the result will be scaled relative to the
original data. If the vectors are not orthogonal, the result will be sheared relative to the
original data.
*axes* The axes in the original dataset which correspond to the slice *vectors*
*order* The order of spline interpolation. Default is 1 (linear). See scipy.ndimage.map_coordinates
for more information.
*returnCoords* If True, return a tuple (result, coords) where coords is the array of coordinates used to select
values from the original dataset.
*All extra keyword arguments are passed to scipy.ndimage.map_coordinates.*
--------------------------------------------------------------------------------------------------------------------
============== ====================================================================================================
Note the following must be true:
| len(shape) == len(vectors)
| len(origin) == len(axes) == len(vectors[i])
Example: start with a 4D fMRI data set, take a diagonal-planar slice out of the last 3 axes
* data = array with dims (time, x, y, z) = (100, 40, 40, 40)
* The plane to pull out is perpendicular to the vector (x,y,z) = (1,1,1)
* The origin of the slice will be at (x,y,z) = (40, 0, 0)
* We will slice a 20x20 plane from each timepoint, giving a final shape (100, 20, 20)
The call for this example would look like::
affineSlice(data, shape=(20,20), origin=(40,0,0), vectors=((-1, 1, 0), (-1, 0, 1)), axes=(1,2,3))
"""
x = affineSliceCoords(shape, origin, vectors, axes)
## transpose data so slice axes come first
trAx = list(range(data.ndim))
for ax in axes:
trAx.remove(ax)
tr1 = tuple(axes) + tuple(trAx)
data = data.transpose(tr1)
#print "tr1:", tr1
## dims are now [(slice axes), (other axes)]
if order > 1:
try:
import scipy.ndimage
except ImportError:
raise ImportError("Interpolating with order > 1 requires the scipy.ndimage module, but it could not be imported.")
# iterate manually over unused axes since map_coordinates won't do it for us
extraShape = data.shape[len(axes):]
output = np.empty(tuple(shape) + extraShape, dtype=data.dtype)
for inds in np.ndindex(*extraShape):
ind = (Ellipsis,) + inds
output[ind] = scipy.ndimage.map_coordinates(data[ind], x, order=order, **kargs)
else:
# map_coordinates expects the indexes as the first axis, whereas
# interpolateArray expects indexes at the last axis.
tr = tuple(range(1, x.ndim)) + (0,)
output = interpolateArray(data, x.transpose(tr), order=order)
tr = list(range(output.ndim))
trb = []
for i in range(min(axes)):
ind = tr1.index(i) + (len(shape)-len(axes))
tr.remove(ind)
trb.append(ind)
tr2 = tuple(trb+tr)
## Untranspose array before returning
output = output.transpose(tr2)
if returnCoords:
return (output, x)
else:
return output
def interpolateArray(data, x, default=0.0, order=1):
"""
N-dimensional interpolation similar to scipy.ndimage.map_coordinates.
This function returns linearly-interpolated values sampled from a regular
grid of data. It differs from `ndimage.map_coordinates` by allowing broadcasting
within the input array.
============== ===========================================================================================
**Arguments:**
*data* Array of any shape containing the values to be interpolated.
*x* Array with (shape[-1] <= data.ndim) containing the locations within *data* to interpolate.
(note: the axes for this argument are transposed relative to the same argument for
`ndimage.map_coordinates`).
*default* Value to return for locations in *x* that are outside the bounds of *data*.
*order* Order of interpolation: 0=nearest, 1=linear.
============== ===========================================================================================
Returns array of shape (x.shape[:-1] + data.shape[x.shape[-1]:])
For example, assume we have the following 2D image data::
>>> data = np.array([[1, 2, 4 ],
[10, 20, 40 ],
[100, 200, 400]])
To compute a single interpolated point from this data::
>>> x = np.array([(0.5, 0.5)])
>>> interpolateArray(data, x)
array([ 8.25])
To compute a 1D list of interpolated locations::
>>> x = np.array([(0.5, 0.5),
(1.0, 1.0),
(1.0, 2.0),
(1.5, 0.0)])
>>> interpolateArray(data, x)
array([ 8.25, 20. , 40. , 55. ])
To compute a 2D array of interpolated locations::
>>> x = np.array([[(0.5, 0.5), (1.0, 2.0)],
[(1.0, 1.0), (1.5, 0.0)]])
>>> interpolateArray(data, x)
array([[ 8.25, 40. ],
[ 20. , 55. ]])
..and so on. The *x* argument may have any shape as long as
```x.shape[-1] <= data.ndim```. In the case that
```x.shape[-1] < data.ndim```, then the remaining axes are simply
broadcasted as usual. For example, we can interpolate one location
from an entire row of the data::
>>> x = np.array([[0.5]])
>>> interpolateArray(data, x)
array([[ 5.5, 11. , 22. ]])
This is useful for interpolating from arrays of colors, vertexes, etc.
"""
if order not in (0, 1):
raise ValueError("interpolateArray requires order=0 or 1 (got %s)" % order)
prof = debug.Profiler()
nd = data.ndim
md = x.shape[-1]
if md > nd:
raise TypeError("x.shape[-1] must be less than or equal to data.ndim")
totalMask = np.ones(x.shape[:-1], dtype=bool) # keep track of out-of-bound indexes
if order == 0:
xinds = np.round(x).astype(int) # NOTE: for 0.5 this rounds to the nearest *even* number
for ax in range(md):
mask = (xinds[...,ax] >= 0) & (xinds[...,ax] <= data.shape[ax]-1)
xinds[...,ax][~mask] = 0
# keep track of points that need to be set to default
totalMask &= mask
result = data[tuple([xinds[...,i] for i in range(xinds.shape[-1])])]
elif order == 1:
# First we generate arrays of indexes that are needed to
# extract the data surrounding each point
fields = np.mgrid[(slice(0,order+1),) * md]
xmin = np.floor(x).astype(int)
xmax = xmin + 1
indexes = np.concatenate([xmin[np.newaxis, ...], xmax[np.newaxis, ...]])
fieldInds = []
for ax in range(md):
mask = (xmin[...,ax] >= 0) & (x[...,ax] <= data.shape[ax]-1)
# keep track of points that need to be set to default
totalMask &= mask
# ..and keep track of indexes that are out of bounds
# (note that when x[...,ax] == data.shape[ax], then xmax[...,ax] will be out
# of bounds, but the interpolation will work anyway)
mask &= (xmax[...,ax] < data.shape[ax])
axisIndex = indexes[...,ax][fields[ax]]
axisIndex[axisIndex < 0] = 0
axisIndex[axisIndex >= data.shape[ax]] = 0
fieldInds.append(axisIndex)
prof()
# Get data values surrounding each requested point
fieldData = data[tuple(fieldInds)]
prof()
## Interpolate
s = np.empty((md,) + fieldData.shape, dtype=float)
dx = x - xmin
# reshape fields for arithmetic against dx
for ax in range(md):
f1 = fields[ax].reshape(fields[ax].shape + (1,)*(dx.ndim-1))
sax = f1 * dx[...,ax] + (1-f1) * (1-dx[...,ax])
sax = sax.reshape(sax.shape + (1,) * (s.ndim-1-sax.ndim))
s[ax] = sax
s = np.prod(s, axis=0)
result = fieldData * s
for i in range(md):
result = result.sum(axis=0)
prof()
if totalMask.ndim > 0:
result[~totalMask] = default
else:
if totalMask is False:
result[:] = default
prof()
return result
def subArray(data, offset, shape, stride):
"""
Unpack a sub-array from *data* using the specified offset, shape, and stride.
Note that *stride* is specified in array elements, not bytes.
For example, we have a 2x3 array packed in a 1D array as follows::
data = [_, _, 00, 01, 02, _, 10, 11, 12, _]
Then we can unpack the sub-array with this call::
subArray(data, offset=2, shape=(2, 3), stride=(4, 1))
..which returns::
[[00, 01, 02],
[10, 11, 12]]
This function operates only on the first axis of *data*. So changing
the input in the example above to have shape (10, 7) would cause the
output to have shape (2, 3, 7).
"""
data = np.ascontiguousarray(data)[offset:]
shape = tuple(shape)
extraShape = data.shape[1:]
## Stride (in bytes) of a single step along the first axis of *data*; the
## *stride* argument is expressed in multiples of this (array elements, not bytes).
itemsize = data.strides[0]
strides = tuple(itemsize * s for s in stride) + data.strides[1:]
return np.ndarray(buffer=data, shape=shape+extraShape, strides=strides, dtype=data.dtype)
def transformToArray(tr):
"""
Given a QTransform, return a 3x3 numpy array.
Given a QMatrix4x4, return a 4x4 numpy array.
Example: map an array of x,y coordinates through a transform::
## coordinates to map are (1,5), (2,6), (3,7), and (4,8)
coords = np.array([[1,2,3,4], [5,6,7,8], [1,1,1,1]]) # the extra '1' coordinate is needed for translation to work
## Make an example transform
tr = QtGui.QTransform()
tr.translate(3,4)
tr.scale(2, 0.1)
## convert to array
m = pg.transformToArray(tr)[:2] # ignore the perspective portion of the transformation
## map coordinates through transform
mapped = np.dot(m, coords)
"""
#return np.array([[tr.m11(), tr.m12(), tr.m13()],[tr.m21(), tr.m22(), tr.m23()],[tr.m31(), tr.m32(), tr.m33()]])
## The order of elements given by the method names m11..m33 is misleading--
## It is most common for x,y translation to occupy the positions 1,3 and 2,3 in
## a transformation matrix. However, with QTransform these values appear at m31 and m32.
## So the correct interpretation is transposed:
if isinstance(tr, QtGui.QTransform):
return np.array([[tr.m11(), tr.m21(), tr.m31()], [tr.m12(), tr.m22(), tr.m32()], [tr.m13(), tr.m23(), tr.m33()]])
elif isinstance(tr, QtGui.QMatrix4x4):
return np.array(tr.copyDataTo()).reshape(4,4)
else:
raise Exception("Transform argument must be either QTransform or QMatrix4x4.")
def transformCoordinates(tr, coords, transpose=False):
"""
Map a set of 2D or 3D coordinates through a QTransform or QMatrix4x4.
The shape of coords must be (2,...) or (3,...)
The mapping will _ignore_ any perspective transformations.
For coordinate arrays with ndim=2, this is basically equivalent to matrix multiplication.
Most arrays, however, prefer to put the coordinate axis at the end (eg. shape=(...,3)). To
allow this, use transpose=True.
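A rough usage sketch (the transform and point values below are arbitrary
illustrations, not taken from any real code)::
## map three 2D points through a translation
tr = QtGui.QTransform()
tr.translate(5, 10)
pts = np.array([[0., 1., 2.], [0., 1., 2.]]) # shape (2, N)
mapped = transformCoordinates(tr, pts) # shape (2, N)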
"""
if transpose:
## move last axis to beginning. This transposition will be reversed before returning the mapped coordinates.
coords = coords.transpose((coords.ndim-1,) + tuple(range(0,coords.ndim-1)))
nd = coords.shape[0]
if isinstance(tr, np.ndarray):
m = tr
else:
m = transformToArray(tr)
m = m[:m.shape[0]-1] # remove perspective
## If coords are 3D and tr is 2D, assume no change for Z axis
if m.shape == (2,3) and nd == 3:
m2 = np.zeros((3,4))
m2[:2, :2] = m[:2,:2]
m2[:2, 3] = m[:2,2]
m2[2,2] = 1
m = m2
## if coords are 2D and tr is 3D, ignore Z axis
if m.shape == (3,4) and nd == 2:
m2 = np.empty((2,3))
m2[:,:2] = m[:2,:2]
m2[:,2] = m[:2,3]
m = m2
## reshape tr and coords to prepare for multiplication
m = m.reshape(m.shape + (1,)*(coords.ndim-1))
coords = coords[np.newaxis, ...]
# separate scale/rotate and translation
translate = m[:,-1]
m = m[:, :-1]
## map coordinates and return
mapped = (m*coords).sum(axis=1) ## apply scale/rotate
mapped += translate
if transpose:
## move first axis to end.
mapped = mapped.transpose(tuple(range(1,mapped.ndim)) + (0,))
return mapped
def solve3DTransform(points1, points2):
"""
Find a 3D transformation matrix that maps points1 onto points2.
Points must be specified as either lists of 4 Vectors or
(4, 3) arrays.
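A minimal illustration (hypothetical points; any four non-coplanar points work)::
pts1 = np.array([[0,0,0], [1,0,0], [0,1,0], [0,0,1]], dtype=float)
pts2 = pts1 + np.array([5., 5., 0.]) # pure translation
m = solve3DTransform(pts1, pts2)
## m[:3] maps homogeneous points: np.dot(m[:3], [x, y, z, 1]) -> mapped (x, y, z)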
"""
import numpy.linalg
pts = []
for inp in (points1, points2):
if isinstance(inp, np.ndarray):
A = np.empty((4,4), dtype=float)
A[:,:3] = inp[:,:3]
A[:,3] = 1.0
else:
A = np.array([[inp[i].x(), inp[i].y(), inp[i].z(), 1] for i in range(4)])
pts.append(A)
## solve 3 sets of linear equations to determine transformation matrix elements
matrix = np.zeros((4,4))
for i in range(3):
## solve Ax = B; x is one row of the desired transformation matrix
matrix[i] = numpy.linalg.solve(pts[0], pts[1][:,i])
return matrix
def solveBilinearTransform(points1, points2):
"""
Find a bilinear transformation matrix (2x4) that maps points1 onto points2.
Points must be specified as a list of 4 Vector, Point, QPointF, etc.
To use this matrix to map a point [x,y]::
mapped = np.dot(matrix, [x*y, x, y, 1])
"""
import numpy.linalg
## A is 4 rows (points) x 4 columns (xy, x, y, 1)
## B is 4 rows (points) x 2 columns (x, y)
A = np.array([[points1[i].x()*points1[i].y(), points1[i].x(), points1[i].y(), 1] for i in range(4)])
B = np.array([[points2[i].x(), points2[i].y()] for i in range(4)])
## solve 2 sets of linear equations to determine transformation matrix elements
matrix = np.zeros((2,4))
for i in range(2):
matrix[i] = numpy.linalg.solve(A, B[:,i]) ## solve Ax = B; x is one row of the desired transformation matrix
return matrix
def rescaleData(data, scale, offset, dtype=None, clip=None):
"""Return data rescaled and optionally cast to a new dtype::
data => (data-offset) * scale
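For example (illustrative values only; *data16* is a hypothetical 16-bit array),
the full uint16 range can be mapped onto 0-255 unsigned bytes with::
scaled = rescaleData(data16, scale=255. / (2**16 - 1), offset=0, dtype=np.ubyte)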
"""
if dtype is None:
dtype = data.dtype
else:
dtype = np.dtype(dtype)
try:
if not getConfigOption('useWeave'):
raise Exception('Weave is disabled; falling back to slower version.')
try:
import scipy.weave
except ImportError:
raise Exception('scipy.weave is not importable; falling back to slower version.')
## require native dtype when using weave
if not data.dtype.isnative:
data = data.astype(data.dtype.newbyteorder('='))
if not dtype.isnative:
weaveDtype = dtype.newbyteorder('=')
else:
weaveDtype = dtype
newData = np.empty((data.size,), dtype=weaveDtype)
flat = np.ascontiguousarray(data).reshape(data.size)
size = data.size
code = """
double sc = (double)scale;
double off = (double)offset;
for( int i=0; i<size; i++ ) {
newData[i] = ((double)flat[i] - off) * sc;
}
"""
scipy.weave.inline(code, ['flat', 'newData', 'size', 'offset', 'scale'], compiler='gcc')
if dtype != weaveDtype:
newData = newData.astype(dtype)
data = newData.reshape(data.shape)
except:
if getConfigOption('useWeave'):
if getConfigOption('weaveDebug'):
debug.printExc("Error; disabling weave.")
setConfigOptions(useWeave=False)
#p = np.poly1d([scale, -offset*scale])
#d2 = p(data)
d2 = data - float(offset)
d2 *= scale
# Clip before converting dtype to avoid overflow
if dtype.kind in 'ui':
lim = np.iinfo(dtype)
if clip is None:
# don't let rescale cause integer overflow
d2 = np.clip(d2, lim.min, lim.max)
else:
d2 = np.clip(d2, max(clip[0], lim.min), min(clip[1], lim.max))
else:
if clip is not None:
d2 = np.clip(d2, *clip)
data = d2.astype(dtype)
return data
def applyLookupTable(data, lut):
"""
Uses values in *data* as indexes to select values from *lut*.
The returned data has shape data.shape + lut.shape[1:]
Note: color gradient lookup tables can be generated using GradientWidget.
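A small sketch (the ramp LUT below is only an illustration)::
lut = np.zeros((256, 3), dtype=np.ubyte)
lut[:, 0] = np.arange(256) # simple red ramp
img = np.random.randint(0, 256, size=(64, 64))
rgb = applyLookupTable(img, lut) # shape (64, 64, 3)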
"""
if data.dtype.kind not in ('i', 'u'):
data = data.astype(int)
return np.take(lut, data, axis=0, mode='clip')
def makeRGBA(*args, **kwds):
"""Equivalent to makeARGB(..., useRGBA=True)"""
kwds['useRGBA'] = True
return makeARGB(*args, **kwds)
def makeARGB(data, lut=None, levels=None, scale=None, useRGBA=False):
"""
Convert an array of values into an ARGB array suitable for building QImages,
OpenGL textures, etc.
Returns the ARGB array (unsigned byte) and a boolean indicating whether
there is alpha channel data. This is a two stage process:
1) Rescale the data based on the values in the *levels* argument (min, max).
2) Determine the final output by passing the rescaled values through a
lookup table.
Both stages are optional.
============== ==================================================================================
**Arguments:**
data numpy array of int/float types.
levels List [min, max]; optionally rescale data before converting through the
lookup table. The data is rescaled such that min->0 and max->*scale*::
rescaled = (clip(data, min, max) - min) * (*scale* / (max - min))
It is also possible to use a 2D (N,2) array of values for levels. In this case,
it is assumed that each pair of min,max values in the levels array should be
applied to a different subset of the input data (for example, the input data may
already have RGB values and the levels are used to independently scale each
channel). The use of this feature requires that levels.shape[0] == data.shape[-1].
scale The maximum value to which data will be rescaled before being passed through the
lookup table (or returned if there is no lookup table). By default this will
be set to the length of the lookup table, or 255 if no lookup table is provided.
lut Optional lookup table (array with dtype=ubyte).
Values in data will be converted to color by indexing directly from lut.
The output data shape will be input.shape + lut.shape[1:].
Lookup tables can be built using ColorMap or GradientWidget.
useRGBA If True, the data is returned in RGBA order (useful for building OpenGL textures).
The default is False, which returns in ARGB order for use with QImage
(Note that 'ARGB' is a term used by the Qt documentation; the *actual* order
is BGRA).
============== ==================================================================================
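A typical (illustrative) call converts a 2D float image using explicit levels::
img = np.random.normal(size=(200, 200))
argb, alpha = makeARGB(img, levels=(img.min(), img.max()))
## argb has shape (200, 200, 4), dtype ubyte; alpha is False (no alpha data)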
"""
profile = debug.Profiler()
if data.ndim not in (2, 3):
raise TypeError("data must be 2D or 3D")
if data.ndim == 3 and data.shape[2] > 4:
raise TypeError("data.shape[2] must be <= 4")
if lut is not None and not isinstance(lut, np.ndarray):
lut = np.array(lut)
if levels is None:
# automatically decide levels based on data dtype
if data.dtype.kind == 'u':
levels = np.array([0, 2**(data.itemsize*8)-1])
elif data.dtype.kind == 'i':
s = 2**(data.itemsize*8 - 1)
levels = np.array([-s, s-1])
elif data.dtype.kind == 'b':
levels = np.array([0,1])
else:
raise Exception('levels argument is required for float input types')
if not isinstance(levels, np.ndarray):
levels = np.array(levels)
if levels.ndim == 1:
if levels.shape[0] != 2:
raise Exception('levels argument must have length 2')
elif levels.ndim == 2:
if lut is not None and lut.ndim > 1:
raise Exception('Cannot make ARGB data when both levels and lut have ndim > 2')
if levels.shape != (data.shape[-1], 2):
raise Exception('levels must have shape (data.shape[-1], 2)')
else:
raise Exception("levels argument must be 1D or 2D (got shape=%s)." % repr(levels.shape))
profile()
# Decide on maximum scaled value
if scale is None:
if lut is not None:
scale = lut.shape[0] - 1
else:
scale = 255.
# Decide on the dtype we want after scaling
if lut is None:
dtype = np.ubyte
else:
dtype = np.min_scalar_type(lut.shape[0]-1)
# Apply levels if given
if levels is not None:
if isinstance(levels, np.ndarray) and levels.ndim == 2:
# we are going to rescale each channel independently
if levels.shape[0] != data.shape[-1]:
raise Exception("When rescaling multi-channel data, there must be the same number of levels as channels (data.shape[-1] == levels.shape[0])")
newData = np.empty(data.shape, dtype=int)
for i in range(data.shape[-1]):
minVal, maxVal = levels[i]
if minVal == maxVal:
maxVal += 1e-16
rng = maxVal-minVal
rng = 1 if rng == 0 else rng
newData[...,i] = rescaleData(data[...,i], scale / rng, minVal, dtype=dtype)
data = newData
else:
# Apply level scaling unless it would have no effect on the data
minVal, maxVal = levels
if minVal != 0 or maxVal != scale:
if minVal == maxVal:
maxVal += 1e-16
data = rescaleData(data, scale/(maxVal-minVal), minVal, dtype=dtype)
profile()
# apply LUT if given
if lut is not None:
data = applyLookupTable(data, lut)
else:
if data.dtype is not np.ubyte:
data = np.clip(data, 0, 255).astype(np.ubyte)
profile()
# this will be the final image array
imgData = np.empty(data.shape[:2]+(4,), dtype=np.ubyte)
profile()
# decide channel order
if useRGBA:
order = [0,1,2,3] # array comes out RGBA
else:
order = [2,1,0,3] # for some reason, the colors line up as BGR in the final image.
# copy data into image array
if data.ndim == 2:
# This is tempting:
# imgData[..., :3] = data[..., np.newaxis]
# ..but it turns out this is faster:
for i in range(3):
imgData[..., i] = data
elif data.shape[2] == 1:
for i in range(3):
imgData[..., i] = data[..., 0]
else:
for i in range(0, data.shape[2]):
imgData[..., i] = data[..., order[i]]
profile()
# add opaque alpha channel if needed
if data.ndim == 2 or data.shape[2] == 3:
alpha = False
imgData[..., 3] = 255
else:
alpha = True
profile()
return imgData, alpha
def makeQImage(imgData, alpha=None, copy=True, transpose=True):
"""
Turn an ARGB array into QImage.
By default, the data is copied; changes to the array will not
be reflected in the image. The image will be given a 'data' attribute
pointing to the array which shares its data to prevent python
freeing that memory while the image is in use.
============== ===================================================================
**Arguments:**
imgData Array of data to convert. Must have shape (width, height, 3 or 4)
and dtype=ubyte. The order of values in the 3rd axis must be
(b, g, r, a).
alpha If True, the QImage returned will have format ARGB32. If False,
the format will be RGB32. By default, _alpha_ is True if
array.shape[2] == 4.
copy If True, the data is copied before converting to QImage.
If False, the new QImage points directly to the data in the array.
Note that the array must be contiguous for this to work
(see numpy.ascontiguousarray).
transpose If True (the default), the array x/y axes are transposed before
creating the image. Note that Qt expects the axes to be in
(height, width) order whereas pyqtgraph usually prefers the
opposite.
============== ===================================================================
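A minimal (illustrative) round trip from a float image to a QImage::
img = np.random.normal(size=(200, 200))
argb, alpha = makeARGB(img, levels=(img.min(), img.max()))
qimg = makeQImage(argb, alpha)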
"""
## create QImage from buffer
profile = debug.Profiler()
## If we didn't explicitly specify alpha, check the array shape.
if alpha is None:
alpha = (imgData.shape[2] == 4)
copied = False
if imgData.shape[2] == 3: ## need to make alpha channel (even if alpha==False; QImage requires 32 bpp)
if copy is True:
d2 = np.empty(imgData.shape[:2] + (4,), dtype=imgData.dtype)
d2[:,:,:3] = imgData
d2[:,:,3] = 255
imgData = d2
copied = True
else:
raise Exception('Array has only 3 channels; cannot make QImage without copying.')
if alpha:
imgFormat = QtGui.QImage.Format_ARGB32
else:
imgFormat = QtGui.QImage.Format_RGB32
if transpose:
imgData = imgData.transpose((1, 0, 2)) ## QImage expects the row/column order to be opposite
profile()
if not imgData.flags['C_CONTIGUOUS']:
if copy is False:
extra = ' (try setting transpose=False)' if transpose else ''
raise Exception('Array is not contiguous; cannot make QImage without copying.'+extra)
imgData = np.ascontiguousarray(imgData)
copied = True
if copy is True and copied is False:
imgData = imgData.copy()
if QT_LIB in ['PySide', 'PySide2']:
ch = ctypes.c_char.from_buffer(imgData, 0)
# Bug in PySide + Python 3 causes refcount for image data to be improperly
# incremented, which leads to leaked memory. As a workaround, we manually
# reset the reference count after creating the QImage.
# See: https://bugreports.qt.io/browse/PYSIDE-140
# Get initial reference count (PyObject struct has ob_refcnt as first element)
rcount = ctypes.c_long.from_address(id(ch)).value
img = QtGui.QImage(ch, imgData.shape[1], imgData.shape[0], imgFormat)
if sys.version[0] == '3':
# Reset refcount only on python 3. Technically this would have no effect
# on python 2, but this is a nasty hack, and checking for version here
# helps to mitigate possible unforeseen consequences.
ctypes.c_long.from_address(id(ch)).value = rcount
else:
#addr = ctypes.addressof(ctypes.c_char.from_buffer(imgData, 0))
## PyQt API for QImage changed between 4.9.3 and 4.9.6 (I don't know exactly which version it was)
## So we first attempt the 4.9.6 API, then fall back to 4.9.3
#addr = ctypes.c_char.from_buffer(imgData, 0)
#try:
#img = QtGui.QImage(addr, imgData.shape[1], imgData.shape[0], imgFormat)
#except TypeError:
#addr = ctypes.addressof(addr)
#img = QtGui.QImage(addr, imgData.shape[1], imgData.shape[0], imgFormat)
try:
img = QtGui.QImage(imgData.ctypes.data, imgData.shape[1], imgData.shape[0], imgFormat)
except:
if copy:
# does not leak memory, is not mutable
img = QtGui.QImage(buffer(imgData), imgData.shape[1], imgData.shape[0], imgFormat)
else:
# mutable, but leaks memory
img = QtGui.QImage(memoryview(imgData), imgData.shape[1], imgData.shape[0], imgFormat)
img.data = imgData
return img
#try:
#buf = imgData.data
#except AttributeError: ## happens when image data is non-contiguous
#buf = imgData.data
#profiler()
#qimage = QtGui.QImage(buf, imgData.shape[1], imgData.shape[0], imgFormat)
#profiler()
#qimage.data = imgData
#return qimage
def imageToArray(img, copy=False, transpose=True):
"""
Convert a QImage into numpy array. The image must have format RGB32, ARGB32, or ARGB32_Premultiplied.
By default, the image is not copied; changes made to the array will appear in the QImage as well (beware: if
the QImage is collected before the array, there may be trouble).
The array will have shape (width, height, (b,g,r,a)).
"""
fmt = img.format()
ptr = img.bits()
if QT_LIB in ['PySide', 'PySide2']:
arr = np.frombuffer(ptr, dtype=np.ubyte)
else:
ptr.setsize(img.byteCount())
arr = np.asarray(ptr)
if img.byteCount() != arr.size * arr.itemsize:
# Required for Python 2.6, PyQt 4.10
# If this works on all platforms, then there is no need to use np.asarray..
arr = np.frombuffer(ptr, np.ubyte, img.byteCount())
arr = arr.reshape(img.height(), img.width(), 4)
if fmt == img.Format_RGB32:
arr[...,3] = 255
if copy:
arr = arr.copy()
if transpose:
return arr.transpose((1,0,2))
else:
return arr
def colorToAlpha(data, color):
"""
Given an RGBA image in *data*, convert *color* to be transparent.
*data* must be an array (w, h, 3 or 4) of ubyte values and *color* must be
an array (3) of ubyte values.
This is particularly useful for use with images that have a black or white background.
Algorithm is taken from Gimp's color-to-alpha function in plug-ins/common/colortoalpha.c
Credit:
/*
* Color To Alpha plug-in v1.0 by Seth Burgess, sjburges@gimp.org 1999/05/14
* with algorithm by clahey
*/
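A short sketch (assuming *img* is a (w, h, 3 or 4) ubyte array with a white background)::
white = np.array([255, 255, 255], dtype=np.ubyte)
rgba = colorToAlpha(img, white) # white regions become transparent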
"""
data = data.astype(float)
if data.shape[-1] == 3: ## add alpha channel if needed
d2 = np.empty(data.shape[:2]+(4,), dtype=data.dtype)
d2[...,:3] = data
d2[...,3] = 255
data = d2
color = color.astype(float)
alpha = np.zeros(data.shape[:2]+(3,), dtype=float)
output = data.copy()
for i in [0,1,2]:
d = data[...,i]
c = color[i]
mask = d > c
alpha[...,i][mask] = (d[mask] - c) / (255. - c)
imask = d < c
alpha[...,i][imask] = (c - d[imask]) / c
output[...,3] = alpha.max(axis=2) * 255.
mask = output[...,3] >= 1.0 ## avoid zero division while processing alpha channel
correction = 255. / output[...,3][mask] ## increase value to compensate for decreased alpha
for i in [0,1,2]:
output[...,i][mask] = ((output[...,i][mask]-color[i]) * correction) + color[i]
output[...,3][mask] *= data[...,3][mask] / 255. ## combine computed and previous alpha values
#raise Exception()
return np.clip(output, 0, 255).astype(np.ubyte)
def gaussianFilter(data, sigma):
"""
Drop-in replacement for scipy.ndimage.gaussian_filter.
(note: results are only approximately equal to the output of
gaussian_filter)
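A quick illustration (random data, arbitrary sigma)::
noisy = np.random.normal(size=(100, 100))
smooth = gaussianFilter(noisy, sigma=(3, 3))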
"""
if np.isscalar(sigma):
sigma = (sigma,) * data.ndim
baseline = data.mean()
filtered = data - baseline
for ax in range(data.ndim):
s = sigma[ax]
if s == 0:
continue
# generate 1D gaussian kernel
ksize = int(s * 6)
x = np.arange(-ksize, ksize)
kernel = np.exp(-x**2 / (2*s**2))
kshape = [1,] * data.ndim
kshape[ax] = len(kernel)
kernel = kernel.reshape(kshape)
# convolve as product of FFTs
shape = data.shape[ax] + ksize
scale = 1.0 / (abs(s) * (2*np.pi)**0.5)
filtered = scale * np.fft.irfft(np.fft.rfft(filtered, shape, axis=ax) *
np.fft.rfft(kernel, shape, axis=ax),
axis=ax)
# clip off extra data
sl = [slice(None)] * data.ndim
sl[ax] = slice(filtered.shape[ax]-data.shape[ax],None,None)
filtered = filtered[tuple(sl)]
return filtered + baseline
def downsample(data, n, axis=0, xvals='subsample'):
"""Downsample by averaging points together across axis.
If multiple axes are specified, runs once per axis.
If a metaArray is given, then the axis values can be either subsampled
or downsampled to match.
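A small example (plain ndarray input)::
downsample(np.arange(10.), 2) # -> array([0.5, 2.5, 4.5, 6.5, 8.5])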
"""
ma = None
if (hasattr(data, 'implements') and data.implements('MetaArray')):
ma = data
data = data.view(np.ndarray)
if hasattr(axis, '__len__'):
if not hasattr(n, '__len__'):
n = [n]*len(axis)
for i in range(len(axis)):
data = downsample(data, n[i], axis[i])
return data
if n <= 1:
return data
nPts = int(data.shape[axis] / n)
s = list(data.shape)
s[axis] = nPts
s.insert(axis+1, n)
sl = [slice(None)] * data.ndim
sl[axis] = slice(0, nPts*n)
d1 = data[tuple(sl)]
#print d1.shape, s
d1.shape = tuple(s)
d2 = d1.mean(axis+1)
if ma is None:
return d2
else:
info = ma.infoCopy()
if 'values' in info[axis]:
if xvals == 'subsample':
info[axis]['values'] = info[axis]['values'][::n][:nPts]
elif xvals == 'downsample':
info[axis]['values'] = downsample(info[axis]['values'], n)
return MetaArray(d2, info=info)
def arrayToQPath(x, y, connect='all'):
"""Convert an array of x,y coordinats to QPainterPath as efficiently as possible.
The *connect* argument may be 'all', indicating that each point should be
connected to the next; 'pairs', indicating that each pair of points
should be connected, or an array of int32 values (0 or 1) indicating
connections.
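A short (illustrative) example::
x = np.arange(100.)
path = arrayToQPath(x, np.sin(x), connect='all')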
"""
## Create all vertices in path. The method used below creates a binary format so that all
## vertices can be read in at once. This binary format may change in future versions of Qt,
## so the original (slower) method is left here for emergencies:
#path.moveTo(x[0], y[0])
#if connect == 'all':
#for i in range(1, y.shape[0]):
#path.lineTo(x[i], y[i])
#elif connect == 'pairs':
#for i in range(1, y.shape[0]):
#if i%2 == 0:
#path.lineTo(x[i], y[i])
#else:
#path.moveTo(x[i], y[i])
#elif isinstance(connect, np.ndarray):
#for i in range(1, y.shape[0]):
#if connect[i] == 1:
#path.lineTo(x[i], y[i])
#else:
#path.moveTo(x[i], y[i])
#else:
#raise Exception('connect argument must be "all", "pairs", or array')
## Speed this up using >> operator
## Format is:
## numVerts(i4) 0(i4)
## x(f8) y(f8) 0(i4) <-- 0 means this vertex does not connect
## x(f8) y(f8) 1(i4) <-- 1 means this vertex connects to the previous vertex
## ...
## 0(i4)
##
## All values are big endian--pack using struct.pack('>d') or struct.pack('>i')
path = QtGui.QPainterPath()
#profiler = debug.Profiler()
n = x.shape[0]
# create empty array, pad with extra space on either end
arr = np.empty(n+2, dtype=[('x', '>f8'), ('y', '>f8'), ('c', '>i4')])
# write first two integers
#profiler('allocate empty')
byteview = arr.view(dtype=np.ubyte)
byteview[:12] = 0
byteview.data[12:20] = struct.pack('>ii', n, 0)
#profiler('pack header')
# Fill array with vertex values
arr[1:-1]['x'] = x
arr[1:-1]['y'] = y
# decide which points are connected by lines
if eq(connect, 'all'):
arr[1:-1]['c'] = 1
elif eq(connect, 'pairs'):
arr[1:-1]['c'][::2] = 1
arr[1:-1]['c'][1::2] = 0
elif eq(connect, 'finite'):
arr[1:-1]['c'] = np.isfinite(x) & np.isfinite(y)
elif isinstance(connect, np.ndarray):
arr[1:-1]['c'] = connect
else:
raise Exception('connect argument must be "all", "pairs", "finite", or array')
#profiler('fill array')
# write last 0
lastInd = 20*(n+1)
byteview.data[lastInd:lastInd+4] = struct.pack('>i', 0)
#profiler('footer')
# create datastream object and stream into path
## Avoiding this method because QByteArray(str) leaks memory in PySide
#buf = QtCore.QByteArray(arr.data[12:lastInd+4]) # I think one unnecessary copy happens here
path.strn = byteview.data[12:lastInd+4] # make sure data doesn't run away
try:
buf = QtCore.QByteArray.fromRawData(path.strn)
except TypeError:
buf = QtCore.QByteArray(bytes(path.strn))
#profiler('create buffer')
ds = QtCore.QDataStream(buf)
ds >> path
#profiler('load')
return path
#def isosurface(data, level):
#"""
#Generate isosurface from volumetric data using marching tetrahedra algorithm.
#See Paul Bourke, "Polygonising a Scalar Field Using Tetrahedrons" (http://local.wasp.uwa.edu.au/~pbourke/geometry/polygonise/)
#*data* 3D numpy array of scalar values
#*level* The level at which to generate an isosurface
#"""
#facets = []
### mark everything below the isosurface level
#mask = data < level
#### make eight sub-fields
#fields = np.empty((2,2,2), dtype=object)
#slices = [slice(0,-1), slice(1,None)]
#for i in [0,1]:
#for j in [0,1]:
#for k in [0,1]:
#fields[i,j,k] = mask[slices[i], slices[j], slices[k]]
### split each cell into 6 tetrahedra
### these all have the same 'orienation'; points 1,2,3 circle
### clockwise around point 0
#tetrahedra = [
#[(0,1,0), (1,1,1), (0,1,1), (1,0,1)],
#[(0,1,0), (0,1,1), (0,0,1), (1,0,1)],
#[(0,1,0), (0,0,1), (0,0,0), (1,0,1)],
#[(0,1,0), (0,0,0), (1,0,0), (1,0,1)],
#[(0,1,0), (1,0,0), (1,1,0), (1,0,1)],
#[(0,1,0), (1,1,0), (1,1,1), (1,0,1)]
#]
### each tetrahedron will be assigned an index
### which determines how to generate its facets.
### this structure is:
### facets[index][facet1, facet2, ...]
### where each facet is triangular and its points are each
### interpolated between two points on the tetrahedron
### facet = [(p1a, p1b), (p2a, p2b), (p3a, p3b)]
### facet points always circle clockwise if you are looking
### at them from below the isosurface.
#indexFacets = [
#[], ## all above
#[[(0,1), (0,2), (0,3)]], # 0 below
#[[(1,0), (1,3), (1,2)]], # 1 below
#[[(0,2), (1,3), (1,2)], [(0,2), (0,3), (1,3)]], # 0,1 below
#[[(2,0), (2,1), (2,3)]], # 2 below
#[[(0,3), (1,2), (2,3)], [(0,3), (0,1), (1,2)]], # 0,2 below
#[[(1,0), (2,3), (2,0)], [(1,0), (1,3), (2,3)]], # 1,2 below
#[[(3,0), (3,1), (3,2)]], # 3 above
#[[(3,0), (3,2), (3,1)]], # 3 below
#[[(1,0), (2,0), (2,3)], [(1,0), (2,3), (1,3)]], # 0,3 below
#[[(0,3), (2,3), (1,2)], [(0,3), (1,2), (0,1)]], # 1,3 below
#[[(2,0), (2,3), (2,1)]], # 0,1,3 below
#[[(0,2), (1,2), (1,3)], [(0,2), (1,3), (0,3)]], # 2,3 below
#[[(1,0), (1,2), (1,3)]], # 0,2,3 below
#[[(0,1), (0,3), (0,2)]], # 1,2,3 below
#[] ## all below
#]
#for tet in tetrahedra:
### get the 4 fields for this tetrahedron
#tetFields = [fields[c] for c in tet]
### generate an index for each grid cell
#index = tetFields[0] + tetFields[1]*2 + tetFields[2]*4 + tetFields[3]*8
### add facets
#for i in xrange(index.shape[0]): # data x-axis
#for j in xrange(index.shape[1]): # data y-axis
#for k in xrange(index.shape[2]): # data z-axis
#for f in indexFacets[index[i,j,k]]: # faces to generate for this tet
#pts = []
#for l in [0,1,2]: # points in this face
#p1 = tet[f[l][0]] # tet corner 1
#p2 = tet[f[l][1]] # tet corner 2
#pts.append([(p1[x]+p2[x])*0.5+[i,j,k][x]+0.5 for x in [0,1,2]]) ## interpolate between tet corners
#facets.append(pts)
#return facets
def isocurve(data, level, connected=False, extendToEdge=False, path=False):
"""
Generate isocurve from 2D data using marching squares algorithm.
============== =========================================================
**Arguments:**
data 2D numpy array of scalar values
level The level at which to generate an isocurve
connected If False, return a single long list of point pairs
If True, return multiple long lists of connected point
locations. (This is slower but better for drawing
continuous lines)
extendToEdge If True, extend the curves to reach the exact edges of
the data.
path if True, return a QPainterPath rather than a list of
vertex coordinates. This forces connected=True.
============== =========================================================
This function is SLOW; plenty of room for optimization here.
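A rough usage sketch (random data, arbitrary level)::
data = np.random.normal(size=(50, 50))
lines = isocurve(data, level=0.0, connected=True)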
"""
if path is True:
connected = True
if extendToEdge:
d2 = np.empty((data.shape[0]+2, data.shape[1]+2), dtype=data.dtype)
d2[1:-1, 1:-1] = data
d2[0, 1:-1] = data[0]
d2[-1, 1:-1] = data[-1]
d2[1:-1, 0] = data[:, 0]
d2[1:-1, -1] = data[:, -1]
d2[0,0] = d2[0,1]
d2[0,-1] = d2[1,-1]
d2[-1,0] = d2[-1,1]
d2[-1,-1] = d2[-1,-2]
data = d2
sideTable = [
[],
[0,1],
[1,2],
[0,2],
[0,3],
[1,3],
[0,1,2,3],
[2,3],
[2,3],
[0,1,2,3],
[1,3],
[0,3],
[0,2],
[1,2],
[0,1],
[]
]
edgeKey=[
[(0,1), (0,0)],
[(0,0), (1,0)],
[(1,0), (1,1)],
[(1,1), (0,1)]
]
lines = []
## mark everything below the isosurface level
mask = data < level
### make four sub-fields and compute indexes for grid cells
index = np.zeros([x-1 for x in data.shape], dtype=np.ubyte)
fields = np.empty((2,2), dtype=object)
slices = [slice(0,-1), slice(1,None)]
for i in [0,1]:
for j in [0,1]:
fields[i,j] = mask[slices[i], slices[j]]
#vertIndex = i - 2*j*i + 3*j + 4*k ## this is just to match Bourk's vertex numbering scheme
vertIndex = i+2*j
#print i,j,k," : ", fields[i,j,k], 2**vertIndex
np.add(index, fields[i,j] * 2**vertIndex, out=index, casting='unsafe')
#print index
#print index
## add lines
for i in range(index.shape[0]): # data x-axis
for j in range(index.shape[1]): # data y-axis
sides = sideTable[index[i,j]]
for l in range(0, len(sides), 2): ## faces for this grid cell
edges = sides[l:l+2]
pts = []
for m in [0,1]: # points in this face
p1 = edgeKey[edges[m]][0] # p1, p2 are points at either side of an edge
p2 = edgeKey[edges[m]][1]
v1 = data[i+p1[0], j+p1[1]] # v1 and v2 are the values at p1 and p2
v2 = data[i+p2[0], j+p2[1]]
f = (level-v1) / (v2-v1)
fi = 1.0 - f
p = ( ## interpolate between corners
p1[0]*fi + p2[0]*f + i + 0.5,
p1[1]*fi + p2[1]*f + j + 0.5
)
if extendToEdge:
## check bounds
p = (
min(data.shape[0]-2, max(0, p[0]-1)),
min(data.shape[1]-2, max(0, p[1]-1)),
)
if connected:
gridKey = i + (1 if edges[m]==2 else 0), j + (1 if edges[m]==3 else 0), edges[m]%2
pts.append((p, gridKey)) ## give the actual position and a key identifying the grid location (for connecting segments)
else:
pts.append(p)
lines.append(pts)
if not connected:
return lines
## turn disjoint list of segments into continuous lines
#lines = [[2,5], [5,4], [3,4], [1,3], [6,7], [7,8], [8,6], [11,12], [12,15], [11,13], [13,14]]
#lines = [[(float(a), a), (float(b), b)] for a,b in lines]
points = {} ## maps each point to its connections
for a,b in lines:
if a[1] not in points:
points[a[1]] = []
points[a[1]].append([a,b])
if b[1] not in points:
points[b[1]] = []
points[b[1]].append([b,a])
## rearrange into chains
for k in list(points.keys()):
try:
chains = points[k]
except KeyError: ## already used this point elsewhere
continue
#print "===========", k
for chain in chains:
#print " chain:", chain
x = None
while True:
if x == chain[-1][1]:
break ## nothing left to do on this chain
x = chain[-1][1]
if x == k:
break ## chain has looped; we're done and can ignore the opposite chain
y = chain[-2][1]
connects = points[x]
for conn in connects[:]:
if conn[1][1] != y:
#print " ext:", conn
chain.extend(conn[1:])
#print " del:", x
del points[x]
if chain[0][1] == chain[-1][1]: # looped chain; no need to continue the other direction
chains.pop()
break
## extract point locations
lines = []
for chain in points.values():
if len(chain) == 2:
chain = chain[1][1:][::-1] + chain[0] # join together ends of chain
else:
chain = chain[0]
lines.append([p[0] for p in chain])
if not path:
return lines ## a list of pairs of points
path = QtGui.QPainterPath()
for line in lines:
path.moveTo(*line[0])
for p in line[1:]:
path.lineTo(*p)
return path
def traceImage(image, values, smooth=0.5):
"""
Convert an image to a set of QPainterPath curves.
One curve will be generated for each item in *values*; each curve outlines the area
of the image that is closer to its value than to any others.
If image is RGB or RGBA, then the shape of values should be (nvals, 3/4)
The parameter *smooth* is expressed in pixels.
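A minimal illustration (a hypothetical label image containing the values 0-2)::
img = np.random.randint(0, 3, size=(64, 64)).astype(float)
paths = traceImage(img, values=np.array([0., 1., 2.]))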
"""
try:
import scipy.ndimage as ndi
except ImportError:
raise Exception("traceImage() requires the package scipy.ndimage, but it is not importable.")
if values.ndim == 2:
values = values.T
values = values[np.newaxis, np.newaxis, ...].astype(float)
image = image[..., np.newaxis].astype(float)
diff = np.abs(image-values)
if values.ndim == 4:
diff = diff.sum(axis=2)
labels = np.argmin(diff, axis=2)
paths = []
for i in range(diff.shape[-1]):
d = (labels==i).astype(float)
d = gaussianFilter(d, (smooth, smooth))
lines = isocurve(d, 0.5, connected=True, extendToEdge=True)
path = QtGui.QPainterPath()
for line in lines:
path.moveTo(*line[0])
for p in line[1:]:
path.lineTo(*p)
paths.append(path)
return paths
IsosurfaceDataCache = None
def isosurface(data, level):
"""
Generate isosurface from volumetric data using marching cubes algorithm.
See Paul Bourke, "Polygonising a Scalar Field"
(http://paulbourke.net/geometry/polygonise/)
*data* 3D numpy array of scalar values. Must be contiguous.
*level* The level at which to generate an isosurface
Returns an array of vertex coordinates (Nv, 3) and an array of
per-face vertex indexes (Nf, 3)
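A rough usage sketch (a synthetic spherical volume, arbitrary level)::
x, y, z = np.mgrid[:30, :30, :30]
vol = ((x - 15.)**2 + (y - 15.)**2 + (z - 15.)**2)
verts, faces = isosurface(vol, level=100.)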
"""
## For improvement, see:
##
## Efficient implementation of Marching Cubes' cases with topological guarantees.
## Thomas Lewiner, Helio Lopes, Antonio Wilson Vieira and Geovan Tavares.
## Journal of Graphics Tools 8(2): pp. 1-15 (december 2003)
## Precompute lookup tables on the first run
global IsosurfaceDataCache
if IsosurfaceDataCache is None:
## map from grid cell index to edge index.
## grid cell index tells us which corners are below the isosurface,
## edge index tells us which edges are cut by the isosurface.
## (Data stolen from Bourke; see above.)
edgeTable = np.array([
0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0
], dtype=np.uint16)
## Table of triangles to use for filling each grid cell.
## Each set of three integers tells us which three edges to
## draw a triangle between.
## (Data stolen from Bourke; see above.)
triTable = [
[],
[0, 8, 3],
[0, 1, 9],
[1, 8, 3, 9, 8, 1],
[1, 2, 10],
[0, 8, 3, 1, 2, 10],
[9, 2, 10, 0, 2, 9],
[2, 8, 3, 2, 10, 8, 10, 9, 8],
[3, 11, 2],
[0, 11, 2, 8, 11, 0],
[1, 9, 0, 2, 3, 11],
[1, 11, 2, 1, 9, 11, 9, 8, 11],
[3, 10, 1, 11, 10, 3],
[0, 10, 1, 0, 8, 10, 8, 11, 10],
[3, 9, 0, 3, 11, 9, 11, 10, 9],
[9, 8, 10, 10, 8, 11],
[4, 7, 8],
[4, 3, 0, 7, 3, 4],
[0, 1, 9, 8, 4, 7],
[4, 1, 9, 4, 7, 1, 7, 3, 1],
[1, 2, 10, 8, 4, 7],
[3, 4, 7, 3, 0, 4, 1, 2, 10],
[9, 2, 10, 9, 0, 2, 8, 4, 7],
[2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4],
[8, 4, 7, 3, 11, 2],
[11, 4, 7, 11, 2, 4, 2, 0, 4],
[9, 0, 1, 8, 4, 7, 2, 3, 11],
[4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1],
[3, 10, 1, 3, 11, 10, 7, 8, 4],
[1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4],
[4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3],
[4, 7, 11, 4, 11, 9, 9, 11, 10],
[9, 5, 4],
[9, 5, 4, 0, 8, 3],
[0, 5, 4, 1, 5, 0],
[8, 5, 4, 8, 3, 5, 3, 1, 5],
[1, 2, 10, 9, 5, 4],
[3, 0, 8, 1, 2, 10, 4, 9, 5],
[5, 2, 10, 5, 4, 2, 4, 0, 2],
[2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8],
[9, 5, 4, 2, 3, 11],
[0, 11, 2, 0, 8, 11, 4, 9, 5],
[0, 5, 4, 0, 1, 5, 2, 3, 11],
[2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5],
[10, 3, 11, 10, 1, 3, 9, 5, 4],
[4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10],
[5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3],
[5, 4, 8, 5, 8, 10, 10, 8, 11],
[9, 7, 8, 5, 7, 9],
[9, 3, 0, 9, 5, 3, 5, 7, 3],
[0, 7, 8, 0, 1, 7, 1, 5, 7],
[1, 5, 3, 3, 5, 7],
[9, 7, 8, 9, 5, 7, 10, 1, 2],
[10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3],
[8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2],
[2, 10, 5, 2, 5, 3, 3, 5, 7],
[7, 9, 5, 7, 8, 9, 3, 11, 2],
[9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11],
[2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7],
[11, 2, 1, 11, 1, 7, 7, 1, 5],
[9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11],
[5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0],
[11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0],
[11, 10, 5, 7, 11, 5],
[10, 6, 5],
[0, 8, 3, 5, 10, 6],
[9, 0, 1, 5, 10, 6],
[1, 8, 3, 1, 9, 8, 5, 10, 6],
[1, 6, 5, 2, 6, 1],
[1, 6, 5, 1, 2, 6, 3, 0, 8],
[9, 6, 5, 9, 0, 6, 0, 2, 6],
[5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8],
[2, 3, 11, 10, 6, 5],
[11, 0, 8, 11, 2, 0, 10, 6, 5],
[0, 1, 9, 2, 3, 11, 5, 10, 6],
[5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11],
[6, 3, 11, 6, 5, 3, 5, 1, 3],
[0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6],
[3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9],
[6, 5, 9, 6, 9, 11, 11, 9, 8],
[5, 10, 6, 4, 7, 8],
[4, 3, 0, 4, 7, 3, 6, 5, 10],
[1, 9, 0, 5, 10, 6, 8, 4, 7],
[10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4],
[6, 1, 2, 6, 5, 1, 4, 7, 8],
[1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7],
[8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6],
[7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9],
[3, 11, 2, 7, 8, 4, 10, 6, 5],
[5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11],
[0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6],
[9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6],
[8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6],
[5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11],
[0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7],
[6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9],
[10, 4, 9, 6, 4, 10],
[4, 10, 6, 4, 9, 10, 0, 8, 3],
[10, 0, 1, 10, 6, 0, 6, 4, 0],
[8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10],
[1, 4, 9, 1, 2, 4, 2, 6, 4],
[3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4],
[0, 2, 4, 4, 2, 6],
[8, 3, 2, 8, 2, 4, 4, 2, 6],
[10, 4, 9, 10, 6, 4, 11, 2, 3],
[0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6],
[3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10],
[6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1],
[9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3],
[8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1],
[3, 11, 6, 3, 6, 0, 0, 6, 4],
[6, 4, 8, 11, 6, 8],
[7, 10, 6, 7, 8, 10, 8, 9, 10],
[0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10],
[10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0],
[10, 6, 7, 10, 7, 1, 1, 7, 3],
[1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7],
[2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9],
[7, 8, 0, 7, 0, 6, 6, 0, 2],
[7, 3, 2, 6, 7, 2],
[2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7],
[2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7],
[1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11],
[11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1],
[8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6],
[0, 9, 1, 11, 6, 7],
[7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0],
[7, 11, 6],
[7, 6, 11],
[3, 0, 8, 11, 7, 6],
[0, 1, 9, 11, 7, 6],
[8, 1, 9, 8, 3, 1, 11, 7, 6],
[10, 1, 2, 6, 11, 7],
[1, 2, 10, 3, 0, 8, 6, 11, 7],
[2, 9, 0, 2, 10, 9, 6, 11, 7],
[6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8],
[7, 2, 3, 6, 2, 7],
[7, 0, 8, 7, 6, 0, 6, 2, 0],
[2, 7, 6, 2, 3, 7, 0, 1, 9],
[1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6],
[10, 7, 6, 10, 1, 7, 1, 3, 7],
[10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8],
[0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7],
[7, 6, 10, 7, 10, 8, 8, 10, 9],
[6, 8, 4, 11, 8, 6],
[3, 6, 11, 3, 0, 6, 0, 4, 6],
[8, 6, 11, 8, 4, 6, 9, 0, 1],
[9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6],
[6, 8, 4, 6, 11, 8, 2, 10, 1],
[1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6],
[4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9],
[10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3],
[8, 2, 3, 8, 4, 2, 4, 6, 2],
[0, 4, 2, 4, 6, 2],
[1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8],
[1, 9, 4, 1, 4, 2, 2, 4, 6],
[8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1],
[10, 1, 0, 10, 0, 6, 6, 0, 4],
[4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3],
[10, 9, 4, 6, 10, 4],
[4, 9, 5, 7, 6, 11],
[0, 8, 3, 4, 9, 5, 11, 7, 6],
[5, 0, 1, 5, 4, 0, 7, 6, 11],
[11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5],
[9, 5, 4, 10, 1, 2, 7, 6, 11],
[6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5],
[7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2],
[3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6],
[7, 2, 3, 7, 6, 2, 5, 4, 9],
[9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7],
[3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0],
[6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8],
[9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7],
[1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4],
[4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10],
[7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10],
[6, 9, 5, 6, 11, 9, 11, 8, 9],
[3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5],
[0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11],
[6, 11, 3, 6, 3, 5, 5, 3, 1],
[1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6],
[0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10],
[11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5],
[6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3],
[5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2],
[9, 5, 6, 9, 6, 0, 0, 6, 2],
[1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8],
[1, 5, 6, 2, 1, 6],
[1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6],
[10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0],
[0, 3, 8, 5, 6, 10],
[10, 5, 6],
[11, 5, 10, 7, 5, 11],
[11, 5, 10, 11, 7, 5, 8, 3, 0],
[5, 11, 7, 5, 10, 11, 1, 9, 0],
[10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1],
[11, 1, 2, 11, 7, 1, 7, 5, 1],
[0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11],
[9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7],
[7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2],
[2, 5, 10, 2, 3, 5, 3, 7, 5],
[8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5],
[9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2],
[9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2],
[1, 3, 5, 3, 7, 5],
[0, 8, 7, 0, 7, 1, 1, 7, 5],
[9, 0, 3, 9, 3, 5, 5, 3, 7],
[9, 8, 7, 5, 9, 7],
[5, 8, 4, 5, 10, 8, 10, 11, 8],
[5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0],
[0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5],
[10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4],
[2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8],
[0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11],
[0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5],
[9, 4, 5, 2, 11, 3],
[2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4],
[5, 10, 2, 5, 2, 4, 4, 2, 0],
[3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9],
[5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2],
[8, 4, 5, 8, 5, 3, 3, 5, 1],
[0, 4, 5, 1, 0, 5],
[8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5],
[9, 4, 5],
[4, 11, 7, 4, 9, 11, 9, 10, 11],
[0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11],
[1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11],
[3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4],
[4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2],
[9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3],
[11, 7, 4, 11, 4, 2, 2, 4, 0],
[11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4],
[2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9],
[9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7],
[3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10],
[1, 10, 2, 8, 7, 4],
[4, 9, 1, 4, 1, 7, 7, 1, 3],
[4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1],
[4, 0, 3, 7, 4, 3],
[4, 8, 7],
[9, 10, 8, 10, 11, 8],
[3, 0, 9, 3, 9, 11, 11, 9, 10],
[0, 1, 10, 0, 10, 8, 8, 10, 11],
[3, 1, 10, 11, 3, 10],
[1, 2, 11, 1, 11, 9, 9, 11, 8],
[3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9],
[0, 2, 11, 8, 0, 11],
[3, 2, 11],
[2, 3, 8, 2, 8, 10, 10, 8, 9],
[9, 10, 2, 0, 9, 2],
[2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8],
[1, 10, 2],
[1, 3, 8, 9, 1, 8],
[0, 9, 1],
[0, 3, 8],
[]
]
edgeShifts = np.array([ ## maps edge ID (0-11) to (x,y,z) cell offset and edge ID (0-2)
[0, 0, 0, 0],
[1, 0, 0, 1],
[0, 1, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[1, 0, 1, 1],
[0, 1, 1, 0],
[0, 0, 1, 1],
[0, 0, 0, 2],
[1, 0, 0, 2],
[1, 1, 0, 2],
[0, 1, 0, 2],
#[9, 9, 9, 9] ## fake
], dtype=np.uint16) # don't use ubyte here! This value gets added to cell index later; will need the extra precision.
nTableFaces = np.array([len(f)/3 for f in triTable], dtype=np.ubyte)
faceShiftTables = [None]
for i in range(1,6):
## compute lookup table of index: vertexes mapping
faceTableI = np.zeros((len(triTable), i*3), dtype=np.ubyte)
faceTableInds = np.argwhere(nTableFaces == i)
faceTableI[faceTableInds[:,0]] = np.array([triTable[j[0]] for j in faceTableInds])
faceTableI = faceTableI.reshape((len(triTable), i, 3))
faceShiftTables.append(edgeShifts[faceTableI])
## Let's try something different:
#faceTable = np.empty((256, 5, 3, 4), dtype=np.ubyte) # (grid cell index, faces, vertexes, edge lookup)
#for i,f in enumerate(triTable):
#f = np.array(f + [12] * (15-len(f))).reshape(5,3)
#faceTable[i] = edgeShifts[f]
IsosurfaceDataCache = (faceShiftTables, edgeShifts, edgeTable, nTableFaces)
else:
faceShiftTables, edgeShifts, edgeTable, nTableFaces = IsosurfaceDataCache
# We use strides below, which means we need contiguous array input.
# Ideally we can fix this just by removing the dependency on strides.
if not data.flags['C_CONTIGUOUS']:
raise TypeError("isosurface input data must be c-contiguous.")
## mark everything below the isosurface level
mask = data < level
### make eight sub-fields and compute indexes for grid cells
index = np.zeros([x-1 for x in data.shape], dtype=np.ubyte)
fields = np.empty((2,2,2), dtype=object)
slices = [slice(0,-1), slice(1,None)]
for i in [0,1]:
for j in [0,1]:
for k in [0,1]:
fields[i,j,k] = mask[slices[i], slices[j], slices[k]]
vertIndex = i - 2*j*i + 3*j + 4*k ## this is just to match Bourke's vertex numbering scheme
np.add(index, fields[i,j,k] * 2**vertIndex, out=index, casting='unsafe')
### Generate table of edges that have been cut
cutEdges = np.zeros([x+1 for x in index.shape]+[3], dtype=np.uint32)
edges = edgeTable[index]
for i, shift in enumerate(edgeShifts[:12]):
slices = [slice(shift[j],cutEdges.shape[j]+(shift[j]-1)) for j in range(3)]
cutEdges[slices[0], slices[1], slices[2], shift[3]] += edges & 2**i
## for each cut edge, interpolate to see where exactly the edge is cut and generate vertex positions
m = cutEdges > 0
vertexInds = np.argwhere(m) ## argwhere is slow!
vertexes = vertexInds[:,:3].astype(np.float32)
dataFlat = data.reshape(data.shape[0]*data.shape[1]*data.shape[2])
## re-use the cutEdges array as a lookup table for vertex IDs
cutEdges[vertexInds[:,0], vertexInds[:,1], vertexInds[:,2], vertexInds[:,3]] = np.arange(vertexInds.shape[0])
for i in [0,1,2]:
vim = vertexInds[:,3] == i
vi = vertexInds[vim, :3]
viFlat = (vi * (np.array(data.strides[:3]) // data.itemsize)[np.newaxis,:]).sum(axis=1)
v1 = dataFlat[viFlat]
v2 = dataFlat[viFlat + data.strides[i]//data.itemsize]
vertexes[vim,i] += (level-v1) / (v2-v1)
### compute the set of vertex indexes for each face.
## This works, but runs a bit slower.
#cells = np.argwhere((index != 0) & (index != 255)) ## all cells with at least one face
#cellInds = index[cells[:,0], cells[:,1], cells[:,2]]
#verts = faceTable[cellInds]
#mask = verts[...,0,0] != 9
#verts[...,:3] += cells[:,np.newaxis,np.newaxis,:] ## we now have indexes into cutEdges
#verts = verts[mask]
#faces = cutEdges[verts[...,0], verts[...,1], verts[...,2], verts[...,3]] ## and these are the vertex indexes we want.
## To allow this to be vectorized efficiently, we count the number of faces in each
## grid cell and handle each group of cells with the same number together.
## determine how many faces to assign to each grid cell
nFaces = nTableFaces[index]
totFaces = nFaces.sum()
faces = np.empty((totFaces, 3), dtype=np.uint32)
ptr = 0
#import debug
#p = debug.Profiler()
## this helps speed up an indexing operation later on
cs = np.array(cutEdges.strides)//cutEdges.itemsize
cutEdges = cutEdges.flatten()
## this, strangely, does not seem to help.
#ins = np.array(index.strides)/index.itemsize
#index = index.flatten()
for i in range(1,6):
### expensive:
#profiler()
cells = np.argwhere(nFaces == i) ## all cells which require i faces (argwhere is expensive)
#profiler()
if cells.shape[0] == 0:
continue
cellInds = index[cells[:,0], cells[:,1], cells[:,2]] ## index values of cells to process for this round
#profiler()
### expensive:
verts = faceShiftTables[i][cellInds]
#profiler()
np.add(verts[...,:3], cells[:,np.newaxis,np.newaxis,:], out=verts[...,:3], casting='unsafe') ## we now have indexes into cutEdges
verts = verts.reshape((verts.shape[0]*i,)+verts.shape[2:])
#profiler()
### expensive:
verts = (verts * cs[np.newaxis, np.newaxis, :]).sum(axis=2)
vertInds = cutEdges[verts]
#profiler()
nv = vertInds.shape[0]
#profiler()
faces[ptr:ptr+nv] = vertInds #.reshape((nv, 3))
#profiler()
ptr += nv
return vertexes, faces
def invertQTransform(tr):
"""Return a QTransform that is the inverse of *tr*.
Raises an exception if tr is not invertible.
Note that this function is preferred over QTransform.inverted() due to
bugs in that method. (specifically, Qt has floating-point precision issues
when determining whether a matrix is invertible)
"""
try:
import numpy.linalg
arr = np.array([[tr.m11(), tr.m12(), tr.m13()], [tr.m21(), tr.m22(), tr.m23()], [tr.m31(), tr.m32(), tr.m33()]])
inv = numpy.linalg.inv(arr)
return QtGui.QTransform(inv[0,0], inv[0,1], inv[0,2], inv[1,0], inv[1,1], inv[1,2], inv[2,0], inv[2,1])
except ImportError:
inv = tr.inverted()
if inv[1] is False:
raise Exception("Transform is not invertible.")
return inv[0]
def pseudoScatter(data, spacing=None, shuffle=True, bidir=False):
"""
Used for examining the distribution of values in a set. Produces scattering as in beeswarm or column scatter plots.
Given a list of x-values, construct a set of y-values such that an x,y scatter-plot
will not have overlapping points (it will look similar to a histogram).
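A small illustration (random values, arbitrary spacing)::
vals = np.random.normal(size=100)
y = pseudoScatter(vals, spacing=0.2)
## plotting (vals, y) as points yields a beeswarm-like column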
"""
inds = np.arange(len(data))
if shuffle:
np.random.shuffle(inds)
data = data[inds]
if spacing is None:
spacing = 2.*np.std(data)/len(data)**0.5
s2 = spacing**2
yvals = np.empty(len(data))
if len(data) == 0:
return yvals
yvals[0] = 0
for i in range(1,len(data)):
x = data[i] # current x value to be placed
x0 = data[:i] # all x values already placed
y0 = yvals[:i] # all y values already placed
y = 0
dx = (x0-x)**2 # x-distance to each previous point
xmask = dx < s2 # exclude anything too far away
if xmask.sum() > 0:
if bidir:
dirs = [-1, 1]
else:
dirs = [1]
yopts = []
for direction in dirs:
y = 0
dx2 = dx[xmask]
dy = (s2 - dx2)**0.5
limits = np.empty((2,len(dy))) # ranges of y-values to exclude
limits[0] = y0[xmask] - dy
limits[1] = y0[xmask] + dy
while True:
# ignore anything below this y-value
if direction > 0:
mask = limits[1] >= y
else:
mask = limits[0] <= y
limits2 = limits[:,mask]
# are we inside an excluded region?
mask = (limits2[0] < y) & (limits2[1] > y)
if mask.sum() == 0:
break
if direction > 0:
y = limits2[:,mask].max()
else:
y = limits2[:,mask].min()
yopts.append(y)
if bidir:
y = yopts[0] if -yopts[0] < yopts[1] else yopts[1]
else:
y = yopts[0]
yvals[i] = y
return yvals[np.argsort(inds)] ## un-shuffle values before returning
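# Minimal usage sketch for pseudoScatter (illustrative only; the helper name is
# hypothetical and the plotting call is a commented placeholder -- any scatter-plot
# API could be substituted).
def _pseudoScatterExample():
    data = np.random.normal(size=200)           # x-values to examine
    yvals = pseudoScatter(data, spacing=0.1, bidir=True)
    # plot(data, yvals, pen=None, symbol='o')   # e.g. with a pyqtgraph plot item
    return data, yvals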
def toposort(deps, nodes=None, seen=None, stack=None, depth=0):
"""Topological sort. Arguments are:
deps dictionary describing dependencies where a:[b,c] means "a depends on b and c"
nodes optional, specifies list of starting nodes (these should be the nodes
which are not depended on by any other nodes). Other candidate starting
nodes will be ignored.
Example::
# Sort the following graph:
#
# B ──┬─────> C <── D
# │ │
# E <─┴─> A <─┘
#
deps = {'a': ['b', 'c'], 'c': ['b', 'd'], 'e': ['b']}
toposort(deps)
=> ['b', 'd', 'c', 'a', 'e']
"""
# fill in empty dep lists
deps = deps.copy()
for k,v in list(deps.items()):
for k in v:
if k not in deps:
deps[k] = []
if nodes is None:
## run through deps to find nodes that are not depended upon
rem = set()
for dep in deps.values():
rem |= set(dep)
nodes = set(deps.keys()) - rem
if seen is None:
seen = set()
stack = []
sorted = []
for n in nodes:
if n in stack:
raise Exception("Cyclic dependency detected", stack + [n])
if n in seen:
continue
seen.add(n)
sorted.extend( toposort(deps, deps[n], seen, stack+[n], depth=depth+1))
sorted.append(n)
return sorted
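# Illustrative note on toposort: a cycle that is reachable from one of the starting
# nodes raises an exception, e.g.
#   toposort({'a': ['b'], 'b': ['a'], 'c': ['a']})
# raises Exception("Cyclic dependency detected", ...). A graph consisting only of a
# cycle (so no starting nodes can be found) returns [] instead of raising.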
def disconnect(signal, slot):
"""Disconnect a Qt signal from a slot.
This method augments Qt's Signal.disconnect():
* Return bool indicating whether disconnection was successful, rather than
raising an exception
* Attempt to disconnect prior versions of the slot when using pg.reload
"""
while True:
try:
signal.disconnect(slot)
return True
except (TypeError, RuntimeError):
slot = reload.getPreviousVersion(slot)
if slot is None:
return False
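# Illustrative usage of disconnect (the signal/slot names here are hypothetical):
#   if not disconnect(view.sigRangeChanged, self._onRangeChanged):
#       pass  # nothing was connected; no exception is raised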
class SignalBlock(object):
"""Class used to temporarily block a Qt signal connection::
with SignalBlock(signal, slot):
# do something that emits a signal; it will
# not be delivered to slot
"""
def __init__(self, signal, slot):
self.signal = signal
self.slot = slot
def __enter__(self):
self.reconnect = disconnect(self.signal, self.slot)
return self
def __exit__(self, *args):
if self.reconnect:
self.signal.connect(self.slot)
| pbmanis/acq4 | acq4/pyqtgraph/functions.py | Python | mit | 94,557 | ["Gaussian"] | ab9e06c8727399c2c725f21e489f0368bf6ce3c356b74f57d05789cf944074fb |
# -*- coding: utf-8 -*-
import sys
sys.path[0:0] = [""]
import bson
import os
import pickle
import unittest
import uuid
from datetime import datetime
from bson import DBRef
from tests.fixtures import (PickleEmbedded, PickleTest, PickleSignalsTest,
PickleDyanmicEmbedded, PickleDynamicTest)
from mongoengine import *
from mongoengine.errors import (NotRegistered, InvalidDocumentError,
InvalidQueryError)
from mongoengine.queryset import NULLIFY, Q
from mongoengine.connection import get_db
from mongoengine.base import get_document
from mongoengine.context_managers import switch_db, query_counter
from mongoengine import signals
TEST_IMAGE_PATH = os.path.join(os.path.dirname(__file__),
'../fields/mongoengine.png')
__all__ = ("InstanceTest",)
class InstanceTest(unittest.TestCase):
def setUp(self):
connect(db='mongoenginetest')
self.db = get_db()
class Person(Document):
name = StringField()
age = IntField()
non_field = True
meta = {"allow_inheritance": True}
self.Person = Person
def tearDown(self):
for collection in self.db.collection_names():
if 'system.' in collection:
continue
self.db.drop_collection(collection)
def test_capped_collection(self):
"""Ensure that capped collections work properly.
"""
class Log(Document):
date = DateTimeField(default=datetime.now)
meta = {
'max_documents': 10,
'max_size': 90000,
}
Log.drop_collection()
# Ensure that the collection handles up to its maximum
for _ in range(10):
Log().save()
self.assertEqual(Log.objects.count(), 10)
        # Check that adding extra documents doesn't grow the collection past its cap
Log().save()
self.assertEqual(Log.objects.count(), 10)
options = Log.objects._collection.options()
self.assertEqual(options['capped'], True)
self.assertEqual(options['max'], 10)
self.assertEqual(options['size'], 90000)
# Check that the document cannot be redefined with different options
def recreate_log_document():
class Log(Document):
date = DateTimeField(default=datetime.now)
meta = {
'max_documents': 11,
}
# Create the collection by accessing Document.objects
Log.objects
self.assertRaises(InvalidCollectionError, recreate_log_document)
Log.drop_collection()
def test_repr(self):
"""Ensure that unicode representation works
"""
class Article(Document):
title = StringField()
def __unicode__(self):
return self.title
doc = Article(title=u'привет мир')
self.assertEqual('<Article: привет мир>', repr(doc))
def test_queryset_resurrects_dropped_collection(self):
self.Person.drop_collection()
self.assertEqual([], list(self.Person.objects()))
class Actor(self.Person):
pass
        # Ensure this works correctly with inherited classes
Actor.objects()
self.Person.drop_collection()
self.assertEqual([], list(Actor.objects()))
def test_polymorphic_references(self):
"""Ensure that the correct subclasses are returned from a query when
using references / generic references
"""
class Animal(Document):
meta = {'allow_inheritance': True}
class Fish(Animal): pass
class Mammal(Animal): pass
class Dog(Mammal): pass
class Human(Mammal): pass
class Zoo(Document):
animals = ListField(ReferenceField(Animal))
Zoo.drop_collection()
Animal.drop_collection()
Animal().save()
Fish().save()
Mammal().save()
Dog().save()
Human().save()
# Save a reference to each animal
zoo = Zoo(animals=Animal.objects)
zoo.save()
zoo.reload()
classes = [a.__class__ for a in Zoo.objects.first().animals]
self.assertEqual(classes, [Animal, Fish, Mammal, Dog, Human])
Zoo.drop_collection()
class Zoo(Document):
animals = ListField(GenericReferenceField(Animal))
# Save a reference to each animal
zoo = Zoo(animals=Animal.objects)
zoo.save()
zoo.reload()
classes = [a.__class__ for a in Zoo.objects.first().animals]
self.assertEqual(classes, [Animal, Fish, Mammal, Dog, Human])
Zoo.drop_collection()
Animal.drop_collection()
def test_reference_inheritance(self):
class Stats(Document):
created = DateTimeField(default=datetime.now)
meta = {'allow_inheritance': False}
class CompareStats(Document):
generated = DateTimeField(default=datetime.now)
stats = ListField(ReferenceField(Stats))
Stats.drop_collection()
CompareStats.drop_collection()
list_stats = []
for i in xrange(10):
s = Stats()
s.save()
list_stats.append(s)
cmp_stats = CompareStats(stats=list_stats)
cmp_stats.save()
self.assertEqual(list_stats, CompareStats.objects.first().stats)
def test_db_field_load(self):
"""Ensure we load data correctly
"""
class Person(Document):
name = StringField(required=True)
_rank = StringField(required=False, db_field="rank")
@property
def rank(self):
return self._rank or "Private"
Person.drop_collection()
Person(name="Jack", _rank="Corporal").save()
Person(name="Fred").save()
self.assertEqual(Person.objects.get(name="Jack").rank, "Corporal")
self.assertEqual(Person.objects.get(name="Fred").rank, "Private")
def test_db_embedded_doc_field_load(self):
"""Ensure we load embedded document data correctly
"""
class Rank(EmbeddedDocument):
title = StringField(required=True)
class Person(Document):
name = StringField(required=True)
rank_ = EmbeddedDocumentField(Rank,
required=False,
db_field='rank')
@property
def rank(self):
if self.rank_ is None:
return "Private"
return self.rank_.title
Person.drop_collection()
Person(name="Jack", rank_=Rank(title="Corporal")).save()
Person(name="Fred").save()
self.assertEqual(Person.objects.get(name="Jack").rank, "Corporal")
self.assertEqual(Person.objects.get(name="Fred").rank, "Private")
def test_custom_id_field(self):
"""Ensure that documents may be created with custom primary keys.
"""
class User(Document):
username = StringField(primary_key=True)
name = StringField()
meta = {'allow_inheritance': True}
User.drop_collection()
self.assertEqual(User._fields['username'].db_field, '_id')
self.assertEqual(User._meta['id_field'], 'username')
def create_invalid_user():
User(name='test').save() # no primary key field
self.assertRaises(ValidationError, create_invalid_user)
def define_invalid_user():
class EmailUser(User):
email = StringField(primary_key=True)
self.assertRaises(ValueError, define_invalid_user)
class EmailUser(User):
email = StringField()
user = User(username='test', name='test user')
user.save()
user_obj = User.objects.first()
self.assertEqual(user_obj.id, 'test')
self.assertEqual(user_obj.pk, 'test')
user_son = User.objects._collection.find_one()
self.assertEqual(user_son['_id'], 'test')
self.assertTrue('username' not in user_son['_id'])
User.drop_collection()
user = User(pk='mongo', name='mongo user')
user.save()
user_obj = User.objects.first()
self.assertEqual(user_obj.id, 'mongo')
self.assertEqual(user_obj.pk, 'mongo')
user_son = User.objects._collection.find_one()
self.assertEqual(user_son['_id'], 'mongo')
self.assertTrue('username' not in user_son['_id'])
User.drop_collection()
def test_document_not_registered(self):
class Place(Document):
name = StringField()
meta = {'allow_inheritance': True}
class NicePlace(Place):
pass
Place.drop_collection()
Place(name="London").save()
NicePlace(name="Buckingham Palace").save()
        # Mimic Place and NicePlace being defined in a different file,
        # with the NicePlace model not imported at query time.
from mongoengine.base import _document_registry
del(_document_registry['Place.NicePlace'])
def query_without_importing_nice_place():
print Place.objects.all()
self.assertRaises(NotRegistered, query_without_importing_nice_place)
def test_document_registry_regressions(self):
class Location(Document):
name = StringField()
meta = {'allow_inheritance': True}
class Area(Location):
location = ReferenceField('Location', dbref=True)
Location.drop_collection()
self.assertEqual(Area, get_document("Area"))
self.assertEqual(Area, get_document("Location.Area"))
def test_creation(self):
"""Ensure that document may be created using keyword arguments.
"""
person = self.Person(name="Test User", age=30)
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 30)
def test_to_dbref(self):
"""Ensure that you can get a dbref of a document"""
person = self.Person(name="Test User", age=30)
self.assertRaises(OperationError, person.to_dbref)
person.save()
person.to_dbref()
def test_reload(self):
"""Ensure that attributes may be reloaded.
"""
person = self.Person(name="Test User", age=20)
person.save()
person_obj = self.Person.objects.first()
person_obj.name = "Mr Test User"
person_obj.age = 21
person_obj.save()
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 20)
person.reload()
self.assertEqual(person.name, "Mr Test User")
self.assertEqual(person.age, 21)
def test_reload_sharded(self):
class Animal(Document):
superphylum = StringField()
meta = {'shard_key': ('superphylum',)}
Animal.drop_collection()
doc = Animal(superphylum='Deuterostomia')
doc.save()
doc.reload()
Animal.drop_collection()
def test_reload_referencing(self):
"""Ensures reloading updates weakrefs correctly
"""
class Embedded(EmbeddedDocument):
dict_field = DictField()
list_field = ListField()
class Doc(Document):
dict_field = DictField()
list_field = ListField()
embedded_field = EmbeddedDocumentField(Embedded)
Doc.drop_collection()
doc = Doc()
doc.dict_field = {'hello': 'world'}
doc.list_field = ['1', 2, {'hello': 'world'}]
embedded_1 = Embedded()
embedded_1.dict_field = {'hello': 'world'}
embedded_1.list_field = ['1', 2, {'hello': 'world'}]
doc.embedded_field = embedded_1
doc.save()
doc = doc.reload(10)
doc.list_field.append(1)
doc.dict_field['woot'] = "woot"
doc.embedded_field.list_field.append(1)
doc.embedded_field.dict_field['woot'] = "woot"
self.assertEqual(doc._get_changed_fields(), [
'list_field', 'dict_field', 'embedded_field.list_field',
'embedded_field.dict_field'])
doc.save()
doc = doc.reload(10)
self.assertEqual(doc._get_changed_fields(), [])
self.assertEqual(len(doc.list_field), 4)
self.assertEqual(len(doc.dict_field), 2)
self.assertEqual(len(doc.embedded_field.list_field), 4)
self.assertEqual(len(doc.embedded_field.dict_field), 2)
def test_dictionary_access(self):
"""Ensure that dictionary-style field access works properly.
"""
person = self.Person(name='Test User', age=30)
self.assertEqual(person['name'], 'Test User')
self.assertRaises(KeyError, person.__getitem__, 'salary')
self.assertRaises(KeyError, person.__setitem__, 'salary', 50)
person['name'] = 'Another User'
self.assertEqual(person['name'], 'Another User')
# Length = length(assigned fields + id)
self.assertEqual(len(person), 3)
self.assertTrue('age' in person)
person.age = None
self.assertFalse('age' in person)
self.assertFalse('nationality' in person)
def test_embedded_document_to_mongo(self):
class Person(EmbeddedDocument):
name = StringField()
age = IntField()
meta = {"allow_inheritance": True}
class Employee(Person):
salary = IntField()
self.assertEqual(Person(name="Bob", age=35).to_mongo().keys(),
['_cls', 'name', 'age'])
self.assertEqual(Employee(name="Bob", age=35, salary=0).to_mongo().keys(),
['_cls', 'name', 'age', 'salary'])
def test_embedded_document_to_mongo_id(self):
class SubDoc(EmbeddedDocument):
id = StringField(required=True)
sub_doc = SubDoc(id="abc")
self.assertEqual(sub_doc.to_mongo().keys(), ['id'])
def test_embedded_document(self):
"""Ensure that embedded documents are set up correctly.
"""
class Comment(EmbeddedDocument):
content = StringField()
self.assertTrue('content' in Comment._fields)
self.assertFalse('id' in Comment._fields)
def test_embedded_document_instance(self):
"""Ensure that embedded documents can reference parent instance
"""
class Embedded(EmbeddedDocument):
string = StringField()
class Doc(Document):
embedded_field = EmbeddedDocumentField(Embedded)
Doc.drop_collection()
Doc(embedded_field=Embedded(string="Hi")).save()
doc = Doc.objects.get()
self.assertEqual(doc, doc.embedded_field._instance)
def test_embedded_document_complex_instance(self):
"""Ensure that embedded documents in complex fields can reference
parent instance"""
class Embedded(EmbeddedDocument):
string = StringField()
class Doc(Document):
embedded_field = ListField(EmbeddedDocumentField(Embedded))
Doc.drop_collection()
Doc(embedded_field=[Embedded(string="Hi")]).save()
doc = Doc.objects.get()
self.assertEqual(doc, doc.embedded_field[0]._instance)
def test_document_clean(self):
class TestDocument(Document):
status = StringField()
pub_date = DateTimeField()
def clean(self):
if self.status == 'draft' and self.pub_date is not None:
msg = 'Draft entries may not have a publication date.'
raise ValidationError(msg)
# Set the pub_date for published items if not set.
if self.status == 'published' and self.pub_date is None:
self.pub_date = datetime.now()
TestDocument.drop_collection()
t = TestDocument(status="draft", pub_date=datetime.now())
try:
t.save()
except ValidationError, e:
expect_msg = "Draft entries may not have a publication date."
self.assertTrue(expect_msg in e.message)
self.assertEqual(e.to_dict(), {'__all__': expect_msg})
t = TestDocument(status="published")
t.save(clean=False)
self.assertEqual(t.pub_date, None)
t = TestDocument(status="published")
t.save(clean=True)
self.assertEqual(type(t.pub_date), datetime)
def test_document_embedded_clean(self):
class TestEmbeddedDocument(EmbeddedDocument):
x = IntField(required=True)
y = IntField(required=True)
z = IntField(required=True)
meta = {'allow_inheritance': False}
def clean(self):
if self.z:
if self.z != self.x + self.y:
raise ValidationError('Value of z != x + y')
else:
self.z = self.x + self.y
class TestDocument(Document):
doc = EmbeddedDocumentField(TestEmbeddedDocument)
status = StringField()
TestDocument.drop_collection()
t = TestDocument(doc=TestEmbeddedDocument(x=10, y=25, z=15))
try:
t.save()
except ValidationError, e:
expect_msg = "Value of z != x + y"
self.assertTrue(expect_msg in e.message)
self.assertEqual(e.to_dict(), {'doc': {'__all__': expect_msg}})
t = TestDocument(doc=TestEmbeddedDocument(x=10, y=25)).save()
self.assertEqual(t.doc.z, 35)
        # Assert that this does not raise
t = TestDocument(doc=TestEmbeddedDocument(x=15, y=35, z=5))
t.save(clean=False)
def test_save(self):
"""Ensure that a document may be saved in the database.
"""
# Create person object and save it to the database
person = self.Person(name='Test User', age=30)
person.save()
# Ensure that the object is in the database
collection = self.db[self.Person._get_collection_name()]
person_obj = collection.find_one({'name': 'Test User'})
self.assertEqual(person_obj['name'], 'Test User')
self.assertEqual(person_obj['age'], 30)
self.assertEqual(person_obj['_id'], person.id)
# Test skipping validation on save
class Recipient(Document):
email = EmailField(required=True)
recipient = Recipient(email='root@localhost')
self.assertRaises(ValidationError, recipient.save)
try:
recipient.save(validate=False)
except ValidationError:
self.fail()
def test_save_to_a_value_that_equates_to_false(self):
class Thing(EmbeddedDocument):
count = IntField()
class User(Document):
thing = EmbeddedDocumentField(Thing)
User.drop_collection()
user = User(thing=Thing(count=1))
user.save()
user.reload()
user.thing.count = 0
user.save()
user.reload()
self.assertEqual(user.thing.count, 0)
def test_save_max_recursion_not_hit(self):
class Person(Document):
name = StringField()
parent = ReferenceField('self')
friend = ReferenceField('self')
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.parent = None
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p2.save()
p1.friend = p2
p1.save()
# Confirm can save and it resets the changed fields without hitting
# max recursion error
p0 = Person.objects.first()
p0.name = 'wpjunior'
p0.save()
def test_save_max_recursion_not_hit_with_file_field(self):
class Foo(Document):
name = StringField()
picture = FileField()
bar = ReferenceField('self')
Foo.drop_collection()
a = Foo(name='hello').save()
a.bar = a
with open(TEST_IMAGE_PATH, 'rb') as test_image:
a.picture = test_image
a.save()
# Confirm can save and it resets the changed fields without hitting
# max recursion error
b = Foo.objects.with_id(a.id)
b.name = 'world'
b.save()
        self.assertEqual(b.picture, b.bar.picture)
        self.assertEqual(b.bar.picture, b.bar.bar.picture)
def test_save_cascades(self):
class Person(Document):
name = StringField()
parent = ReferenceField('self')
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.parent = None
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p2.save()
p = Person.objects(name="Wilson Jr").get()
p.parent.name = "Daddy Wilson"
p.save(cascade=True)
p1.reload()
self.assertEqual(p1.name, p.parent.name)
def test_save_cascade_kwargs(self):
class Person(Document):
name = StringField()
parent = ReferenceField('self')
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.parent = None
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p1.name = "Daddy Wilson"
p2.save(force_insert=True, cascade_kwargs={"force_insert": False})
p1.reload()
p2.reload()
self.assertEqual(p1.name, p2.parent.name)
def test_save_cascade_meta_false(self):
class Person(Document):
name = StringField()
parent = ReferenceField('self')
meta = {'cascade': False}
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.parent = None
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p2.save()
p = Person.objects(name="Wilson Jr").get()
p.parent.name = "Daddy Wilson"
p.save()
p1.reload()
self.assertNotEqual(p1.name, p.parent.name)
p.save(cascade=True)
p1.reload()
self.assertEqual(p1.name, p.parent.name)
def test_save_cascade_meta_true(self):
class Person(Document):
name = StringField()
parent = ReferenceField('self')
meta = {'cascade': False}
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.parent = None
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p2.save(cascade=True)
p = Person.objects(name="Wilson Jr").get()
p.parent.name = "Daddy Wilson"
p.save()
p1.reload()
self.assertNotEqual(p1.name, p.parent.name)
def test_save_cascades_generically(self):
class Person(Document):
name = StringField()
parent = GenericReferenceField()
Person.drop_collection()
p1 = Person(name="Wilson Snr")
p1.save()
p2 = Person(name="Wilson Jr")
p2.parent = p1
p2.save()
p = Person.objects(name="Wilson Jr").get()
p.parent.name = "Daddy Wilson"
p.save()
p1.reload()
self.assertNotEqual(p1.name, p.parent.name)
p.save(cascade=True)
p1.reload()
self.assertEqual(p1.name, p.parent.name)
def test_update(self):
"""Ensure that an existing document is updated instead of be
overwritten."""
# Create person object and save it to the database
person = self.Person(name='Test User', age=30)
person.save()
# Create same person object, with same id, without age
same_person = self.Person(name='Test')
same_person.id = person.id
same_person.save()
# Confirm only one object
self.assertEqual(self.Person.objects.count(), 1)
# reload
person.reload()
same_person.reload()
# Confirm the same
self.assertEqual(person, same_person)
self.assertEqual(person.name, same_person.name)
self.assertEqual(person.age, same_person.age)
# Confirm the saved values
self.assertEqual(person.name, 'Test')
self.assertEqual(person.age, 30)
# Test only / exclude only updates included fields
person = self.Person.objects.only('name').get()
person.name = 'User'
person.save()
person.reload()
self.assertEqual(person.name, 'User')
self.assertEqual(person.age, 30)
# test exclude only updates set fields
person = self.Person.objects.exclude('name').get()
person.age = 21
person.save()
person.reload()
self.assertEqual(person.name, 'User')
self.assertEqual(person.age, 21)
# Test only / exclude can set non excluded / included fields
person = self.Person.objects.only('name').get()
person.name = 'Test'
person.age = 30
person.save()
person.reload()
self.assertEqual(person.name, 'Test')
self.assertEqual(person.age, 30)
# test exclude only updates set fields
person = self.Person.objects.exclude('name').get()
person.name = 'User'
person.age = 21
person.save()
person.reload()
self.assertEqual(person.name, 'User')
self.assertEqual(person.age, 21)
        # Confirm that unsetting removes non-required fields
person = self.Person.objects.exclude('name').get()
person.age = None
person.save()
person.reload()
self.assertEqual(person.name, 'User')
self.assertEqual(person.age, None)
person = self.Person.objects.get()
person.name = None
person.age = None
person.save()
person.reload()
self.assertEqual(person.name, None)
self.assertEqual(person.age, None)
def test_inserts_if_you_set_the_pk(self):
p1 = self.Person(name='p1', id=bson.ObjectId()).save()
p2 = self.Person(name='p2')
p2.id = bson.ObjectId()
p2.save()
self.assertEqual(2, self.Person.objects.count())
def test_can_save_if_not_included(self):
class EmbeddedDoc(EmbeddedDocument):
pass
class Simple(Document):
pass
class Doc(Document):
string_field = StringField(default='1')
int_field = IntField(default=1)
float_field = FloatField(default=1.1)
boolean_field = BooleanField(default=True)
datetime_field = DateTimeField(default=datetime.now)
embedded_document_field = EmbeddedDocumentField(
EmbeddedDoc, default=lambda: EmbeddedDoc())
list_field = ListField(default=lambda: [1, 2, 3])
dict_field = DictField(default=lambda: {"hello": "world"})
objectid_field = ObjectIdField(default=bson.ObjectId)
reference_field = ReferenceField(Simple, default=lambda:
Simple().save())
map_field = MapField(IntField(), default=lambda: {"simple": 1})
decimal_field = DecimalField(default=1.0)
complex_datetime_field = ComplexDateTimeField(default=datetime.now)
url_field = URLField(default="http://mongoengine.org")
dynamic_field = DynamicField(default=1)
generic_reference_field = GenericReferenceField(
default=lambda: Simple().save())
sorted_list_field = SortedListField(IntField(),
default=lambda: [1, 2, 3])
email_field = EmailField(default="ross@example.com")
geo_point_field = GeoPointField(default=lambda: [1, 2])
sequence_field = SequenceField()
uuid_field = UUIDField(default=uuid.uuid4)
generic_embedded_document_field = GenericEmbeddedDocumentField(
default=lambda: EmbeddedDoc())
Simple.drop_collection()
Doc.drop_collection()
Doc().save()
my_doc = Doc.objects.only("string_field").first()
my_doc.string_field = "string"
my_doc.save()
my_doc = Doc.objects.get(string_field="string")
self.assertEqual(my_doc.string_field, "string")
self.assertEqual(my_doc.int_field, 1)
def test_document_update(self):
def update_not_saved_raises():
person = self.Person(name='dcrosta')
person.update(set__name='Dan Crosta')
self.assertRaises(OperationError, update_not_saved_raises)
author = self.Person(name='dcrosta')
author.save()
author.update(set__name='Dan Crosta')
author.reload()
p1 = self.Person.objects.first()
self.assertEqual(p1.name, author.name)
def update_no_value_raises():
person = self.Person.objects.first()
person.update()
self.assertRaises(OperationError, update_no_value_raises)
def update_no_op_raises():
person = self.Person.objects.first()
person.update(name="Dan")
self.assertRaises(InvalidQueryError, update_no_op_raises)
def test_embedded_update(self):
"""
Test update on `EmbeddedDocumentField` fields
"""
class Page(EmbeddedDocument):
log_message = StringField(verbose_name="Log message",
required=True)
class Site(Document):
page = EmbeddedDocumentField(Page)
Site.drop_collection()
site = Site(page=Page(log_message="Warning: Dummy message"))
site.save()
# Update
site = Site.objects.first()
site.page.log_message = "Error: Dummy message"
site.save()
site = Site.objects.first()
self.assertEqual(site.page.log_message, "Error: Dummy message")
def test_embedded_update_db_field(self):
"""
Test update on `EmbeddedDocumentField` fields when db_field is other
than default.
"""
class Page(EmbeddedDocument):
log_message = StringField(verbose_name="Log message",
db_field="page_log_message",
required=True)
class Site(Document):
page = EmbeddedDocumentField(Page)
Site.drop_collection()
site = Site(page=Page(log_message="Warning: Dummy message"))
site.save()
# Update
site = Site.objects.first()
site.page.log_message = "Error: Dummy message"
site.save()
site = Site.objects.first()
self.assertEqual(site.page.log_message, "Error: Dummy message")
def test_save_only_changed_fields(self):
"""Ensure save only sets / unsets changed fields
"""
class User(self.Person):
active = BooleanField(default=True)
User.drop_collection()
# Create person object and save it to the database
user = User(name='Test User', age=30, active=True)
user.save()
user.reload()
# Simulated Race condition
same_person = self.Person.objects.get()
same_person.active = False
user.age = 21
user.save()
same_person.name = 'User'
same_person.save()
person = self.Person.objects.get()
self.assertEqual(person.name, 'User')
self.assertEqual(person.age, 21)
self.assertEqual(person.active, False)
def test_query_count_when_saving(self):
"""Ensure references don't cause extra fetches when saving"""
class Organization(Document):
name = StringField()
class User(Document):
name = StringField()
orgs = ListField(ReferenceField('Organization'))
class Feed(Document):
name = StringField()
class UserSubscription(Document):
name = StringField()
user = ReferenceField(User)
feed = ReferenceField(Feed)
Organization.drop_collection()
User.drop_collection()
Feed.drop_collection()
UserSubscription.drop_collection()
o1 = Organization(name="o1").save()
o2 = Organization(name="o2").save()
u1 = User(name="Ross", orgs=[o1, o2]).save()
f1 = Feed(name="MongoEngine").save()
sub = UserSubscription(user=u1, feed=f1).save()
user = User.objects.first()
        # Even if stored as ObjectIds internally, mongoengine uses DBRefs,
        # as ObjectIds aren't automatically dereferenced
self.assertTrue(isinstance(user._data['orgs'][0], DBRef))
self.assertTrue(isinstance(user.orgs[0], Organization))
self.assertTrue(isinstance(user._data['orgs'][0], Organization))
# Changing a value
with query_counter() as q:
self.assertEqual(q, 0)
sub = UserSubscription.objects.first()
self.assertEqual(q, 1)
sub.name = "Test Sub"
sub.save()
self.assertEqual(q, 2)
# Changing a value that will cascade
with query_counter() as q:
self.assertEqual(q, 0)
sub = UserSubscription.objects.first()
self.assertEqual(q, 1)
sub.user.name = "Test"
self.assertEqual(q, 2)
sub.save(cascade=True)
self.assertEqual(q, 3)
# Changing a value and one that will cascade
with query_counter() as q:
self.assertEqual(q, 0)
sub = UserSubscription.objects.first()
sub.name = "Test Sub 2"
self.assertEqual(q, 1)
sub.user.name = "Test 2"
self.assertEqual(q, 2)
sub.save(cascade=True)
self.assertEqual(q, 4) # One for the UserSub and one for the User
# Saving with just the refs
with query_counter() as q:
self.assertEqual(q, 0)
sub = UserSubscription(user=u1.pk, feed=f1.pk)
self.assertEqual(q, 0)
sub.save()
self.assertEqual(q, 1)
# Saving with just the refs on a ListField
with query_counter() as q:
self.assertEqual(q, 0)
User(name="Bob", orgs=[o1.pk, o2.pk]).save()
self.assertEqual(q, 1)
# Saving new objects
with query_counter() as q:
self.assertEqual(q, 0)
user = User.objects.first()
self.assertEqual(q, 1)
feed = Feed.objects.first()
self.assertEqual(q, 2)
sub = UserSubscription(user=user, feed=feed)
self.assertEqual(q, 2) # Check no change
sub.save()
self.assertEqual(q, 3)
def test_set_unset_one_operation(self):
"""Ensure that $set and $unset actions are performed in the same
operation.
"""
class FooBar(Document):
foo = StringField(default=None)
bar = StringField(default=None)
FooBar.drop_collection()
# write an entity with a single prop
foo = FooBar(foo='foo').save()
self.assertEqual(foo.foo, 'foo')
del foo.foo
foo.bar = 'bar'
with query_counter() as q:
self.assertEqual(0, q)
foo.save()
self.assertEqual(1, q)
def test_save_only_changed_fields_recursive(self):
"""Ensure save only sets / unsets changed fields
"""
class Comment(EmbeddedDocument):
published = BooleanField(default=True)
class User(self.Person):
comments_dict = DictField()
comments = ListField(EmbeddedDocumentField(Comment))
active = BooleanField(default=True)
User.drop_collection()
# Create person object and save it to the database
person = User(name='Test User', age=30, active=True)
person.comments.append(Comment())
person.save()
person.reload()
person = self.Person.objects.get()
self.assertTrue(person.comments[0].published)
person.comments[0].published = False
person.save()
person = self.Person.objects.get()
self.assertFalse(person.comments[0].published)
        # Same check, but with an embedded document stored in a plain DictField
person.comments_dict['first_post'] = Comment()
person.save()
person = self.Person.objects.get()
self.assertTrue(person.comments_dict['first_post'].published)
person.comments_dict['first_post'].published = False
person.save()
person = self.Person.objects.get()
self.assertFalse(person.comments_dict['first_post'].published)
def test_delete(self):
"""Ensure that document may be deleted using the delete method.
"""
person = self.Person(name="Test User", age=30)
person.save()
self.assertEqual(self.Person.objects.count(), 1)
person.delete()
self.assertEqual(self.Person.objects.count(), 0)
def test_save_custom_id(self):
"""Ensure that a document may be saved with a custom _id.
"""
# Create person object and save it to the database
person = self.Person(name='Test User', age=30,
id='497ce96f395f2f052a494fd4')
person.save()
# Ensure that the object is in the database with the correct _id
collection = self.db[self.Person._get_collection_name()]
person_obj = collection.find_one({'name': 'Test User'})
self.assertEqual(str(person_obj['_id']), '497ce96f395f2f052a494fd4')
def test_save_custom_pk(self):
"""Ensure that a document may be saved with a custom _id using pk alias.
"""
# Create person object and save it to the database
person = self.Person(name='Test User', age=30,
pk='497ce96f395f2f052a494fd4')
person.save()
# Ensure that the object is in the database with the correct _id
collection = self.db[self.Person._get_collection_name()]
person_obj = collection.find_one({'name': 'Test User'})
self.assertEqual(str(person_obj['_id']), '497ce96f395f2f052a494fd4')
def test_save_list(self):
"""Ensure that a list field may be properly saved.
"""
class Comment(EmbeddedDocument):
content = StringField()
class BlogPost(Document):
content = StringField()
comments = ListField(EmbeddedDocumentField(Comment))
tags = ListField(StringField())
BlogPost.drop_collection()
post = BlogPost(content='Went for a walk today...')
post.tags = tags = ['fun', 'leisure']
comments = [Comment(content='Good for you'), Comment(content='Yay.')]
post.comments = comments
post.save()
collection = self.db[BlogPost._get_collection_name()]
post_obj = collection.find_one()
self.assertEqual(post_obj['tags'], tags)
for comment_obj, comment in zip(post_obj['comments'], comments):
self.assertEqual(comment_obj['content'], comment['content'])
BlogPost.drop_collection()
def test_list_search_by_embedded(self):
class User(Document):
username = StringField(required=True)
meta = {'allow_inheritance': False}
class Comment(EmbeddedDocument):
comment = StringField()
user = ReferenceField(User,
required=True)
meta = {'allow_inheritance': False}
class Page(Document):
comments = ListField(EmbeddedDocumentField(Comment))
meta = {'allow_inheritance': False,
'indexes': [
{'fields': ['comments.user']}
]}
User.drop_collection()
Page.drop_collection()
u1 = User(username="wilson")
u1.save()
u2 = User(username="rozza")
u2.save()
u3 = User(username="hmarr")
u3.save()
p1 = Page(comments=[Comment(user=u1, comment="Its very good"),
Comment(user=u2, comment="Hello world"),
Comment(user=u3, comment="Ping Pong"),
Comment(user=u1, comment="I like a beer")])
p1.save()
p2 = Page(comments=[Comment(user=u1, comment="Its very good"),
Comment(user=u2, comment="Hello world")])
p2.save()
p3 = Page(comments=[Comment(user=u3, comment="Its very good")])
p3.save()
p4 = Page(comments=[Comment(user=u2, comment="Heavy Metal song")])
p4.save()
self.assertEqual([p1, p2], list(Page.objects.filter(comments__user=u1)))
self.assertEqual([p1, p2, p4], list(Page.objects.filter(comments__user=u2)))
self.assertEqual([p1, p3], list(Page.objects.filter(comments__user=u3)))
def test_save_embedded_document(self):
"""Ensure that a document with an embedded document field may be
saved in the database.
"""
class EmployeeDetails(EmbeddedDocument):
position = StringField()
class Employee(self.Person):
salary = IntField()
details = EmbeddedDocumentField(EmployeeDetails)
# Create employee object and save it to the database
employee = Employee(name='Test Employee', age=50, salary=20000)
employee.details = EmployeeDetails(position='Developer')
employee.save()
# Ensure that the object is in the database
collection = self.db[self.Person._get_collection_name()]
employee_obj = collection.find_one({'name': 'Test Employee'})
self.assertEqual(employee_obj['name'], 'Test Employee')
self.assertEqual(employee_obj['age'], 50)
# Ensure that the 'details' embedded object saved correctly
self.assertEqual(employee_obj['details']['position'], 'Developer')
def test_embedded_update_after_save(self):
"""
Test update of `EmbeddedDocumentField` attached to a newly saved
document.
"""
class Page(EmbeddedDocument):
log_message = StringField(verbose_name="Log message",
required=True)
class Site(Document):
page = EmbeddedDocumentField(Page)
Site.drop_collection()
site = Site(page=Page(log_message="Warning: Dummy message"))
site.save()
# Update
site.page.log_message = "Error: Dummy message"
site.save()
site = Site.objects.first()
self.assertEqual(site.page.log_message, "Error: Dummy message")
def test_updating_an_embedded_document(self):
"""Ensure that a document with an embedded document field may be
saved in the database.
"""
class EmployeeDetails(EmbeddedDocument):
position = StringField()
class Employee(self.Person):
salary = IntField()
details = EmbeddedDocumentField(EmployeeDetails)
# Create employee object and save it to the database
employee = Employee(name='Test Employee', age=50, salary=20000)
employee.details = EmployeeDetails(position='Developer')
employee.save()
# Test updating an embedded document
promoted_employee = Employee.objects.get(name='Test Employee')
promoted_employee.details.position = 'Senior Developer'
promoted_employee.save()
promoted_employee.reload()
self.assertEqual(promoted_employee.name, 'Test Employee')
self.assertEqual(promoted_employee.age, 50)
# Ensure that the 'details' embedded object saved correctly
self.assertEqual(promoted_employee.details.position, 'Senior Developer')
# Test removal
promoted_employee.details = None
promoted_employee.save()
promoted_employee.reload()
self.assertEqual(promoted_employee.details, None)
def test_object_mixins(self):
class NameMixin(object):
name = StringField()
class Foo(EmbeddedDocument, NameMixin):
quantity = IntField()
self.assertEqual(['name', 'quantity'], sorted(Foo._fields.keys()))
class Bar(Document, NameMixin):
widgets = StringField()
self.assertEqual(['id', 'name', 'widgets'], sorted(Bar._fields.keys()))
def test_mixin_inheritance(self):
class BaseMixIn(object):
count = IntField()
data = StringField()
class DoubleMixIn(BaseMixIn):
comment = StringField()
class TestDoc(Document, DoubleMixIn):
age = IntField()
TestDoc.drop_collection()
t = TestDoc(count=12, data="test",
comment="great!", age=19)
t.save()
t = TestDoc.objects.first()
self.assertEqual(t.age, 19)
self.assertEqual(t.comment, "great!")
self.assertEqual(t.data, "test")
self.assertEqual(t.count, 12)
def test_save_reference(self):
"""Ensure that a document reference field may be saved in the database.
"""
class BlogPost(Document):
meta = {'collection': 'blogpost_1'}
content = StringField()
author = ReferenceField(self.Person)
BlogPost.drop_collection()
author = self.Person(name='Test User')
author.save()
post = BlogPost(content='Watched some TV today... how exciting.')
# Should only reference author when saving
post.author = author
post.save()
post_obj = BlogPost.objects.first()
# Test laziness
self.assertTrue(isinstance(post_obj._data['author'],
bson.DBRef))
self.assertTrue(isinstance(post_obj.author, self.Person))
self.assertEqual(post_obj.author.name, 'Test User')
# Ensure that the dereferenced object may be changed and saved
post_obj.author.age = 25
post_obj.author.save()
author = list(self.Person.objects(name='Test User'))[-1]
self.assertEqual(author.age, 25)
BlogPost.drop_collection()
def test_duplicate_db_fields_raise_invalid_document_error(self):
"""Ensure a InvalidDocumentError is thrown if duplicate fields
declare the same db_field"""
def throw_invalid_document_error():
class Foo(Document):
name = StringField()
name2 = StringField(db_field='name')
self.assertRaises(InvalidDocumentError, throw_invalid_document_error)
def test_invalid_son(self):
"""Raise an error if loading invalid data"""
class Occurrence(EmbeddedDocument):
number = IntField()
class Word(Document):
stem = StringField()
count = IntField(default=1)
forms = ListField(StringField(), default=list)
occurs = ListField(EmbeddedDocumentField(Occurrence), default=list)
def raise_invalid_document():
Word._from_son({'stem': [1, 2, 3], 'forms': 1, 'count': 'one',
'occurs': {"hello": None}})
self.assertRaises(InvalidDocumentError, raise_invalid_document)
def test_reverse_delete_rule_cascade_and_nullify(self):
"""Ensure that a referenced document is also deleted upon deletion.
"""
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
reviewer = ReferenceField(self.Person, reverse_delete_rule=NULLIFY)
self.Person.drop_collection()
BlogPost.drop_collection()
author = self.Person(name='Test User')
author.save()
reviewer = self.Person(name='Re Viewer')
reviewer.save()
post = BlogPost(content='Watched some TV')
post.author = author
post.reviewer = reviewer
post.save()
reviewer.delete()
self.assertEqual(BlogPost.objects.count(), 1) # No effect on the BlogPost
self.assertEqual(BlogPost.objects.get().reviewer, None)
# Delete the Person, which should lead to deletion of the BlogPost, too
author.delete()
self.assertEqual(BlogPost.objects.count(), 0)
def test_reverse_delete_rule_with_document_inheritance(self):
"""Ensure that a referenced document is also deleted upon deletion
of a child document.
"""
class Writer(self.Person):
pass
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
reviewer = ReferenceField(self.Person, reverse_delete_rule=NULLIFY)
self.Person.drop_collection()
BlogPost.drop_collection()
author = Writer(name='Test User')
author.save()
reviewer = Writer(name='Re Viewer')
reviewer.save()
post = BlogPost(content='Watched some TV')
post.author = author
post.reviewer = reviewer
post.save()
reviewer.delete()
self.assertEqual(BlogPost.objects.count(), 1)
self.assertEqual(BlogPost.objects.get().reviewer, None)
# Delete the Writer should lead to deletion of the BlogPost
author.delete()
self.assertEqual(BlogPost.objects.count(), 0)
def test_reverse_delete_rule_cascade_and_nullify_complex_field(self):
"""Ensure that a referenced document is also deleted upon deletion for
complex fields.
"""
class BlogPost(Document):
content = StringField()
authors = ListField(ReferenceField(self.Person, reverse_delete_rule=CASCADE))
reviewers = ListField(ReferenceField(self.Person, reverse_delete_rule=NULLIFY))
self.Person.drop_collection()
BlogPost.drop_collection()
author = self.Person(name='Test User')
author.save()
reviewer = self.Person(name='Re Viewer')
reviewer.save()
post = BlogPost(content='Watched some TV')
post.authors = [author]
post.reviewers = [reviewer]
post.save()
# Deleting the reviewer should have no effect on the BlogPost
reviewer.delete()
self.assertEqual(BlogPost.objects.count(), 1)
self.assertEqual(BlogPost.objects.get().reviewers, [])
# Delete the Person, which should lead to deletion of the BlogPost, too
author.delete()
self.assertEqual(BlogPost.objects.count(), 0)
def test_reverse_delete_rule_cascade_triggers_pre_delete_signal(self):
        '''Ensure the pre_delete signal is triggered upon a cascading deletion.
        Set up a blog post with content, an author and an editor;
        delete the author, which triggers deletion of the blog post via cascade;
        the blog post's pre_delete signal then alters an editor attribute.
        '''
class Editor(self.Person):
review_queue = IntField(default=0)
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
editor = ReferenceField(Editor)
@classmethod
def pre_delete(cls, sender, document, **kwargs):
# decrement the docs-to-review count
document.editor.update(dec__review_queue=1)
signals.pre_delete.connect(BlogPost.pre_delete, sender=BlogPost)
self.Person.drop_collection()
BlogPost.drop_collection()
Editor.drop_collection()
author = self.Person(name='Will S.').save()
editor = Editor(name='Max P.', review_queue=1).save()
BlogPost(content='wrote some books', author=author,
editor=editor).save()
# delete the author, the post is also deleted due to the CASCADE rule
author.delete()
# the pre-delete signal should have decremented the editor's queue
editor = Editor.objects(name='Max P.').get()
self.assertEqual(editor.review_queue, 0)
def test_two_way_reverse_delete_rule(self):
"""Ensure that Bi-Directional relationships work with
reverse_delete_rule
"""
class Bar(Document):
content = StringField()
foo = ReferenceField('Foo')
class Foo(Document):
content = StringField()
bar = ReferenceField(Bar)
Bar.register_delete_rule(Foo, 'bar', NULLIFY)
Foo.register_delete_rule(Bar, 'foo', NULLIFY)
Bar.drop_collection()
Foo.drop_collection()
b = Bar(content="Hello")
b.save()
f = Foo(content="world", bar=b)
f.save()
b.foo = f
b.save()
f.delete()
        self.assertEqual(Bar.objects.count(), 1)  # deleting f does not delete b; its reference is nullified
self.assertEqual(Bar.objects.get().foo, None)
def test_invalid_reverse_delete_rules_raise_errors(self):
def throw_invalid_document_error():
class Blog(Document):
content = StringField()
authors = MapField(ReferenceField(self.Person, reverse_delete_rule=CASCADE))
reviewers = DictField(field=ReferenceField(self.Person, reverse_delete_rule=NULLIFY))
self.assertRaises(InvalidDocumentError, throw_invalid_document_error)
def throw_invalid_document_error_embedded():
class Parents(EmbeddedDocument):
father = ReferenceField('Person', reverse_delete_rule=DENY)
mother = ReferenceField('Person', reverse_delete_rule=DENY)
self.assertRaises(InvalidDocumentError, throw_invalid_document_error_embedded)
def test_reverse_delete_rule_cascade_recurs(self):
"""Ensure that a chain of documents is also deleted upon cascaded
deletion.
"""
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
class Comment(Document):
text = StringField()
post = ReferenceField(BlogPost, reverse_delete_rule=CASCADE)
self.Person.drop_collection()
BlogPost.drop_collection()
Comment.drop_collection()
author = self.Person(name='Test User')
author.save()
post = BlogPost(content = 'Watched some TV')
post.author = author
post.save()
comment = Comment(text = 'Kudos.')
comment.post = post
comment.save()
# Delete the Person, which should lead to deletion of the BlogPost, and,
# recursively to the Comment, too
author.delete()
self.assertEqual(Comment.objects.count(), 0)
self.Person.drop_collection()
BlogPost.drop_collection()
Comment.drop_collection()
def test_reverse_delete_rule_deny(self):
"""Ensure that a document cannot be referenced if there are still
documents referring to it.
"""
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person, reverse_delete_rule=DENY)
self.Person.drop_collection()
BlogPost.drop_collection()
author = self.Person(name='Test User')
author.save()
post = BlogPost(content = 'Watched some TV')
post.author = author
post.save()
# Delete the Person should be denied
self.assertRaises(OperationError, author.delete) # Should raise denied error
self.assertEqual(BlogPost.objects.count(), 1) # No objects may have been deleted
self.assertEqual(self.Person.objects.count(), 1)
        # Other users that don't have BlogPosts must still be removable as normal
author = self.Person(name='Another User')
author.save()
self.assertEqual(self.Person.objects.count(), 2)
author.delete()
self.assertEqual(self.Person.objects.count(), 1)
self.Person.drop_collection()
BlogPost.drop_collection()
def subclasses_and_unique_keys_works(self):
class A(Document):
pass
class B(A):
foo = BooleanField(unique=True)
A.drop_collection()
B.drop_collection()
A().save()
A().save()
B(foo=True).save()
self.assertEqual(A.objects.count(), 2)
self.assertEqual(B.objects.count(), 1)
A.drop_collection()
B.drop_collection()
def test_document_hash(self):
"""Test document in list, dict, set
"""
class User(Document):
pass
class BlogPost(Document):
pass
        # Clear old data
User.drop_collection()
BlogPost.drop_collection()
u1 = User.objects.create()
u2 = User.objects.create()
u3 = User.objects.create()
u4 = User() # New object
b1 = BlogPost.objects.create()
b2 = BlogPost.objects.create()
# in List
all_user_list = list(User.objects.all())
self.assertTrue(u1 in all_user_list)
self.assertTrue(u2 in all_user_list)
self.assertTrue(u3 in all_user_list)
self.assertFalse(u4 in all_user_list) # New object
self.assertFalse(b1 in all_user_list) # Other object
self.assertFalse(b2 in all_user_list) # Other object
# in Dict
all_user_dic = {}
for u in User.objects.all():
all_user_dic[u] = "OK"
self.assertEqual(all_user_dic.get(u1, False), "OK")
self.assertEqual(all_user_dic.get(u2, False), "OK")
self.assertEqual(all_user_dic.get(u3, False), "OK")
self.assertEqual(all_user_dic.get(u4, False), False) # New object
self.assertEqual(all_user_dic.get(b1, False), False) # Other object
self.assertEqual(all_user_dic.get(b2, False), False) # Other object
# in Set
all_user_set = set(User.objects.all())
self.assertTrue(u1 in all_user_set)
def test_picklable(self):
pickle_doc = PickleTest(number=1, string="One", lists=['1', '2'])
pickle_doc.embedded = PickleEmbedded()
pickled_doc = pickle.dumps(pickle_doc) # make sure pickling works even before the doc is saved
pickle_doc.save()
pickled_doc = pickle.dumps(pickle_doc)
resurrected = pickle.loads(pickled_doc)
self.assertEqual(resurrected, pickle_doc)
# Test pickling changed data
pickle_doc.lists.append("3")
pickled_doc = pickle.dumps(pickle_doc)
resurrected = pickle.loads(pickled_doc)
self.assertEqual(resurrected, pickle_doc)
resurrected.string = "Two"
resurrected.save()
pickle_doc = PickleTest.objects.first()
self.assertEqual(resurrected, pickle_doc)
self.assertEqual(pickle_doc.string, "Two")
self.assertEqual(pickle_doc.lists, ["1", "2", "3"])
def test_dynamic_document_pickle(self):
pickle_doc = PickleDynamicTest(name="test", number=1, string="One", lists=['1', '2'])
pickle_doc.embedded = PickleDyanmicEmbedded(foo="Bar")
pickled_doc = pickle.dumps(pickle_doc) # make sure pickling works even before the doc is saved
pickle_doc.save()
pickled_doc = pickle.dumps(pickle_doc)
resurrected = pickle.loads(pickled_doc)
self.assertEqual(resurrected, pickle_doc)
self.assertEqual(resurrected._fields_ordered,
pickle_doc._fields_ordered)
self.assertEqual(resurrected._dynamic_fields.keys(),
pickle_doc._dynamic_fields.keys())
self.assertEqual(resurrected.embedded, pickle_doc.embedded)
self.assertEqual(resurrected.embedded._fields_ordered,
pickle_doc.embedded._fields_ordered)
self.assertEqual(resurrected.embedded._dynamic_fields.keys(),
pickle_doc.embedded._dynamic_fields.keys())
def test_picklable_on_signals(self):
pickle_doc = PickleSignalsTest(number=1, string="One", lists=['1', '2'])
pickle_doc.embedded = PickleEmbedded()
pickle_doc.save()
pickle_doc.delete()
def test_throw_invalid_document_error(self):
        # defining a field that shadows an existing Document method ('validate') must raise
def throw_invalid_document_error():
class Blog(Document):
validate = DictField()
self.assertRaises(InvalidDocumentError, throw_invalid_document_error)
def test_mutating_documents(self):
class B(EmbeddedDocument):
field1 = StringField(default='field1')
class A(Document):
b = EmbeddedDocumentField(B, default=lambda: B())
A.drop_collection()
a = A()
a.save()
a.reload()
self.assertEqual(a.b.field1, 'field1')
class C(EmbeddedDocument):
c_field = StringField(default='cfield')
class B(EmbeddedDocument):
field1 = StringField(default='field1')
field2 = EmbeddedDocumentField(C, default=lambda: C())
class A(Document):
b = EmbeddedDocumentField(B, default=lambda: B())
a = A.objects()[0]
a.b.field2.c_field = 'new value'
a.save()
a.reload()
self.assertEqual(a.b.field2.c_field, 'new value')
def test_can_save_false_values(self):
"""Ensures you can save False values on save"""
class Doc(Document):
foo = StringField()
archived = BooleanField(default=False, required=True)
Doc.drop_collection()
d = Doc()
d.save()
d.archived = False
d.save()
self.assertEqual(Doc.objects(archived=False).count(), 1)
def test_can_save_false_values_dynamic(self):
"""Ensures you can save False values on dynamic docs"""
class Doc(DynamicDocument):
foo = StringField()
Doc.drop_collection()
d = Doc()
d.save()
d.archived = False
d.save()
self.assertEqual(Doc.objects(archived=False).count(), 1)
def test_do_not_save_unchanged_references(self):
"""Ensures cascading saves dont auto update"""
class Job(Document):
name = StringField()
class Person(Document):
name = StringField()
age = IntField()
job = ReferenceField(Job)
Job.drop_collection()
Person.drop_collection()
job = Job(name="Job 1")
# job should not have any changed fields after the save
job.save()
person = Person(name="name", age=10, job=job)
from pymongo.collection import Collection
orig_update = Collection.update
try:
def fake_update(*args, **kwargs):
self.fail("Unexpected update for %s" % args[0].name)
return orig_update(*args, **kwargs)
Collection.update = fake_update
person.save()
finally:
Collection.update = orig_update
def test_db_alias_tests(self):
""" DB Alias tests """
        # mongoenginetest is the default connection alias (set up in setUp())
# Register Aliases
register_connection('testdb-1', 'mongoenginetest2')
register_connection('testdb-2', 'mongoenginetest3')
register_connection('testdb-3', 'mongoenginetest4')
class User(Document):
name = StringField()
meta = {"db_alias": "testdb-1"}
class Book(Document):
name = StringField()
meta = {"db_alias": "testdb-2"}
# Drops
User.drop_collection()
Book.drop_collection()
# Create
bob = User.objects.create(name="Bob")
hp = Book.objects.create(name="Harry Potter")
# Selects
self.assertEqual(User.objects.first(), bob)
self.assertEqual(Book.objects.first(), hp)
# DeReference
class AuthorBooks(Document):
author = ReferenceField(User)
book = ReferenceField(Book)
meta = {"db_alias": "testdb-3"}
# Drops
AuthorBooks.drop_collection()
ab = AuthorBooks.objects.create(author=bob, book=hp)
# select
self.assertEqual(AuthorBooks.objects.first(), ab)
self.assertEqual(AuthorBooks.objects.first().book, hp)
self.assertEqual(AuthorBooks.objects.first().author, bob)
self.assertEqual(AuthorBooks.objects.filter(author=bob).first(), ab)
self.assertEqual(AuthorBooks.objects.filter(book=hp).first(), ab)
# DB Alias
self.assertEqual(User._get_db(), get_db("testdb-1"))
self.assertEqual(Book._get_db(), get_db("testdb-2"))
self.assertEqual(AuthorBooks._get_db(), get_db("testdb-3"))
# Collections
self.assertEqual(User._get_collection(), get_db("testdb-1")[User._get_collection_name()])
self.assertEqual(Book._get_collection(), get_db("testdb-2")[Book._get_collection_name()])
self.assertEqual(AuthorBooks._get_collection(), get_db("testdb-3")[AuthorBooks._get_collection_name()])
def test_db_alias_overrides(self):
"""db_alias can be overriden
"""
# Register a connection with db_alias testdb-2
register_connection('testdb-2', 'mongoenginetest2')
class A(Document):
"""Uses default db_alias
"""
name = StringField()
meta = {"allow_inheritance": True}
class B(A):
"""Uses testdb-2 db_alias
"""
meta = {"db_alias": "testdb-2"}
A.objects.all()
self.assertEqual('testdb-2', B._meta.get('db_alias'))
self.assertEqual('mongoenginetest',
A._get_collection().database.name)
self.assertEqual('mongoenginetest2',
B._get_collection().database.name)
def test_db_alias_propagates(self):
"""db_alias propagates?
"""
register_connection('testdb-1', 'mongoenginetest2')
class A(Document):
name = StringField()
meta = {"db_alias": "testdb-1", "allow_inheritance": True}
class B(A):
pass
self.assertEqual('testdb-1', B._meta.get('db_alias'))
def test_db_ref_usage(self):
""" DB Ref usage in dict_fields"""
class User(Document):
name = StringField()
class Book(Document):
name = StringField()
author = ReferenceField(User)
extra = DictField()
meta = {
'ordering': ['+name']
}
def __unicode__(self):
return self.name
def __str__(self):
return self.name
# Drops
User.drop_collection()
Book.drop_collection()
# Authors
bob = User.objects.create(name="Bob")
jon = User.objects.create(name="Jon")
# Redactors
karl = User.objects.create(name="Karl")
susan = User.objects.create(name="Susan")
peter = User.objects.create(name="Peter")
# Bob
Book.objects.create(name="1", author=bob, extra={
"a": bob.to_dbref(), "b": [karl.to_dbref(), susan.to_dbref()]})
Book.objects.create(name="2", author=bob, extra={
"a": bob.to_dbref(), "b": karl.to_dbref()})
Book.objects.create(name="3", author=bob, extra={
"a": bob.to_dbref(), "c": [jon.to_dbref(), peter.to_dbref()]})
Book.objects.create(name="4", author=bob)
# Jon
Book.objects.create(name="5", author=jon)
Book.objects.create(name="6", author=peter)
Book.objects.create(name="7", author=jon)
Book.objects.create(name="8", author=jon)
Book.objects.create(name="9", author=jon,
extra={"a": peter.to_dbref()})
# Checks
self.assertEqual(",".join([str(b) for b in Book.objects.all()]),
"1,2,3,4,5,6,7,8,9")
# bob related books
self.assertEqual(",".join([str(b) for b in Book.objects.filter(
Q(extra__a=bob) |
Q(author=bob) |
Q(extra__b=bob))]),
"1,2,3,4")
# Susan & Karl related books
self.assertEqual(",".join([str(b) for b in Book.objects.filter(
Q(extra__a__all=[karl, susan]) |
Q(author__all=[karl, susan]) |
Q(extra__b__all=[
karl.to_dbref(), susan.to_dbref()]))
]), "1")
# $Where
self.assertEqual(u",".join([str(b) for b in Book.objects.filter(
__raw__={
"$where": """
function(){
return this.name == '1' ||
this.name == '2';}"""
})]),
"1,2")
def test_switch_db_instance(self):
register_connection('testdb-1', 'mongoenginetest2')
class Group(Document):
name = StringField()
Group.drop_collection()
with switch_db(Group, 'testdb-1') as Group:
Group.drop_collection()
Group(name="hello - default").save()
self.assertEqual(1, Group.objects.count())
group = Group.objects.first()
group.switch_db('testdb-1')
group.name = "hello - testdb!"
group.save()
with switch_db(Group, 'testdb-1') as Group:
group = Group.objects.first()
self.assertEqual("hello - testdb!", group.name)
group = Group.objects.first()
self.assertEqual("hello - default", group.name)
# Slightly contrived now - perform an update
# Only works as they have the same object_id
group.switch_db('testdb-1')
group.update(set__name="hello - update")
with switch_db(Group, 'testdb-1') as Group:
group = Group.objects.first()
self.assertEqual("hello - update", group.name)
Group.drop_collection()
self.assertEqual(0, Group.objects.count())
group = Group.objects.first()
self.assertEqual("hello - default", group.name)
# Totally contrived now - perform a delete
# Only works as they have the same object_id
group.switch_db('testdb-1')
group.delete()
with switch_db(Group, 'testdb-1') as Group:
self.assertEqual(0, Group.objects.count())
group = Group.objects.first()
self.assertEqual("hello - default", group.name)
def test_no_overwritting_no_data_loss(self):
class User(Document):
username = StringField(primary_key=True)
name = StringField()
@property
def foo(self):
return True
User.drop_collection()
user = User(username="Ross", foo="bar")
self.assertTrue(user.foo)
User._get_collection().save({"_id": "Ross", "foo": "Bar",
"data": [1, 2, 3]})
user = User.objects.first()
self.assertEqual("Ross", user.username)
self.assertEqual(True, user.foo)
self.assertEqual("Bar", user._data["foo"])
self.assertEqual([1, 2, 3], user._data["data"])
def test_spaces_in_keys(self):
class Embedded(DynamicEmbeddedDocument):
pass
class Doc(DynamicDocument):
pass
Doc.drop_collection()
doc = Doc()
setattr(doc, 'hello world', 1)
doc.save()
one = Doc.objects.filter(**{'hello world': 1}).count()
self.assertEqual(1, one)
def test_shard_key(self):
class LogEntry(Document):
machine = StringField()
log = StringField()
meta = {
'shard_key': ('machine',)
}
LogEntry.drop_collection()
log = LogEntry()
log.machine = "Localhost"
log.save()
log.log = "Saving"
log.save()
def change_shard_key():
log.machine = "127.0.0.1"
self.assertRaises(OperationError, change_shard_key)
def test_shard_key_primary(self):
class LogEntry(Document):
machine = StringField(primary_key=True)
log = StringField()
meta = {
'shard_key': ('machine',)
}
LogEntry.drop_collection()
log = LogEntry()
log.machine = "Localhost"
log.save()
log.log = "Saving"
log.save()
def change_shard_key():
log.machine = "127.0.0.1"
self.assertRaises(OperationError, change_shard_key)
def test_kwargs_simple(self):
class Embedded(EmbeddedDocument):
name = StringField()
class Doc(Document):
doc_name = StringField()
doc = EmbeddedDocumentField(Embedded)
classic_doc = Doc(doc_name="my doc", doc=Embedded(name="embedded doc"))
dict_doc = Doc(**{"doc_name": "my doc",
"doc": {"name": "embedded doc"}})
self.assertEqual(classic_doc, dict_doc)
self.assertEqual(classic_doc._data, dict_doc._data)
def test_kwargs_complex(self):
class Embedded(EmbeddedDocument):
name = StringField()
class Doc(Document):
doc_name = StringField()
docs = ListField(EmbeddedDocumentField(Embedded))
classic_doc = Doc(doc_name="my doc", docs=[
Embedded(name="embedded doc1"),
Embedded(name="embedded doc2")])
dict_doc = Doc(**{"doc_name": "my doc",
"docs": [{"name": "embedded doc1"},
{"name": "embedded doc2"}]})
self.assertEqual(classic_doc, dict_doc)
self.assertEqual(classic_doc._data, dict_doc._data)
def test_positional_creation(self):
"""Ensure that document may be created using positional arguments.
"""
person = self.Person("Test User", 42)
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 42)
def test_mixed_creation(self):
"""Ensure that document may be created using mixed arguments.
"""
person = self.Person("Test User", age=42)
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 42)
def test_mixed_creation_dynamic(self):
"""Ensure that document may be created using mixed arguments.
"""
class Person(DynamicDocument):
name = StringField()
person = Person("Test User", age=42)
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 42)
def test_bad_mixed_creation(self):
"""Ensure that document gives correct error when duplicating arguments
"""
def construct_bad_instance():
return self.Person("Test User", 42, name="Bad User")
self.assertRaises(TypeError, construct_bad_instance)
def test_data_contains_id_field(self):
"""Ensure that asking for _data returns 'id'
"""
class Person(Document):
name = StringField()
Person.drop_collection()
Person(name="Harry Potter").save()
person = Person.objects.first()
self.assertTrue('id' in person._data.keys())
self.assertEqual(person._data.get('id'), person.id)
def test_complex_nesting_document_and_embedded_document(self):
class Macro(EmbeddedDocument):
value = DynamicField(default="UNDEFINED")
class Parameter(EmbeddedDocument):
macros = MapField(EmbeddedDocumentField(Macro))
def expand(self):
self.macros["test"] = Macro()
class Node(Document):
parameters = MapField(EmbeddedDocumentField(Parameter))
def expand(self):
self.flattened_parameter = {}
for parameter_name, parameter in self.parameters.iteritems():
parameter.expand()
class System(Document):
name = StringField(required=True)
nodes = MapField(ReferenceField(Node, dbref=False))
def save(self, *args, **kwargs):
for node_name, node in self.nodes.iteritems():
node.expand()
node.save(*args, **kwargs)
super(System, self).save(*args, **kwargs)
System.drop_collection()
Node.drop_collection()
system = System(name="system")
system.nodes["node"] = Node()
system.save()
system.nodes["node"].parameters["param"] = Parameter()
system.save()
system = System.objects.first()
self.assertEqual("UNDEFINED", system.nodes["node"].parameters["param"].macros["test"].value)
if __name__ == '__main__':
unittest.main()
| LethusTI/supportcenter | vendor/mongoengine/tests/document/instance.py | Python | gpl-3.0 | 77,386 | [
"exciting"
] | e927b2ca0972193bd6051fb814bc6abb49ec71ba0481282b7915af04b49d65e5 |
# -*- coding: utf-8 -*-
"""
General utilities
"""
import obspy.signal.filter
import numpy as np
from numpy.lib.stride_tricks import as_strided
from numpy.fft import rfft, irfft, rfftfreq
import os
import glob
import shutil
import pickle
import shapefile
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import pyproj
import itertools as it
from pyPdf import PdfFileReader, PdfFileWriter
from obspy.core import Stream
# import CONFIG class initialised in ./configs/tmp_config.pickle
config_pickle = 'configs/tmp_config.pickle'
f = open(name=config_pickle, mode='rb')
CONFIG = pickle.load(f)
f.close()
# import variables from initialised CONFIG class.
CROSSCORR_SKIPLOCS = CONFIG.CROSSCORR_SKIPLOCS
COAST_SHP = CONFIG.COAST_SHP
TECTO_SHP = CONFIG.TECTO_SHP
TECTO_LABELS = CONFIG.TECTO_LABELS
TECTO_COLORS = CONFIG.TECTO_COLORS
# reference ellipsoid used to calculate distances
wgs84 = pyproj.Geod(ellps='WGS84')
def filelist(basedir, ext=None, subdirs=True, verbose = False):
"""
Returns the list of files in *basedir* (and subdirs if
*subdirs* is True) whose extension is *ext*
"""
# list of files and dirs
flist = os.listdir(basedir)
files = []
# initialise so the (files, subdir_len) return below is defined even
# when no matching file is found at this level
subdir_len = 0
for f in flist:
if os.path.isfile(os.path.join(basedir, f)):
if not ext:
files.append(f)
elif os.path.splitext(f)[1].lower() == "." + ext.lower():
files.append(f)
subdir_len = len(f)
# recurse into subdirectories if requested
if subdirs:
for d in flist:
if os.path.isdir(os.path.join(basedir, d)):
subdir = os.path.join(basedir, d)
sublist = filelist(subdir, ext=ext, subdirs=True, verbose=True)
for f in sublist[0]:
files.append('%s/%s' %(d,f))
return files, subdir_len
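# Illustrative usage sketch, not part of the original module; the directory
# name and extension below are made-up examples.
def _filelist_example():
    # recursively list .txt files below /tmp, ignoring the subdir_len value
    files, _ = filelist('/tmp', ext='txt', subdirs=True)
    return files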
def openandbackup(filename, mode='w'):
"""
Opens file, backing up older version if file exists.
@type filename: str or unicode
@rtype: file
"""
if os.path.exists(filename):
# backup
shutil.copyfile(filename, filename + '~')
# opening file
f = open(filename, mode=mode)
return f
def get_fill(st, starttime=None, endtime=None):
"""
Subroutine to get data fill
@rtype: float
"""
if len(st) == 0:
# no trace
return 0.0
ststart = min(tr.stats.starttime for tr in st)
stend = max(tr.stats.endtime for tr in st)
dttot = (stend if not endtime else endtime) - \
(ststart if not starttime else starttime)
gaps = st.getGaps()
fill = 1.0
if starttime:
fill -= max(ststart - starttime, 0.0) / dttot
if endtime:
fill -= max(endtime - stend, 0.0) / dttot
for g in gaps:
gapstart = g[4]
gapend = g[5]
if starttime:
gapstart = max(gapstart, starttime)
gapend = max(gapend, starttime)
if endtime:
gapstart = min(gapstart, endtime)
gapend = min(gapend, endtime)
fill -= (gapend - gapstart) / dttot
return fill
def get_fill_trace(tr):
"""
Subroutine to get data fill for single trace!
@rtype: float
"""
st = Stream(traces=[tr])
trstart = tr.stats.starttime
trend = tr.stats.endtime
dttot = (trend) - (trstart)
gaps = st.getGaps()
fill = 1.0
for g in gaps:
gapstart = g[4]
gapend = g[5]
fill -= (gapend - gapstart) / dttot
return fill
def clean_stream(stream, skiplocs=CROSSCORR_SKIPLOCS, verbose=False):
"""
1 - Removes traces whose location is in skiplocs.
2 - Select trace from 1st location if several ids.
@type stream: L{obspy.core.Stream}
@type skiplocs: tuple of str
@rtype: None
"""
if not skiplocs:
skiplocs = []
# removing traces of stream from locations to skip
for tr in [tr for tr in stream if tr.stats.location in skiplocs]:
stream.remove(tr)
# if more than one id -> taking first location (sorted alphanumerically)
if len(set(tr.id for tr in stream)) > 1:
locs = sorted(set(tr.stats.location for tr in stream))
select_loc = locs[0]
if verbose:
s = "warning: selecting loc '{loc}', discarding locs {locs}"
print s.format(loc=select_loc, locs=','.join(locs[1:])),
for tr in [tr for tr in stream if tr.stats.location != select_loc]:
stream.remove(tr)
def plot_nb_pairs():
"""
Plot the total nb of group velocity measurements and the remaining
nb of measurements (after applying selection criteria), function of
period, for the selected dispersion curves.
"""
# parsing some parameters of configuration file
from pysismo.psconfig import (FTAN_DIR, MINSPECTSNR, MINSPECTSNR_NOSDEV,
MINNBTRIMESTER, MAXSDEV)
# selecting dispersion curves
flist = sorted(glob.glob(os.path.join(FTAN_DIR, 'FTAN*.pickle*')))
print 'Select file(s) containing dispersion curves to process:'
print '\n'.join('{} - {}'.format(i, os.path.basename(f))
for i, f in enumerate(flist))
res = raw_input('\n')
pickle_files = [flist[int(i)] for i in res.split()]
for curves_file in pickle_files:
# loading dispersion curves of file
print "Loading file: " + curves_file
f = open(curves_file, 'rb')
curves = pickle.load(f)
f.close()
periods = curves[0].periods
# updating selection parameters of dispersion curves
for c in curves:
c.update_parameters(minspectSNR=MINSPECTSNR,
minspectSNR_nosdev=MINSPECTSNR_NOSDEV,
minnbtrimester=MINNBTRIMESTER,
maxsdev=MAXSDEV)
# list of arrays of filtered velocities
list_filtered_vels = [c.filtered_vels_sdevs()[0] for c in curves]
n_init = []
n_final = []
for period in periods:
iperiods = [c.get_period_index(period) for c in curves]
# total nb of measurements
vels = np.array([c.v[i] for c, i in zip(curves, iperiods)])
n_init.append(np.count_nonzero(~np.isnan(vels)))
# remaining nb of measurements after selection criteria
vels = np.array([v[i] for v, i in zip(list_filtered_vels, iperiods)])
n_final.append(np.count_nonzero(~np.isnan(vels)))
lines = plt.plot(periods, n_init, label=os.path.basename(curves_file))
plt.plot(periods, n_final, color=lines[0].get_color())
# finalizing and showing plot
plt.xlabel('Period (s)')
plt.ylabel('Nb of measurements')
plt.legend(fontsize=11, loc='best')
plt.grid(True)
plt.show()
def resample(trace, dt_resample):
"""
Subroutine to resample trace
@type trace: L{obspy.core.trace.Trace}
@type dt_resample: float
@rtype: L{obspy.core.trace.Trace}
"""
dt = 1.0 / trace.stats.sampling_rate
factor = dt_resample / dt
if int(factor) == factor:
# simple decimation (no filt because it shifts the data)
trace.decimate(int(factor), no_filter=True)
else:
# linear interpolation
tp = np.arange(0, trace.stats.npts) * trace.stats.delta
zp = trace.data
ninterp = int(max(tp) / dt_resample) + 1
tinterp = np.arange(0, ninterp) * dt_resample
trace.data = np.interp(tinterp, tp, zp)
trace.stats.npts = ninterp
trace.stats.delta = dt_resample
trace.stats.sampling_rate = 1.0 / dt_resample
#trace.stats.endtime = trace.stats.endtime + max(tinterp)-max(tp)
def moving_avg(a, halfwindow, mask=None):
"""
Performs a fast n-point moving average of (the last
dimension of) array *a*, by using stride tricks to roll
a window on *a*.
Note that *halfwindow* gives the nb of points on each side,
so that n = 2*halfwindow + 1.
If *mask* is provided, values of *a* where mask = False are
skipped.
Returns an array of same size as *a* (which means that near
the edges, the averaging window is actually < *npt*).
"""
# padding array with zeros on the left and on the right:
# e.g., if halfwindow = 2:
# a_padded = [0 0 a0 a1 ... aN 0 0]
# mask_padded = [F F ? ? ? F F]
if mask is None:
mask = np.ones_like(a, dtype='bool')
zeros = np.zeros(a.shape[:-1] + (halfwindow,))
falses = zeros.astype('bool')
a_padded = np.concatenate((zeros, np.where(mask, a, 0), zeros), axis=-1)
mask_padded = np.concatenate((falses, mask, falses), axis=-1)
# rolling window on padded array using stride trick
#
# E.g., if halfwindow=2:
# rolling_a[:, 0] = [0 0 a0 a1 ... aN]
# rolling_a[:, 1] = [0 a0 a1 a2 ... aN 0 ]
# ...
# rolling_a[:, 4] = [a2 a3 ... aN 0 0]
npt = 2 * halfwindow + 1 # total size of the averaging window
rolling_a = as_strided(a_padded,
shape=a.shape + (npt,),
strides=a_padded.strides + (a.strides[-1],))
rolling_mask = as_strided(mask_padded,
shape=mask.shape + (npt,),
strides=mask_padded.strides + (mask.strides[-1],))
# moving average
n = rolling_mask.sum(axis=-1)
return np.where(n > 0, rolling_a.sum(axis=-1).astype('float') / n, np.nan)
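# Illustrative usage sketch, not part of the original module: smooth a short
# made-up series with a 3-point window (halfwindow=1), skipping one
# masked-out sample.
def _moving_avg_example():
    a = np.array([1.0, 2.0, 4.0, 8.0, 16.0])
    mask = np.array([True, True, False, True, True])
    return moving_avg(a, halfwindow=1, mask=mask)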
def local_maxima_indices(x, include_edges=True):
"""
Returns the indices of all local maxima of an array x
(larger maxima first)
@type x: L{numpy.ndarray}
@rtype: list of int
"""
mask = (x[1:-1] >= x[:-2]) & (x[1:-1] >= x[2:])
indices = np.nonzero(mask)[0] + 1
if include_edges:
# local maxima on edges?
if x[0] >= x[1]:
indices = np.r_[0, indices]
if x[-1] >= x[-2]:
indices = np.r_[len(x) - 1, indices]
indices = sorted(indices, key=lambda index: x[index], reverse=True)
return indices
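# Illustrative usage sketch, not part of the original module: the two
# interior maxima of the made-up array below are returned largest first.
def _local_maxima_example():
    x = np.array([0.0, 2.0, 1.0, 3.0, 0.5])
    return local_maxima_indices(x, include_edges=False)  # -> [3, 1]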
def bandpass(data, dt, filtertype='Butterworth', **kwargs):
"""
Bandpassing array *data* (whose sampling step is *dt*)
using either a Butterworth filter (filtertype='Butterworth')
or a Gaussian filter (filtertype='Gaussian')
Additional arguments in *kwargs* are sent to
bandpass_butterworth() (arguments: periodmin, periodmax,
corners, zerophase) or bandpass_gaussian() (arguments:
period, alpha)
@type data: L{numpy.ndarray}
@type dt: float
@rtype: L{numpy.ndarray}
"""
if filtertype.lower().strip() == 'butterworth':
return bandpass_butterworth(data, dt, **kwargs)
elif filtertype.lower().strip() == 'gaussian':
return bandpass_gaussian(data, dt, **kwargs)
else:
raise Exception("Unknown filter: " + filtertype)
def bandpass_butterworth(data, dt, periodmin, periodmax, corners=2, zerophase=True):
"""
Bandpassing data (in array *data*) between periods
*periodmin* and *periodmax* with a Butterworth filter.
*dt* is the sampling interval of the data.
@type data: L{numpy.ndarray}
@type dt: float
@type periodmin: float or int or None
@type periodmax: float or int or None
@type corners: int
@type zerophase: bool
@rtype: L{numpy.ndarray}
"""
return obspy.signal.filter.bandpass(data=data, freqmin=1.0 / periodmax,
freqmax=1.0 / periodmin, df=1.0 / dt,
corners=corners, zerophase=zerophase)
def bandpass_gaussian(data, dt, period, alpha):
"""
Bandpassing real data (in array *data*) with a Gaussian
filter centered at *period* whose width is controlled
by *alpha*:
exp[-alpha * ((f-f0)/f0)**2],
with f the frequency and f0 = 1 / *period*.
*dt* is the sampling interval of the data.
@type data: L{numpy.ndarray}
@type dt: float
@type period: float
@type alpha: float
@rtype: L{numpy.ndarray}
"""
# Fourier transform
fft_data = rfft(data)
# array of frequencies
freq = rfftfreq(len(data), d=dt)
# bandpassing data
f0 = 1.0 / period
fft_data *= np.exp(-alpha * ((freq - f0) / f0) ** 2)
# back to time domain
return irfft(fft_data, n=len(data))
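# Illustrative usage sketch, not part of the original module: isolate the
# 20 s component of a synthetic two-period signal sampled at 1 Hz, going
# through the generic bandpass() dispatcher defined above. All values are
# made up for the example.
def _bandpass_example():
    dt = 1.0
    t = np.arange(0, 2048) * dt
    data = np.sin(2 * np.pi * t / 20.0) + np.sin(2 * np.pi * t / 5.0)
    return bandpass(data, dt, filtertype='Gaussian', period=20.0, alpha=20.0)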
def dist(lons1, lats1, lons2, lats2):
"""
Returns an array of geodetic distance(s) in km between
points (lon1, lat1) and (lon2, lat2)
"""
_, _, d = wgs84.inv(lons1=lons1, lats1=lats1, lons2=lons2, lats2=lats2)
return np.array(d) / 1000.0
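# Illustrative usage sketch, not part of the original module; the coordinates
# are made-up examples (roughly Paris and London).
def _dist_example():
    return dist(lons1=2.35, lats1=48.86, lons2=-0.13, lats2=51.51)  # ~344 km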
def geodesic(coord1, coord2, npts):
"""
Returns a list of *npts* points along the geodesic between
(and including) *coord1* and *coord2*, in an array of
shape (*npts*, 2).
@rtype: L{ndarray}
"""
if npts < 2:
raise Exception('nb of points must be at least 2')
path = wgs84.npts(lon1=coord1[0], lat1=coord1[1],
lon2=coord2[0], lat2=coord2[1],
npts=npts-2)
return np.array([coord1] + path + [coord2])
def geo2cartesian(lons, lats, r=1.0):
"""
Converts geographic coordinates to cartesian coordinates
"""
# spherical coordinates
phi = np.array(lons) * np.pi / 180.0
theta = np.pi / 2.0 - np.array(lats) * np.pi / 180.0
# cartesian coordinates
x = r * np.sin(theta) * np.cos(phi)
y = r * np.sin(theta) * np.sin(phi)
z = r * np.cos(theta)
return x, y, z
def projection(M, A, B, C):
"""
Orthogonal projection of point(s) M on plane(s) ABC.
Each point (M, A, B, C) should be a tuple of floats or
a tuple of arrays, (x, y, z)
"""
AB = vector(A, B)
AC = vector(A, C)
MA = vector(M, A)
# unit vector u perpendicular to ABC (u = AB x AC / |AB x AC|)
u = vectorial_product(AB, AC)
norm_u = norm(u)
u = [u[i] / norm_u for i in (0, 1, 2)]
# (MA.u)u = MM' (with M' the projection of M on the plane)
MA_dot_u = sum(MA[i] * u[i] for i in (0, 1, 2))
MMp = [MA_dot_u * u[i] for i in (0, 1, 2)]
xMp, yMp, zMp = [MMp[i] + M[i] for i in (0, 1, 2)]
return xMp, yMp, zMp
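# Illustrative usage sketch, not part of the original module: project the
# origin onto the plane z = 1 spanned by three made-up points.
def _projection_example():
    A = (0.0, 0.0, 1.0)
    B = (1.0, 0.0, 1.0)
    C = (0.0, 1.0, 1.0)
    M = (0.0, 0.0, 0.0)
    return projection(M, A, B, C)  # -> (0.0, 0.0, 1.0)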
def barycentric_coords(M, A, B, C):
"""
Barycentric coordinates of point(s) M in triangle(s) ABC.
Each point (M, A, B, C) should be a tuple of floats or
a tuple of arrays, (x, y, z).
Barycentric coordinate wrt A (resp. B, C) is the relative
area of triangle MBC (resp. MAC, MAB).
"""
MA = vector(M, A)
MB = vector(M, B)
MC = vector(M, C)
# area of triangle = norm of vectorial product / 2
wA = norm(vectorial_product(MB, MC)) / 2.0
wB = norm(vectorial_product(MA, MC)) / 2.0
wC = norm(vectorial_product(MA, MB)) / 2.0
wtot = wA + wB + wC
return wA / wtot, wB / wtot, wC / wtot
def vector(A, B):
"""
Vector(s) AB. A and B should be tuple of floats or
tuple of arrays, (x, y, z).
"""
return tuple(np.array(B[i]) - np.array(A[i]) for i in (0, 1, 2))
def vectorial_product(u, v):
"""
Vectorial product u x v. Vectors u, v should be tuple of
floats or tuple of arrays, (ux, uy, uz) and (vx, vy, vz)
"""
return (u[1]*v[2] - u[2]*v[1],
u[2]*v[0] - u[0]*v[2],
u[0]*v[1] - u[1]*v[0])
def norm(u):
"""
Norm of vector(s) u, which should be a tuple of
floats or a tuple of arrays, (ux, uy, uz).
"""
return np.sqrt(u[0]**2 + u[1]**2 + u[2]**2)
def basemap(ax=None, labels=True, axeslabels=True, fill=True, bbox=None):
"""
Plots base map: coasts (file *COAST_SHP*), tectonic provinces
(file *TECTO_SHP*) and labels (file *TECTO_LABELS*). Labels are
plotted if *labels* = True. Tectonic provinces are filled
(according to colors in dict *TECTO_COLORS*) if *fill* = True.
"""
fig = None
if not ax:
fig = plt.figure()
ax = fig.add_subplot(111)
# plotting coasts
if COAST_SHP:
sf = shapefile.Reader(COAST_SHP)
for shape in sf.shapes():
# adding polygon(s)
parts = list(shape.parts) + [len(shape.points)]
partlims = zip(parts[:-1], parts[1:])
for i1, i2 in partlims:
points = shape.points[i1:i2]
x, y = zip(*points)
ax.plot(x, y, '-', lw=0.75, color='k')
# plotting tectonic provinces
if TECTO_SHP != "False":
sf = shapefile.Reader(TECTO_SHP)
for sr in sf.shapeRecords():
tectcategory = sr.record[0]
color = next((TECTO_COLORS[k] for k in TECTO_COLORS.keys()
if k in tectcategory), 'white')
shape = sr.shape
parts = list(shape.parts) + [len(shape.points)]
partlims = zip(parts[:-1], parts[1:])
if fill:
polygons = [Polygon(shape.points[i1:i2]) for i1, i2 in partlims]
tectprovince = PatchCollection(polygons, facecolor=color,
edgecolor='0.663', linewidths=0.5)
ax.add_collection(tectprovince)
else:
for i1, i2 in partlims:
x, y = zip(*shape.points[i1:i2])
ax.plot(x, y, '-', color='0.663', lw=0.5)
if labels and TECTO_LABELS:
# plotting tectonic labels within bounding box
sf = shapefile.Reader(TECTO_LABELS)
for sr in sf.shapeRecords():
label, angle = sr.record
label = label.replace('\\', '\n')
label = label.replace('Guapore', u'Guaporé').replace('Sao', u'São')
x, y = sr.shape.points[0]
if not bbox or bbox[0] < x < bbox[1] and bbox[2] < y < bbox[3]:
ax.text(x, y, label, ha='center', va='center', color='grey',
fontsize=10, weight='bold', rotation=angle)
# setting up axes
ax.set_aspect('equal')
if axeslabels:
ax.set_xlabel('longitude (deg)')
ax.set_ylabel('latitude (deg)')
ax.grid(True)
else:
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.grid(False)
if bbox:
ax.set_xlim(bbox[:2])
ax.set_ylim(bbox[2:])
if fig:
fig.show()
def combine_pdf_pages(pdfpath, pagesgroups, verbose=False):
"""
Vertically combines groups of pages of a pdf file
@type pdfpath: str or unicode
@type pagesgroups: list of (list of int)
"""
# opening input file
if verbose:
print "Opening file " + pdfpath
fi = open(pdfpath, 'rb')
pdf = PdfFileReader(fi)
# opening output pdf
pdfout = PdfFileWriter()
# loop on groups of pages to combine
for pagesgroup in pagesgroups:
if verbose:
print "Combining pages:",
# heights and widths
heights = [pdf.pages[i].mediaBox.getHeight() for i in pagesgroup]
widths = [pdf.pages[i].mediaBox.getWidth() for i in pagesgroup]
# adding new blank page
page_out = pdfout.addBlankPage(width=max(widths), height=sum(heights))
# merging pages of group
for i, p in enumerate(pagesgroup):
if verbose:
print p,
page_out.mergeTranslatedPage(pdf.pages[p], tx=0, ty=sum(heights[i+1:]))
print
# exporting merged pdf into temporary output file
fo = create_tmpfile('wb')
if verbose:
print "Exporting merged pdf in file {}".format(fo.name)
pdfout.write(fo)
# closing files
fi.close()
fo.close()
# removing original file and replacing it with merged pdf
if verbose:
print "Moving exported pdf to: " + pdfpath
os.remove(pdfpath)
os.rename(fo.name, pdfpath)
def create_tmpfile(*args, **kwargs):
"""
Creates, opens and returns the first file tmp<i> that does
not exist (with i = integer).
*args and **kwargs are sent to open() function
"""
for i in it.count():
filepath = 'tmp{}'.format(i)
if not os.path.exists(filepath):
f = open(filepath, *args, **kwargs)
return f
def groupbykey(iterable, key=None):
"""
Returns a list of sublists of *iterable* grouped by key:
all elements x of a given sublist have the same
value key(x).
key(x) must return a hashable object, such that
set(key(x) for x in iterable) is possible.
If not given, key() is the identity function.
"""
if not key:
key = lambda x: x
# unique keys
iterable = list(iterable)
keys = set(key(x) for x in iterable)
groups = []
for k in keys:
# group with key = k
groups.append([x for x in iterable if key(x) == k])
return groups
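# Illustrative usage sketch, not part of the original module: group made-up
# words by their first letter (group order is not guaranteed, since it
# follows set iteration order).
def _groupbykey_example():
    words = ['ant', 'bee', 'ape', 'bat']
    return groupbykey(words, key=lambda w: w[0])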
| boland1992/SeisSuite | build/lib/seissuite/ant/psutils.py | Python | gpl-3.0 | 20,742 | [
"Gaussian"
] | aa2efc338f9fc1dd58ba165e8cc551ee5b18d1d1dfd75888b8756bae3c3fb926 |
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""this module contains some utilities to navigate in the tree or to
extract information from it
"""
__docformat__ = "restructuredtext en"
from .exceptions import AstroidBuildingException
from .builder import parse
class ASTWalker:
"""a walker visiting a tree in preorder, calling on the handler:
* visit_<class name> on entering a node, where class name is the class of
the node in lower case
* leave_<class name> on leaving a node, where class name is the class of
the node in lower case
"""
def __init__(self, handler):
self.handler = handler
self._cache = {}
def walk(self, node, _done=None):
"""walk on the tree from <node>, getting callbacks from handler"""
if _done is None:
_done = set()
if node in _done:
raise AssertionError((id(node), node, node.parent))
_done.add(node)
self.visit(node)
for child_node in node.get_children():
self.handler.set_context(node, child_node)
assert child_node is not node
self.walk(child_node, _done)
self.leave(node)
assert node.parent is not node
def get_callbacks(self, node):
"""get callbacks from handler for the visited node"""
klass = node.__class__
methods = self._cache.get(klass)
if methods is None:
handler = self.handler
kid = klass.__name__.lower()
e_method = getattr(handler, 'visit_%s' % kid,
getattr(handler, 'visit_default', None))
l_method = getattr(handler, 'leave_%s' % kid,
getattr(handler, 'leave_default', None))
self._cache[klass] = (e_method, l_method)
else:
e_method, l_method = methods
return e_method, l_method
def visit(self, node):
"""walk on the tree from <node>, getting callbacks from handler"""
method = self.get_callbacks(node)[0]
if method is not None:
method(node)
def leave(self, node):
"""walk on the tree from <node>, getting callbacks from handler"""
method = self.get_callbacks(node)[1]
if method is not None:
method(node)
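# Illustrative usage sketch, not part of the original module. A handler
# passed to ASTWalker is expected to provide set_context(parent, child) plus
# visit_<nodename>/leave_<nodename> (or visit_default/leave_default)
# callbacks, e.g.:
#   walker = ASTWalker(handler)
#   walker.walk(parse("x = 1"))
# would call handler.visit_module, then the callbacks for the child nodes in
# preorder, and the matching leave_* callbacks on the way back up.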
class LocalsVisitor(ASTWalker):
"""visit a project by traversing the locals dictionary"""
def __init__(self):
ASTWalker.__init__(self, self)
self._visited = {}
def visit(self, node):
"""launch the visit starting from the given node"""
if node in self._visited:
return
self._visited[node] = 1 # FIXME: use set ?
methods = self.get_callbacks(node)
if methods[0] is not None:
methods[0](node)
if 'locals' in node.__dict__: # skip Instance and other proxy
for name, local_node in node.items():
self.visit(local_node)
if methods[1] is not None:
return methods[1](node)
def _check_children(node):
"""a helper function to check children - parent relations"""
for child in node.get_children():
ok = False
if child is None:
print "Hm, child of %s is None" % node
continue
if not hasattr(child, 'parent'):
print " ERROR: %s has child %s %x with no parent" % (node, child, id(child))
elif not child.parent:
print " ERROR: %s has child %s %x with parent %r" % (node, child, id(child), child.parent)
elif child.parent is not node:
print " ERROR: %s %x has child %s %x with wrong parent %s" % (node,
id(node), child, id(child), child.parent)
else:
ok = True
if not ok:
print "lines;", node.lineno, child.lineno
print "of module", node.root(), node.root().name
raise AstroidBuildingException
_check_children(child)
class TreeTester(object):
'''A helper class to see _ast tree and compare with astroid tree
indent: string for tree indent representation
lineno: bool to tell if we should print the line numbers
>>> tester = TreeTester('print')
>>> print tester.native_tree_repr()
<Module>
. body = [
. <Print>
. . nl = True
. ]
>>> print tester.astroid_tree_repr()
Module()
body = [
Print()
dest =
values = [
]
]
'''
indent = '. '
lineno = False
def __init__(self, sourcecode):
self._string = ''
self.sourcecode = sourcecode
self._ast_node = None
self.build_ast()
def build_ast(self):
"""build the _ast tree from the source code"""
self._ast_node = parse(self.sourcecode)
def native_tree_repr(self, node=None, indent=''):
"""get a nice representation of the _ast tree"""
self._string = ''
if node is None:
node = self._ast_node
self._native_repr_tree(node, indent)
return self._string
def _native_repr_tree(self, node, indent, _done=None):
"""recursive method for the native tree representation"""
from _ast import Load as _Load, Store as _Store, Del as _Del
from _ast import AST as Node
if _done is None:
_done = set()
if node in _done:
self._string += '\nloop in tree: %r (%s)' % (node,
getattr(node, 'lineno', None))
return
_done.add(node)
self._string += '\n' + indent + '<%s>' % node.__class__.__name__
indent += self.indent
if not hasattr(node, '__dict__'):
self._string += '\n' + self.indent + " ** node has no __dict__ " + str(node)
return
node_dict = node.__dict__
if hasattr(node, '_attributes'):
for a in node._attributes:
attr = node_dict[a]
if attr is None:
continue
if a in ("lineno", "col_offset") and not self.lineno:
continue
self._string +='\n' + indent + a + " = " + repr(attr)
for field in node._fields or ():
attr = node_dict[field]
if attr is None:
continue
if isinstance(attr, list):
if not attr:
continue
self._string += '\n' + indent + field + ' = ['
for elt in attr:
self._native_repr_tree(elt, indent, _done)
self._string += '\n' + indent + ']'
continue
if isinstance(attr, (_Load, _Store, _Del)):
continue
if isinstance(attr, Node):
self._string += '\n' + indent + field + " = "
self._native_repr_tree(attr, indent, _done)
else:
self._string += '\n' + indent + field + " = " + repr(attr)
def build_astroid_tree(self):
"""build astroid tree from the _ast tree
"""
from .builder import AstroidBuilder
tree = AstroidBuilder().string_build(self.sourcecode)
return tree
def astroid_tree_repr(self, ids=False):
"""build the astroid tree and return a nice tree representation"""
mod = self.build_astroid_tree()
return mod.repr_tree(ids)
__all__ = ('LocalsVisitor', 'ASTWalker',)
| lukaszpiotr/pylama_with_gjslint | pylama/checkers/pylint/astroid/utils.py | Python | lgpl-3.0 | 8,246 | [
"VisIt"
] | f51f78e5d8a5fd8ede92ebb73be693bd016960ebd45e89644bbefc47fd137a5d |
from __future__ import (absolute_import, division, print_function)
import unittest
import os
from mantid.simpleapi import mtd, logger
import numpy as np
from mantid.simpleapi import Abins, DeleteWorkspace
from AbinsModules import AbinsParameters, AbinsTestHelpers
try:
from pathos.multiprocessing import ProcessingPool
PATHOS_FOUND = True
except ImportError:
PATHOS_FOUND = False
def old_modules():
"""" Check if there are proper versions of Python and numpy."""
is_python_old = AbinsTestHelpers.old_python()
if is_python_old:
logger.warning("Skipping AbinsBasicTest because Python is too old.")
is_numpy_old = AbinsTestHelpers.is_numpy_valid(np.__version__)
if is_numpy_old:
logger.warning("Skipping AbinsBasicTest because numpy is too old.")
return is_python_old or is_numpy_old
def skip_if(skipping_criteria):
"""
Skip all tests if the supplied function returns true.
Python unittest.skipIf is not available in 2.6 (RHEL6) so we'll roll our own.
"""
def decorate(cls):
if skipping_criteria():
for attr in cls.__dict__.keys():
if callable(getattr(cls, attr)) and 'test' in attr:
delattr(cls, attr)
return cls
return decorate
@skip_if(old_modules)
class AbinsAdvancedParametersTest(unittest.TestCase):
def setUp(self):
# set up input for Abins
self._Si2 = "Si2-sc_AbinsAdvancedParameters"
self._wrk_name = self._Si2 + "_ref"
# before each test set AbinsParameters to default values
AbinsParameters.fwhm = 3.0
AbinsParameters.delta_width = 0.0005
AbinsParameters.tosca_final_neutron_energy = 32.0
AbinsParameters.tosca_cos_scattering_angle = -0.7069
AbinsParameters.tosca_a = 0.0000001
AbinsParameters.tosca_b = 0.005
AbinsParameters.tosca_c = 2.5
AbinsParameters.dft_group = "PhononAB"
AbinsParameters.powder_data_group = "Powder"
AbinsParameters.crystal_data_group = "Crystal"
AbinsParameters.s_data_group = "S"
AbinsParameters.pkt_per_peak = 50
AbinsParameters.bin_width = 1.0
AbinsParameters.max_wavenumber = 4100.0
AbinsParameters.min_wavenumber = 0.0
AbinsParameters.s_relative_threshold = 0.001
AbinsParameters.s_absolute_threshold = 10e-8
AbinsParameters.optimal_size = 5000000
AbinsParameters.threads = 1
def tearDown(self):
AbinsTestHelpers.remove_output_files(list_of_names=["Abins", "explicit", "default", "total",
"squaricn_scale", "benzene_exp", "experimental"])
mtd.clear()
def test_wrong_fwhm(self):
# fwhm should be positive
AbinsParameters.fwhm = -1.0
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
# fwhm should be larger than 0
AbinsParameters.fwhm = 0.0
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
# fwhm should be smaller than 10
AbinsParameters.fwhm = 10.0
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
def test_wrong_delta_width(self):
# delta_width should be a number
AbinsParameters.delta_width = "fd"
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
# delta_with is positive so it cannot be negative
AbinsParameters.delta_width = -0.01
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
# delta_width should have non-zero value
AbinsParameters.delta_width = 0.0
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
# delta_width should be smaller than one
AbinsParameters.delta_width = 1.0
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
# Tests for TOSCA parameters
def test_wrong_tosca_final_energy(self):
# final energy should be a float not str
AbinsParameters.tosca_final_neutron_energy = "0"
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
# final energy should be of float type not integer
AbinsParameters.tosca_final_neutron_energy = 1
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
# final energy should be positive
AbinsParameters.tosca_final_neutron_energy = -1.0
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
def test_wrong_tosca_cos_scattering_angle(self):
# cosines of scattering angle is float
AbinsParameters.tosca_cos_scattering_angle = "0.0334"
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
# TOSCA_cos_scattering_angle cannot be integer
AbinsParameters.tosca_cos_scattering_angle = 1
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
def test_wrong_tosca_resolution_constant_A(self):
# TOSCA constant should be float
AbinsParameters.tosca_a = "wrong"
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
def test_wrong_tosca_resolution_constant_B(self):
AbinsParameters.tosca_b = "wrong"
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
def test_wrong_tosca_resolution_constant_C(self):
AbinsParameters.tosca_c = "wrong"
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
# tests for folders
def test_wrong_dft_group(self):
# name should be of type str
AbinsParameters.dft_group = 2
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
# name of group cannot be an empty string
AbinsParameters.dft_group = ""
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
def test_wrong_powder_data_group(self):
# name should be of type str
AbinsParameters.powder_data_group = 2
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
# name of group cannot be an empty string
AbinsParameters.powder_data_group = ""
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
def test_wrong_crystal_data_group(self):
# name should be of type str
AbinsParameters.crystal_data_group = 2
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
# name of group cannot be an empty string
AbinsParameters.crystal_data_group = ""
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
def test_wrong_powder_s_data_group(self):
# name should be of type str
AbinsParameters.s_data_group = 2
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
# name of group cannot be an empty string
AbinsParameters.s_data_group = ""
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
def test_doubled_name(self):
# Wrong scenario: two groups with the same name
AbinsParameters.dft_group = "NiceName"
AbinsParameters.powder_data_group = "NiceName"
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
# Test for rebinning parameters
def test_wrong_bin_width(self):
# width cannot be 0
AbinsParameters.bin_width = 0.0
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
# width must be float
AbinsParameters.bin_width = 5
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
# width must be positive
AbinsParameters.bin_width = -1.0
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
# width should be smaller than 10 cm^-1
AbinsParameters.bin_width = 20.0
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
def test_wrong_min_wavenumber(self):
# minimum wavenumber cannot be negative
AbinsParameters.min_wavenumber = -0.001
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
# minimum wavenumber cannot be int
AbinsParameters.min_wavenumber = 1
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
def test_wrong_max_wavenumber(self):
# maximum wavenumber cannot be negative
AbinsParameters.max_wavenumber = -0.01
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
# maximum wavenumber cannot be integer
AbinsParameters.max_wavenumber = 10
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
def test_wrong_energy_window(self):
# min_wavenumber must be smaller than max_wavenumber
AbinsParameters.min_wavenumber = 1000.0
AbinsParameters.max_wavenumber = 10.0
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
def test_wrong_s_absolute_threshold(self):
AbinsParameters.s_absolute_threshold = 1
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
AbinsParameters.s_absolute_threshold = -0.01
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
AbinsParameters.s_absolute_threshold = "Wrong value"
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
def test_wrong_s_relative_threshold(self):
AbinsParameters.s_relative_threshold = 1
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
AbinsParameters.s_relative_threshold = -0.01
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
AbinsParameters.s_relative_threshold = "Wrong value"
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
def test_wrong_optimal_size(self):
# optimal size cannot be negative
AbinsParameters.optimal_size = -10000
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
# optimal size must be of type int
AbinsParameters.optimal_size = 50.0
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
def test_wrong_threads(self):
if PATHOS_FOUND:
AbinsParameters.threads = -1
self.assertRaises(RuntimeError, Abins, PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
def test_good_case(self):
good_names = [self._wrk_name, self._wrk_name + "_Si", self._wrk_name + "_Si_total"]
Abins(PhononFile=self._Si2 + ".phonon", OutputWorkspace=self._wrk_name)
names = mtd.getObjectNames()
# Builtin cmp has been removed in Python 3
def _cmp(a, b):
return (a > b) - (a < b)
self.assertAlmostEqual(0, _cmp(good_names, names))
if __name__ == "__main__":
unittest.main()
| wdzhou/mantid | Framework/PythonInterface/test/python/plugins/algorithms/AbinsAdvancedParametersTest.py | Python | gpl-3.0 | 12,462 | [
"CRYSTAL"
] | b8d910b4a214947da2bd1a54d2e31ff53fe154b79ae90a1e30aa86ec32e7806f |
"""
cclib (http://cclib.sf.net) is (c) 2006, the cclib development team
and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html).
"""
__revision__ = "$Revision: 737 $"
from PyQuante.Molecule import Molecule
def makepyquante(atomcoords, atomnos, charge=0, mult=1):
"""Create a PyQuante Molecule.
>>> import numpy
>>> from PyQuante.hartree_fock import hf
>>> atomnos = numpy.array([1,8,1],"i")
>>> a = numpy.array([[-1,1,0],[0,0,0],[1,1,0]],"f")
>>> pyqmol = makepyquante(a,atomnos)
>>> en,orbe,orbs = hf(pyqmol)
>>> print int(en * 10) / 10. # Should be around -73.8
-73.8
"""
return Molecule("notitle", zip(atomnos, atomcoords), units="Angstrom",
charge=charge, multiplicity=mult)
if __name__ == "__main__":
import doctest
doctest.testmod()
| keceli/RMG-Java | source/cclib/bridge/cclib2pyquante.py | Python | mit | 828 | [
"cclib"
] | 0a27c2dfe6e19e67175d3d9128b1f486db97a5b9efdc9736cf56c073a90e68eb |
###############################################################################
# #
# Peekaboo Extended Email Attachment Behavior Observation Owl #
# #
# config.py #
###############################################################################
# #
# Copyright (C) 2016-2020 science + computing ag #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or (at #
# your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, but #
# WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
""" The configuration for the main program as well as the ruleset. Handles
defaults as well as reading a configuration file. """
import re
import sys
import logging
import configparser
from peekaboo.exceptions import PeekabooConfigException
from peekaboo.toolbox.cortex import tlp
logger = logging.getLogger(__name__)
class PeekabooConfigParser( # pylint: disable=too-many-ancestors
configparser.ConfigParser):
""" A config parser that gives error feedback if a required file does not
exist or cannot be opened. """
LOG_LEVEL = object()
OCTAL = object()
RELIST = object()
IRELIST = object()
def __init__(self, config_file):
super().__init__()
try:
self.read_file(open(config_file))
except IOError as ioerror:
raise PeekabooConfigException(
'Configuration file "%s" can not be opened for reading: %s' %
(config_file, ioerror))
except configparser.Error as cperror:
raise PeekabooConfigException(
'Configuration file "%s" can not be parsed: %s' %
(config_file, cperror))
self.lists = {}
self.relists = {}
def getlist(self, section, option, raw=False, vars=None, fallback=None):
""" Special getter where multiple options in the config file
distinguished by a .<no> suffix form a list. Matches the signature for
configparser getters. """
# cache results because the following is somewhat inefficient
if section not in self.lists:
self.lists[section] = {}
if option in self.lists[section]:
return self.lists[section][option]
if section not in self:
self.lists[section][option] = fallback
return fallback
# Go over all options in this section. We want to allow "holes" in
# the lists, i.e. setting.1, setting.2 but no setting.3 followed by
# setting.4. We use here that ConfigParser retains option order from
# the file.
value = []
for setting in self[section]:
if not setting.startswith(option):
continue
# Parse 'setting' into (key) and 'setting.subscript' into
# (key, subscript) and use it to determine if this setting is a
# list. Note how we do not use the subscript at all here.
name_parts = setting.split('.')
key = name_parts[0]
is_list = len(name_parts) > 1
if key != option:
continue
if not is_list:
raise PeekabooConfigException(
'Option %s in section %s is supposed to be a list '
'but given as individual setting' % (setting, section))
# Potential further checks:
# - There are no duplicate settings with ConfigParser. The last
# one always wins.
value.append(self[section].get(setting, raw=raw, vars=vars))
# it's not gonna get any better on the next call, so cache even the
# default
if not value:
value = fallback
self.lists[section][option] = value
return value
def getirelist(self, section, option, raw=False, vars=None, fallback=None, flags=None):
""" Special getter for lists of regular expressions that are compiled to match
case-insensitively (re.IGNORECASE). Returns the compiled expression objects in a
list ready for matching and searching.
"""
return self.getrelist(section, option, raw=raw, vars=vars, fallback=fallback, flags=re.IGNORECASE)
def getrelist(self, section, option, raw=False, vars=None, fallback=None, flags=0):
""" Special getter for lists of regular expressions. Returns the
compiled expression objects in a list ready for matching and searching.
"""
if section not in self.relists:
self.relists[section] = {}
if option in self.relists[section]:
return self.relists[section][option]
if section not in self:
self.relists[section][option] = fallback
return fallback
strlist = self[section].getlist(option, raw=raw, vars=vars,
fallback=fallback)
if strlist is None:
self.relists[section][option] = None
return None
compiled_res = []
for regex in strlist:
try:
compiled_res.append(re.compile(regex, flags))
except (ValueError, TypeError) as error:
raise PeekabooConfigException(
'Failed to compile regular expression "%s" (section %s, '
'option %s): %s' % (regex, section, option, error))
# it's not gonna get any better on the next call, so cache even the
# default
if not compiled_res:
compiled_res = fallback
self.relists[section][option] = compiled_res
return compiled_res
def get_log_level(self, section, option, raw=False, vars=None,
fallback=None):
""" Get the log level from the configuration file and parse the string
into a logging loglevel such as logging.CRITICAL. Raises config
exception if the log level is unknown. Options identical to get(). """
levels = {
'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG
}
level = self.get(section, option, raw=raw, vars=vars, fallback=None)
if level is None:
return fallback
if level not in levels:
raise PeekabooConfigException('Unknown log level %s' % level)
return levels[level]
def gettlp(self, section, option, raw=False, vars=None, fallback=None):
levels = {
'red': tlp.RED,
'3': tlp.RED,
'amber': tlp.AMBER,
'2': tlp.AMBER,
'green': tlp.GREEN,
'1': tlp.GREEN,
'white': tlp.WHITE,
'0': tlp.WHITE,
}
level = self.get(section, option, raw=raw, vars=vars, fallback=None)
if level is None:
return fallback
level = level.lower()
if level not in levels:
raise PeekabooConfigException('Unknown tlp level %s' % level)
return levels[level]
def getoctal(self, section, option, raw=False, vars=None, fallback=None):
""" Get an integer in octal notation. Raises config
exception if the format is wrong. Options identical to get(). """
value = self.get(section, option, raw=raw, vars=vars, fallback=None)
if value is None:
return fallback
try:
octal = int(value, 8)
except ValueError:
raise PeekabooConfigException(
'Invalid value for octal option %s in section %s: %s'
% (option, section, value))
return octal
def get_by_type(self, section, option, fallback=None, option_type=None):
""" Get an option from the configuration file parser. Automatically
detects the type from the type of the default if given and calls the
right getter method to coerce the value to the correct type.
@param section: Which section to look for option in.
@type section: string
@param option: The option to read.
@type option: string
@param fallback: (optional) Default value to return if option is not
found. Defaults itself to None so that the method will
return None if the option is not found.
@type fallback: int, bool, str or None.
@param option_type: Override the option type.
@type option_type: int, bool, str or None. """
if option_type is None and fallback is not None:
option_type = type(fallback)
getter = {
int: self.getint,
float: self.getfloat,
bool: self.getboolean,
list: self.getlist,
tuple: self.getlist,
str: self.get,
None: self.get,
# these only work when given explicitly as option_type
self.LOG_LEVEL: self.get_log_level,
self.OCTAL: self.getoctal,
self.RELIST: self.getrelist,
self.IRELIST: self.getirelist,
tlp: self.gettlp,
}
return getter[option_type](section, option, fallback=fallback)
def set_known_options(self, config_options):
""" Set a number of known config options as member variables. Also
checks for unknown options being present.
@param config_options: the mapping of config option to section and
option name
@type config_options: Dict of two-item-tuples
(config_option: [section, option])
@raises PeekabooConfigException: if any unknown sections or options are
found. """
settings = vars(self)
check_options = {}
for (setting, config_names) in config_options.items():
section = config_names[0]
option = config_names[1]
# remember for later checking for unknown options
if section not in check_options:
check_options[section] = []
check_options[section].append(option)
# maybe force the option's value type
option_type = None
if len(config_names) == 3:
option_type = config_names[2]
# e.g.:
# self.log_format = self.get('logging', 'log_format',
# self.log_format)
settings[setting] = self.get_by_type(
section, option, fallback=settings[setting],
option_type=option_type)
# now check for unknown options
self.check_config(check_options)
def check_config(self, known_options):
""" Check this configuration against a list of known options. Raise an
exception if any unknown options are found.
@param known_options: A dict of sections and options, the key being the
section name and the value a list of option names.
@type known_options: dict
@returns: None
@raises PeekabooConfigException: if any unknown sections or options are
found.
"""
known_sections = known_options.keys()
self.check_sections(known_sections)
# go over sections both allowed and in the config
for section in known_sections:
self.check_section_options(section, known_options[section])
def check_sections(self, known_sections):
""" Check a list of known section names against this configuration
@param known_sections: names of known sections
@type known_sections: list(string)
@returns: None
@raises PeekabooConfigException: if any unknown sections are found in
the configuration.
"""
section_diff = set(self.sections()) - set(known_sections)
if section_diff:
raise PeekabooConfigException(
'Unknown section(s) found in config: %s'
% ', '.join(section_diff))
def check_section_options(self, section, known_options):
""" Check a config section for unknown options.
@param section: name of section to check
@type section: string
@param known_options: list of names of known options to check against
@type known_options: list(string)
@returns: None
@raises PeekabooConfigException: if any unknown options are found. """
try:
section_options = map(
# account for option.1 list syntax
lambda x: x.split('.')[0],
self.options(section))
except configparser.NoSectionError:
# a non-existent section can have no non-allowed options :)
return
option_diff = set(section_options) - set(known_options)
if option_diff:
raise PeekabooConfigException(
'Unknown config option(s) found in section %s: %s'
% (section, ', '.join(option_diff)))
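# Illustrative sketch, not part of the original module: the ".<no>" list
# syntax handled by getlist() above would look like this in a configuration
# file (section and option names are made up for the example):
#
#   [rules]
#   rule.1 : known
#   rule.2 : file_larger_than
#   rule.5 : final_rule
#
# getlist('rules', 'rule') then returns
# ['known', 'file_larger_than', 'final_rule'], preserving file order and
# tolerating gaps in the numbering.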
class PeekabooConfig(PeekabooConfigParser):
""" This class represents the Peekaboo configuration. """
def __init__(self, config_file=None, log_level=None):
""" Initialise the configuration with defaults, overwrite with command
line options and finally read the configuration file. """
# hard defaults: The idea here is that every config option has a
# default that would in principle enable Peekaboo to run. Code using
# the option should still cope with no or an empty value being handed
# to it.
self.user = 'peekaboo'
self.group = None
self.host = '127.0.0.1'
self.port = 8100
self.pid_file = None
self.log_level = logging.INFO
self.log_format = '%(asctime)s - %(name)s - (%(threadName)s) - ' \
'%(levelname)s - %(message)s'
self.worker_count = 3
self.processing_info_dir = '/var/lib/peekaboo/malware_reports'
self.report_locale = None
self.db_url = 'sqlite:////var/lib/peekaboo/peekaboo.db'
self.db_async_driver = None
self.db_log_level = logging.WARNING
self.config_file = '/opt/peekaboo/etc/peekaboo.conf'
self.ruleset_config = '/opt/peekaboo/etc/ruleset.conf'
self.analyzer_config = '/opt/peekaboo/etc/analyzers.conf'
self.cluster_instance_id = 0
self.cluster_stale_in_flight_threshold = 15*60
self.cluster_duplicate_check_interval = 60
# section and option names for the configuration file. key is the above
# variable name whose value will be overwritten by the configuration
# file value. Third item can be getter function if special parsing is
# required.
config_options = {
'log_level': ['logging', 'log_level', self.LOG_LEVEL],
'log_format': ['logging', 'log_format'],
'user': ['global', 'user'],
'group': ['global', 'group'],
'pid_file': ['global', 'pid_file'],
'host': ['global', 'host'],
'port': ['global', 'port'],
'worker_count': ['global', 'worker_count'],
'processing_info_dir': ['global', 'processing_info_dir'],
'report_locale': ['global', 'report_locale'],
'db_url': ['db', 'url'],
'db_async_driver': ['db', 'async_driver'],
'db_log_level': ['db', 'log_level', self.LOG_LEVEL],
'ruleset_config': ['ruleset', 'config'],
'analyzer_config': ['analyzers', 'config'],
'cluster_instance_id': ['cluster', 'instance_id'],
'cluster_stale_in_flight_threshold': ['cluster', 'stale_in_flight_threshold'],
'cluster_duplicate_check_interval': ['cluster', 'duplicate_check_interval'],
}
# overrides from outside, e.g. by command line arguments whose values
# are needed while reading the configuration file already (most notably
# log level and path to the config file).
if log_level:
self.log_level = log_level
if config_file:
self.config_file = config_file
# setup default logging to log any errors during the
# parsing of the config file.
self.setup_logging()
# read configuration file. Note that we require a configuration file
# here. We may change that if we decide that we want to allow the user
# to run us with the above defaults only.
super().__init__(self.config_file)
# overwrite above defaults in our member variables via indirect access
self.set_known_options(config_options)
# Update logging with what we just parsed from the config
self.setup_logging()
# here we could overwrite defaults and config file with additional
# command line arguments if required
def setup_logging(self):
""" Setup logging to console by reconfiguring the root logger so that
it affects all loggers everywhere. """
_logger = logging.getLogger()
# Check if we already have a log handler
if _logger.handlers:
# Remove all handlers
for handler in _logger.handlers:
_logger.removeHandler(handler)
# log format
log_formatter = logging.Formatter(self.log_format)
# create console handler and set level to debug
to_console_log_handler = logging.StreamHandler(sys.stdout)
to_console_log_handler.setFormatter(log_formatter)
_logger.addHandler(to_console_log_handler)
_logger.setLevel(self.log_level)
def __str__(self):
settings = {}
for (option, value) in vars(self).items():
if not option.startswith('_'):
settings[option] = value
return '<PeekabooConfig(%s)>' % settings
__repr__ = __str__
class PeekabooAnalyzerConfig(PeekabooConfigParser):
""" This class represents the analyzer configuration. """
def __init__(self, config_file=None):
""" Initialise the configuration with defaults, overwrite with command
line options and finally read the configuration file. """
self.cuckoo_url = 'http://127.0.0.1:8090'
self.cuckoo_api_token = ''
self.cuckoo_poll_interval = 5
self.cuckoo_submit_original_filename = True
self.cuckoo_maximum_job_age = 15*60
self.cortex_url = 'http://127.0.0.1:9001'
self.cortex_tlp = tlp.AMBER
self.cortex_api_token = ''
self.cortex_poll_interval = 5
self.cortex_submit_original_filename = True
self.cortex_maximum_job_age = 15*60
config_options = {
'cuckoo_url': ['cuckoo', 'url'],
'cuckoo_api_token': ['cuckoo', 'api_token'],
'cuckoo_poll_interval': ['cuckoo', 'poll_interval'],
'cuckoo_submit_original_filename': [
'cuckoo', 'submit_original_filename'],
'cuckoo_maximum_job_age': ['cuckoo', 'maximum_job_age'],
'cortex_url': ['cortex', 'url'],
'cortex_tlp': ['cortex', 'tlp'],
'cortex_api_token': ['cortex', 'api_token'],
'cortex_poll_interval': ['cortex', 'poll_interval'],
'cortex_submit_original_filename': [
'cortex', 'submit_original_filename'],
'cortex_maximum_job_age': ['cortex', 'maximum_job_age'],
}
# read configuration file. Note that we require a configuration file
# here. We may change that if we decide that we want to allow the user
# to run us with the above defaults only.
super().__init__(config_file)
# overwrite above defaults in our member variables via indirect access
self.set_known_options(config_options)
def __str__(self):
settings = {}
for (option, value) in vars(self).items():
if not option.startswith('_'):
settings[option] = value
return '<PeekabooAnalyzerConfig(%s)>' % settings
__repr__ = __str__
| scVENUS/PeekabooAV | peekaboo/config.py | Python | gpl-3.0 | 21,417 | [
"Amber"
] | d429aca4c9e80cd74ea6155f364fc5b29e78090fcd135627fe80eca615930812 |
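The config classes above rely on an indirect-access pattern: defaults live as attributes, and a mapping of attribute name to [section, option, optional getter] drives the overwrite step after the file is parsed. A minimal, self-contained sketch of that idea (hypothetical names, not PeekabooAV's actual PeekabooConfigParser) could look like this:

import configparser

class SketchConfig:
    """Hypothetical illustration of the [section, option, getter] mapping."""
    def __init__(self, config_file):
        # defaults
        self.log_level = 'INFO'
        self.port = 8100

        parser = configparser.ConfigParser()
        parser.read(config_file)

        # third item, if present, is a getter used for special parsing
        options = {
            'log_level': ['logging', 'log_level'],
            'port': ['global', 'port', parser.getint],
        }
        for attr, spec in options.items():
            section, option = spec[0], spec[1]
            getter = spec[2] if len(spec) > 2 else parser.get
            if parser.has_option(section, option):
                setattr(self, attr, getter(section, option))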
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import six
import io
import gzip
import bz2file
from tempfile import gettempdir
import itertools
import requests
from cachecontrol import CacheControl
from cachecontrol.caches import FileCache
from skbio.io import IOSourceError
from ._fileobject import (IterableStringWriterIO, IterableStringReaderIO,
WrappedBufferedRandom)
def get_io_sources():
return (
# The order of these sources is significant as they will short-circuit
HTTPSource,
FilePathSource,
BytesIOSource,
BufferedIOSource,
TextIOSource,
IterableSource
)
def _compressors():
return (
GzipCompressor,
BZ2Compressor
)
def get_compression_handler(name):
compressors = {c.name: c for c in _compressors()}
compressors['auto'] = AutoCompressor
return compressors.get(name, False)
class IOSource(object):
closeable = True
def __init__(self, file, options):
self.file = file
self.options = options
def can_read(self):
return False
def can_write(self):
return False
def get_reader(self):
raise NotImplementedError()
def get_writer(self):
raise NotImplementedError()
class Compressor(IOSource):
streamable = True
name = ''
def can_write(self):
return True
class FilePathSource(IOSource):
def can_read(self):
return isinstance(self.file, six.string_types)
def can_write(self):
return self.can_read()
def get_reader(self):
return io.open(self.file, mode='rb')
def get_writer(self):
return io.open(self.file, mode='wb')
class HTTPSource(IOSource):
def can_read(self):
return (
isinstance(self.file, six.string_types) and
requests.compat.urlparse(self.file).scheme in {'http', 'https'})
def get_reader(self):
sess = CacheControl(requests.Session(),
cache=FileCache(gettempdir()))
req = sess.get(self.file)
# if the response is not 200, an exception will be raised
req.raise_for_status()
return io.BufferedReader(io.BytesIO(req.content))
class BytesIOSource(IOSource):
closeable = False
def can_read(self):
return isinstance(self.file, io.BytesIO)
def can_write(self):
return self.can_read()
def get_reader(self):
return WrappedBufferedRandom(self.file)
def get_writer(self):
return self.get_reader()
class BufferedIOSource(IOSource):
closeable = False
def can_read(self):
# `peek` is part of the API we want to guarantee, so we can't just look
# for io.BufferedIOBase. Despite the fact that the C implementation of
# io.BufferedRandom inherits io.BufferedReader/Writer it is not
# reflected in an isinstance check, so we need to check for it manually
return isinstance(self.file, (io.BufferedReader, io.BufferedRandom))
def can_write(self):
return isinstance(self.file, (io.BufferedWriter, io.BufferedRandom))
def get_reader(self):
return self.file
def get_writer(self):
return self.file
class TextIOSource(IOSource):
closeable = False
def can_read(self):
return isinstance(self.file, io.TextIOBase) and self.file.readable()
def can_write(self):
return isinstance(self.file, io.TextIOBase) and self.file.writable()
def get_reader(self):
return self.file
def get_writer(self):
return self.file
class IterableSource(IOSource):
def can_read(self):
if hasattr(self.file, '__iter__'):
iterator = iter(self.file)
head = next(iterator, None)
if head is None:
self.repaired = []
return True
if isinstance(head, six.text_type):
self.repaired = itertools.chain([head], iterator)
return True
else:
# We may have mangled a generator at this point, so just abort
if six.PY2 and isinstance(head, bytes):
raise IOSourceError(
"Could not open source: %r (mode: %r).\n Prepend a "
r"`u` to the strings (e.g. [u'line1\n', u'line2\n'])" %
(self.file, self.options['mode']))
raise IOSourceError(
"Could not open source: %r (mode: %r)" %
(self.file, self.options['mode']))
return False
def can_write(self):
return hasattr(self.file, 'append') and hasattr(self.file, '__iter__')
def get_reader(self):
return IterableStringReaderIO(self.repaired,
newline=self.options['newline'])
def get_writer(self):
return IterableStringWriterIO(self.file,
newline=self.options['newline'])
class GzipCompressor(Compressor):
name = 'gzip'
streamable = True
def can_read(self):
return self.file.peek(2)[:2] == b'\x1f\x8b'
def get_reader(self):
return gzip.GzipFile(fileobj=self.file)
def get_writer(self):
return gzip.GzipFile(fileobj=self.file, mode='wb',
compresslevel=self.options['compresslevel'])
class BZ2Compressor(Compressor):
name = 'bz2'
streamable = False
def can_read(self):
return self.file.peek(3)[:3] == b'BZh'
def get_reader(self):
return bz2file.BZ2File(self.file, mode='rb')
def get_writer(self):
return bz2file.BZ2File(self.file, mode='wb',
compresslevel=self.options['compresslevel'])
class AutoCompressor(Compressor):
streamable = True # We can't write, so it doesn't matter
name = 'auto'
def get_reader(self):
for compression_handler in _compressors():
compressor = compression_handler(self.file, self.options)
if compressor.can_read():
return compressor.get_reader()
return self.file
def get_writer(self):
return self.file
| corburn/scikit-bio | skbio/io/_iosources.py | Python | bsd-3-clause | 6,568 | [
"scikit-bio"
] | 82dc61f1ed6385494d3092c3e98ebeb3b511c416b0e55e9d91e6642a878bc291 |
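The compressor classes above decide readability by peeking at magic bytes on a buffered stream. A small standard-library-only sketch of the same idea (an illustration, not scikit-bio's API):

import bz2
import gzip
import io

def open_maybe_compressed(path):
    # io.open(..., 'rb') returns a BufferedReader, so peek() is available
    stream = io.open(path, mode='rb')
    head = stream.peek(3)
    if head[:2] == b'\x1f\x8b':      # gzip magic bytes, as in GzipCompressor
        return gzip.GzipFile(fileobj=stream)
    if head[:3] == b'BZh':           # bz2 magic bytes, as in BZ2Compressor
        return bz2.BZ2File(stream, mode='rb')
    return stream                    # plain, uncompressed file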
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
from monty.serialization import loadfn
import warnings
import numpy as np
import multiprocessing
from pymatgen.analysis.pourbaix_diagram import PourbaixDiagram, PourbaixEntry,\
PourbaixPlotter, IonEntry, MultiEntry
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.core.ion import Ion
from pymatgen import SETTINGS
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class PourbaixEntryTest(unittest.TestCase):
_multiprocess_shared_ = True
"""
Test all functions using a fictitious entry
"""
def setUp(self):
# comp = Composition("Mn2O3")
self.solentry = ComputedEntry("Mn2O3", 49)
ion = Ion.from_formula("MnO4-")
self.ionentry = IonEntry(ion, 25)
self.PxIon = PourbaixEntry(self.ionentry)
self.PxSol = PourbaixEntry(self.solentry)
self.PxIon.concentration = 1e-4
def test_pourbaix_entry(self):
self.assertEqual(self.PxIon.entry.energy, 25, "Wrong Energy!")
self.assertEqual(self.PxIon.entry.name,
"MnO4[-]", "Wrong Entry!")
self.assertEqual(self.PxSol.entry.energy, 49, "Wrong Energy!")
self.assertEqual(self.PxSol.entry.name,
"Mn2O3", "Wrong Entry!")
# self.assertEqual(self.PxIon.energy, 25, "Wrong Energy!")
# self.assertEqual(self.PxSol.energy, 49, "Wrong Energy!")
self.assertEqual(self.PxIon.concentration, 1e-4, "Wrong concentration!")
def test_calc_coeff_terms(self):
self.assertEqual(self.PxIon.npH, -8, "Wrong npH!")
self.assertEqual(self.PxIon.nPhi, -7, "Wrong nPhi!")
self.assertEqual(self.PxIon.nH2O, 4, "Wrong nH2O!")
self.assertEqual(self.PxSol.npH, -6, "Wrong npH!")
self.assertEqual(self.PxSol.nPhi, -6, "Wrong nPhi!")
self.assertEqual(self.PxSol.nH2O, 3, "Wrong nH2O!")
def test_to_from_dict(self):
d = self.PxIon.as_dict()
ion_entry = self.PxIon.from_dict(d)
self.assertEqual(ion_entry.entry.name, "MnO4[-]", "Wrong Entry!")
d = self.PxSol.as_dict()
sol_entry = self.PxSol.from_dict(d)
self.assertEqual(sol_entry.name, "Mn2O3(s)", "Wrong Entry!")
self.assertEqual(sol_entry.energy, self.PxSol.energy,
"as_dict and from_dict energies unequal")
def test_energy_functions(self):
# TODO: test these for values
self.PxSol.energy_at_conditions(10, 0)
self.PxSol.energy_at_conditions(np.array([1, 2, 3]), 0)
self.PxSol.energy_at_conditions(10, np.array([1, 2, 3]))
self.PxSol.energy_at_conditions(np.array([1, 2, 3]),
np.array([1, 2, 3]))
def test_multi_entry(self):
# TODO: More robust multientry test
m_entry = MultiEntry([self.PxSol, self.PxIon])
for attr in ['energy', 'composition', 'nPhi']:
self.assertEqual(getattr(m_entry, attr),
getattr(self.PxSol, attr) + getattr(self.PxIon, attr))
# As dict, from dict
m_entry_dict = m_entry.as_dict()
m_entry_new = MultiEntry.from_dict(m_entry_dict)
self.assertEqual(m_entry_new.energy, m_entry.energy)
class PourbaixDiagramTest(unittest.TestCase):
_multiprocess_shared_ = True
@classmethod
def setUpClass(cls):
cls.test_data = loadfn(os.path.join(test_dir, 'pourbaix_test_data.json'))
cls.pbx = PourbaixDiagram(cls.test_data['Zn'], filter_solids=True)
cls.pbx_nofilter = PourbaixDiagram(cls.test_data['Zn'],
filter_solids=False)
def test_pourbaix_diagram(self):
self.assertEqual(set([e.name for e in self.pbx.stable_entries]),
{"ZnO(s)", "Zn[2+]", "ZnHO2[-]", "ZnO2[2-]", "Zn(s)"},
"List of stable entries does not match")
self.assertEqual(set([e.name for e in self.pbx_nofilter.stable_entries]),
{"ZnO(s)", "Zn[2+]", "ZnHO2[-]", "ZnO2[2-]", "Zn(s)",
"ZnO2(s)", "ZnH(s)"},
"List of stable entries for unfiltered pbx does not match")
pbx_lowconc = PourbaixDiagram(self.test_data['Zn'], conc_dict={"Zn": 1e-8},
filter_solids=True)
self.assertEqual(set([e.name for e in pbx_lowconc.stable_entries]),
{"Zn(HO)2(aq)", "Zn[2+]", "ZnHO2[-]", "ZnO2[2-]", "Zn(s)"})
def test_properties(self):
self.assertEqual(len(self.pbx.unstable_entries), 2)
def test_multicomponent(self):
# Assure no ions get filtered at high concentration
ag_n = [e for e in self.test_data['Ag-Te-N']
if not "Te" in e.composition]
highconc = PourbaixDiagram(ag_n, filter_solids=True,
conc_dict={"Ag": 1e-5, "N": 1})
entry_sets = [set(e.entry_id) for e in highconc.stable_entries]
self.assertIn({"mp-124", "ion-17"}, entry_sets)
# Binary system
pd_binary = PourbaixDiagram(self.test_data['Ag-Te'], filter_solids=True,
comp_dict={"Ag": 0.5, "Te": 0.5},
conc_dict={"Ag": 1e-8, "Te": 1e-8})
self.assertEqual(len(pd_binary.stable_entries), 30)
test_entry = pd_binary.find_stable_entry(8, 2)
self.assertTrue("mp-499" in test_entry.entry_id)
# Find a specific multientry to test
self.assertEqual(pd_binary.get_decomposition_energy(test_entry, 8, 2), 0)
self.assertEqual(pd_binary.get_decomposition_energy(
test_entry.entry_list[0], 8, 2), 0)
pd_ternary = PourbaixDiagram(self.test_data['Ag-Te-N'], filter_solids=True)
self.assertEqual(len(pd_ternary.stable_entries), 49)
ag = self.test_data['Ag-Te-N'][30]
self.assertAlmostEqual(pd_ternary.get_decomposition_energy(ag, 2, -1), 0)
self.assertAlmostEqual(pd_ternary.get_decomposition_energy(ag, 10, -2), 0)
# Test invocation of pourbaix diagram from ternary data
new_ternary = PourbaixDiagram(pd_ternary.all_entries)
self.assertEqual(len(new_ternary.stable_entries), 49)
self.assertAlmostEqual(new_ternary.get_decomposition_energy(ag, 2, -1), 0)
self.assertAlmostEqual(new_ternary.get_decomposition_energy(ag, 10, -2), 0)
def test_get_pourbaix_domains(self):
domains = PourbaixDiagram.get_pourbaix_domains(self.test_data['Zn'])
self.assertEqual(len(domains[0]), 7)
def test_get_decomposition(self):
# Test a stable entry to ensure that it's zero in the stable region
entry = self.test_data['Zn'][12] # Should correspond to mp-2133
self.assertAlmostEqual(self.pbx.get_decomposition_energy(entry, 10, 1),
0.0, 5, "Decomposition energy of ZnO is not 0.")
# Test an unstable entry to ensure that it's never zero
entry = self.test_data['Zn'][11]
ph, v = np.meshgrid(np.linspace(0, 14), np.linspace(-2, 4))
result = self.pbx_nofilter.get_decomposition_energy(entry, ph, v)
self.assertTrue((result >= 0).all(),
"Unstable energy has hull energy of 0 or less")
# Test an unstable hydride to ensure HER correction works
self.assertAlmostEqual(self.pbx.get_decomposition_energy(entry, -3, -2),
11.093744395)
# Test a list of pHs
self.pbx.get_decomposition_energy(entry, np.linspace(0, 2, 5), 2)
# Test a list of Vs
self.pbx.get_decomposition_energy(entry, 4, np.linspace(-3, 3, 10))
# Test a set of matching arrays
ph, v = np.meshgrid(np.linspace(0, 14), np.linspace(-3, 3))
self.pbx.get_decomposition_energy(entry, ph, v)
def test_multielement_parallel(self):
# Simple test to ensure that multiprocessing is working
test_entries = self.test_data["Ag-Te-N"]
nproc = multiprocessing.cpu_count()
pbx = PourbaixDiagram(test_entries, filter_solids=True, nproc=nproc)
self.assertEqual(len(pbx.stable_entries), 49)
@unittest.skipIf(not SETTINGS.get("PMG_MAPI_KEY"),
"PMG_MAPI_KEY environment variable not set.")
def test_mpr_pipeline(self):
from pymatgen import MPRester
mpr = MPRester()
data = mpr.get_pourbaix_entries(["Zn"])
pbx = PourbaixDiagram(data, filter_solids=True, conc_dict={"Zn": 1e-8})
pbx.find_stable_entry(10, 0)
data = mpr.get_pourbaix_entries(["Ag", "Te"])
pbx = PourbaixDiagram(data, filter_solids=True,
conc_dict={"Ag": 1e-8, "Te": 1e-8})
self.assertEqual(len(pbx.stable_entries), 30)
test_entry = pbx.find_stable_entry(8, 2)
self.assertAlmostEqual(test_entry.energy, 2.3894017960000009, 3)
# Test custom ions
entries = mpr.get_pourbaix_entries(["Sn", "C", "Na"])
ion = IonEntry(Ion.from_formula("NaO28H80Sn12C24+"), -161.676)
custom_ion_entry = PourbaixEntry(ion, entry_id='my_ion')
pbx = PourbaixDiagram(entries + [custom_ion_entry], filter_solids=True,
comp_dict={"Na": 1, "Sn": 12, "C": 24})
self.assertAlmostEqual(pbx.get_decomposition_energy(custom_ion_entry, 5, 2),
8.31202738629504, 1)
def test_nofilter(self):
entries = self.test_data['Ag-Te']
pbx = PourbaixDiagram(entries)
pbx.get_decomposition_energy(entries[0], 0, 0)
def test_solid_filter(self):
entries = self.test_data['Ag-Te-N']
pbx = PourbaixDiagram(entries, filter_solids=True)
pbx.get_decomposition_energy(entries[0], 0, 0)
def test_serialization(self):
d = self.pbx.as_dict()
new = PourbaixDiagram.from_dict(d)
self.assertEqual(set([e.name for e in new.stable_entries]),
{"ZnO(s)", "Zn[2+]", "ZnHO2[-]", "ZnO2[2-]", "Zn(s)"},
"List of stable entries does not match")
# Test with unprocessed entries included, this should result in the
# previously filtered entries being included
d = self.pbx.as_dict(include_unprocessed_entries=True)
new = PourbaixDiagram.from_dict(d)
self.assertEqual(
set([e.name for e in new.stable_entries]),
{"ZnO(s)", "Zn[2+]", "ZnHO2[-]", "ZnO2[2-]", "Zn(s)", "ZnO2(s)", "ZnH(s)"},
"List of stable entries for unfiltered pbx does not match")
pd_binary = PourbaixDiagram(self.test_data['Ag-Te'], filter_solids=True,
comp_dict={"Ag": 0.5, "Te": 0.5},
conc_dict={"Ag": 1e-8, "Te": 1e-8})
new_binary = PourbaixDiagram.from_dict(pd_binary.as_dict())
self.assertEqual(len(pd_binary.stable_entries),
len(new_binary.stable_entries))
class PourbaixPlotterTest(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore")
self.test_data = loadfn(os.path.join(test_dir, "pourbaix_test_data.json"))
self.pd = PourbaixDiagram(self.test_data["Zn"])
self.plotter = PourbaixPlotter(self.pd)
def tearDown(self):
warnings.simplefilter("default")
def test_plot_pourbaix(self):
plotter = PourbaixPlotter(self.pd)
# Default limits
plotter.get_pourbaix_plot()
# Non-standard limits
plotter.get_pourbaix_plot(limits=[[-5, 4], [-2, 2]])
def test_plot_entry_stability(self):
entry = self.pd.all_entries[0]
self.plotter.plot_entry_stability(entry, limits=[[-2, 14], [-3, 3]])
# binary system
pd_binary = PourbaixDiagram(self.test_data['Ag-Te'],
comp_dict = {"Ag": 0.5, "Te": 0.5})
binary_plotter = PourbaixPlotter(pd_binary)
test_entry = pd_binary._unprocessed_entries[0]
plt = binary_plotter.plot_entry_stability(test_entry)
plt.close()
if __name__ == '__main__':
unittest.main()
| dongsenfo/pymatgen | pymatgen/analysis/tests/test_pourbaix_diagram.py | Python | mit | 12,311 | [
"pymatgen"
] | e8e55ed4e1567af4b635e6e27af1c5791893f417df3d114aafb4b68c35e333c0 |
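For orientation, the calls exercised in the tests above compose into a short end-to-end sketch; it assumes a configured PMG_MAPI_KEY and is illustrative rather than a recommended workflow:

from pymatgen import MPRester
from pymatgen.analysis.pourbaix_diagram import PourbaixDiagram, PourbaixPlotter

mpr = MPRester()  # requires the PMG_MAPI_KEY setting
entries = mpr.get_pourbaix_entries(["Zn"])
pbx = PourbaixDiagram(entries, filter_solids=True, conc_dict={"Zn": 1e-8})
stable = pbx.find_stable_entry(10, 0)  # stable entry at pH 10, 0 V
plt = PourbaixPlotter(pbx).get_pourbaix_plot(limits=[[-2, 14], [-3, 3]])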
#!/usr/bin/python -OO
import re
r"""
--- Day 9: All in a Single Night ---
Every year, Santa manages to deliver all of his presents in a single night.
This year, however, he has some new locations to visit; his elves have provided him the distances between every pair of locations. He can start and end at any two (different) locations he wants, but he must visit each location exactly once. What is the shortest distance he can travel to achieve this?
For example, given the following distances:
London to Dublin = 464
London to Belfast = 518
Dublin to Belfast = 141
The possible routes are therefore:
Dublin -> London -> Belfast = 982
London -> Dublin -> Belfast = 605
London -> Belfast -> Dublin = 659
Dublin -> Belfast -> London = 659
Belfast -> Dublin -> London = 605
Belfast -> London -> Dublin = 982
The shortest of these is London -> Dublin -> Belfast = 605, and so the answer is 605 in this example.
What is the distance of the shortest route?
"""
f = open('in2.txt', 'r')
prog = re.compile(r"^(\S+)\sto\s(\S+)\s=\s([0-9]+)$", re.UNICODE)
places = dict()
"""
places:
0: (1, 217), (2, 123)...
1: (0, 217)...
2: (0, 123)...
...
"""
"""
for place:
if been_there:
continue
if fullpath:
return, continue
# there are options otherwise
get options
for option:
dig(option) # recurse
"""
def getpaths(path, poisoned = None):
if poisoned == None: poisoned = list([None]) # there must be a first step
padding = "...." * (len(poisoned) - 1) # display depth
if __debug__: print padding + "Examining option %s , and been at %s" % (path, poisoned)
if(path in poisoned and path != None):
if __debug__: print padding + "Been here. Ignoring"
if __debug__: print padding + "---\n"
return # try next place
poisoned.append(path) # mark as processed
if(len(poisoned) >= len(places) + 1):
if __debug__: print "\nPATH COVERING ALL THE PLACES FOUND AND SAVED: %s\n" % poisoned
possiblepaths.append(list(poisoned))
poisoned.pop()
if __debug__: print "--- ---\n"
return # try next place
# no success or total failure, yet => there are options, try them
if(path not in places.keys()):
return # stub place
if __debug__: print padding + "Possible subpaths are: %s" % places[path]
for possible_path in places[path]:
subname = possible_path[0]
getpaths(subname, poisoned) # go deeper
if __debug__: print "--- --- ---\n"
poisoned.pop()
for line in f:
line = line.strip()
if(not line): continue # no empty lines allowed
# store information about out graph connectivity and weights
result = prog.findall(line)[0]
if(result[0] not in places.keys()): places[result[0]] = list()
places[result[0]].append(list([result[1], int(result[2])])) # adding path from one town to another
if(result[1] not in places.keys()): places[result[1]] = list()
places[result[1]].append(list([result[0], int(result[2])])) # and a path back
if __debug__: print "File parsed. Matrix: %s\n\n" % places
# time to recursively solve the puzzle
possiblepaths = list()
for start in places.keys():
getpaths(start)
if __debug__: print "Total paths found: %s\n" % possiblepaths
# now it's time to sum all distances of all the paths. could be done earlier though
distances = list()
for path in possiblepaths:
distance = 0
if __debug__: print "Counting distance of %s" % path
for cnt in range(1, len(path) - 1):
if(path[cnt] not in places.keys()):
continue # stub place
for subcn in places[path[cnt]]:
if(subcn[0] == path[cnt + 1]):
if __debug__: print "Distance from %s to %s is %d" % (path[cnt], path[cnt + 1], subcn[1])
distance += subcn[1]
distances.append(distance)
if __debug__: print "Distance of %s is %d" % (path, distance)
print "Shortest distance is: %d" % min(distances)
print "Longest distance is: %d" % max(distances)
| hermes-jr/adventofcode-in-python | level_09/level_09.py | Python | mit | 3,815 | [
"VisIt"
] | 2c6b32cde83c68021924e95d9eac5427d3e143f38042720702e0047e0a199d03 |
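The input is small, so the route search above can also be done by brute force; a compact sketch over the example distances from the docstring (expected shortest 605, longest 982):

from itertools import permutations

# example distances from the puzzle description
dist = {
    frozenset(('London', 'Dublin')): 464,
    frozenset(('London', 'Belfast')): 518,
    frozenset(('Dublin', 'Belfast')): 141,
}
places = {'London', 'Dublin', 'Belfast'}

totals = [
    sum(dist[frozenset(pair)] for pair in zip(route, route[1:]))
    for route in permutations(places)
]
print("Shortest distance is: %d" % min(totals))  # 605
print("Longest distance is: %d" % max(totals))   # 982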
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.iam.credentials_v1.services.iam_credentials import IAMCredentialsAsyncClient
from google.iam.credentials_v1.services.iam_credentials import IAMCredentialsClient
from google.iam.credentials_v1.services.iam_credentials import transports
from google.iam.credentials_v1.types import common
from google.oauth2 import service_account
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert IAMCredentialsClient._get_default_mtls_endpoint(None) is None
assert IAMCredentialsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert IAMCredentialsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert IAMCredentialsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert IAMCredentialsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert IAMCredentialsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class,transport_name", [
(IAMCredentialsClient, "grpc"),
(IAMCredentialsAsyncClient, "grpc_asyncio"),
])
def test_iam_credentials_client_from_service_account_info(client_class, transport_name):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info, transport=transport_name)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == (
'iamcredentials.googleapis.com:443'
)
@pytest.mark.parametrize("transport_class,transport_name", [
(transports.IAMCredentialsGrpcTransport, "grpc"),
(transports.IAMCredentialsGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_iam_credentials_client_service_account_always_use_jwt(transport_class, transport_name):
with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class,transport_name", [
(IAMCredentialsClient, "grpc"),
(IAMCredentialsAsyncClient, "grpc_asyncio"),
])
def test_iam_credentials_client_from_service_account_file(client_class, transport_name):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == (
'iamcredentials.googleapis.com:443'
)
def test_iam_credentials_client_get_transport_class():
transport = IAMCredentialsClient.get_transport_class()
available_transports = [
transports.IAMCredentialsGrpcTransport,
]
assert transport in available_transports
transport = IAMCredentialsClient.get_transport_class("grpc")
assert transport == transports.IAMCredentialsGrpcTransport
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(IAMCredentialsClient, transports.IAMCredentialsGrpcTransport, "grpc"),
(IAMCredentialsAsyncClient, transports.IAMCredentialsGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(IAMCredentialsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IAMCredentialsClient))
@mock.patch.object(IAMCredentialsAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IAMCredentialsAsyncClient))
def test_iam_credentials_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(IAMCredentialsClient, 'get_transport_class') as gtc:
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials()
)
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(IAMCredentialsClient, 'get_transport_class') as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
(IAMCredentialsClient, transports.IAMCredentialsGrpcTransport, "grpc", "true"),
(IAMCredentialsAsyncClient, transports.IAMCredentialsGrpcAsyncIOTransport, "grpc_asyncio", "true"),
(IAMCredentialsClient, transports.IAMCredentialsGrpcTransport, "grpc", "false"),
(IAMCredentialsAsyncClient, transports.IAMCredentialsGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(IAMCredentialsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IAMCredentialsClient))
@mock.patch.object(IAMCredentialsAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IAMCredentialsAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_iam_credentials_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [
IAMCredentialsClient, IAMCredentialsAsyncClient
])
@mock.patch.object(IAMCredentialsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IAMCredentialsClient))
@mock.patch.object(IAMCredentialsAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IAMCredentialsAsyncClient))
def test_iam_credentials_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(IAMCredentialsClient, transports.IAMCredentialsGrpcTransport, "grpc"),
(IAMCredentialsAsyncClient, transports.IAMCredentialsGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_iam_credentials_client_client_options_scopes(client_class, transport_class, transport_name):
# Check the case scopes are provided.
options = client_options.ClientOptions(
scopes=["1", "2"],
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [
(IAMCredentialsClient, transports.IAMCredentialsGrpcTransport, "grpc", grpc_helpers),
(IAMCredentialsAsyncClient, transports.IAMCredentialsGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async),
])
def test_iam_credentials_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers):
# Check the case credentials file is provided.
options = client_options.ClientOptions(
credentials_file="credentials.json"
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_iam_credentials_client_client_options_from_dict():
with mock.patch('google.iam.credentials_v1.services.iam_credentials.transports.IAMCredentialsGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = IAMCredentialsClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [
(IAMCredentialsClient, transports.IAMCredentialsGrpcTransport, "grpc", grpc_helpers),
(IAMCredentialsAsyncClient, transports.IAMCredentialsGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async),
])
def test_iam_credentials_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers):
# Check the case credentials file is provided.
options = client_options.ClientOptions(
credentials_file="credentials.json"
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"iamcredentials.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
scopes=None,
default_host="iamcredentials.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("request_type", [
common.GenerateAccessTokenRequest,
dict,
])
def test_generate_access_token(request_type, transport: str = 'grpc'):
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.generate_access_token),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = common.GenerateAccessTokenResponse(
access_token='access_token_value',
)
response = client.generate_access_token(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == common.GenerateAccessTokenRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, common.GenerateAccessTokenResponse)
assert response.access_token == 'access_token_value'
def test_generate_access_token_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.generate_access_token),
'__call__') as call:
client.generate_access_token()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == common.GenerateAccessTokenRequest()
@pytest.mark.asyncio
async def test_generate_access_token_async(transport: str = 'grpc_asyncio', request_type=common.GenerateAccessTokenRequest):
client = IAMCredentialsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.generate_access_token),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(common.GenerateAccessTokenResponse(
access_token='access_token_value',
))
response = await client.generate_access_token(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == common.GenerateAccessTokenRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, common.GenerateAccessTokenResponse)
assert response.access_token == 'access_token_value'
@pytest.mark.asyncio
async def test_generate_access_token_async_from_dict():
await test_generate_access_token_async(request_type=dict)
def test_generate_access_token_field_headers():
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = common.GenerateAccessTokenRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.generate_access_token),
'__call__') as call:
call.return_value = common.GenerateAccessTokenResponse()
client.generate_access_token(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_generate_access_token_field_headers_async():
client = IAMCredentialsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = common.GenerateAccessTokenRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.generate_access_token),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(common.GenerateAccessTokenResponse())
await client.generate_access_token(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_generate_access_token_flattened():
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.generate_access_token),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = common.GenerateAccessTokenResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.generate_access_token(
name='name_value',
delegates=['delegates_value'],
scope=['scope_value'],
lifetime=duration_pb2.Duration(seconds=751),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = 'name_value'
assert arg == mock_val
arg = args[0].delegates
mock_val = ['delegates_value']
assert arg == mock_val
arg = args[0].scope
mock_val = ['scope_value']
assert arg == mock_val
assert DurationRule().to_proto(args[0].lifetime) == duration_pb2.Duration(seconds=751)
def test_generate_access_token_flattened_error():
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.generate_access_token(
common.GenerateAccessTokenRequest(),
name='name_value',
delegates=['delegates_value'],
scope=['scope_value'],
lifetime=duration_pb2.Duration(seconds=751),
)
@pytest.mark.asyncio
async def test_generate_access_token_flattened_async():
client = IAMCredentialsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.generate_access_token),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = common.GenerateAccessTokenResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(common.GenerateAccessTokenResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.generate_access_token(
name='name_value',
delegates=['delegates_value'],
scope=['scope_value'],
lifetime=duration_pb2.Duration(seconds=751),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = 'name_value'
assert arg == mock_val
arg = args[0].delegates
mock_val = ['delegates_value']
assert arg == mock_val
arg = args[0].scope
mock_val = ['scope_value']
assert arg == mock_val
assert DurationRule().to_proto(args[0].lifetime) == duration_pb2.Duration(seconds=751)
@pytest.mark.asyncio
async def test_generate_access_token_flattened_error_async():
client = IAMCredentialsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.generate_access_token(
common.GenerateAccessTokenRequest(),
name='name_value',
delegates=['delegates_value'],
scope=['scope_value'],
lifetime=duration_pb2.Duration(seconds=751),
)
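# Hedged sketch (not part of the generated tests): the same flattened
# arguments exercised above, but against the real service. The service
# account email is a placeholder; application-default credentials and the
# required IAM permission are assumed. Reuses the module's existing imports.
def example_generate_access_token_real_call():
    client = IAMCredentialsClient()
    return client.generate_access_token(
        name="projects/-/serviceAccounts/my-sa@example-project.iam.gserviceaccount.com",
        scope=["https://www.googleapis.com/auth/cloud-platform"],
        lifetime=duration_pb2.Duration(seconds=3600),
    )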
@pytest.mark.parametrize("request_type", [
common.GenerateIdTokenRequest,
dict,
])
def test_generate_id_token(request_type, transport: str = 'grpc'):
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.generate_id_token),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = common.GenerateIdTokenResponse(
token='token_value',
)
response = client.generate_id_token(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == common.GenerateIdTokenRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, common.GenerateIdTokenResponse)
assert response.token == 'token_value'
def test_generate_id_token_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.generate_id_token),
'__call__') as call:
client.generate_id_token()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == common.GenerateIdTokenRequest()
@pytest.mark.asyncio
async def test_generate_id_token_async(transport: str = 'grpc_asyncio', request_type=common.GenerateIdTokenRequest):
client = IAMCredentialsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.generate_id_token),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(common.GenerateIdTokenResponse(
token='token_value',
))
response = await client.generate_id_token(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == common.GenerateIdTokenRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, common.GenerateIdTokenResponse)
assert response.token == 'token_value'
@pytest.mark.asyncio
async def test_generate_id_token_async_from_dict():
await test_generate_id_token_async(request_type=dict)
def test_generate_id_token_field_headers():
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = common.GenerateIdTokenRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.generate_id_token),
'__call__') as call:
call.return_value = common.GenerateIdTokenResponse()
client.generate_id_token(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_generate_id_token_field_headers_async():
client = IAMCredentialsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = common.GenerateIdTokenRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.generate_id_token),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(common.GenerateIdTokenResponse())
await client.generate_id_token(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_generate_id_token_flattened():
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.generate_id_token),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = common.GenerateIdTokenResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.generate_id_token(
name='name_value',
delegates=['delegates_value'],
audience='audience_value',
include_email=True,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = 'name_value'
assert arg == mock_val
arg = args[0].delegates
mock_val = ['delegates_value']
assert arg == mock_val
arg = args[0].audience
mock_val = 'audience_value'
assert arg == mock_val
arg = args[0].include_email
mock_val = True
assert arg == mock_val
def test_generate_id_token_flattened_error():
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.generate_id_token(
common.GenerateIdTokenRequest(),
name='name_value',
delegates=['delegates_value'],
audience='audience_value',
include_email=True,
)
@pytest.mark.asyncio
async def test_generate_id_token_flattened_async():
client = IAMCredentialsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.generate_id_token),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = common.GenerateIdTokenResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(common.GenerateIdTokenResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.generate_id_token(
name='name_value',
delegates=['delegates_value'],
audience='audience_value',
include_email=True,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = 'name_value'
assert arg == mock_val
arg = args[0].delegates
mock_val = ['delegates_value']
assert arg == mock_val
arg = args[0].audience
mock_val = 'audience_value'
assert arg == mock_val
arg = args[0].include_email
mock_val = True
assert arg == mock_val
@pytest.mark.asyncio
async def test_generate_id_token_flattened_error_async():
client = IAMCredentialsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.generate_id_token(
common.GenerateIdTokenRequest(),
name='name_value',
delegates=['delegates_value'],
audience='audience_value',
include_email=True,
)
@pytest.mark.parametrize("request_type", [
common.SignBlobRequest,
dict,
])
def test_sign_blob(request_type, transport: str = 'grpc'):
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.sign_blob),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = common.SignBlobResponse(
key_id='key_id_value',
signed_blob=b'signed_blob_blob',
)
response = client.sign_blob(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == common.SignBlobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, common.SignBlobResponse)
assert response.key_id == 'key_id_value'
assert response.signed_blob == b'signed_blob_blob'
def test_sign_blob_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.sign_blob),
'__call__') as call:
client.sign_blob()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == common.SignBlobRequest()
@pytest.mark.asyncio
async def test_sign_blob_async(transport: str = 'grpc_asyncio', request_type=common.SignBlobRequest):
client = IAMCredentialsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.sign_blob),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(common.SignBlobResponse(
key_id='key_id_value',
signed_blob=b'signed_blob_blob',
))
response = await client.sign_blob(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == common.SignBlobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, common.SignBlobResponse)
assert response.key_id == 'key_id_value'
assert response.signed_blob == b'signed_blob_blob'
@pytest.mark.asyncio
async def test_sign_blob_async_from_dict():
await test_sign_blob_async(request_type=dict)
def test_sign_blob_field_headers():
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = common.SignBlobRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.sign_blob),
'__call__') as call:
call.return_value = common.SignBlobResponse()
client.sign_blob(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_sign_blob_field_headers_async():
client = IAMCredentialsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = common.SignBlobRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.sign_blob),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(common.SignBlobResponse())
await client.sign_blob(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_sign_blob_flattened():
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.sign_blob),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = common.SignBlobResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.sign_blob(
name='name_value',
delegates=['delegates_value'],
payload=b'payload_blob',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = 'name_value'
assert arg == mock_val
arg = args[0].delegates
mock_val = ['delegates_value']
assert arg == mock_val
arg = args[0].payload
mock_val = b'payload_blob'
assert arg == mock_val
def test_sign_blob_flattened_error():
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.sign_blob(
common.SignBlobRequest(),
name='name_value',
delegates=['delegates_value'],
payload=b'payload_blob',
)
@pytest.mark.asyncio
async def test_sign_blob_flattened_async():
client = IAMCredentialsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.sign_blob),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = common.SignBlobResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(common.SignBlobResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.sign_blob(
name='name_value',
delegates=['delegates_value'],
payload=b'payload_blob',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = 'name_value'
assert arg == mock_val
arg = args[0].delegates
mock_val = ['delegates_value']
assert arg == mock_val
arg = args[0].payload
mock_val = b'payload_blob'
assert arg == mock_val
@pytest.mark.asyncio
async def test_sign_blob_flattened_error_async():
client = IAMCredentialsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.sign_blob(
common.SignBlobRequest(),
name='name_value',
delegates=['delegates_value'],
payload=b'payload_blob',
)
@pytest.mark.parametrize("request_type", [
common.SignJwtRequest,
dict,
])
def test_sign_jwt(request_type, transport: str = 'grpc'):
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.sign_jwt),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = common.SignJwtResponse(
key_id='key_id_value',
signed_jwt='signed_jwt_value',
)
response = client.sign_jwt(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == common.SignJwtRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, common.SignJwtResponse)
assert response.key_id == 'key_id_value'
assert response.signed_jwt == 'signed_jwt_value'
def test_sign_jwt_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.sign_jwt),
'__call__') as call:
client.sign_jwt()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == common.SignJwtRequest()
@pytest.mark.asyncio
async def test_sign_jwt_async(transport: str = 'grpc_asyncio', request_type=common.SignJwtRequest):
client = IAMCredentialsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.sign_jwt),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(common.SignJwtResponse(
key_id='key_id_value',
signed_jwt='signed_jwt_value',
))
response = await client.sign_jwt(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == common.SignJwtRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, common.SignJwtResponse)
assert response.key_id == 'key_id_value'
assert response.signed_jwt == 'signed_jwt_value'
@pytest.mark.asyncio
async def test_sign_jwt_async_from_dict():
await test_sign_jwt_async(request_type=dict)
def test_sign_jwt_field_headers():
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = common.SignJwtRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.sign_jwt),
'__call__') as call:
call.return_value = common.SignJwtResponse()
client.sign_jwt(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_sign_jwt_field_headers_async():
client = IAMCredentialsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = common.SignJwtRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.sign_jwt),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(common.SignJwtResponse())
await client.sign_jwt(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_sign_jwt_flattened():
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.sign_jwt),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = common.SignJwtResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.sign_jwt(
name='name_value',
delegates=['delegates_value'],
payload='payload_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = 'name_value'
assert arg == mock_val
arg = args[0].delegates
mock_val = ['delegates_value']
assert arg == mock_val
arg = args[0].payload
mock_val = 'payload_value'
assert arg == mock_val
def test_sign_jwt_flattened_error():
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.sign_jwt(
common.SignJwtRequest(),
name='name_value',
delegates=['delegates_value'],
payload='payload_value',
)
@pytest.mark.asyncio
async def test_sign_jwt_flattened_async():
client = IAMCredentialsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.sign_jwt),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = common.SignJwtResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(common.SignJwtResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.sign_jwt(
name='name_value',
delegates=['delegates_value'],
payload='payload_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = 'name_value'
assert arg == mock_val
arg = args[0].delegates
mock_val = ['delegates_value']
assert arg == mock_val
arg = args[0].payload
mock_val = 'payload_value'
assert arg == mock_val
@pytest.mark.asyncio
async def test_sign_jwt_flattened_error_async():
client = IAMCredentialsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.sign_jwt(
common.SignJwtRequest(),
name='name_value',
delegates=['delegates_value'],
payload='payload_value',
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.IAMCredentialsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.IAMCredentialsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = IAMCredentialsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.IAMCredentialsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = IAMCredentialsClient(
client_options=options,
transport=transport,
)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = IAMCredentialsClient(
client_options=options,
credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.IAMCredentialsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = IAMCredentialsClient(
client_options={"scopes": ["1", "2"]},
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.IAMCredentialsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = IAMCredentialsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.IAMCredentialsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.IAMCredentialsGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize("transport_class", [
transports.IAMCredentialsGrpcTransport,
transports.IAMCredentialsGrpcAsyncIOTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.IAMCredentialsGrpcTransport,
)
def test_iam_credentials_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.IAMCredentialsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json"
)
def test_iam_credentials_base_transport():
# Instantiate the base transport.
with mock.patch('google.iam.credentials_v1.services.iam_credentials.transports.IAMCredentialsTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.IAMCredentialsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'generate_access_token',
'generate_id_token',
'sign_blob',
'sign_jwt',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_iam_credentials_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.iam.credentials_v1.services.iam_credentials.transports.IAMCredentialsTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.IAMCredentialsTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json",
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id="octopus",
)
def test_iam_credentials_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.iam.credentials_v1.services.iam_credentials.transports.IAMCredentialsTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.IAMCredentialsTransport()
adc.assert_called_once()
def test_iam_credentials_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
IAMCredentialsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.IAMCredentialsGrpcTransport,
transports.IAMCredentialsGrpcAsyncIOTransport,
],
)
def test_iam_credentials_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.IAMCredentialsGrpcTransport, grpc_helpers),
(transports.IAMCredentialsGrpcAsyncIOTransport, grpc_helpers_async)
],
)
def test_iam_credentials_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(
quota_project_id="octopus",
scopes=["1", "2"]
)
create_channel.assert_called_with(
"iamcredentials.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
scopes=["1", "2"],
default_host="iamcredentials.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("transport_class", [transports.IAMCredentialsGrpcTransport, transports.IAMCredentialsGrpcAsyncIOTransport])
def test_iam_credentials_grpc_transport_client_cert_source_for_mtls(
transport_class
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert,
private_key=expected_key
)
@pytest.mark.parametrize("transport_name", [
"grpc",
"grpc_asyncio",
])
def test_iam_credentials_host_no_port(transport_name):
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='iamcredentials.googleapis.com'),
transport=transport_name,
)
assert client.transport._host == (
'iamcredentials.googleapis.com:443'
)
@pytest.mark.parametrize("transport_name", [
"grpc",
"grpc_asyncio",
])
def test_iam_credentials_host_with_port(transport_name):
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='iamcredentials.googleapis.com:8000'),
transport=transport_name,
)
assert client.transport._host == (
'iamcredentials.googleapis.com:8000'
)
def test_iam_credentials_grpc_transport_channel():
channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.IAMCredentialsGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_iam_credentials_grpc_asyncio_transport_channel():
channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.IAMCredentialsGrpcAsyncIOTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.IAMCredentialsGrpcTransport, transports.IAMCredentialsGrpcAsyncIOTransport])
def test_iam_credentials_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.IAMCredentialsGrpcTransport, transports.IAMCredentialsGrpcAsyncIOTransport])
def test_iam_credentials_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_service_account_path():
project = "squid"
service_account = "clam"
expected = "projects/{project}/serviceAccounts/{service_account}".format(project=project, service_account=service_account, )
actual = IAMCredentialsClient.service_account_path(project, service_account)
assert expected == actual
def test_parse_service_account_path():
expected = {
"project": "whelk",
"service_account": "octopus",
}
path = IAMCredentialsClient.service_account_path(**expected)
# Check that the path construction is reversible.
actual = IAMCredentialsClient.parse_service_account_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "oyster"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = IAMCredentialsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nudibranch",
}
path = IAMCredentialsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = IAMCredentialsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "cuttlefish"
expected = "folders/{folder}".format(folder=folder, )
actual = IAMCredentialsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "mussel",
}
path = IAMCredentialsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = IAMCredentialsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "winkle"
expected = "organizations/{organization}".format(organization=organization, )
actual = IAMCredentialsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nautilus",
}
path = IAMCredentialsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = IAMCredentialsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "scallop"
expected = "projects/{project}".format(project=project, )
actual = IAMCredentialsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "abalone",
}
path = IAMCredentialsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = IAMCredentialsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "squid"
location = "clam"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = IAMCredentialsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "whelk",
"location": "octopus",
}
path = IAMCredentialsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = IAMCredentialsClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.IAMCredentialsTransport, '_prep_wrapped_messages') as prep:
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.IAMCredentialsTransport, '_prep_wrapped_messages') as prep:
transport_class = IAMCredentialsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = IAMCredentialsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc_asyncio",
)
with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport
)
with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
'grpc',
]
for transport in transports:
client = IAMCredentialsClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize("client_class,transport_class", [
(IAMCredentialsClient, transports.IAMCredentialsGrpcTransport),
(IAMCredentialsAsyncClient, transports.IAMCredentialsGrpcAsyncIOTransport),
])
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
| googleapis/gapic-generator-python | tests/integration/goldens/credentials/tests/unit/gapic/credentials_v1/test_iam_credentials.py | Python | apache-2.0 | 82,980 | [
"Octopus"
] | e829456c2a250d226a919eab710baaa05b6662b9d9783b4f500ea790bd874741 |
#!/usr/bin/env python
'''
This file defines a Plotter actor. The plotting uses a Tkinter GUI and
runs in a separate process.
The "actor" is composed of:
* a decorating actor *New_Process_Actor* which allows a class to run
in a separate process. It has an additional actor:
* Channel2Process which takes objects off the Channel and puts them
on a multiprocessing queue.
* a BasePlotter instance which runs entirely in a separate process
doing the actual plotting.
Note: There is a process-safe Queue in the multiprocessing module. It
MUST be used to communicate between processes.
@author: Brian Thorne
@author: Allan McInnes
'''
from multiprocessing import Process
from multiprocessing import Queue as MQueue
from multiprocessing import Event as MEvent
from scipysim import Actor, Channel, Event, LastEvent
from scipysim.core.actor import DisplayActor
import time
import logging
def target(cls, args, kwargs):
'''This is the function that gets run in a new process'''
run_gui_loop = issubclass(cls, DisplayActor)
if run_gui_loop:
import Tkinter as Tk
import matplotlib
matplotlib.use('TkAgg')
# Create new Tkinter window for the plot
root = Tk.Tk()
root.wm_title('ScipySim')
kwargs['root'] = root
# Create the Actor that we are wrapping
block = cls(**kwargs)
block.start()
if run_gui_loop:
Tk.mainloop()
block.join()
class New_Process_Actor(Actor):
'''Create an Actor in a new process. Connected as usual with scipysim
channels. When this Actor is started, it launches a new process, creates
an instance of the Actor class passed to it in a second thread, and starts
that actor.
'''
def __init__(self, cls, *args, **kwargs):
super(New_Process_Actor, self).__init__()
self.cls = cls
self.args = list(args)
self.kwargs = kwargs
self.mqueue = MQueue()
self.mevent = MEvent()
if 'input_channel' not in kwargs:
kwargs['input_channel'] = self.args[0]
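        # Keep a handle on the caller-side scipysim Channel, then swap the
        # wrapped actor's input over to the multiprocessing Queue: the Channel
        # stays in this (parent) process and is drained by Channel2Process,
        # while the actor instance in the child process reads from the Queue.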
chan = kwargs['input_channel']
kwargs['input_channel'] = self.mqueue
print 'chan: ', chan
self.c2p = Channel2Process(chan, self.mevent, self.mqueue)
self.c2p.start()
def run(self):
self.t = Process(target=target, args=(self.cls, self.args, self.kwargs))
self.t.start()
self.mevent.set() # signal that process is ready to receive
self.c2p.join()
self.t.join()
class Channel2Process(Actor):
'''
Gets objects off a Channel and puts them in a multiprocessing.Queue
This Actor (thread) must be called from the side which has the channel.
'''
def __init__(self, channel, ready_event, queue):
super(Channel2Process, self).__init__()
self.channel = channel
self.queue = queue
self.ready_event = ready_event
self.first_time = True
def process(self):
if self.first_time:
# Make sure the other end of the Queue can receive before trying to send
# (avoids race conditions on the underlying pipe, and Broken Pipe errors)
self.ready_event.wait()
logging.info("Channel2Process done waiting for process")
self.first_time = False
obj = self.channel.get(True)
self.queue.put(obj)
if obj.last:
# Indicate that nothing else from this process will be put in queue
self.queue.close()
# Block on flushing the data to the pipe. Must be called after close
self.queue.join_thread()
self.stop = True
logging.info("Channel2Process finished")
class BasePlotter(DisplayActor):
def __init__(self,
root,
input_channel,
live=True,
refresh_rate=50,
title='ScipySim Plot',
own_fig=True,
xlabel=None,
ylabel=None,
):
'''An Actor that creates a figure, canvas and axis and plots data from
a queue.
@param root : a reference to the main Tk window
@param input_channel : channel for plot data
@param live : True if the plot should perform live updating
@param refresh_rate : rate in updates/second of plot updates
@param title : title of the plot
@param own_fig : True if produces own figure
@param xlabel : x-axis label string
@param ylabel : y-axis label string
'''
super(BasePlotter, self).__init__(input_channel)
# Data arrays
self.x_axis_data = []
self.y_axis_data = []
# Live updating
self.live = live
if live:
assert refresh_rate > 0
self.refresh_rate = refresh_rate
self.min_refresh_time = 1.0 / self.refresh_rate
self.last_update = time.time()
        # Doing imports here to keep them in local scope (and so they happen in the correct process)
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg as FigureCanvas
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
# Save a reference to the main Tk window
self.root = root
# Creating the plot
self.fig = Figure()
self.axis = self.fig.add_subplot(1, 1, 1)
self.title = self.axis.set_title(title)
self.line = None
if xlabel is not None: self.axis.set_xlabel(xlabel)
if ylabel is not None: self.axis.set_ylabel(ylabel)
# Instantiate canvas
self.canvas = FigureCanvas(self.fig, master=self.root)
# Pack canvas into root window
self.canvas.get_tk_widget().pack(expand=1)
# Put the graph navigation toolbar in the window
toolbar = NavigationToolbar2TkAgg( self.canvas, self.root )
# We can have our own buttons etc here:
#Tk.Button(master=toolbar, text='Quit', command=sys.exit).pack()
self.canvas.show()
def process(self):
# Grab all values that are currently available
while not self.input_channel.empty():
obj = self.input_channel.get_nowait() # Non-blocking
if obj.last:
self.stop = True
self.input_channel.close()
self.input_channel.join_thread()
self.plot()
logging.info("Plotter finished")
return
self.x_axis_data.append(obj['tag'])
self.y_axis_data.append(obj['value'])
# Update the plot if necessary
if self.live:
self.plot()
# Wait for next update
time.sleep(self.min_refresh_time)
def plot(self):
if not self.line:
if len(self.x_axis_data) > 0:
self.line, = self.axis.plot(self.x_axis_data, self.y_axis_data)
else:
self.line.set_data(self.x_axis_data, self.y_axis_data)
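            # Recache the line's data and recompute the axes' data limits so
            # the autoscaled view grows to cover the newly appended points.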
axes = self.line.get_axes()
self.line.recache()
axes.relim()
axes.autoscale_view()
self.canvas.draw()
class BaseStemmer(BasePlotter):
def __init__(self,
root,
input_channel,
live=True,
refresh_rate=50,
title='ScipySim Stem-Plot',
own_fig=True,
xlabel=None,
ylabel=None,
):
'''Actor that creates a figure, canvas and axis, and stem-plots
data from a queue.
@param root : a reference to the main Tk window
@param input_channel : channel for plot data
@param refresh_rate : rate in updates/second of plot updates
@param title : title of the plot
@param own_fig : True if produces own figure
@param xlabel : x-axis label string
@param ylabel : y-axis label string
'''
super(BaseStemmer, self).__init__(root, input_channel, live,
refresh_rate, title, own_fig,
xlabel, ylabel)
self.markerline = None
self.stemlines = None
self.baseline = None
import matplotlib.lines as lines
self.lines = lines
def plot(self):
'''Override base-class plot function to use stemming instead.
Simply using the stem() function works, but is quite slow. The method
we use here provides much faster updating.
'''
if not self.markerline:
if len(self.x_axis_data) > 0:
self.markerline, self.stemlines, self.baseline = \
self.axis.stem(self.x_axis_data, self.y_axis_data)
else:
axes = self.markerline.get_axes()
# The markerline can be built directly from the data
self.markerline.set_data(self.x_axis_data, self.y_axis_data)
self.markerline.recache()
# The baseline should be built from the start and end points of
# the x axis
_ , baseline_y = self.baseline.get_data()
self.baseline.set_data([min(self.x_axis_data),
max(self.x_axis_data)], baseline_y.tolist())
self.baseline.recache()
# Construct new stemlines and add them to the current plot
for x, y in zip(self.x_axis_data, self.y_axis_data)[len(self.stemlines):]:
stemline = self.lines.Line2D([0], [0])
stemline.update_from(self.stemlines[0]) # Copy format
stemline.set_data([x, x], [baseline_y[0], y])
stemline.recache()
axes.add_line(stemline)
self.stemlines.append(stemline)
axes.relim()
axes.autoscale_view()
self.canvas.draw()
class Plotter(Actor):
'''Plot continuous data as a smooth line.'''
def __init__(self, *args, **kwargs):
super(Plotter, self).__init__()
self.npa = New_Process_Actor(BasePlotter, *args, **kwargs)
def run(self):
self.npa.start()
self.npa.join()
class StemPlotter(Actor):
'''Plot discrete data as a sequence of stems.'''
def __init__(self, *args, **kwargs):
super(StemPlotter, self).__init__()
self.npa = New_Process_Actor(BaseStemmer, *args, **kwargs)
def run(self):
self.npa.start()
self.npa.join()
def test_NPA():
data = [Event(i, i**2) for i in xrange( 100 )]
q1 = Channel()
q2 = Channel()
npa1 = New_Process_Actor(BasePlotter, input_channel=q1)
npa2 = New_Process_Actor(BaseStemmer, input_channel=q2)
import time, random
print 'starting other process actor...'
npa1.start()
npa2.start()
time.sleep(random.random() * 0.01)
print 'Adding data to queue.', q1
print 'Adding data to queue.', q2
for d in data:
q1.put(d)
q2.put(d)
time.sleep(0.1)
q1.put(LastEvent())
q2.put(LastEvent())
print 'other calculations keep going...'
npa1.join()
npa2.join()
print 'NPA is done'
if __name__ == "__main__":
print 'testing npa...'
test_NPA()
| mm318/scipysim-nogui | scipysim/actors/display/plotter.py | Python | gpl-3.0 | 11,317 | [
"Brian"
] | f41eb319ce0f6b2e1260d431dfd65ce9a547c4f9e4c571abeda4ebe1de31608b |
from basesynapse import BaseSynapse
import numpy as np
import pycuda.gpuarray as garray
from pycuda.tools import dtype_to_ctype
import pycuda.driver as cuda
from pycuda.compiler import SourceModule
cuda_src = """
__global__ void alpha_synapse(
int num,
%(type)s dt,
int *spike,
int *Pre,
%(type)s *Ar,
%(type)s *Ad,
%(type)s *Gmax,
%(type)s *a0,
%(type)s *a1,
%(type)s *a2,
%(type)s *cond )
{
int tid = threadIdx.x + blockIdx.x*blockDim.x;
int tot_threads = gridDim.x * blockDim.x;
int pre;
%(type)s ar,ad,gmax;
%(type)s old_a[3];
%(type)s new_a[3];
for( int i=tid; i<num; i+=tot_threads ){
// copy data from global memory to register
ar = Ar[i];
ad = Ad[i];
pre = Pre[i];
gmax = Gmax[i];
old_a[0] = a0[i];
old_a[1] = a1[i];
old_a[2] = a2[i];
// update the alpha function
new_a[0] = fmax( 0., old_a[0] + dt*old_a[1] );
new_a[1] = old_a[1] + dt*old_a[2];
if( spike[pre] )
new_a[1] += ar*ad;
new_a[2] = -( ar+ad )*old_a[1] - ar*ad*old_a[0];
// copy data from register to the global memory
a0[i] = new_a[0];
a1[i] = new_a[1];
a2[i] = new_a[2];
cond[i] = new_a[0]*gmax;
}
return;
}
"""
class AlphaSynapse(BaseSynapse):
def __init__( self, s_dict, synapse_state, dt, debug=False):
self.debug = debug
self.dt = dt
self.num = len( s_dict['id'] )
self.pre = garray.to_gpu( np.asarray( s_dict['pre'], dtype=np.int32 ))
self.ar = garray.to_gpu( np.asarray( s_dict['ar'], dtype=np.float64 ))
self.ad = garray.to_gpu( np.asarray( s_dict['ad'], dtype=np.float64 ))
self.gmax = garray.to_gpu( np.asarray( s_dict['gmax'], dtype=np.float64 ))
self.a0 = garray.zeros( (self.num,), dtype=np.float64 )
self.a1 = garray.zeros( (self.num,), dtype=np.float64 )
self.a2 = garray.zeros( (self.num,), dtype=np.float64 )
self.cond = synapse_state
self.update = self.get_gpu_kernel()
@property
def synapse_class(self): return int(0)
def update_state(self, buffer, st = None):
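        # `buffer.spike_buffer` is expected to expose one integer spike flag
        # per neuron; the kernel indexes it with each synapse's presynaptic
        # neuron id (`pre`) to decide when to kick the alpha function.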
self.update.prepared_async_call(
self.gpu_grid,\
self.gpu_block,\
st,\
self.num,\
self.dt,\
buffer.spike_buffer.gpudata,\
self.pre.gpudata,\
self.ar.gpudata,\
self.ad.gpudata,\
self.gmax.gpudata,\
self.a0.gpudata,\
self.a1.gpudata,\
self.a2.gpudata,\
self.cond)
def get_gpu_kernel(self):
self.gpu_block = (128,1,1)
self.gpu_grid = (min( 6*cuda.Context.get_device().MULTIPROCESSOR_COUNT,\
(self.num-1)/self.gpu_block[0] + 1), 1)
# cuda_src = open('./alpha_synapse.cu','r')
mod = SourceModule( \
cuda_src % {"type": dtype_to_ctype(np.float64)},\
options=["--ptxas-options=-v"])
func = mod.get_function("alpha_synapse")
func.prepare( [ np.int32, # syn_num
np.float64, # dt
np.intp, # spike list
np.intp, # pre-synaptic neuron list
np.intp, # ar array
np.intp, # ad array
np.intp, # gmax array
np.intp, # a0 array
np.intp, # a1 array
np.intp, # a2 array
np.intp ] ) # cond array
return func
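# A minimal construction sketch (hypothetical values; a CUDA context must
# already exist, e.g. via `import pycuda.autoinit`, and `synapse_state` should
# be a device pointer such as the `.gpudata` of a float64 GPUArray with one
# entry per synapse):
#
#   s_dict = {'id': [0], 'pre': [0], 'ar': [12.5], 'ad': [12.19], 'gmax': [0.003]}
#   state = garray.zeros(len(s_dict['id']), np.float64)
#   syn = AlphaSynapse(s_dict, state.gpudata, dt=1e-4)
#   # once per simulation step, after the presynaptic spike buffer is updated:
#   # syn.update_state(neuron_buffer)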
| AdamRTomkins/libSpineML2NK | libSpineML2NK/synapses/AlphaSynapse.py | Python | gpl-3.0 | 3,671 | [
"NEURON"
] | 9a38112a76b47ee3d8ffc2c4e638df132ff9411edaf196221ee5fdfa6d1bd2d3 |
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Provides objects for building up expressions useful for pattern matching.
"""
from collections.abc import Iterable, Mapping
import operator
import numpy as np
import iris.exceptions
class Constraint:
"""
Constraints are the mechanism by which cubes can be pattern matched and
filtered according to specific criteria.
Once a constraint has been defined, it can be applied to cubes using the
:meth:`Constraint.extract` method.
"""
def __init__(self, name=None, cube_func=None, coord_values=None, **kwargs):
"""
Creates a new instance of a Constraint which can be used for filtering
cube loading or cube list extraction.
Args:
* name: string or None
If a string, it is used as the name to match against the
`~iris.cube.Cube.names` property.
* cube_func: callable or None
If a callable, it must accept a Cube as its first and only argument
and return either True or False.
* coord_values: dict or None
If a dict, it must map coordinate name to the condition on the
associated coordinate.
* `**kwargs`:
The remaining keyword arguments are converted to coordinate
constraints. The name of the argument gives the name of a
coordinate, and the value of the argument is the condition to meet
on that coordinate::
Constraint(model_level_number=10)
Coordinate level constraints can be of several types:
* **string, int or float** - the value of the coordinate to match.
e.g. ``model_level_number=10``
* **list of values** - the possible values that the coordinate may
have to match. e.g. ``model_level_number=[10, 12]``
* **callable** - a function which accepts a
:class:`iris.coords.Cell` instance as its first and only argument
returning True or False if the value of the Cell is desired.
e.g. ``model_level_number=lambda cell: 5 < cell < 10``
        The :ref:`user guide <loading_iris_cubes>` covers much of cube
        constraining in detail; however, an example which uses all of the
        features of this class is given here for completeness::
Constraint(name='air_potential_temperature',
cube_func=lambda cube: cube.units == 'kelvin',
coord_values={'latitude':lambda cell: 0 < cell < 90},
model_level_number=[10, 12])
& Constraint(ensemble_member=2)
.. note::
Whilst ``&`` is supported, the ``|`` that might reasonably be expected
is not. This is because each constraint describes a boxlike region, and
thus the intersection of these constraints (obtained with ``&``) will
also describe a boxlike region. Allowing the union of two constraints
(with the ``|`` symbol) would allow the description of a non-boxlike
region. These are difficult to describe with cubes and so it would be
ambiguous what should be extracted.
To generate multiple cubes, each constrained to a different range of
the same coordinate, use :py:func:`iris.load_cubes` or
:py:func:`iris.cube.CubeList.extract_cubes`.
A cube can be constrained to multiple ranges within the same coordinate
using something like the following constraint::
def latitude_bands(cell):
return (0 < cell < 30) or (60 < cell < 90)
Constraint(cube_func=latitude_bands)
Constraint filtering is performed at the cell level.
For further details on how cell comparisons are performed see
:class:`iris.coords.Cell`.
"""
if not (name is None or isinstance(name, str)):
raise TypeError("name must be None or string, got %r" % name)
if not (cube_func is None or callable(cube_func)):
raise TypeError(
"cube_func must be None or callable, got %r" % cube_func
)
if not (coord_values is None or isinstance(coord_values, Mapping)):
raise TypeError(
"coord_values must be None or a "
"collections.Mapping, got %r" % coord_values
)
coord_values = coord_values or {}
duplicate_keys = set(coord_values.keys()) & set(kwargs.keys())
if duplicate_keys:
raise ValueError(
"Duplicate coordinate conditions specified for: "
"%s" % list(duplicate_keys)
)
self._name = name
self._cube_func = cube_func
self._coord_values = coord_values.copy()
self._coord_values.update(kwargs)
self._coord_constraints = []
for coord_name, coord_thing in self._coord_values.items():
self._coord_constraints.append(
_CoordConstraint(coord_name, coord_thing)
)
def __repr__(self):
args = []
if self._name:
args.append(("name", self._name))
if self._cube_func:
args.append(("cube_func", self._cube_func))
if self._coord_values:
args.append(("coord_values", self._coord_values))
return "Constraint(%s)" % ", ".join("%s=%r" % (k, v) for k, v in args)
def _coordless_match(self, cube):
"""
Return whether this constraint matches the given cube when not
taking coordinates into account.
"""
match = True
if self._name:
# Require to also check against cube.name() for the fallback
# "unknown" default case, when there is no name metadata available.
match = self._name in cube._names or self._name == cube.name()
if match and self._cube_func:
match = self._cube_func(cube)
return match
def extract(self, cube):
"""
Return the subset of the given cube which matches this constraint,
else return None.
"""
resultant_CIM = self._CIM_extract(cube)
slice_tuple = resultant_CIM.as_slice()
result = None
if slice_tuple is not None:
# Slicing the cube is an expensive operation.
if all([item == slice(None) for item in slice_tuple]):
# Don't perform a full slice, just return the cube.
result = cube
else:
# Performing the partial slice.
result = cube[slice_tuple]
return result
def _CIM_extract(self, cube):
# Returns _ColumnIndexManager
# Cater for scalar cubes by setting the dimensionality to 1
# when cube.ndim is 0.
resultant_CIM = _ColumnIndexManager(cube.ndim or 1)
if not self._coordless_match(cube):
resultant_CIM.all_false()
else:
for coord_constraint in self._coord_constraints:
resultant_CIM = resultant_CIM & coord_constraint.extract(cube)
return resultant_CIM
def __and__(self, other):
return ConstraintCombination(self, other, operator.__and__)
def __rand__(self, other):
return ConstraintCombination(other, self, operator.__and__)
class ConstraintCombination(Constraint):
"""Represents the binary combination of two Constraint instances."""
def __init__(self, lhs, rhs, operator):
"""
A ConstraintCombination instance is created by providing two
Constraint instances and the appropriate :mod:`operator`.
"""
try:
lhs_constraint = as_constraint(lhs)
rhs_constraint = as_constraint(rhs)
except TypeError:
raise TypeError(
"Can only combine Constraint instances, "
"got: %s and %s" % (type(lhs), type(rhs))
)
self.lhs = lhs_constraint
self.rhs = rhs_constraint
self.operator = operator
def _coordless_match(self, cube):
return self.operator(
self.lhs._coordless_match(cube), self.rhs._coordless_match(cube)
)
def __repr__(self):
return "ConstraintCombination(%r, %r, %r)" % (
self.lhs,
self.rhs,
self.operator,
)
def _CIM_extract(self, cube):
return self.operator(
self.lhs._CIM_extract(cube), self.rhs._CIM_extract(cube)
)
class _CoordConstraint:
"""Represents the atomic elements which might build up a Constraint."""
def __init__(self, coord_name, coord_thing):
"""
Create a coordinate constraint given the coordinate name and a
thing to compare it with.
Arguments:
* coord_name - string
The name of the coordinate to constrain
* coord_thing
The object to compare
"""
self.coord_name = coord_name
self._coord_thing = coord_thing
def __repr__(self):
return "_CoordConstraint(%r, %r)" % (
self.coord_name,
self._coord_thing,
)
def extract(self, cube):
"""
        Returns the column-based indices of the given cube which
match the constraint.
"""
from iris.coords import Cell, DimCoord
# Cater for scalar cubes by setting the dimensionality to 1
# when cube.ndim is 0.
cube_cim = _ColumnIndexManager(cube.ndim or 1)
try:
coord = cube.coord(self.coord_name)
except iris.exceptions.CoordinateNotFoundError:
cube_cim.all_false()
return cube_cim
dims = cube.coord_dims(coord)
if len(dims) > 1:
msg = "Cannot apply constraints to multidimensional coordinates"
raise iris.exceptions.CoordinateMultiDimError(msg)
try_quick = False
if callable(self._coord_thing):
call_func = self._coord_thing
elif isinstance(self._coord_thing, Iterable) and not isinstance(
self._coord_thing, (str, Cell)
):
desired_values = list(self._coord_thing)
# A dramatic speedup can be had if we don't have bounds.
if coord.has_bounds():
def call_func(cell):
return cell in desired_values
else:
def call_func(cell):
return cell.point in desired_values
else:
def call_func(c):
return c == self._coord_thing
try_quick = isinstance(coord, DimCoord) and not isinstance(
self._coord_thing, Cell
)
# Simple, yet dramatic, optimisation for the monotonic case.
if try_quick:
try:
i = coord.nearest_neighbour_index(self._coord_thing)
except TypeError:
try_quick = False
if try_quick:
r = np.zeros(coord.shape, dtype=np.bool_)
if coord.cell(i) == self._coord_thing:
r[i] = True
else:
r = np.array([call_func(cell) for cell in coord.cells()])
if dims:
cube_cim[dims[0]] = r
elif not all(r):
cube_cim.all_false()
return cube_cim
class _ColumnIndexManager:
"""
A class to represent column aligned slices which can be operated on
using ``&``, ``|`` or ``^``.
::
# 4 Dimensional slices
import numpy as np
cim = _ColumnIndexManager(4)
cim[1] = np.array([3, 4, 5]) > 3
print(cim.as_slice())
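        # -> (slice(None, None, None), slice(1, 3, 1),
        #     slice(None, None, None), slice(None, None, None))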
"""
def __init__(self, ndims):
"""
A _ColumnIndexManager is always created to span the given
number of dimensions.
"""
self._column_arrays = [True] * ndims
self.ndims = ndims
def __and__(self, other):
return self._bitwise_operator(other, operator.__and__)
def __or__(self, other):
return self._bitwise_operator(other, operator.__or__)
def __xor__(self, other):
return self._bitwise_operator(other, operator.__xor__)
def _bitwise_operator(self, other, operator):
if not isinstance(other, _ColumnIndexManager):
return NotImplemented
if self.ndims != other.ndims:
raise ValueError(
"Cannot do %s for %r and %r as they have a "
"different number of dimensions." % operator
)
r = _ColumnIndexManager(self.ndims)
        # iterate over each dimension and combine appropriately
for i, (lhs, rhs) in enumerate(zip(self, other)):
r[i] = operator(lhs, rhs)
return r
def all_false(self):
"""Turn all slices into False."""
for i in range(self.ndims):
self[i] = False
def __getitem__(self, key):
return self._column_arrays[key]
def __setitem__(self, key, value):
is_vector = isinstance(value, np.ndarray) and value.ndim == 1
if is_vector or isinstance(value, bool):
self._column_arrays[key] = value
else:
raise TypeError(
"Expecting value to be a 1 dimensional numpy array"
", or a boolean. Got %s" % (type(value))
)
def as_slice(self):
"""
Turns a _ColumnIndexManager into a tuple which can be used in an
indexing operation.
If no index is possible, None will be returned.
"""
result = [None] * self.ndims
for dim, dimension_array in enumerate(self):
# If dimension_array has not been set, span the entire dimension
if isinstance(dimension_array, np.ndarray):
where_true = np.where(dimension_array)[0]
# If the array had no True values in it, then the dimension
# is equivalent to False
if len(where_true) == 0:
result = None
break
# If there was exactly one match, the key should be an integer
if where_true.shape == (1,):
result[dim] = where_true[0]
else:
# Finally, we can either provide a slice if possible,
# or a tuple of indices which match. In order to determine
# if we can provide a slice, calculate the deltas between
# the indices and check if they are the same.
delta = np.diff(where_true, axis=0)
# if the diff is consistent we can create a slice object
if all(delta[0] == delta):
result[dim] = slice(
where_true[0], where_true[-1] + 1, delta[0]
)
else:
# otherwise, key is a tuple
result[dim] = tuple(where_true)
# Handle the case where dimension_array is a boolean
elif dimension_array:
result[dim] = slice(None, None)
else:
result = None
break
if result is None:
return result
else:
return tuple(result)
def list_of_constraints(constraints):
"""
Turns the given constraints into a list of valid constraints
using :func:`as_constraint`.
"""
if isinstance(constraints, str) or not isinstance(constraints, Iterable):
constraints = [constraints]
return [as_constraint(constraint) for constraint in constraints]
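# For example (a small usage sketch): list_of_constraints('air_temperature')
# is equivalent to [as_constraint('air_temperature')], a single name-based
# constraint, while list_of_constraints(['air_temperature', None]) also
# appends an unconditional Constraint() for the None entry.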
def as_constraint(thing):
"""
Casts an object into a cube constraint where possible, otherwise
a TypeError will be raised.
If the given object is already a valid constraint then the given object
will be returned, else a TypeError will be raised.
"""
if isinstance(thing, Constraint):
return thing
elif thing is None:
return Constraint()
elif isinstance(thing, str):
return Constraint(thing)
else:
raise TypeError("%r cannot be cast to a constraint." % thing)
class AttributeConstraint(Constraint):
"""Provides a simple Cube-attribute based :class:`Constraint`."""
def __init__(self, **attributes):
"""
Example usage::
iris.AttributeConstraint(STASH='m01s16i004')
iris.AttributeConstraint(
STASH=lambda stash: str(stash).endswith('i005'))
.. note:: Attribute constraint names are case sensitive.
"""
self._attributes = attributes
super().__init__(cube_func=self._cube_func)
def _cube_func(self, cube):
match = True
for name, value in self._attributes.items():
if name in cube.attributes:
cube_attr = cube.attributes.get(name)
# if we have a callable, then call it with the value,
# otherwise, assert equality
if callable(value):
if not value(cube_attr):
match = False
break
else:
if cube_attr != value:
match = False
break
else:
match = False
break
return match
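# Illustrative sketch (hypothetical attribute values): for a cube whose
# attributes include {'source': 'model-X'},
#   AttributeConstraint(source='model-X') matches,
#   AttributeConstraint(source=lambda v: v.startswith('model')) matches,
# and AttributeConstraint(history='anything') does not match, because a
# missing attribute fails the constraint outright.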
def __repr__(self):
return "AttributeConstraint(%r)" % self._attributes
class NameConstraint(Constraint):
"""Provides a simple Cube name based :class:`Constraint`."""
def __init__(
self,
standard_name="none",
long_name="none",
var_name="none",
STASH="none",
):
"""
Provides a simple Cube name based :class:`Constraint`, which matches
against each of the names provided, whether the standard name, long
name, NetCDF variable name and/or the STASH from the attributes
dictionary.
The name constraint will only succeed if *all* of the provided names
match.
Kwargs:
* standard_name:
A string or callable representing the standard name to match
against.
* long_name:
A string or callable representing the long name to match against.
* var_name:
A string or callable representing the NetCDF variable name to match
against.
* STASH:
A string or callable representing the UM STASH code to match
against.
.. note::
The default value of each of the keyword arguments is the string
"none", rather than the singleton None, as None may be a legitimate
value to be matched against. For example, to constrain against all
cubes where the standard_name is not set, use standard_name=None.
Returns:
* Boolean
Example usage::
iris.NameConstraint(long_name='air temp', var_name=None)
iris.NameConstraint(long_name=lambda name: 'temp' in name)
iris.NameConstraint(standard_name='air_temperature',
STASH=lambda stash: stash.item == 203)
"""
self.standard_name = standard_name
self.long_name = long_name
self.var_name = var_name
self.STASH = STASH
self._names = ("standard_name", "long_name", "var_name", "STASH")
super().__init__(cube_func=self._cube_func)
def _cube_func(self, cube):
def matcher(target, value):
if callable(value):
result = False
if target is not None:
#
# Don't pass None through into the callable. Users should
# use the "name=None" pattern instead. Otherwise, users
# will need to explicitly handle the None case, which is
# unnecessary and pretty darn ugly e.g.,
#
# lambda name: name is not None and name.startswith('ick')
#
result = value(target)
else:
result = value == target
return result
match = True
for name in self._names:
expected = getattr(self, name)
if expected != "none":
if name == "STASH":
actual = cube.attributes.get(name)
else:
actual = getattr(cube, name)
match = matcher(actual, expected)
# Make this a short-circuit match.
if match is False:
break
return match
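# Illustrative sketch (hypothetical cube): for a cube whose standard_name is
# 'air_temperature' and whose long_name is unset (None),
#   NameConstraint(standard_name='air_temperature') matches,
#   NameConstraint(long_name=None) matches (None compares equal), and
#   NameConstraint(long_name=lambda name: 'temp' in name) does not match,
# because a callable is never invoked on a None target.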
def __repr__(self):
names = []
for name in self._names:
value = getattr(self, name)
if value != "none":
names.append("{}={!r}".format(name, value))
return "{}({})".format(self.__class__.__name__, ", ".join(names))
| SciTools/iris | lib/iris/_constraints.py | Python | lgpl-3.0 | 21,207 | [
"NetCDF"
] | 3dee11b5a30f4583212d386ed8bebfba65f17ade483d242425a9d194c8afce35 |
"""User-friendly public interface to polynomial functions. """
from __future__ import print_function, division
from sympy.core import (
S, Basic, Expr, I, Integer, Add, Mul, Dummy, Tuple
)
from sympy.core.basic import preorder_traversal
from sympy.core.compatibility import iterable, range, ordered
from sympy.core.decorators import _sympifyit
from sympy.core.function import Derivative
from sympy.core.mul import _keep_coeff
from sympy.core.relational import Relational
from sympy.core.symbol import Symbol
from sympy.core.sympify import sympify
from sympy.logic.boolalg import BooleanAtom
from sympy.polys import polyoptions as options
from sympy.polys.constructor import construct_domain
from sympy.polys.domains import FF, QQ, ZZ
from sympy.polys.fglmtools import matrix_fglm
from sympy.polys.groebnertools import groebner as _groebner
from sympy.polys.monomials import Monomial
from sympy.polys.orderings import monomial_key
from sympy.polys.polyclasses import DMP
from sympy.polys.polyerrors import (
OperationNotSupported, DomainError,
CoercionFailed, UnificationFailed,
GeneratorsNeeded, PolynomialError,
MultivariatePolynomialError,
ExactQuotientFailed,
PolificationFailed,
ComputationFailed,
GeneratorsError,
)
from sympy.polys.polyutils import (
basic_from_dict,
_sort_gens,
_unify_gens,
_dict_reorder,
_dict_from_expr,
_parallel_dict_from_expr,
)
from sympy.polys.rationaltools import together
from sympy.polys.rootisolation import dup_isolate_real_roots_list
from sympy.utilities import group, sift, public, filldedent
# Required to avoid errors
import sympy.polys
import mpmath
from mpmath.libmp.libhyper import NoConvergence
@public
class Poly(Expr):
"""
Generic class for representing and operating on polynomial expressions.
Subclasses Expr class.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
Create a univariate polynomial:
>>> Poly(x*(x**2 + x - 1)**2)
Poly(x**5 + 2*x**4 - x**3 - 2*x**2 + x, x, domain='ZZ')
Create a univariate polynomial with specific domain:
>>> from sympy import sqrt
>>> Poly(x**2 + 2*x + sqrt(3), domain='R')
Poly(1.0*x**2 + 2.0*x + 1.73205080756888, x, domain='RR')
Create a multivariate polynomial:
>>> Poly(y*x**2 + x*y + 1)
Poly(x**2*y + x*y + 1, x, y, domain='ZZ')
Create a univariate polynomial, where y is a constant:
>>> Poly(y*x**2 + x*y + 1, x)
Poly(y*x**2 + y*x + 1, x, domain='ZZ[y]')
You can evaluate the above polynomial as a function of y:
>>> Poly(y*x**2 + x*y + 1, x).eval(2)
6*y + 1
See Also
========
sympy.core.expr.Expr
"""
__slots__ = ['rep', 'gens']
is_commutative = True
is_Poly = True
_op_priority = 10.001
def __new__(cls, rep, *gens, **args):
"""Create a new polynomial instance out of something useful. """
opt = options.build_options(gens, args)
if 'order' in opt:
raise NotImplementedError("'order' keyword is not implemented yet")
if iterable(rep, exclude=str):
if isinstance(rep, dict):
return cls._from_dict(rep, opt)
else:
return cls._from_list(list(rep), opt)
else:
rep = sympify(rep)
if rep.is_Poly:
return cls._from_poly(rep, opt)
else:
return cls._from_expr(rep, opt)
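# Construction sketch (informal): Poly({(2,): 1, (0,): 3}, x),
# Poly([1, 0, 3], x) and Poly(x**2 + 3) all build the same polynomial,
# dispatching to _from_dict, _from_list and _from_expr respectively, while
# passing an existing Poly goes through _from_poly.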
@classmethod
def new(cls, rep, *gens):
"""Construct :class:`Poly` instance from raw representation. """
if not isinstance(rep, DMP):
raise PolynomialError(
"invalid polynomial representation: %s" % rep)
elif rep.lev != len(gens) - 1:
raise PolynomialError("invalid arguments: %s, %s" % (rep, gens))
obj = Basic.__new__(cls)
obj.rep = rep
obj.gens = gens
return obj
@classmethod
def from_dict(cls, rep, *gens, **args):
"""Construct a polynomial from a ``dict``. """
opt = options.build_options(gens, args)
return cls._from_dict(rep, opt)
@classmethod
def from_list(cls, rep, *gens, **args):
"""Construct a polynomial from a ``list``. """
opt = options.build_options(gens, args)
return cls._from_list(rep, opt)
@classmethod
def from_poly(cls, rep, *gens, **args):
"""Construct a polynomial from a polynomial. """
opt = options.build_options(gens, args)
return cls._from_poly(rep, opt)
@classmethod
def from_expr(cls, rep, *gens, **args):
"""Construct a polynomial from an expression. """
opt = options.build_options(gens, args)
return cls._from_expr(rep, opt)
@classmethod
def _from_dict(cls, rep, opt):
"""Construct a polynomial from a ``dict``. """
gens = opt.gens
if not gens:
raise GeneratorsNeeded(
"can't initialize from 'dict' without generators")
level = len(gens) - 1
domain = opt.domain
if domain is None:
domain, rep = construct_domain(rep, opt=opt)
else:
for monom, coeff in rep.items():
rep[monom] = domain.convert(coeff)
return cls.new(DMP.from_dict(rep, level, domain), *gens)
@classmethod
def _from_list(cls, rep, opt):
"""Construct a polynomial from a ``list``. """
gens = opt.gens
if not gens:
raise GeneratorsNeeded(
"can't initialize from 'list' without generators")
elif len(gens) != 1:
raise MultivariatePolynomialError(
"'list' representation not supported")
level = len(gens) - 1
domain = opt.domain
if domain is None:
domain, rep = construct_domain(rep, opt=opt)
else:
rep = list(map(domain.convert, rep))
return cls.new(DMP.from_list(rep, level, domain), *gens)
@classmethod
def _from_poly(cls, rep, opt):
"""Construct a polynomial from a polynomial. """
if cls != rep.__class__:
rep = cls.new(rep.rep, *rep.gens)
gens = opt.gens
field = opt.field
domain = opt.domain
if gens and rep.gens != gens:
if set(rep.gens) != set(gens):
return cls._from_expr(rep.as_expr(), opt)
else:
rep = rep.reorder(*gens)
if 'domain' in opt and domain:
rep = rep.set_domain(domain)
elif field is True:
rep = rep.to_field()
return rep
@classmethod
def _from_expr(cls, rep, opt):
"""Construct a polynomial from an expression. """
rep, opt = _dict_from_expr(rep, opt)
return cls._from_dict(rep, opt)
def _hashable_content(self):
"""Allow SymPy to hash Poly instances. """
return (self.rep, self.gens)
def __hash__(self):
return super(Poly, self).__hash__()
@property
def free_symbols(self):
"""
Free symbols of a polynomial expression.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(x**2 + 1).free_symbols
{x}
>>> Poly(x**2 + y).free_symbols
{x, y}
>>> Poly(x**2 + y, x).free_symbols
{x, y}
>>> Poly(x**2 + y, x, z).free_symbols
{x, y}
"""
symbols = set()
gens = self.gens
for i in range(len(gens)):
for monom in self.monoms():
if monom[i]:
symbols |= gens[i].free_symbols
break
return symbols | self.free_symbols_in_domain
@property
def free_symbols_in_domain(self):
"""
Free symbols of the domain of ``self``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 1).free_symbols_in_domain
set()
>>> Poly(x**2 + y).free_symbols_in_domain
set()
>>> Poly(x**2 + y, x).free_symbols_in_domain
{y}
"""
domain, symbols = self.rep.dom, set()
if domain.is_Composite:
for gen in domain.symbols:
symbols |= gen.free_symbols
elif domain.is_EX:
for coeff in self.coeffs():
symbols |= coeff.free_symbols
return symbols
@property
def args(self):
"""
Don't mess with the core.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).args
(x**2 + 1,)
"""
return (self.as_expr(),)
@property
def gen(self):
"""
Return the principal generator.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).gen
x
"""
return self.gens[0]
@property
def domain(self):
"""Get the ground domain of ``self``. """
return self.get_domain()
@property
def zero(self):
"""Return zero polynomial with ``self``'s properties. """
return self.new(self.rep.zero(self.rep.lev, self.rep.dom), *self.gens)
@property
def one(self):
"""Return one polynomial with ``self``'s properties. """
return self.new(self.rep.one(self.rep.lev, self.rep.dom), *self.gens)
@property
def unit(self):
"""Return unit polynomial with ``self``'s properties. """
return self.new(self.rep.unit(self.rep.lev, self.rep.dom), *self.gens)
def unify(f, g):
"""
Make ``f`` and ``g`` belong to the same domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f, g = Poly(x/2 + 1), Poly(2*x + 1)
>>> f
Poly(1/2*x + 1, x, domain='QQ')
>>> g
Poly(2*x + 1, x, domain='ZZ')
>>> F, G = f.unify(g)
>>> F
Poly(1/2*x + 1, x, domain='QQ')
>>> G
Poly(2*x + 1, x, domain='QQ')
"""
_, per, F, G = f._unify(g)
return per(F), per(G)
def _unify(f, g):
g = sympify(g)
if not g.is_Poly:
try:
return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g))
except CoercionFailed:
raise UnificationFailed("can't unify %s with %s" % (f, g))
if isinstance(f.rep, DMP) and isinstance(g.rep, DMP):
gens = _unify_gens(f.gens, g.gens)
dom, lev = f.rep.dom.unify(g.rep.dom, gens), len(gens) - 1
if f.gens != gens:
f_monoms, f_coeffs = _dict_reorder(
f.rep.to_dict(), f.gens, gens)
if f.rep.dom != dom:
f_coeffs = [dom.convert(c, f.rep.dom) for c in f_coeffs]
F = DMP(dict(list(zip(f_monoms, f_coeffs))), dom, lev)
else:
F = f.rep.convert(dom)
if g.gens != gens:
g_monoms, g_coeffs = _dict_reorder(
g.rep.to_dict(), g.gens, gens)
if g.rep.dom != dom:
g_coeffs = [dom.convert(c, g.rep.dom) for c in g_coeffs]
G = DMP(dict(list(zip(g_monoms, g_coeffs))), dom, lev)
else:
G = g.rep.convert(dom)
else:
raise UnificationFailed("can't unify %s with %s" % (f, g))
cls = f.__class__
def per(rep, dom=dom, gens=gens, remove=None):
if remove is not None:
gens = gens[:remove] + gens[remove + 1:]
if not gens:
return dom.to_sympy(rep)
return cls.new(rep, *gens)
return dom, per, F, G
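# Informal sketch of what ``_unify`` yields: for f = Poly(x + 1, x) and
# g = Poly(y/2, y), the unified generators are (x, y) and the unified domain
# is QQ, so F and G are the low-level DMP representations of both operands
# over QQ in those generators, and ``per`` rebuilds Poly instances from such
# representations (dropping a generator when ``remove`` is given).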
def per(f, rep, gens=None, remove=None):
"""
Create a Poly out of the given representation.
Examples
========
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x, y
>>> from sympy.polys.polyclasses import DMP
>>> a = Poly(x**2 + 1)
>>> a.per(DMP([ZZ(1), ZZ(1)], ZZ), gens=[y])
Poly(y + 1, y, domain='ZZ')
"""
if gens is None:
gens = f.gens
if remove is not None:
gens = gens[:remove] + gens[remove + 1:]
if not gens:
return f.rep.dom.to_sympy(rep)
return f.__class__.new(rep, *gens)
def set_domain(f, domain):
"""Set the ground domain of ``f``. """
opt = options.build_options(f.gens, {'domain': domain})
return f.per(f.rep.convert(opt.domain))
def get_domain(f):
"""Get the ground domain of ``f``. """
return f.rep.dom
def set_modulus(f, modulus):
"""
Set the modulus of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(5*x**2 + 2*x - 1, x).set_modulus(2)
Poly(x**2 + 1, x, modulus=2)
"""
modulus = options.Modulus.preprocess(modulus)
return f.set_domain(FF(modulus))
def get_modulus(f):
"""
Get the modulus of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, modulus=2).get_modulus()
2
"""
domain = f.get_domain()
if domain.is_FiniteField:
return Integer(domain.characteristic())
else:
raise PolynomialError("not a polynomial over a Galois field")
def _eval_subs(f, old, new):
"""Internal implementation of :func:`subs`. """
if old in f.gens:
if new.is_number:
return f.eval(old, new)
else:
try:
return f.replace(old, new)
except PolynomialError:
pass
return f.as_expr().subs(old, new)
def exclude(f):
"""
Remove unnecessary generators from ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import a, b, c, d, x
>>> Poly(a + x, a, b, c, d, x).exclude()
Poly(a + x, a, x, domain='ZZ')
"""
J, new = f.rep.exclude()
gens = []
for j in range(len(f.gens)):
if j not in J:
gens.append(f.gens[j])
return f.per(new, gens=gens)
def replace(f, x, y=None, *_ignore):
# XXX this does not match Basic's signature
"""
Replace ``x`` with ``y`` in generators list.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 1, x).replace(x, y)
Poly(y**2 + 1, y, domain='ZZ')
"""
if y is None:
if f.is_univariate:
x, y = f.gen, x
else:
raise PolynomialError(
"syntax supported only in univariate case")
if x == y or x not in f.gens:
return f
if x in f.gens and y not in f.gens:
dom = f.get_domain()
if not dom.is_Composite or y not in dom.symbols:
gens = list(f.gens)
gens[gens.index(x)] = y
return f.per(f.rep, gens=gens)
raise PolynomialError("can't replace %s with %s in %s" % (x, y, f))
def reorder(f, *gens, **args):
"""
Efficiently apply new order of generators.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x*y**2, x, y).reorder(y, x)
Poly(y**2*x + x**2, y, x, domain='ZZ')
"""
opt = options.Options((), args)
if not gens:
gens = _sort_gens(f.gens, opt=opt)
elif set(f.gens) != set(gens):
raise PolynomialError(
"generators list can differ only up to order of elements")
rep = dict(list(zip(*_dict_reorder(f.rep.to_dict(), f.gens, gens))))
return f.per(DMP(rep, f.rep.dom, len(gens) - 1), gens=gens)
def ltrim(f, gen):
"""
Remove dummy generators from ``f`` that are to the left of the
specified ``gen`` in the generators as ordered. When ``gen``
is an integer, it refers to the generator located at that
position within the tuple of generators of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(y**2 + y*z**2, x, y, z).ltrim(y)
Poly(y**2 + y*z**2, y, z, domain='ZZ')
>>> Poly(z, x, y, z).ltrim(-1)
Poly(z, z, domain='ZZ')
"""
rep = f.as_dict(native=True)
j = f._gen_to_level(gen)
terms = {}
for monom, coeff in rep.items():
if any(monom[:j]):
# some generator is used in the portion to be trimmed
raise PolynomialError("can't left trim %s" % f)
terms[monom[j:]] = coeff
gens = f.gens[j:]
return f.new(DMP.from_dict(terms, len(gens) - 1, f.rep.dom), *gens)
def has_only_gens(f, *gens):
"""
Return ``True`` if ``Poly(f, *gens)`` retains ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(x*y + 1, x, y, z).has_only_gens(x, y)
True
>>> Poly(x*y + z, x, y, z).has_only_gens(x, y)
False
"""
indices = set()
for gen in gens:
try:
index = f.gens.index(gen)
except ValueError:
raise GeneratorsError(
"%s doesn't have %s as generator" % (f, gen))
else:
indices.add(index)
for monom in f.monoms():
for i, elt in enumerate(monom):
if i not in indices and elt:
return False
return True
def to_ring(f):
"""
Make the ground domain a ring.
Examples
========
>>> from sympy import Poly, QQ
>>> from sympy.abc import x
>>> Poly(x**2 + 1, domain=QQ).to_ring()
Poly(x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'to_ring'):
result = f.rep.to_ring()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_ring')
return f.per(result)
def to_field(f):
"""
Make the ground domain a field.
Examples
========
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x, domain=ZZ).to_field()
Poly(x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'to_field'):
result = f.rep.to_field()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_field')
return f.per(result)
def to_exact(f):
"""
Make the ground domain exact.
Examples
========
>>> from sympy import Poly, RR
>>> from sympy.abc import x
>>> Poly(x**2 + 1.0, x, domain=RR).to_exact()
Poly(x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'to_exact'):
result = f.rep.to_exact()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_exact')
return f.per(result)
def retract(f, field=None):
"""
Recalculate the ground domain of a polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(x**2 + 1, x, domain='QQ[y]')
>>> f
Poly(x**2 + 1, x, domain='QQ[y]')
>>> f.retract()
Poly(x**2 + 1, x, domain='ZZ')
>>> f.retract(field=True)
Poly(x**2 + 1, x, domain='QQ')
"""
dom, rep = construct_domain(f.as_dict(zero=True),
field=field, composite=f.domain.is_Composite or None)
return f.from_dict(rep, f.gens, domain=dom)
def slice(f, x, m, n=None):
"""Take a continuous subsequence of terms of ``f``. """
if n is None:
j, m, n = 0, x, m
else:
j = f._gen_to_level(x)
m, n = int(m), int(n)
if hasattr(f.rep, 'slice'):
result = f.rep.slice(m, n, j)
else: # pragma: no cover
raise OperationNotSupported(f, 'slice')
return f.per(result)
def coeffs(f, order=None):
"""
Returns all non-zero coefficients from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x + 3, x).coeffs()
[1, 2, 3]
See Also
========
all_coeffs
coeff_monomial
nth
"""
return [f.rep.dom.to_sympy(c) for c in f.rep.coeffs(order=order)]
def monoms(f, order=None):
"""
Returns all non-zero monomials from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 + x*y + 3*y, x, y).monoms()
[(2, 0), (1, 2), (1, 1), (0, 1)]
See Also
========
all_monoms
"""
return f.rep.monoms(order=order)
def terms(f, order=None):
"""
Returns all non-zero terms from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 + x*y + 3*y, x, y).terms()
[((2, 0), 1), ((1, 2), 2), ((1, 1), 1), ((0, 1), 3)]
See Also
========
all_terms
"""
return [(m, f.rep.dom.to_sympy(c)) for m, c in f.rep.terms(order=order)]
def all_coeffs(f):
"""
Returns all coefficients from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_coeffs()
[1, 0, 2, -1]
"""
return [f.rep.dom.to_sympy(c) for c in f.rep.all_coeffs()]
def all_monoms(f):
"""
Returns all monomials from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_monoms()
[(3,), (2,), (1,), (0,)]
See Also
========
all_terms
"""
return f.rep.all_monoms()
def all_terms(f):
"""
Returns all terms from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_terms()
[((3,), 1), ((2,), 0), ((1,), 2), ((0,), -1)]
"""
return [(m, f.rep.dom.to_sympy(c)) for m, c in f.rep.all_terms()]
def termwise(f, func, *gens, **args):
"""
Apply a function to all terms of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> def func(k, coeff):
... k = k[0]
... return coeff//10**(2-k)
>>> Poly(x**2 + 20*x + 400).termwise(func)
Poly(x**2 + 2*x + 4, x, domain='ZZ')
"""
terms = {}
for monom, coeff in f.terms():
result = func(monom, coeff)
if isinstance(result, tuple):
monom, coeff = result
else:
coeff = result
if coeff:
if monom not in terms:
terms[monom] = coeff
else:
raise PolynomialError(
"%s monomial was generated twice" % monom)
return f.from_dict(terms, *(gens or f.gens), **args)
def length(f):
"""
Returns the number of non-zero terms in ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 2*x - 1).length()
3
"""
return len(f.as_dict())
def as_dict(f, native=False, zero=False):
"""
Switch to a ``dict`` representation.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 - y, x, y).as_dict()
{(0, 1): -1, (1, 2): 2, (2, 0): 1}
"""
if native:
return f.rep.to_dict(zero=zero)
else:
return f.rep.to_sympy_dict(zero=zero)
def as_list(f, native=False):
"""Switch to a ``list`` representation. """
if native:
return f.rep.to_list()
else:
return f.rep.to_sympy_list()
def as_expr(f, *gens):
"""
Convert a Poly instance to an Expr instance.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2 + 2*x*y**2 - y, x, y)
>>> f.as_expr()
x**2 + 2*x*y**2 - y
>>> f.as_expr({x: 5})
10*y**2 - y + 25
>>> f.as_expr(5, 6)
379
"""
if not gens:
gens = f.gens
elif len(gens) == 1 and isinstance(gens[0], dict):
mapping = gens[0]
gens = list(f.gens)
for gen, value in mapping.items():
try:
index = gens.index(gen)
except ValueError:
raise GeneratorsError(
"%s doesn't have %s as generator" % (f, gen))
else:
gens[index] = value
return basic_from_dict(f.rep.to_sympy_dict(), *gens)
def lift(f):
"""
Convert algebraic coefficients to rationals.
Examples
========
>>> from sympy import Poly, I
>>> from sympy.abc import x
>>> Poly(x**2 + I*x + 1, x, extension=I).lift()
Poly(x**4 + 3*x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'lift'):
result = f.rep.lift()
else: # pragma: no cover
raise OperationNotSupported(f, 'lift')
return f.per(result)
def deflate(f):
"""
Reduce degree of ``f`` by mapping ``x_i**m`` to ``y_i``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**6*y**2 + x**3 + 1, x, y).deflate()
((3, 2), Poly(x**2*y + x + 1, x, y, domain='ZZ'))
"""
if hasattr(f.rep, 'deflate'):
J, result = f.rep.deflate()
else: # pragma: no cover
raise OperationNotSupported(f, 'deflate')
return J, f.per(result)
def inject(f, front=False):
"""
Inject ground domain generators into ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2*y + x*y**3 + x*y + 1, x)
>>> f.inject()
Poly(x**2*y + x*y**3 + x*y + 1, x, y, domain='ZZ')
>>> f.inject(front=True)
Poly(y**3*x + y*x**2 + y*x + 1, y, x, domain='ZZ')
"""
dom = f.rep.dom
if dom.is_Numerical:
return f
elif not dom.is_Poly:
raise DomainError("can't inject generators over %s" % dom)
if hasattr(f.rep, 'inject'):
result = f.rep.inject(front=front)
else: # pragma: no cover
raise OperationNotSupported(f, 'inject')
if front:
gens = dom.symbols + f.gens
else:
gens = f.gens + dom.symbols
return f.new(result, *gens)
def eject(f, *gens):
"""
Eject selected generators into the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2*y + x*y**3 + x*y + 1, x, y)
>>> f.eject(x)
Poly(x*y**3 + (x**2 + x)*y + 1, y, domain='ZZ[x]')
>>> f.eject(y)
Poly(y*x**2 + (y**3 + y)*x + 1, x, domain='ZZ[y]')
"""
dom = f.rep.dom
if not dom.is_Numerical:
raise DomainError("can't eject generators over %s" % dom)
k = len(gens)
if f.gens[:k] == gens:
_gens, front = f.gens[k:], True
elif f.gens[-k:] == gens:
_gens, front = f.gens[:-k], False
else:
raise NotImplementedError(
"can only eject front or back generators")
dom = dom.inject(*gens)
if hasattr(f.rep, 'eject'):
result = f.rep.eject(dom, front=front)
else: # pragma: no cover
raise OperationNotSupported(f, 'eject')
return f.new(result, *_gens)
def terms_gcd(f):
"""
Remove GCD of terms from the polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**6*y**2 + x**3*y, x, y).terms_gcd()
((3, 1), Poly(x**3*y + 1, x, y, domain='ZZ'))
"""
if hasattr(f.rep, 'terms_gcd'):
J, result = f.rep.terms_gcd()
else: # pragma: no cover
raise OperationNotSupported(f, 'terms_gcd')
return J, f.per(result)
def add_ground(f, coeff):
"""
Add an element of the ground domain to ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).add_ground(2)
Poly(x + 3, x, domain='ZZ')
"""
if hasattr(f.rep, 'add_ground'):
result = f.rep.add_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'add_ground')
return f.per(result)
def sub_ground(f, coeff):
"""
Subtract an element of the ground domain from ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).sub_ground(2)
Poly(x - 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'sub_ground'):
result = f.rep.sub_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'sub_ground')
return f.per(result)
def mul_ground(f, coeff):
"""
Multiply ``f`` by an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 1).mul_ground(2)
Poly(2*x + 2, x, domain='ZZ')
"""
if hasattr(f.rep, 'mul_ground'):
result = f.rep.mul_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'mul_ground')
return f.per(result)
def quo_ground(f, coeff):
"""
Quotient of ``f`` by an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x + 4).quo_ground(2)
Poly(x + 2, x, domain='ZZ')
>>> Poly(2*x + 3).quo_ground(2)
Poly(x + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'quo_ground'):
result = f.rep.quo_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'quo_ground')
return f.per(result)
def exquo_ground(f, coeff):
"""
Exact quotient of ``f`` by an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x + 4).exquo_ground(2)
Poly(x + 2, x, domain='ZZ')
>>> Poly(2*x + 3).exquo_ground(2)
Traceback (most recent call last):
...
ExactQuotientFailed: 2 does not divide 3 in ZZ
"""
if hasattr(f.rep, 'exquo_ground'):
result = f.rep.exquo_ground(coeff)
else: # pragma: no cover
raise OperationNotSupported(f, 'exquo_ground')
return f.per(result)
def abs(f):
"""
Make all coefficients in ``f`` positive.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).abs()
Poly(x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'abs'):
result = f.rep.abs()
else: # pragma: no cover
raise OperationNotSupported(f, 'abs')
return f.per(result)
def neg(f):
"""
Negate all coefficients in ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).neg()
Poly(-x**2 + 1, x, domain='ZZ')
>>> -Poly(x**2 - 1, x)
Poly(-x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'neg'):
result = f.rep.neg()
else: # pragma: no cover
raise OperationNotSupported(f, 'neg')
return f.per(result)
def add(f, g):
"""
Add two polynomials ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).add(Poly(x - 2, x))
Poly(x**2 + x - 1, x, domain='ZZ')
>>> Poly(x**2 + 1, x) + Poly(x - 2, x)
Poly(x**2 + x - 1, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.add_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'add'):
result = F.add(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'add')
return per(result)
def sub(f, g):
"""
Subtract two polynomials ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).sub(Poly(x - 2, x))
Poly(x**2 - x + 3, x, domain='ZZ')
>>> Poly(x**2 + 1, x) - Poly(x - 2, x)
Poly(x**2 - x + 3, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.sub_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'sub'):
result = F.sub(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'sub')
return per(result)
def mul(f, g):
"""
Multiply two polynomials ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).mul(Poly(x - 2, x))
Poly(x**3 - 2*x**2 + x - 2, x, domain='ZZ')
>>> Poly(x**2 + 1, x)*Poly(x - 2, x)
Poly(x**3 - 2*x**2 + x - 2, x, domain='ZZ')
"""
g = sympify(g)
if not g.is_Poly:
return f.mul_ground(g)
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'mul'):
result = F.mul(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'mul')
return per(result)
def sqr(f):
"""
Square a polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x - 2, x).sqr()
Poly(x**2 - 4*x + 4, x, domain='ZZ')
>>> Poly(x - 2, x)**2
Poly(x**2 - 4*x + 4, x, domain='ZZ')
"""
if hasattr(f.rep, 'sqr'):
result = f.rep.sqr()
else: # pragma: no cover
raise OperationNotSupported(f, 'sqr')
return f.per(result)
def pow(f, n):
"""
Raise ``f`` to a non-negative power ``n``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x - 2, x).pow(3)
Poly(x**3 - 6*x**2 + 12*x - 8, x, domain='ZZ')
>>> Poly(x - 2, x)**3
Poly(x**3 - 6*x**2 + 12*x - 8, x, domain='ZZ')
"""
n = int(n)
if hasattr(f.rep, 'pow'):
result = f.rep.pow(n)
else: # pragma: no cover
raise OperationNotSupported(f, 'pow')
return f.per(result)
def pdiv(f, g):
"""
Polynomial pseudo-division of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).pdiv(Poly(2*x - 4, x))
(Poly(2*x + 4, x, domain='ZZ'), Poly(20, x, domain='ZZ'))
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pdiv'):
q, r = F.pdiv(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'pdiv')
return per(q), per(r)
def prem(f, g):
"""
Polynomial pseudo-remainder of ``f`` by ``g``.
Caveat: The function prem(f, g, x) can be safely used only to compute
subresultant polynomial remainder sequences (prs's) in Z[x].
To safely compute Euclidean and Sturmian prs's in Z[x],
employ any one of the corresponding functions found in
the module sympy.polys.subresultants_qq_zz. The functions
in the module with suffix _pg compute prs's in Z[x] employing
rem(f, g, x), whereas the functions with suffix _amv
compute prs's in Z[x] employing rem_z(f, g, x).
The function rem_z(f, g, x) differs from prem(f, g, x) in that,
to compute the remainder polynomials in Z[x], it premultiplies
the dividend by the absolute value of the leading coefficient
of the divisor raised to the power degree(f, x) - degree(g, x) + 1.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).prem(Poly(2*x - 4, x))
Poly(20, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'prem'):
result = F.prem(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'prem')
return per(result)
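# Worked check of the pseudo-division identity behind the example above (a
# sketch): for f = x**2 + 1 and g = 2*x - 4 we have LC(g) = 2 and
# deg(f) - deg(g) + 1 = 2, and indeed
#   2**2 * (x**2 + 1) == (2*x + 4)*(2*x - 4) + 20,
# which is why pdiv returns (2*x + 4, 20) and prem returns 20.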
def pquo(f, g):
"""
Polynomial pseudo-quotient of ``f`` by ``g``.
See the Caveat note in the function prem(f, g).
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).pquo(Poly(2*x - 4, x))
Poly(2*x + 4, x, domain='ZZ')
>>> Poly(x**2 - 1, x).pquo(Poly(2*x - 2, x))
Poly(2*x + 2, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pquo'):
result = F.pquo(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'pquo')
return per(result)
def pexquo(f, g):
"""
Polynomial exact pseudo-quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).pexquo(Poly(2*x - 2, x))
Poly(2*x + 2, x, domain='ZZ')
>>> Poly(x**2 + 1, x).pexquo(Poly(2*x - 4, x))
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'pexquo'):
try:
result = F.pexquo(G)
except ExactQuotientFailed as exc:
raise exc.new(f.as_expr(), g.as_expr())
else: # pragma: no cover
raise OperationNotSupported(f, 'pexquo')
return per(result)
def div(f, g, auto=True):
"""
Polynomial division with remainder of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).div(Poly(2*x - 4, x))
(Poly(1/2*x + 1, x, domain='QQ'), Poly(5, x, domain='QQ'))
>>> Poly(x**2 + 1, x).div(Poly(2*x - 4, x), auto=False)
(Poly(0, x, domain='ZZ'), Poly(x**2 + 1, x, domain='ZZ'))
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.is_Ring and not dom.is_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'div'):
q, r = F.div(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'div')
if retract:
try:
Q, R = q.to_ring(), r.to_ring()
except CoercionFailed:
pass
else:
q, r = Q, R
return per(q), per(r)
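# Behaviour sketch for ``auto`` (informal): over ZZ the operands are lifted
# to QQ before dividing, and the quotient and remainder are retracted back
# to ZZ only when all of their coefficients are integral. For example,
# Poly(x**2 - 1, x).div(Poly(x - 1, x)) comes back over ZZ as quotient x + 1
# with remainder 0, whereas dividing by 2*x - 4 keeps the QQ results shown
# in the docstring above.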
def rem(f, g, auto=True):
"""
Computes the polynomial remainder of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).rem(Poly(2*x - 4, x))
Poly(5, x, domain='ZZ')
>>> Poly(x**2 + 1, x).rem(Poly(2*x - 4, x), auto=False)
Poly(x**2 + 1, x, domain='ZZ')
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.is_Ring and not dom.is_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'rem'):
r = F.rem(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'rem')
if retract:
try:
r = r.to_ring()
except CoercionFailed:
pass
return per(r)
def quo(f, g, auto=True):
"""
Computes polynomial quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).quo(Poly(2*x - 4, x))
Poly(1/2*x + 1, x, domain='QQ')
>>> Poly(x**2 - 1, x).quo(Poly(x - 1, x))
Poly(x + 1, x, domain='ZZ')
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.is_Ring and not dom.is_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'quo'):
q = F.quo(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'quo')
if retract:
try:
q = q.to_ring()
except CoercionFailed:
pass
return per(q)
def exquo(f, g, auto=True):
"""
Computes polynomial exact quotient of ``f`` by ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).exquo(Poly(x - 1, x))
Poly(x + 1, x, domain='ZZ')
>>> Poly(x**2 + 1, x).exquo(Poly(2*x - 4, x))
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
dom, per, F, G = f._unify(g)
retract = False
if auto and dom.is_Ring and not dom.is_Field:
F, G = F.to_field(), G.to_field()
retract = True
if hasattr(f.rep, 'exquo'):
try:
q = F.exquo(G)
except ExactQuotientFailed as exc:
raise exc.new(f.as_expr(), g.as_expr())
else: # pragma: no cover
raise OperationNotSupported(f, 'exquo')
if retract:
try:
q = q.to_ring()
except CoercionFailed:
pass
return per(q)
def _gen_to_level(f, gen):
"""Returns level associated with the given generator. """
if isinstance(gen, int):
length = len(f.gens)
if -length <= gen < length:
if gen < 0:
return length + gen
else:
return gen
else:
raise PolynomialError("-%s <= gen < %s expected, got %s" %
(length, length, gen))
else:
try:
return f.gens.index(sympify(gen))
except ValueError:
raise PolynomialError(
"a valid generator expected, got %s" % gen)
def degree(f, gen=0):
"""
Returns degree of ``f`` in ``x_j``.
The degree of 0 is negative infinity.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).degree()
2
>>> Poly(x**2 + y*x + y, x, y).degree(y)
1
>>> Poly(0, x).degree()
-oo
"""
j = f._gen_to_level(gen)
if hasattr(f.rep, 'degree'):
return f.rep.degree(j)
else: # pragma: no cover
raise OperationNotSupported(f, 'degree')
def degree_list(f):
"""
Returns a list of degrees of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).degree_list()
(2, 1)
"""
if hasattr(f.rep, 'degree_list'):
return f.rep.degree_list()
else: # pragma: no cover
raise OperationNotSupported(f, 'degree_list')
def total_degree(f):
"""
Returns the total degree of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + y*x + 1, x, y).total_degree()
2
>>> Poly(x + y**5, x, y).total_degree()
5
"""
if hasattr(f.rep, 'total_degree'):
return f.rep.total_degree()
else: # pragma: no cover
raise OperationNotSupported(f, 'total_degree')
def homogenize(f, s):
"""
Returns the homogeneous polynomial of ``f``.
A homogeneous polynomial is a polynomial all of whose monomials with
non-zero coefficients have the same total degree. If you only
want to check if a polynomial is homogeneous, then use
:func:`Poly.is_homogeneous`. If you want not only to check if a
polynomial is homogeneous but also compute its homogeneous order,
then use :func:`Poly.homogeneous_order`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> f = Poly(x**5 + 2*x**2*y**2 + 9*x*y**3)
>>> f.homogenize(z)
Poly(x**5 + 2*x**2*y**2*z + 9*x*y**3*z, x, y, z, domain='ZZ')
"""
if not isinstance(s, Symbol):
raise TypeError("``Symbol`` expected, got %s" % type(s))
if s in f.gens:
i = f.gens.index(s)
gens = f.gens
else:
i = len(f.gens)
gens = f.gens + (s,)
if hasattr(f.rep, 'homogenize'):
return f.per(f.rep.homogenize(i), gens=gens)
raise OperationNotSupported(f, 'homogenize')
def homogeneous_order(f):
"""
Returns the homogeneous order of ``f``.
A homogeneous polynomial is a polynomial all of whose monomials with
non-zero coefficients have the same total degree. This degree is
the homogeneous order of ``f``. If you only want to check if a
polynomial is homogeneous, then use :func:`Poly.is_homogeneous`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**5 + 2*x**3*y**2 + 9*x*y**4)
>>> f.homogeneous_order()
5
"""
if hasattr(f.rep, 'homogeneous_order'):
return f.rep.homogeneous_order()
else: # pragma: no cover
raise OperationNotSupported(f, 'homogeneous_order')
def LC(f, order=None):
"""
Returns the leading coefficient of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(4*x**3 + 2*x**2 + 3*x, x).LC()
4
"""
if order is not None:
return f.coeffs(order)[0]
if hasattr(f.rep, 'LC'):
result = f.rep.LC()
else: # pragma: no cover
raise OperationNotSupported(f, 'LC')
return f.rep.dom.to_sympy(result)
def TC(f):
"""
Returns the trailing coefficient of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x**2 + 3*x, x).TC()
0
"""
if hasattr(f.rep, 'TC'):
result = f.rep.TC()
else: # pragma: no cover
raise OperationNotSupported(f, 'TC')
return f.rep.dom.to_sympy(result)
def EC(f, order=None):
"""
Returns the last non-zero coefficient of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x**2 + 3*x, x).EC()
3
"""
if hasattr(f.rep, 'coeffs'):
return f.coeffs(order)[-1]
else: # pragma: no cover
raise OperationNotSupported(f, 'EC')
def coeff_monomial(f, monom):
"""
Returns the coefficient of ``monom`` in ``f``, or zero if that monomial is not present.
Examples
========
>>> from sympy import Poly, exp
>>> from sympy.abc import x, y
>>> p = Poly(24*x*y*exp(8) + 23*x, x, y)
>>> p.coeff_monomial(x)
23
>>> p.coeff_monomial(y)
0
>>> p.coeff_monomial(x*y)
24*exp(8)
Note that ``Expr.coeff()`` behaves differently, collecting terms
if possible; the Poly must be converted to an Expr to use that
method, however:
>>> p.as_expr().coeff(x)
24*y*exp(8) + 23
>>> p.as_expr().coeff(y)
24*x*exp(8)
>>> p.as_expr().coeff(x*y)
24*exp(8)
See Also
========
nth: more efficient query using exponents of the monomial's generators
"""
return f.nth(*Monomial(monom, f.gens).exponents)
def nth(f, *N):
"""
Returns the ``n``-th coefficient of ``f`` where ``N`` are the
exponents of the generators in the term of interest.
Examples
========
>>> from sympy import Poly, sqrt
>>> from sympy.abc import x, y
>>> Poly(x**3 + 2*x**2 + 3*x, x).nth(2)
2
>>> Poly(x**3 + 2*x*y**2 + y**2, x, y).nth(1, 2)
2
>>> Poly(4*sqrt(x)*y)
Poly(4*y*(sqrt(x)), y, sqrt(x), domain='ZZ')
>>> _.nth(1, 1)
4
See Also
========
coeff_monomial
"""
if hasattr(f.rep, 'nth'):
if len(N) != len(f.gens):
raise ValueError('exponent of each generator must be specified')
result = f.rep.nth(*list(map(int, N)))
else: # pragma: no cover
raise OperationNotSupported(f, 'nth')
return f.rep.dom.to_sympy(result)
def coeff(f, x, n=1, right=False):
# the semantics of coeff_monomial and Expr.coeff are different;
# if someone is working with a Poly, they should be aware of the
# differences and choose the method best suited for the query.
# Alternatively, a pure-polys method could be written here but
# at this time the ``right`` keyword would be ignored because Poly
# doesn't work with non-commutatives.
raise NotImplementedError(
'Either convert to Expr with `as_expr` method '
'to use Expr\'s coeff method or else use the '
'`coeff_monomial` method of Polys.')
def LM(f, order=None):
"""
Returns the leading monomial of ``f``.
The leading monomial signifies the monomial having
the highest power of the principal generator in the
expression ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).LM()
x**2*y**0
"""
return Monomial(f.monoms(order)[0], f.gens)
def EM(f, order=None):
"""
Returns the last non-zero monomial of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).EM()
x**0*y**1
"""
return Monomial(f.monoms(order)[-1], f.gens)
def LT(f, order=None):
"""
Returns the leading term of ``f``.
The leading term signifies the term having
the highest power of the principal generator in the
expression ``f``, along with its coefficient.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).LT()
(x**2*y**0, 4)
"""
monom, coeff = f.terms(order)[0]
return Monomial(monom, f.gens), coeff
def ET(f, order=None):
"""
Returns the last non-zero term of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).ET()
(x**0*y**1, 3)
"""
monom, coeff = f.terms(order)[-1]
return Monomial(monom, f.gens), coeff
def max_norm(f):
"""
Returns maximum norm of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(-x**2 + 2*x - 3, x).max_norm()
3
"""
if hasattr(f.rep, 'max_norm'):
result = f.rep.max_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'max_norm')
return f.rep.dom.to_sympy(result)
def l1_norm(f):
"""
Returns l1 norm of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(-x**2 + 2*x - 3, x).l1_norm()
6
"""
if hasattr(f.rep, 'l1_norm'):
result = f.rep.l1_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'l1_norm')
return f.rep.dom.to_sympy(result)
def clear_denoms(self, convert=False):
"""
Clear denominators, but keep the ground domain.
Examples
========
>>> from sympy import Poly, S, QQ
>>> from sympy.abc import x
>>> f = Poly(x/2 + S(1)/3, x, domain=QQ)
>>> f.clear_denoms()
(6, Poly(3*x + 2, x, domain='QQ'))
>>> f.clear_denoms(convert=True)
(6, Poly(3*x + 2, x, domain='ZZ'))
"""
f = self
if not f.rep.dom.is_Field:
return S.One, f
dom = f.get_domain()
if dom.has_assoc_Ring:
dom = f.rep.dom.get_ring()
if hasattr(f.rep, 'clear_denoms'):
coeff, result = f.rep.clear_denoms()
else: # pragma: no cover
raise OperationNotSupported(f, 'clear_denoms')
coeff, f = dom.to_sympy(coeff), f.per(result)
if not convert or not dom.has_assoc_Ring:
return coeff, f
else:
return coeff, f.to_ring()
def rat_clear_denoms(self, g):
"""
Clear denominators in a rational function ``f/g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2/y + 1, x)
>>> g = Poly(x**3 + y, x)
>>> p, q = f.rat_clear_denoms(g)
>>> p
Poly(x**2 + y, x, domain='ZZ[y]')
>>> q
Poly(y*x**3 + y**2, x, domain='ZZ[y]')
"""
f = self
dom, per, f, g = f._unify(g)
f = per(f)
g = per(g)
if not (dom.is_Field and dom.has_assoc_Ring):
return f, g
a, f = f.clear_denoms(convert=True)
b, g = g.clear_denoms(convert=True)
f = f.mul_ground(b)
g = g.mul_ground(a)
return f, g
def integrate(self, *specs, **args):
"""
Computes indefinite integral of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x + 1, x).integrate()
Poly(1/3*x**3 + x**2 + x, x, domain='QQ')
>>> Poly(x*y**2 + x, x, y).integrate((0, 1), (1, 0))
Poly(1/2*x**2*y**2 + 1/2*x**2, x, y, domain='QQ')
"""
f = self
if args.get('auto', True) and f.rep.dom.is_Ring:
f = f.to_field()
if hasattr(f.rep, 'integrate'):
if not specs:
return f.per(f.rep.integrate(m=1))
rep = f.rep
for spec in specs:
if type(spec) is tuple:
gen, m = spec
else:
gen, m = spec, 1
rep = rep.integrate(int(m), f._gen_to_level(gen))
return f.per(rep)
else: # pragma: no cover
raise OperationNotSupported(f, 'integrate')
def diff(f, *specs, **kwargs):
"""
Computes partial derivative of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x + 1, x).diff()
Poly(2*x + 2, x, domain='ZZ')
>>> Poly(x*y**2 + x, x, y).diff((0, 0), (1, 1))
Poly(2*x*y, x, y, domain='ZZ')
"""
if not kwargs.get('evaluate', True):
return Derivative(f, *specs, **kwargs)
if hasattr(f.rep, 'diff'):
if not specs:
return f.per(f.rep.diff(m=1))
rep = f.rep
for spec in specs:
if type(spec) is tuple:
gen, m = spec
else:
gen, m = spec, 1
rep = rep.diff(int(m), f._gen_to_level(gen))
return f.per(rep)
else: # pragma: no cover
raise OperationNotSupported(f, 'diff')
_eval_derivative = diff
def eval(self, x, a=None, auto=True):
"""
Evaluate ``f`` at ``a`` in the given variable.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(x**2 + 2*x + 3, x).eval(2)
11
>>> Poly(2*x*y + 3*x + y + 2, x, y).eval(x, 2)
Poly(5*y + 8, y, domain='ZZ')
>>> f = Poly(2*x*y + 3*x + y + 2*z, x, y, z)
>>> f.eval({x: 2})
Poly(5*y + 2*z + 6, y, z, domain='ZZ')
>>> f.eval({x: 2, y: 5})
Poly(2*z + 31, z, domain='ZZ')
>>> f.eval({x: 2, y: 5, z: 7})
45
>>> f.eval((2, 5))
Poly(2*z + 31, z, domain='ZZ')
>>> f(2, 5)
Poly(2*z + 31, z, domain='ZZ')
"""
f = self
if a is None:
if isinstance(x, dict):
mapping = x
for gen, value in mapping.items():
f = f.eval(gen, value)
return f
elif isinstance(x, (tuple, list)):
values = x
if len(values) > len(f.gens):
raise ValueError("too many values provided")
for gen, value in zip(f.gens, values):
f = f.eval(gen, value)
return f
else:
j, a = 0, x
else:
j = f._gen_to_level(x)
if not hasattr(f.rep, 'eval'): # pragma: no cover
raise OperationNotSupported(f, 'eval')
try:
result = f.rep.eval(a, j)
except CoercionFailed:
if not auto:
raise DomainError("can't evaluate at %s in %s" % (a, f.rep.dom))
else:
a_domain, [a] = construct_domain([a])
new_domain = f.get_domain().unify_with_symbols(a_domain, f.gens)
f = f.set_domain(new_domain)
a = new_domain.convert(a, a_domain)
result = f.rep.eval(a, j)
return f.per(result, remove=j)
def __call__(f, *values):
"""
Evaluate ``f`` at the given values.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> f = Poly(2*x*y + 3*x + y + 2*z, x, y, z)
>>> f(2)
Poly(5*y + 2*z + 6, y, z, domain='ZZ')
>>> f(2, 5)
Poly(2*z + 31, z, domain='ZZ')
>>> f(2, 5, 7)
45
"""
return f.eval(values)
def half_gcdex(f, g, auto=True):
"""
Half extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
>>> g = x**3 + x**2 - 4*x - 4
>>> Poly(f).half_gcdex(Poly(g))
(Poly(-1/5*x + 3/5, x, domain='QQ'), Poly(x + 1, x, domain='QQ'))
"""
dom, per, F, G = f._unify(g)
if auto and dom.is_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'half_gcdex'):
s, h = F.half_gcdex(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'half_gcdex')
return per(s), per(h)
def gcdex(f, g, auto=True):
"""
Extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
>>> g = x**3 + x**2 - 4*x - 4
>>> Poly(f).gcdex(Poly(g))
(Poly(-1/5*x + 3/5, x, domain='QQ'),
Poly(1/5*x**2 - 6/5*x + 2, x, domain='QQ'),
Poly(x + 1, x, domain='QQ'))
"""
dom, per, F, G = f._unify(g)
if auto and dom.is_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'gcdex'):
s, t, h = F.gcdex(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'gcdex')
return per(s), per(t), per(h)
def invert(f, g, auto=True):
"""
Invert ``f`` modulo ``g`` when possible.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).invert(Poly(2*x - 1, x))
Poly(-4/3, x, domain='QQ')
>>> Poly(x**2 - 1, x).invert(Poly(x - 1, x))
Traceback (most recent call last):
...
NotInvertible: zero divisor
"""
dom, per, F, G = f._unify(g)
if auto and dom.is_Ring:
F, G = F.to_field(), G.to_field()
if hasattr(f.rep, 'invert'):
result = F.invert(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'invert')
return per(result)
def revert(f, n):
"""
Compute ``f**(-1)`` mod ``x**n``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(1, x).revert(2)
Poly(1, x, domain='ZZ')
>>> Poly(1 + x, x).revert(1)
Poly(1, x, domain='ZZ')
>>> Poly(x**2 - 1, x).revert(1)
Traceback (most recent call last):
...
NotReversible: only unity is reversible in a ring
>>> Poly(1/x, x).revert(1)
Traceback (most recent call last):
...
PolynomialError: 1/x contains an element of the generators set
"""
if hasattr(f.rep, 'revert'):
result = f.rep.revert(int(n))
else: # pragma: no cover
raise OperationNotSupported(f, 'revert')
return f.per(result)
def subresultants(f, g):
"""
Computes the subresultant PRS of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).subresultants(Poly(x**2 - 1, x))
[Poly(x**2 + 1, x, domain='ZZ'),
Poly(x**2 - 1, x, domain='ZZ'),
Poly(-2, x, domain='ZZ')]
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'subresultants'):
result = F.subresultants(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'subresultants')
return list(map(per, result))
def resultant(f, g, includePRS=False):
"""
Computes the resultant of ``f`` and ``g`` via PRS.
If includePRS=True, it includes the subresultant PRS in the result.
Because the PRS is used to calculate the resultant, this is more
efficient than calling :func:`subresultants` separately.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(x**2 + 1, x)
>>> f.resultant(Poly(x**2 - 1, x))
4
>>> f.resultant(Poly(x**2 - 1, x), includePRS=True)
(4, [Poly(x**2 + 1, x, domain='ZZ'), Poly(x**2 - 1, x, domain='ZZ'),
Poly(-2, x, domain='ZZ')])
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'resultant'):
if includePRS:
result, R = F.resultant(G, includePRS=includePRS)
else:
result = F.resultant(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'resultant')
if includePRS:
return (per(result, remove=0), list(map(per, R)))
return per(result, remove=0)
def discriminant(f):
"""
Computes the discriminant of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 2*x + 3, x).discriminant()
-8
"""
if hasattr(f.rep, 'discriminant'):
result = f.rep.discriminant()
else: # pragma: no cover
raise OperationNotSupported(f, 'discriminant')
return f.per(result, remove=0)
def dispersionset(f, g=None):
r"""Compute the *dispersion set* of two polynomials.
For two polynomials `f(x)` and `g(x)` with `\deg f > 0`
and `\deg g > 0` the dispersion set `\operatorname{J}(f, g)` is defined as:
.. math::
\operatorname{J}(f, g)
& := \{a \in \mathbb{N}_0 | \gcd(f(x), g(x+a)) \neq 1\} \\
& = \{a \in \mathbb{N}_0 | \deg \gcd(f(x), g(x+a)) \geq 1\}
For a single polynomial one defines `\operatorname{J}(f) := \operatorname{J}(f, f)`.
Examples
========
>>> from sympy import poly
>>> from sympy.polys.dispersion import dispersion, dispersionset
>>> from sympy.abc import x
Dispersion set and dispersion of a simple polynomial:
>>> fp = poly((x - 3)*(x + 3), x)
>>> sorted(dispersionset(fp))
[0, 6]
>>> dispersion(fp)
6
Note that the definition of the dispersion is not symmetric:
>>> fp = poly(x**4 - 3*x**2 + 1, x)
>>> gp = fp.shift(-3)
>>> sorted(dispersionset(fp, gp))
[2, 3, 4]
>>> dispersion(fp, gp)
4
>>> sorted(dispersionset(gp, fp))
[]
>>> dispersion(gp, fp)
-oo
Computing the dispersion also works over field extensions:
>>> from sympy import sqrt
>>> fp = poly(x**2 + sqrt(5)*x - 1, x, domain='QQ<sqrt(5)>')
>>> gp = poly(x**2 + (2 + sqrt(5))*x + sqrt(5), x, domain='QQ<sqrt(5)>')
>>> sorted(dispersionset(fp, gp))
[2]
>>> sorted(dispersionset(gp, fp))
[1, 4]
We can even perform the computations for polynomials
having symbolic coefficients:
>>> from sympy.abc import a
>>> fp = poly(4*x**4 + (4*a + 8)*x**3 + (a**2 + 6*a + 4)*x**2 + (a**2 + 2*a)*x, x)
>>> sorted(dispersionset(fp))
[0, 1]
See Also
========
dispersion
References
==========
1. [ManWright94]_
2. [Koepf98]_
3. [Abramov71]_
4. [Man93]_
"""
from sympy.polys.dispersion import dispersionset
return dispersionset(f, g)
def dispersion(f, g=None):
r"""Compute the *dispersion* of polynomials.
For two polynomials `f(x)` and `g(x)` with `\deg f > 0`
and `\deg g > 0` the dispersion `\operatorname{dis}(f, g)` is defined as:
.. math::
\operatorname{dis}(f, g)
& := \max\{ J(f,g) \cup \{0\} \} \\
& = \max\{ \{a \in \mathbb{N} | \gcd(f(x), g(x+a)) \neq 1\} \cup \{0\} \}
and for a single polynomial `\operatorname{dis}(f) := \operatorname{dis}(f, f)`.
Examples
========
>>> from sympy import poly
>>> from sympy.polys.dispersion import dispersion, dispersionset
>>> from sympy.abc import x
Dispersion set and dispersion of a simple polynomial:
>>> fp = poly((x - 3)*(x + 3), x)
>>> sorted(dispersionset(fp))
[0, 6]
>>> dispersion(fp)
6
Note that the definition of the dispersion is not symmetric:
>>> fp = poly(x**4 - 3*x**2 + 1, x)
>>> gp = fp.shift(-3)
>>> sorted(dispersionset(fp, gp))
[2, 3, 4]
>>> dispersion(fp, gp)
4
>>> sorted(dispersionset(gp, fp))
[]
>>> dispersion(gp, fp)
-oo
Computing the dispersion also works over field extensions:
>>> from sympy import sqrt
>>> fp = poly(x**2 + sqrt(5)*x - 1, x, domain='QQ<sqrt(5)>')
>>> gp = poly(x**2 + (2 + sqrt(5))*x + sqrt(5), x, domain='QQ<sqrt(5)>')
>>> sorted(dispersionset(fp, gp))
[2]
>>> sorted(dispersionset(gp, fp))
[1, 4]
We can even perform the computations for polynomials
having symbolic coefficients:
>>> from sympy.abc import a
>>> fp = poly(4*x**4 + (4*a + 8)*x**3 + (a**2 + 6*a + 4)*x**2 + (a**2 + 2*a)*x, x)
>>> sorted(dispersionset(fp))
[0, 1]
See Also
========
dispersionset
References
==========
1. [ManWright94]_
2. [Koepf98]_
3. [Abramov71]_
4. [Man93]_
"""
from sympy.polys.dispersion import dispersion
return dispersion(f, g)
def cofactors(f, g):
"""
Returns the GCD of ``f`` and ``g`` and their cofactors.
Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, and
``cff = quo(f, h)`` and ``cfg = quo(g, h)`` are the so-called cofactors
of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).cofactors(Poly(x**2 - 3*x + 2, x))
(Poly(x - 1, x, domain='ZZ'),
Poly(x + 1, x, domain='ZZ'),
Poly(x - 2, x, domain='ZZ'))
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'cofactors'):
h, cff, cfg = F.cofactors(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'cofactors')
return per(h), per(cff), per(cfg)
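# Illustrative check (a sketch, not a doctest from the source): the cofactor
# triple recombines to the inputs; for the docstring example above,
# (x - 1)*(x + 1) == x**2 - 1 and (x - 1)*(x - 2) == x**2 - 3*x + 2.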
def gcd(f, g):
"""
Returns the polynomial GCD of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).gcd(Poly(x**2 - 3*x + 2, x))
Poly(x - 1, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'gcd'):
result = F.gcd(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'gcd')
return per(result)
def lcm(f, g):
"""
Returns polynomial LCM of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 1, x).lcm(Poly(x**2 - 3*x + 2, x))
Poly(x**3 - 2*x**2 - x + 2, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'lcm'):
result = F.lcm(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'lcm')
return per(result)
def trunc(f, p):
"""
Reduce ``f`` modulo a constant ``p``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**3 + 3*x**2 + 5*x + 7, x).trunc(3)
Poly(-x**3 - x + 1, x, domain='ZZ')
"""
p = f.rep.dom.convert(p)
if hasattr(f.rep, 'trunc'):
result = f.rep.trunc(p)
else: # pragma: no cover
raise OperationNotSupported(f, 'trunc')
return f.per(result)
def monic(self, auto=True):
"""
Divides all coefficients by ``LC(f)``.
Examples
========
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x
>>> Poly(3*x**2 + 6*x + 9, x, domain=ZZ).monic()
Poly(x**2 + 2*x + 3, x, domain='QQ')
>>> Poly(3*x**2 + 4*x + 2, x, domain=ZZ).monic()
Poly(x**2 + 4/3*x + 2/3, x, domain='QQ')
"""
f = self
if auto and f.rep.dom.is_Ring:
f = f.to_field()
if hasattr(f.rep, 'monic'):
result = f.rep.monic()
else: # pragma: no cover
raise OperationNotSupported(f, 'monic')
return f.per(result)
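# Note on the ``auto`` flag (descriptive): with auto=True (the default) a
# polynomial over a ring such as ZZ is first converted to its field of
# fractions via to_field(), which is why both docstring examples above come
# back over QQ.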
def content(f):
"""
Returns the GCD of polynomial coefficients.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(6*x**2 + 8*x + 12, x).content()
2
"""
if hasattr(f.rep, 'content'):
result = f.rep.content()
else: # pragma: no cover
raise OperationNotSupported(f, 'content')
return f.rep.dom.to_sympy(result)
def primitive(f):
"""
Returns the content and a primitive form of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**2 + 8*x + 12, x).primitive()
(2, Poly(x**2 + 4*x + 6, x, domain='ZZ'))
"""
if hasattr(f.rep, 'primitive'):
cont, result = f.rep.primitive()
else: # pragma: no cover
raise OperationNotSupported(f, 'primitive')
return f.rep.dom.to_sympy(cont), f.per(result)
def compose(f, g):
"""
Computes the functional composition of ``f`` and ``g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + x, x).compose(Poly(x - 1, x))
Poly(x**2 - x, x, domain='ZZ')
"""
_, per, F, G = f._unify(g)
if hasattr(f.rep, 'compose'):
result = F.compose(G)
else: # pragma: no cover
raise OperationNotSupported(f, 'compose')
return per(result)
def decompose(f):
"""
Computes a functional decomposition of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**4 + 2*x**3 - x - 1, x, domain='ZZ').decompose()
[Poly(x**2 - x - 1, x, domain='ZZ'), Poly(x**2 + x, x, domain='ZZ')]
"""
if hasattr(f.rep, 'decompose'):
result = f.rep.decompose()
else: # pragma: no cover
raise OperationNotSupported(f, 'decompose')
return list(map(f.per, result))
def shift(f, a):
"""
Efficiently compute Taylor shift ``f(x + a)``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 2*x + 1, x).shift(2)
Poly(x**2 + 2*x + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'shift'):
result = f.rep.shift(a)
else: # pragma: no cover
raise OperationNotSupported(f, 'shift')
return f.per(result)
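# Illustrative sketch (assuming the Poly API shown above): a Taylor shift by
# ``a`` agrees with composing with ``x + a``, e.g.
#   >>> from sympy import Poly
#   >>> from sympy.abc import x
#   >>> p = Poly(x**2 - 2*x + 1, x)
#   >>> p.shift(2) == p.compose(Poly(x + 2, x))
#   True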
def transform(f, p, q):
"""
Efficiently evaluate the functional transformation ``q**n * f(p/q)``, where ``n = degree(f)``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 2*x + 1, x).transform(Poly(x + 1, x), Poly(x - 1, x))
Poly(4, x, domain='ZZ')
"""
P, Q = p.unify(q)
F, P = f.unify(P)
F, Q = F.unify(Q)
if hasattr(F.rep, 'transform'):
result = F.rep.transform(P.rep, Q.rep)
else: # pragma: no cover
raise OperationNotSupported(F, 'transform')
return F.per(result)
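# Illustrative check for the docstring example above: with n = deg(f) = 2,
# q**n * f(p/q) = (x - 1)**2 * ((x + 1)/(x - 1) - 1)**2
#               = (x - 1)**2 * (2/(x - 1))**2 = 4,
# which matches Poly(4, x, domain='ZZ').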
def sturm(self, auto=True):
"""
Computes the Sturm sequence of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 - 2*x**2 + x - 3, x).sturm()
[Poly(x**3 - 2*x**2 + x - 3, x, domain='QQ'),
Poly(3*x**2 - 4*x + 1, x, domain='QQ'),
Poly(2/9*x + 25/9, x, domain='QQ'),
Poly(-2079/4, x, domain='QQ')]
"""
f = self
if auto and f.rep.dom.is_Ring:
f = f.to_field()
if hasattr(f.rep, 'sturm'):
result = f.rep.sturm()
else: # pragma: no cover
raise OperationNotSupported(f, 'sturm')
return list(map(f.per, result))
def gff_list(f):
"""
Computes greatest factorial factorization of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**5 + 2*x**4 - x**3 - 2*x**2
>>> Poly(f).gff_list()
[(Poly(x, x, domain='ZZ'), 1), (Poly(x + 2, x, domain='ZZ'), 4)]
"""
if hasattr(f.rep, 'gff_list'):
result = f.rep.gff_list()
else: # pragma: no cover
raise OperationNotSupported(f, 'gff_list')
return [(f.per(g), k) for g, k in result]
def norm(f):
"""
Computes the product, ``Norm(f)``, of the conjugates of
a polynomial ``f`` defined over a number field ``K``.
Examples
========
>>> from sympy import Poly, sqrt
>>> from sympy.abc import x
>>> a, b = sqrt(2), sqrt(3)
A polynomial over a quadratic extension.
Two conjugates x - a and x + a.
>>> f = Poly(x - a, x, extension=a)
>>> f.norm()
Poly(x**2 - 2, x, domain='QQ')
A polynomial over a quartic extension.
Four conjugates x - a, x - a, x + a and x + a.
>>> f = Poly(x - a, x, extension=(a, b))
>>> f.norm()
Poly(x**4 - 4*x**2 + 4, x, domain='QQ')
"""
if hasattr(f.rep, 'norm'):
r = f.rep.norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'norm')
return f.per(r)
def sqf_norm(f):
"""
Computes square-free norm of ``f``.
Returns ``s``, ``f``, ``r``, such that ``g(x) = f(x - s*a)`` and
``r(x) = Norm(g(x))`` is a square-free polynomial over ``K``,
where ``a`` is the generator of the algebraic extension of the ground domain.
Examples
========
>>> from sympy import Poly, sqrt
>>> from sympy.abc import x
>>> s, f, r = Poly(x**2 + 1, x, extension=[sqrt(3)]).sqf_norm()
>>> s
1
>>> f
Poly(x**2 - 2*sqrt(3)*x + 4, x, domain='QQ<sqrt(3)>')
>>> r
Poly(x**4 - 4*x**2 + 16, x, domain='QQ')
"""
if hasattr(f.rep, 'sqf_norm'):
s, g, r = f.rep.sqf_norm()
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_norm')
return s, f.per(g), f.per(r)
def sqf_part(f):
"""
Computes square-free part of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 - 3*x - 2, x).sqf_part()
Poly(x**2 - x - 2, x, domain='ZZ')
"""
if hasattr(f.rep, 'sqf_part'):
result = f.rep.sqf_part()
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_part')
return f.per(result)
def sqf_list(f, all=False):
"""
Returns a list of square-free factors of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = 2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16
>>> Poly(f).sqf_list()
(2, [(Poly(x + 1, x, domain='ZZ'), 2),
(Poly(x + 2, x, domain='ZZ'), 3)])
>>> Poly(f).sqf_list(all=True)
(2, [(Poly(1, x, domain='ZZ'), 1),
(Poly(x + 1, x, domain='ZZ'), 2),
(Poly(x + 2, x, domain='ZZ'), 3)])
"""
if hasattr(f.rep, 'sqf_list'):
coeff, factors = f.rep.sqf_list(all)
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_list')
return f.rep.dom.to_sympy(coeff), [(f.per(g), k) for g, k in factors]
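# Illustrative check: the returned data reconstructs ``f`` as coeff * prod(g**k);
# for the docstring example, 2*(x + 1)**2*(x + 2)**3 expands back to
# 2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16.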
def sqf_list_include(f, all=False):
"""
Returns a list of square-free factors of ``f``.
Examples
========
>>> from sympy import Poly, expand
>>> from sympy.abc import x
>>> f = expand(2*(x + 1)**3*x**4)
>>> f
2*x**7 + 6*x**6 + 6*x**5 + 2*x**4
>>> Poly(f).sqf_list_include()
[(Poly(2, x, domain='ZZ'), 1),
(Poly(x + 1, x, domain='ZZ'), 3),
(Poly(x, x, domain='ZZ'), 4)]
>>> Poly(f).sqf_list_include(all=True)
[(Poly(2, x, domain='ZZ'), 1),
(Poly(1, x, domain='ZZ'), 2),
(Poly(x + 1, x, domain='ZZ'), 3),
(Poly(x, x, domain='ZZ'), 4)]
"""
if hasattr(f.rep, 'sqf_list_include'):
factors = f.rep.sqf_list_include(all)
else: # pragma: no cover
raise OperationNotSupported(f, 'sqf_list_include')
return [(f.per(g), k) for g, k in factors]
def factor_list(f):
"""
Returns a list of irreducible factors of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = 2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y
>>> Poly(f).factor_list()
(2, [(Poly(x + y, x, y, domain='ZZ'), 1),
(Poly(x**2 + 1, x, y, domain='ZZ'), 2)])
"""
if hasattr(f.rep, 'factor_list'):
try:
coeff, factors = f.rep.factor_list()
except DomainError:
return S.One, [(f, 1)]
else: # pragma: no cover
raise OperationNotSupported(f, 'factor_list')
return f.rep.dom.to_sympy(coeff), [(f.per(g), k) for g, k in factors]
def factor_list_include(f):
"""
Returns a list of irreducible factors of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = 2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y
>>> Poly(f).factor_list_include()
[(Poly(2*x + 2*y, x, y, domain='ZZ'), 1),
(Poly(x**2 + 1, x, y, domain='ZZ'), 2)]
"""
if hasattr(f.rep, 'factor_list_include'):
try:
factors = f.rep.factor_list_include()
except DomainError:
return [(f, 1)]
else: # pragma: no cover
raise OperationNotSupported(f, 'factor_list_include')
return [(f.per(g), k) for g, k in factors]
def intervals(f, all=False, eps=None, inf=None, sup=None, fast=False, sqf=False):
"""
Compute isolating intervals for roots of ``f``.
For real roots the Vincent-Akritas-Strzebonski (VAS) continued fractions method is used.
References
==========
.. [#] Alkiviadis G. Akritas and Adam W. Strzebonski: A Comparative Study of Two Real Root
Isolation Methods. Nonlinear Analysis: Modelling and Control, Vol. 10, No. 4, 297-304, 2005.
.. [#] Alkiviadis G. Akritas, Adam W. Strzebonski and Panagiotis S. Vigklas: Improving the
Performance of the Continued Fractions Method Using new Bounds of Positive Roots. Nonlinear
Analysis: Modelling and Control, Vol. 13, No. 3, 265-279, 2008.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 3, x).intervals()
[((-2, -1), 1), ((1, 2), 1)]
>>> Poly(x**2 - 3, x).intervals(eps=1e-2)
[((-26/15, -19/11), 1), ((19/11, 26/15), 1)]
"""
if eps is not None:
eps = QQ.convert(eps)
if eps <= 0:
raise ValueError("'eps' must be a positive rational")
if inf is not None:
inf = QQ.convert(inf)
if sup is not None:
sup = QQ.convert(sup)
if hasattr(f.rep, 'intervals'):
result = f.rep.intervals(
all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)
else: # pragma: no cover
raise OperationNotSupported(f, 'intervals')
if sqf:
def _real(interval):
s, t = interval
return (QQ.to_sympy(s), QQ.to_sympy(t))
if not all:
return list(map(_real, result))
def _complex(rectangle):
(u, v), (s, t) = rectangle
return (QQ.to_sympy(u) + I*QQ.to_sympy(v),
QQ.to_sympy(s) + I*QQ.to_sympy(t))
real_part, complex_part = result
return list(map(_real, real_part)), list(map(_complex, complex_part))
else:
def _real(interval):
(s, t), k = interval
return ((QQ.to_sympy(s), QQ.to_sympy(t)), k)
if not all:
return list(map(_real, result))
def _complex(rectangle):
((u, v), (s, t)), k = rectangle
return ((QQ.to_sympy(u) + I*QQ.to_sympy(v),
QQ.to_sympy(s) + I*QQ.to_sympy(t)), k)
real_part, complex_part = result
return list(map(_real, real_part)), list(map(_complex, complex_part))
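# Illustrative sketch (hedged, not a doctest from the source): with sqf=True
# the multiplicities are dropped and bare (s, t) pairs are returned, so
# Poly(x**2 - 3, x).intervals(sqf=True) would be expected to give
# [(-2, -1), (1, 2)].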
def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False):
"""
Refine an isolating interval of a root to the given precision.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 3, x).refine_root(1, 2, eps=1e-2)
(19/11, 26/15)
"""
if check_sqf and not f.is_sqf:
raise PolynomialError("only square-free polynomials supported")
s, t = QQ.convert(s), QQ.convert(t)
if eps is not None:
eps = QQ.convert(eps)
if eps <= 0:
raise ValueError("'eps' must be a positive rational")
if steps is not None:
steps = int(steps)
elif eps is None:
steps = 1
if hasattr(f.rep, 'refine_root'):
S, T = f.rep.refine_root(s, t, eps=eps, steps=steps, fast=fast)
else: # pragma: no cover
raise OperationNotSupported(f, 'refine_root')
return QQ.to_sympy(S), QQ.to_sympy(T)
def count_roots(f, inf=None, sup=None):
"""
Return the number of roots of ``f`` in the interval ``[inf, sup]``.
Examples
========
>>> from sympy import Poly, I
>>> from sympy.abc import x
>>> Poly(x**4 - 4, x).count_roots(-3, 3)
2
>>> Poly(x**4 - 4, x).count_roots(0, 1 + 3*I)
1
"""
inf_real, sup_real = True, True
if inf is not None:
inf = sympify(inf)
if inf is S.NegativeInfinity:
inf = None
else:
re, im = inf.as_real_imag()
if not im:
inf = QQ.convert(inf)
else:
inf, inf_real = list(map(QQ.convert, (re, im))), False
if sup is not None:
sup = sympify(sup)
if sup is S.Infinity:
sup = None
else:
re, im = sup.as_real_imag()
if not im:
sup = QQ.convert(sup)
else:
sup, sup_real = list(map(QQ.convert, (re, im))), False
if inf_real and sup_real:
if hasattr(f.rep, 'count_real_roots'):
count = f.rep.count_real_roots(inf=inf, sup=sup)
else: # pragma: no cover
raise OperationNotSupported(f, 'count_real_roots')
else:
if inf_real and inf is not None:
inf = (inf, QQ.zero)
if sup_real and sup is not None:
sup = (sup, QQ.zero)
if hasattr(f.rep, 'count_complex_roots'):
count = f.rep.count_complex_roots(inf=inf, sup=sup)
else: # pragma: no cover
raise OperationNotSupported(f, 'count_complex_roots')
return Integer(count)
def root(f, index, radicals=True):
"""
Get an indexed root of a polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(2*x**3 - 7*x**2 + 4*x + 4)
>>> f.root(0)
-1/2
>>> f.root(1)
2
>>> f.root(2)
2
>>> f.root(3)
Traceback (most recent call last):
...
IndexError: root index out of [-3, 2] range, got 3
>>> Poly(x**5 + x + 1).root(0)
CRootOf(x**3 - x**2 + 1, 0)
"""
return sympy.polys.rootoftools.rootof(f, index, radicals=radicals)
def real_roots(f, multiple=True, radicals=True):
"""
Return a list of real roots with multiplicities.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**3 - 7*x**2 + 4*x + 4).real_roots()
[-1/2, 2, 2]
>>> Poly(x**3 + x + 1).real_roots()
[CRootOf(x**3 + x + 1, 0)]
"""
reals = sympy.polys.rootoftools.CRootOf.real_roots(f, radicals=radicals)
if multiple:
return reals
else:
return group(reals, multiple=False)
def all_roots(f, multiple=True, radicals=True):
"""
Return a list of real and complex roots with multiplicities.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**3 - 7*x**2 + 4*x + 4).all_roots()
[-1/2, 2, 2]
>>> Poly(x**3 + x + 1).all_roots()
[CRootOf(x**3 + x + 1, 0),
CRootOf(x**3 + x + 1, 1),
CRootOf(x**3 + x + 1, 2)]
"""
roots = sympy.polys.rootoftools.CRootOf.all_roots(f, radicals=radicals)
if multiple:
return roots
else:
return group(roots, multiple=False)
def nroots(f, n=15, maxsteps=50, cleanup=True):
"""
Compute numerical approximations of roots of ``f``.
Parameters
==========
n ... the number of digits to calculate
maxsteps ... the maximum number of iterations to do
If the accuracy ``n`` cannot be reached within ``maxsteps`` iterations, an
exception is raised; rerun with a higher ``maxsteps``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 3).nroots(n=15)
[-1.73205080756888, 1.73205080756888]
>>> Poly(x**2 - 3).nroots(n=30)
[-1.73205080756887729352744634151, 1.73205080756887729352744634151]
"""
from sympy.functions.elementary.complexes import sign
if f.is_multivariate:
raise MultivariatePolynomialError(
"can't compute numerical roots of %s" % f)
if f.degree() <= 0:
return []
# For integer and rational coefficients, convert them to integers only
# (for accuracy). Otherwise just try to convert the coefficients to
# mpmath.mpc and raise an exception if the conversion fails.
if f.rep.dom is ZZ:
coeffs = [int(coeff) for coeff in f.all_coeffs()]
elif f.rep.dom is QQ:
denoms = [coeff.q for coeff in f.all_coeffs()]
from sympy.core.numbers import ilcm
fac = ilcm(*denoms)
coeffs = [int(coeff*fac) for coeff in f.all_coeffs()]
else:
coeffs = [coeff.evalf(n=n).as_real_imag()
for coeff in f.all_coeffs()]
try:
coeffs = [mpmath.mpc(*coeff) for coeff in coeffs]
except TypeError:
raise DomainError("Numerical domain expected, got %s" % \
f.rep.dom)
dps = mpmath.mp.dps
mpmath.mp.dps = n
try:
# We need to add extra precision to guard against losing accuracy.
# 10 times the degree of the polynomial seems to work well.
roots = mpmath.polyroots(coeffs, maxsteps=maxsteps,
cleanup=cleanup, error=False, extraprec=f.degree()*10)
# Mpmath puts real roots first, then complex ones (as does all_roots)
# so we make sure this convention holds here, too.
roots = list(map(sympify,
sorted(roots, key=lambda r: (1 if r.imag else 0, r.real, abs(r.imag), sign(r.imag)))))
except NoConvergence:
raise NoConvergence(
'convergence to root failed; try n < %s or maxsteps > %s' % (
n, maxsteps))
finally:
mpmath.mp.dps = dps
return roots
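# Note: ``n`` sets the number of significant digits of the approximations by
# temporarily raising mpmath's working precision (mp.dps) to ``n``, as the
# n=15 and n=30 docstring calls above demonstrate.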
def ground_roots(f):
"""
Compute roots of ``f`` by factorization in the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**6 - 4*x**4 + 4*x**3 - x**2).ground_roots()
{0: 2, 1: 2}
"""
if f.is_multivariate:
raise MultivariatePolynomialError(
"can't compute ground roots of %s" % f)
roots = {}
for factor, k in f.factor_list()[1]:
if factor.is_linear:
a, b = factor.all_coeffs()
roots[-b/a] = k
return roots
def nth_power_roots_poly(f, n):
"""
Construct a polynomial whose roots are the n-th powers of the roots of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = Poly(x**4 - x**2 + 1)
>>> f.nth_power_roots_poly(2)
Poly(x**4 - 2*x**3 + 3*x**2 - 2*x + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(3)
Poly(x**4 + 2*x**2 + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(4)
Poly(x**4 + 2*x**3 + 3*x**2 + 2*x + 1, x, domain='ZZ')
>>> f.nth_power_roots_poly(12)
Poly(x**4 - 4*x**3 + 6*x**2 - 4*x + 1, x, domain='ZZ')
"""
if f.is_multivariate:
raise MultivariatePolynomialError(
"must be a univariate polynomial")
N = sympify(n)
if N.is_Integer and N >= 1:
n = int(N)
else:
raise ValueError("'n' must an integer and n >= 1, got %s" % n)
x = f.gen
t = Dummy('t')
r = f.resultant(f.__class__.from_expr(x**n - t, x, t))
return r.replace(t, x)
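# Sketch of the construction used above: resultant_x(f(x), x**n - t) vanishes
# exactly when t equals r**n for some root r of f, so eliminating x and then
# renaming t back to x yields a polynomial whose roots are the n-th powers.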
def cancel(f, g, include=False):
"""
Cancel common factors in a rational function ``f/g``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**2 - 2, x).cancel(Poly(x**2 - 2*x + 1, x))
(1, Poly(2*x + 2, x, domain='ZZ'), Poly(x - 1, x, domain='ZZ'))
>>> Poly(2*x**2 - 2, x).cancel(Poly(x**2 - 2*x + 1, x), include=True)
(Poly(2*x + 2, x, domain='ZZ'), Poly(x - 1, x, domain='ZZ'))
"""
dom, per, F, G = f._unify(g)
if hasattr(F, 'cancel'):
result = F.cancel(G, include=include)
else: # pragma: no cover
raise OperationNotSupported(f, 'cancel')
if not include:
if dom.has_assoc_Ring:
dom = dom.get_ring()
cp, cq, p, q = result
cp = dom.to_sympy(cp)
cq = dom.to_sympy(cq)
return cp/cq, per(p), per(q)
else:
return tuple(map(per, result))
@property
def is_zero(f):
"""
Returns ``True`` if ``f`` is a zero polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(0, x).is_zero
True
>>> Poly(1, x).is_zero
False
"""
return f.rep.is_zero
@property
def is_one(f):
"""
Returns ``True`` if ``f`` is a unit polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(0, x).is_one
False
>>> Poly(1, x).is_one
True
"""
return f.rep.is_one
@property
def is_sqf(f):
"""
Returns ``True`` if ``f`` is a square-free polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 - 2*x + 1, x).is_sqf
False
>>> Poly(x**2 - 1, x).is_sqf
True
"""
return f.rep.is_sqf
@property
def is_monic(f):
"""
Returns ``True`` if the leading coefficient of ``f`` is one.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x + 2, x).is_monic
True
>>> Poly(2*x + 2, x).is_monic
False
"""
return f.rep.is_monic
@property
def is_primitive(f):
"""
Returns ``True`` if GCD of the coefficients of ``f`` is one.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(2*x**2 + 6*x + 12, x).is_primitive
False
>>> Poly(x**2 + 3*x + 6, x).is_primitive
True
"""
return f.rep.is_primitive
@property
def is_ground(f):
"""
Returns ``True`` if ``f`` is an element of the ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x, x).is_ground
False
>>> Poly(2, x).is_ground
True
>>> Poly(y, x).is_ground
True
"""
return f.rep.is_ground
@property
def is_linear(f):
"""
Returns ``True`` if ``f`` is linear in all its variables.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x + y + 2, x, y).is_linear
True
>>> Poly(x*y + 2, x, y).is_linear
False
"""
return f.rep.is_linear
@property
def is_quadratic(f):
"""
Returns ``True`` if ``f`` is quadratic in all its variables.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x*y + 2, x, y).is_quadratic
True
>>> Poly(x*y**2 + 2, x, y).is_quadratic
False
"""
return f.rep.is_quadratic
@property
def is_monomial(f):
"""
Returns ``True`` if ``f`` is zero or has only one term.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(3*x**2, x).is_monomial
True
>>> Poly(3*x**2 + 1, x).is_monomial
False
"""
return f.rep.is_monomial
@property
def is_homogeneous(f):
"""
Returns ``True`` if ``f`` is a homogeneous polynomial.
A homogeneous polynomial is a polynomial all of whose monomials with
non-zero coefficients have the same total degree. If you want not
only to check whether a polynomial is homogeneous but also to compute
its homogeneous order, then use :func:`Poly.homogeneous_order`.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x*y, x, y).is_homogeneous
True
>>> Poly(x**3 + x*y, x, y).is_homogeneous
False
"""
return f.rep.is_homogeneous
@property
def is_irreducible(f):
"""
Returns ``True`` if ``f`` has no factors over its domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + x + 1, x, modulus=2).is_irreducible
True
>>> Poly(x**2 + 1, x, modulus=2).is_irreducible
False
"""
return f.rep.is_irreducible
@property
def is_univariate(f):
"""
Returns ``True`` if ``f`` is a univariate polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x + 1, x).is_univariate
True
>>> Poly(x*y**2 + x*y + 1, x, y).is_univariate
False
>>> Poly(x*y**2 + x*y + 1, x).is_univariate
True
>>> Poly(x**2 + x + 1, x, y).is_univariate
False
"""
return len(f.gens) == 1
@property
def is_multivariate(f):
"""
Returns ``True`` if ``f`` is a multivariate polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x + 1, x).is_multivariate
False
>>> Poly(x*y**2 + x*y + 1, x, y).is_multivariate
True
>>> Poly(x*y**2 + x*y + 1, x).is_multivariate
False
>>> Poly(x**2 + x + 1, x, y).is_multivariate
True
"""
return len(f.gens) != 1
@property
def is_cyclotomic(f):
"""
Returns ``True`` if ``f`` is a cyclotomic polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> f = x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1
>>> Poly(f).is_cyclotomic
False
>>> g = x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1
>>> Poly(g).is_cyclotomic
True
"""
return f.rep.is_cyclotomic
def __abs__(f):
return f.abs()
def __neg__(f):
return f.neg()
@_sympifyit('g', NotImplemented)
def __add__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return f.as_expr() + g
return f.add(g)
@_sympifyit('g', NotImplemented)
def __radd__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return g + f.as_expr()
return g.add(f)
@_sympifyit('g', NotImplemented)
def __sub__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return f.as_expr() - g
return f.sub(g)
@_sympifyit('g', NotImplemented)
def __rsub__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return g - f.as_expr()
return g.sub(f)
@_sympifyit('g', NotImplemented)
def __mul__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return f.as_expr()*g
return f.mul(g)
@_sympifyit('g', NotImplemented)
def __rmul__(f, g):
if not g.is_Poly:
try:
g = f.__class__(g, *f.gens)
except PolynomialError:
return g*f.as_expr()
return g.mul(f)
@_sympifyit('n', NotImplemented)
def __pow__(f, n):
if n.is_Integer and n >= 0:
return f.pow(n)
else:
return f.as_expr()**n
@_sympifyit('g', NotImplemented)
def __divmod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return f.div(g)
@_sympifyit('g', NotImplemented)
def __rdivmod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return g.div(f)
@_sympifyit('g', NotImplemented)
def __mod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return f.rem(g)
@_sympifyit('g', NotImplemented)
def __rmod__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return g.rem(f)
@_sympifyit('g', NotImplemented)
def __floordiv__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return f.quo(g)
@_sympifyit('g', NotImplemented)
def __rfloordiv__(f, g):
if not g.is_Poly:
g = f.__class__(g, *f.gens)
return g.quo(f)
@_sympifyit('g', NotImplemented)
def __div__(f, g):
return f.as_expr()/g.as_expr()
@_sympifyit('g', NotImplemented)
def __rdiv__(f, g):
return g.as_expr()/f.as_expr()
__truediv__ = __div__
__rtruediv__ = __rdiv__
@_sympifyit('other', NotImplemented)
def __eq__(self, other):
f, g = self, other
if not g.is_Poly:
try:
g = f.__class__(g, f.gens, domain=f.get_domain())
except (PolynomialError, DomainError, CoercionFailed):
return False
if f.gens != g.gens:
return False
if f.rep.dom != g.rep.dom:
try:
dom = f.rep.dom.unify(g.rep.dom, f.gens)
except UnificationFailed:
return False
f = f.set_domain(dom)
g = g.set_domain(dom)
return f.rep == g.rep
@_sympifyit('g', NotImplemented)
def __ne__(f, g):
return not f == g
def __nonzero__(f):
return not f.is_zero
__bool__ = __nonzero__
def eq(f, g, strict=False):
if not strict:
return f == g
else:
return f._strict_eq(sympify(g))
def ne(f, g, strict=False):
return not f.eq(g, strict=strict)
def _strict_eq(f, g):
return isinstance(g, f.__class__) and f.gens == g.gens and f.rep.eq(g.rep, strict=True)
@public
class PurePoly(Poly):
"""Class for representing pure polynomials. """
def _hashable_content(self):
"""Allow SymPy to hash Poly instances. """
return (self.rep,)
def __hash__(self):
return super(PurePoly, self).__hash__()
@property
def free_symbols(self):
"""
Free symbols of a polynomial.
Examples
========
>>> from sympy import PurePoly
>>> from sympy.abc import x, y
>>> PurePoly(x**2 + 1).free_symbols
set()
>>> PurePoly(x**2 + y).free_symbols
set()
>>> PurePoly(x**2 + y, x).free_symbols
{y}
"""
return self.free_symbols_in_domain
@_sympifyit('other', NotImplemented)
def __eq__(self, other):
f, g = self, other
if not g.is_Poly:
try:
g = f.__class__(g, f.gens, domain=f.get_domain())
except (PolynomialError, DomainError, CoercionFailed):
return False
if len(f.gens) != len(g.gens):
return False
if f.rep.dom != g.rep.dom:
try:
dom = f.rep.dom.unify(g.rep.dom, f.gens)
except UnificationFailed:
return False
f = f.set_domain(dom)
g = g.set_domain(dom)
return f.rep == g.rep
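# Illustrative sketch (hedged): unlike Poly, PurePoly equality only compares
# the number of generators and the internal representation, so generator
# names are irrelevant, e.g.
#   >>> from sympy import PurePoly
#   >>> from sympy.abc import x, y
#   >>> PurePoly(x**2 + 1) == PurePoly(y**2 + 1)
#   True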
def _strict_eq(f, g):
return isinstance(g, f.__class__) and f.rep.eq(g.rep, strict=True)
def _unify(f, g):
g = sympify(g)
if not g.is_Poly:
try:
return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g))
except CoercionFailed:
raise UnificationFailed("can't unify %s with %s" % (f, g))
if len(f.gens) != len(g.gens):
raise UnificationFailed("can't unify %s with %s" % (f, g))
if not (isinstance(f.rep, DMP) and isinstance(g.rep, DMP)):
raise UnificationFailed("can't unify %s with %s" % (f, g))
cls = f.__class__
gens = f.gens
dom = f.rep.dom.unify(g.rep.dom, gens)
F = f.rep.convert(dom)
G = g.rep.convert(dom)
def per(rep, dom=dom, gens=gens, remove=None):
if remove is not None:
gens = gens[:remove] + gens[remove + 1:]
if not gens:
return dom.to_sympy(rep)
return cls.new(rep, *gens)
return dom, per, F, G
@public
def poly_from_expr(expr, *gens, **args):
"""Construct a polynomial from an expression. """
opt = options.build_options(gens, args)
return _poly_from_expr(expr, opt)
def _poly_from_expr(expr, opt):
"""Construct a polynomial from an expression. """
orig, expr = expr, sympify(expr)
if not isinstance(expr, Basic):
raise PolificationFailed(opt, orig, expr)
elif expr.is_Poly:
poly = expr.__class__._from_poly(expr, opt)
opt.gens = poly.gens
opt.domain = poly.domain
if opt.polys is None:
opt.polys = True
return poly, opt
elif opt.expand:
expr = expr.expand()
rep, opt = _dict_from_expr(expr, opt)
if not opt.gens:
raise PolificationFailed(opt, orig, expr)
monoms, coeffs = list(zip(*list(rep.items())))
domain = opt.domain
if domain is None:
opt.domain, coeffs = construct_domain(coeffs, opt=opt)
else:
coeffs = list(map(domain.from_sympy, coeffs))
rep = dict(list(zip(monoms, coeffs)))
poly = Poly._from_dict(rep, opt)
if opt.polys is None:
opt.polys = False
return poly, opt
@public
def parallel_poly_from_expr(exprs, *gens, **args):
"""Construct polynomials from expressions. """
opt = options.build_options(gens, args)
return _parallel_poly_from_expr(exprs, opt)
def _parallel_poly_from_expr(exprs, opt):
"""Construct polynomials from expressions. """
from sympy.functions.elementary.piecewise import Piecewise
if len(exprs) == 2:
f, g = exprs
if isinstance(f, Poly) and isinstance(g, Poly):
f = f.__class__._from_poly(f, opt)
g = g.__class__._from_poly(g, opt)
f, g = f.unify(g)
opt.gens = f.gens
opt.domain = f.domain
if opt.polys is None:
opt.polys = True
return [f, g], opt
origs, exprs = list(exprs), []
_exprs, _polys = [], []
failed = False
for i, expr in enumerate(origs):
expr = sympify(expr)
if isinstance(expr, Basic):
if expr.is_Poly:
_polys.append(i)
else:
_exprs.append(i)
if opt.expand:
expr = expr.expand()
else:
failed = True
exprs.append(expr)
if failed:
raise PolificationFailed(opt, origs, exprs, True)
if _polys:
# XXX: this is a temporary solution
for i in _polys:
exprs[i] = exprs[i].as_expr()
reps, opt = _parallel_dict_from_expr(exprs, opt)
if not opt.gens:
raise PolificationFailed(opt, origs, exprs, True)
for k in opt.gens:
if isinstance(k, Piecewise):
raise PolynomialError("Piecewise generators do not make sense")
coeffs_list, lengths = [], []
all_monoms = []
all_coeffs = []
for rep in reps:
monoms, coeffs = list(zip(*list(rep.items())))
coeffs_list.extend(coeffs)
all_monoms.append(monoms)
lengths.append(len(coeffs))
domain = opt.domain
if domain is None:
opt.domain, coeffs_list = construct_domain(coeffs_list, opt=opt)
else:
coeffs_list = list(map(domain.from_sympy, coeffs_list))
for k in lengths:
all_coeffs.append(coeffs_list[:k])
coeffs_list = coeffs_list[k:]
polys = []
for monoms, coeffs in zip(all_monoms, all_coeffs):
rep = dict(list(zip(monoms, coeffs)))
poly = Poly._from_dict(rep, opt)
polys.append(poly)
if opt.polys is None:
opt.polys = bool(_polys)
return polys, opt
def _update_args(args, key, value):
"""Add a new ``(key, value)`` pair to arguments ``dict``. """
args = dict(args)
if key not in args:
args[key] = value
return args
@public
def degree(f, gen=0):
"""
Return the degree of ``f`` in the given variable.
The degree of 0 is negative infinity.
Examples
========
>>> from sympy import degree
>>> from sympy.abc import x, y
>>> degree(x**2 + y*x + 1, gen=x)
2
>>> degree(x**2 + y*x + 1, gen=y)
1
>>> degree(0, x)
-oo
See also
========
sympy.polys.polytools.Poly.total_degree
degree_list
"""
f = sympify(f, strict=True)
gen_is_Num = sympify(gen, strict=True).is_Number
if f.is_Poly:
p = f
isNum = p.as_expr().is_Number
else:
isNum = f.is_Number
if not isNum:
if gen_is_Num:
p, _ = poly_from_expr(f)
else:
p, _ = poly_from_expr(f, gen)
if isNum:
return S.Zero if f else S.NegativeInfinity
if not gen_is_Num:
if f.is_Poly and gen not in p.gens:
# try recast without explicit gens
p, _ = poly_from_expr(f.as_expr())
if gen not in p.gens:
return S.Zero
elif not f.is_Poly and len(f.free_symbols) > 1:
raise TypeError(filldedent('''
A symbolic generator of interest is required for a multivariate
expression like func = %s, e.g. degree(func, gen = %s) instead of
degree(func, gen = %s).
''' % (f, next(ordered(f.free_symbols)), gen)))
return Integer(p.degree(gen))
@public
def total_degree(f, *gens):
"""
Return the total degree of ``f`` in the given variables.
Examples
========
>>> from sympy import total_degree, Poly
>>> from sympy.abc import x, y, z
>>> total_degree(1)
0
>>> total_degree(x + x*y)
2
>>> total_degree(x + x*y, x)
1
If the expression is a Poly and no variables are given
then the generators of the Poly will be used:
>>> p = Poly(x + x*y, y)
>>> total_degree(p)
1
To deal with the underlying expression of the Poly, convert
it to an Expr:
>>> total_degree(p.as_expr())
2
This is done automatically if any variables are given:
>>> total_degree(p, x)
1
See also
========
degree
"""
p = sympify(f)
if p.is_Poly:
p = p.as_expr()
if p.is_Number:
rv = 0
else:
if f.is_Poly:
gens = gens or f.gens
rv = Poly(p, gens).total_degree()
return Integer(rv)
@public
def degree_list(f, *gens, **args):
"""
Return a list of degrees of ``f`` in all variables.
Examples
========
>>> from sympy import degree_list
>>> from sympy.abc import x, y
>>> degree_list(x**2 + y*x + 1)
(2, 1)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('degree_list', 1, exc)
degrees = F.degree_list()
return tuple(map(Integer, degrees))
@public
def LC(f, *gens, **args):
"""
Return the leading coefficient of ``f``.
Examples
========
>>> from sympy import LC
>>> from sympy.abc import x, y
>>> LC(4*x**2 + 2*x*y**2 + x*y + 3*y)
4
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('LC', 1, exc)
return F.LC(order=opt.order)
@public
def LM(f, *gens, **args):
"""
Return the leading monomial of ``f``.
Examples
========
>>> from sympy import LM
>>> from sympy.abc import x, y
>>> LM(4*x**2 + 2*x*y**2 + x*y + 3*y)
x**2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('LM', 1, exc)
monom = F.LM(order=opt.order)
return monom.as_expr()
@public
def LT(f, *gens, **args):
"""
Return the leading term of ``f``.
Examples
========
>>> from sympy import LT
>>> from sympy.abc import x, y
>>> LT(4*x**2 + 2*x*y**2 + x*y + 3*y)
4*x**2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('LT', 1, exc)
monom, coeff = F.LT(order=opt.order)
return coeff*monom.as_expr()
@public
def pdiv(f, g, *gens, **args):
"""
Compute polynomial pseudo-division of ``f`` and ``g``.
Examples
========
>>> from sympy import pdiv
>>> from sympy.abc import x
>>> pdiv(x**2 + 1, 2*x - 4)
(2*x + 4, 20)
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('pdiv', 2, exc)
q, r = F.pdiv(G)
if not opt.polys:
return q.as_expr(), r.as_expr()
else:
return q, r
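# Illustrative check (hedged): pseudo-division satisfies
#   LC(g)**(deg(f) - deg(g) + 1) * f == g*q + r,
# and for the docstring example 4*(x**2 + 1) == (2*x - 4)*(2*x + 4) + 20.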
@public
def prem(f, g, *gens, **args):
"""
Compute polynomial pseudo-remainder of ``f`` and ``g``.
Examples
========
>>> from sympy import prem
>>> from sympy.abc import x
>>> prem(x**2 + 1, 2*x - 4)
20
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('prem', 2, exc)
r = F.prem(G)
if not opt.polys:
return r.as_expr()
else:
return r
@public
def pquo(f, g, *gens, **args):
"""
Compute polynomial pseudo-quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import pquo
>>> from sympy.abc import x
>>> pquo(x**2 + 1, 2*x - 4)
2*x + 4
>>> pquo(x**2 - 1, 2*x - 1)
2*x + 1
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('pquo', 2, exc)
try:
q = F.pquo(G)
except ExactQuotientFailed:
raise ExactQuotientFailed(f, g)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def pexquo(f, g, *gens, **args):
"""
Compute polynomial exact pseudo-quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import pexquo
>>> from sympy.abc import x
>>> pexquo(x**2 - 1, 2*x - 2)
2*x + 2
>>> pexquo(x**2 + 1, 2*x - 4)
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('pexquo', 2, exc)
q = F.pexquo(G)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def div(f, g, *gens, **args):
"""
Compute polynomial division of ``f`` and ``g``.
Examples
========
>>> from sympy import div, ZZ, QQ
>>> from sympy.abc import x
>>> div(x**2 + 1, 2*x - 4, domain=ZZ)
(0, x**2 + 1)
>>> div(x**2 + 1, 2*x - 4, domain=QQ)
(x/2 + 1, 5)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('div', 2, exc)
q, r = F.div(G, auto=opt.auto)
if not opt.polys:
return q.as_expr(), r.as_expr()
else:
return q, r
@public
def rem(f, g, *gens, **args):
"""
Compute polynomial remainder of ``f`` and ``g``.
Examples
========
>>> from sympy import rem, ZZ, QQ
>>> from sympy.abc import x
>>> rem(x**2 + 1, 2*x - 4, domain=ZZ)
x**2 + 1
>>> rem(x**2 + 1, 2*x - 4, domain=QQ)
5
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('rem', 2, exc)
r = F.rem(G, auto=opt.auto)
if not opt.polys:
return r.as_expr()
else:
return r
@public
def quo(f, g, *gens, **args):
"""
Compute polynomial quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import quo
>>> from sympy.abc import x
>>> quo(x**2 + 1, 2*x - 4)
x/2 + 1
>>> quo(x**2 - 1, x - 1)
x + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('quo', 2, exc)
q = F.quo(G, auto=opt.auto)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def exquo(f, g, *gens, **args):
"""
Compute polynomial exact quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import exquo
>>> from sympy.abc import x
>>> exquo(x**2 - 1, x - 1)
x + 1
>>> exquo(x**2 + 1, 2*x - 4)
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('exquo', 2, exc)
q = F.exquo(G, auto=opt.auto)
if not opt.polys:
return q.as_expr()
else:
return q
@public
def half_gcdex(f, g, *gens, **args):
"""
Half extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.
Examples
========
>>> from sympy import half_gcdex
>>> from sympy.abc import x
>>> half_gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4)
(3/5 - x/5, x + 1)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
s, h = domain.half_gcdex(a, b)
except NotImplementedError:
raise ComputationFailed('half_gcdex', 2, exc)
else:
return domain.to_sympy(s), domain.to_sympy(h)
s, h = F.half_gcdex(G, auto=opt.auto)
if not opt.polys:
return s.as_expr(), h.as_expr()
else:
return s, h
@public
def gcdex(f, g, *gens, **args):
"""
Extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.
Examples
========
>>> from sympy import gcdex
>>> from sympy.abc import x
>>> gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4)
(3/5 - x/5, x**2/5 - 6*x/5 + 2, x + 1)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
s, t, h = domain.gcdex(a, b)
except NotImplementedError:
raise ComputationFailed('gcdex', 2, exc)
else:
return domain.to_sympy(s), domain.to_sympy(t), domain.to_sympy(h)
s, t, h = F.gcdex(G, auto=opt.auto)
if not opt.polys:
return s.as_expr(), t.as_expr(), h.as_expr()
else:
return s, t, h
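# Illustrative note: by construction the returned triple satisfies
# s*f + t*g == h; expanding the docstring values
# (3/5 - x/5)*f + (x**2/5 - 6*x/5 + 2)*g reproduces x + 1.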
@public
def invert(f, g, *gens, **args):
"""
Invert ``f`` modulo ``g`` when possible.
Examples
========
>>> from sympy import invert, S
>>> from sympy.core.numbers import mod_inverse
>>> from sympy.abc import x
>>> invert(x**2 - 1, 2*x - 1)
-4/3
>>> invert(x**2 - 1, x - 1)
Traceback (most recent call last):
...
NotInvertible: zero divisor
For more efficient inversion of Rationals,
use the :obj:`~.mod_inverse` function:
>>> mod_inverse(3, 5)
2
>>> (S(2)/5).invert(S(7)/3)
5/2
See Also
========
sympy.core.numbers.mod_inverse
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
return domain.to_sympy(domain.invert(a, b))
except NotImplementedError:
raise ComputationFailed('invert', 2, exc)
h = F.invert(G, auto=opt.auto)
if not opt.polys:
return h.as_expr()
else:
return h
@public
def subresultants(f, g, *gens, **args):
"""
Compute subresultant PRS of ``f`` and ``g``.
Examples
========
>>> from sympy import subresultants
>>> from sympy.abc import x
>>> subresultants(x**2 + 1, x**2 - 1)
[x**2 + 1, x**2 - 1, -2]
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('subresultants', 2, exc)
result = F.subresultants(G)
if not opt.polys:
return [r.as_expr() for r in result]
else:
return result
@public
def resultant(f, g, *gens, **args):
"""
Compute resultant of ``f`` and ``g``.
Examples
========
>>> from sympy import resultant
>>> from sympy.abc import x
>>> resultant(x**2 + 1, x**2 - 1)
4
"""
includePRS = args.pop('includePRS', False)
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('resultant', 2, exc)
if includePRS:
result, R = F.resultant(G, includePRS=includePRS)
else:
result = F.resultant(G)
if not opt.polys:
if includePRS:
return result.as_expr(), [r.as_expr() for r in R]
return result.as_expr()
else:
if includePRS:
return result, R
return result
@public
def discriminant(f, *gens, **args):
"""
Compute discriminant of ``f``.
Examples
========
>>> from sympy import discriminant
>>> from sympy.abc import x
>>> discriminant(x**2 + 2*x + 3)
-8
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('discriminant', 1, exc)
result = F.discriminant()
if not opt.polys:
return result.as_expr()
else:
return result
@public
def cofactors(f, g, *gens, **args):
"""
Compute GCD and cofactors of ``f`` and ``g``.
Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, and
``cff = quo(f, h)`` and ``cfg = quo(g, h)`` are the so-called cofactors
of ``f`` and ``g``.
Examples
========
>>> from sympy import cofactors
>>> from sympy.abc import x
>>> cofactors(x**2 - 1, x**2 - 3*x + 2)
(x - 1, x + 1, x - 2)
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
h, cff, cfg = domain.cofactors(a, b)
except NotImplementedError:
raise ComputationFailed('cofactors', 2, exc)
else:
return domain.to_sympy(h), domain.to_sympy(cff), domain.to_sympy(cfg)
h, cff, cfg = F.cofactors(G)
if not opt.polys:
return h.as_expr(), cff.as_expr(), cfg.as_expr()
else:
return h, cff, cfg
@public
def gcd_list(seq, *gens, **args):
"""
Compute GCD of a list of polynomials.
Examples
========
>>> from sympy import gcd_list
>>> from sympy.abc import x
>>> gcd_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])
x - 1
"""
seq = sympify(seq)
def try_non_polynomial_gcd(seq):
if not gens and not args:
domain, numbers = construct_domain(seq)
if not numbers:
return domain.zero
elif domain.is_Numerical:
result, numbers = numbers[0], numbers[1:]
for number in numbers:
result = domain.gcd(result, number)
if domain.is_one(result):
break
return domain.to_sympy(result)
return None
result = try_non_polynomial_gcd(seq)
if result is not None:
return result
options.allowed_flags(args, ['polys'])
try:
polys, opt = parallel_poly_from_expr(seq, *gens, **args)
# gcd for domain Q[irrational] (purely algebraic irrational)
if len(seq) > 1 and all(elt.is_algebraic and elt.is_irrational for elt in seq):
a = seq[-1]
lst = [ (a/elt).ratsimp() for elt in seq[:-1] ]
if all(frc.is_rational for frc in lst):
lc = 1
for frc in lst:
lc = lcm(lc, frc.as_numer_denom()[0])
return a/lc
except PolificationFailed as exc:
result = try_non_polynomial_gcd(exc.exprs)
if result is not None:
return result
else:
raise ComputationFailed('gcd_list', len(seq), exc)
if not polys:
if not opt.polys:
return S.Zero
else:
return Poly(0, opt=opt)
result, polys = polys[0], polys[1:]
for poly in polys:
result = result.gcd(poly)
if result.is_one:
break
if not opt.polys:
return result.as_expr()
else:
return result
@public
def gcd(f, g=None, *gens, **args):
"""
Compute GCD of ``f`` and ``g``.
Examples
========
>>> from sympy import gcd
>>> from sympy.abc import x
>>> gcd(x**2 - 1, x**2 - 3*x + 2)
x - 1
"""
if hasattr(f, '__iter__'):
if g is not None:
gens = (g,) + gens
return gcd_list(f, *gens, **args)
elif g is None:
raise TypeError("gcd() takes 2 arguments or a sequence of arguments")
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
# gcd for domain Q[irrational] (purely algebraic irrational)
a, b = map(sympify, (f, g))
if a.is_algebraic and a.is_irrational and b.is_algebraic and b.is_irrational:
frc = (a/b).ratsimp()
if frc.is_rational:
return a/frc.as_numer_denom()[0]
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
return domain.to_sympy(domain.gcd(a, b))
except NotImplementedError:
raise ComputationFailed('gcd', 2, exc)
result = F.gcd(G)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def lcm_list(seq, *gens, **args):
"""
Compute LCM of a list of polynomials.
Examples
========
>>> from sympy import lcm_list
>>> from sympy.abc import x
>>> lcm_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])
x**5 - x**4 - 2*x**3 - x**2 + x + 2
"""
seq = sympify(seq)
def try_non_polynomial_lcm(seq):
if not gens and not args:
domain, numbers = construct_domain(seq)
if not numbers:
return domain.one
elif domain.is_Numerical:
result, numbers = numbers[0], numbers[1:]
for number in numbers:
result = domain.lcm(result, number)
return domain.to_sympy(result)
return None
result = try_non_polynomial_lcm(seq)
if result is not None:
return result
options.allowed_flags(args, ['polys'])
try:
polys, opt = parallel_poly_from_expr(seq, *gens, **args)
# lcm for domain Q[irrational] (purely algebraic irrational)
if len(seq) > 1 and all(elt.is_algebraic and elt.is_irrational for elt in seq):
a = seq[-1]
lst = [ (a/elt).ratsimp() for elt in seq[:-1] ]
if all(frc.is_rational for frc in lst):
lc = 1
for frc in lst:
lc = lcm(lc, frc.as_numer_denom()[1])
return a*lc
except PolificationFailed as exc:
result = try_non_polynomial_lcm(exc.exprs)
if result is not None:
return result
else:
raise ComputationFailed('lcm_list', len(seq), exc)
if not polys:
if not opt.polys:
return S.One
else:
return Poly(1, opt=opt)
result, polys = polys[0], polys[1:]
for poly in polys:
result = result.lcm(poly)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def lcm(f, g=None, *gens, **args):
"""
Compute LCM of ``f`` and ``g``.
Examples
========
>>> from sympy import lcm
>>> from sympy.abc import x
>>> lcm(x**2 - 1, x**2 - 3*x + 2)
x**3 - 2*x**2 - x + 2
"""
if hasattr(f, '__iter__'):
if g is not None:
gens = (g,) + gens
return lcm_list(f, *gens, **args)
elif g is None:
raise TypeError("lcm() takes 2 arguments or a sequence of arguments")
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
# lcm for domain Q[irrational] (purely algebraic irrational)
a, b = map(sympify, (f, g))
if a.is_algebraic and a.is_irrational and b.is_algebraic and b.is_irrational:
frc = (a/b).ratsimp()
if frc.is_rational:
return a*frc.as_numer_denom()[1]
except PolificationFailed as exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
return domain.to_sympy(domain.lcm(a, b))
except NotImplementedError:
raise ComputationFailed('lcm', 2, exc)
result = F.lcm(G)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def terms_gcd(f, *gens, **args):
"""
Remove GCD of terms from ``f``.
If the ``deep`` flag is True, then the arguments of ``f`` will have
terms_gcd applied to them.
If a fraction is factored out of ``f`` and ``f`` is an Add, then
an unevaluated Mul will be returned so that automatic simplification
does not redistribute it. The hint ``clear``, when set to False, can be
used to prevent such factoring when not all coefficients are fractions.
Examples
========
>>> from sympy import terms_gcd, cos
>>> from sympy.abc import x, y
>>> terms_gcd(x**6*y**2 + x**3*y, x, y)
x**3*y*(x**3*y + 1)
The default action of polys routines is to expand the expression
given to them. terms_gcd follows this behavior:
>>> terms_gcd((3+3*x)*(x+x*y))
3*x*(x*y + x + y + 1)
If this is not desired then the hint ``expand`` can be set to False.
In this case the expression will be treated as though it were comprised
of one or more terms:
>>> terms_gcd((3+3*x)*(x+x*y), expand=False)
(3*x + 3)*(x*y + x)
In order to traverse factors of a Mul or the arguments of other
functions, the ``deep`` hint can be used:
>>> terms_gcd((3 + 3*x)*(x + x*y), expand=False, deep=True)
3*x*(x + 1)*(y + 1)
>>> terms_gcd(cos(x + x*y), deep=True)
cos(x*(y + 1))
Rationals are factored out by default:
>>> terms_gcd(x + y/2)
(2*x + y)/2
Only the y-term had a coefficient that was a fraction; if one
does not want to factor out the 1/2 in cases like this, the
flag ``clear`` can be set to False:
>>> terms_gcd(x + y/2, clear=False)
x + y/2
>>> terms_gcd(x*y/2 + y**2, clear=False)
y*(x/2 + y)
The ``clear`` flag is ignored if all coefficients are fractions:
>>> terms_gcd(x/3 + y/2, clear=False)
(2*x + 3*y)/6
See Also
========
sympy.core.exprtools.gcd_terms, sympy.core.exprtools.factor_terms
"""
from sympy.core.relational import Equality
orig = sympify(f)
if not isinstance(f, Expr) or f.is_Atom:
return orig
if args.get('deep', False):
new = f.func(*[terms_gcd(a, *gens, **args) for a in f.args])
args.pop('deep')
args['expand'] = False
return terms_gcd(new, *gens, **args)
if isinstance(f, Equality):
return f
clear = args.pop('clear', True)
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
return exc.expr
J, f = F.terms_gcd()
if opt.domain.is_Ring:
if opt.domain.is_Field:
denom, f = f.clear_denoms(convert=True)
coeff, f = f.primitive()
if opt.domain.is_Field:
coeff /= denom
else:
coeff = S.One
term = Mul(*[x**j for x, j in zip(f.gens, J)])
if coeff == 1:
coeff = S.One
if term == 1:
return orig
if clear:
return _keep_coeff(coeff, term*f.as_expr())
# base the clearing on the form of the original expression, not
# the (perhaps) Mul that we have now
coeff, f = _keep_coeff(coeff, f.as_expr(), clear=False).as_coeff_Mul()
return _keep_coeff(coeff, term*f, clear=False)
@public
def trunc(f, p, *gens, **args):
"""
Reduce ``f`` modulo a constant ``p``.
Examples
========
>>> from sympy import trunc
>>> from sympy.abc import x
>>> trunc(2*x**3 + 3*x**2 + 5*x + 7, 3)
-x**3 - x + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('trunc', 1, exc)
result = F.trunc(sympify(p))
if not opt.polys:
return result.as_expr()
else:
return result
@public
def monic(f, *gens, **args):
"""
Divide all coefficients of ``f`` by ``LC(f)``.
Examples
========
>>> from sympy import monic
>>> from sympy.abc import x
>>> monic(3*x**2 + 4*x + 2)
x**2 + 4*x/3 + 2/3
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('monic', 1, exc)
result = F.monic(auto=opt.auto)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def content(f, *gens, **args):
"""
Compute GCD of coefficients of ``f``.
Examples
========
>>> from sympy import content
>>> from sympy.abc import x
>>> content(6*x**2 + 8*x + 12)
2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('content', 1, exc)
return F.content()
@public
def primitive(f, *gens, **args):
"""
Compute content and the primitive form of ``f``.
Examples
========
>>> from sympy.polys.polytools import primitive
>>> from sympy.abc import x
>>> primitive(6*x**2 + 8*x + 12)
(2, 3*x**2 + 4*x + 6)
>>> eq = (2 + 2*x)*x + 2
Expansion is performed by default:
>>> primitive(eq)
(2, x**2 + x + 1)
Set ``expand`` to False to shut this off. Note that the
extraction will not be recursive; use the as_content_primitive method
for recursive, non-destructive Rational extraction.
>>> primitive(eq, expand=False)
(1, x*(2*x + 2) + 2)
>>> eq.as_content_primitive()
(2, x*(x + 1) + 1)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('primitive', 1, exc)
cont, result = F.primitive()
if not opt.polys:
return cont, result.as_expr()
else:
return cont, result
@public
def compose(f, g, *gens, **args):
"""
Compute functional composition ``f(g)``.
Examples
========
>>> from sympy import compose
>>> from sympy.abc import x
>>> compose(x**2 + x, x - 1)
x**2 - x
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('compose', 2, exc)
result = F.compose(G)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def decompose(f, *gens, **args):
"""
Compute functional decomposition of ``f``.
Examples
========
>>> from sympy import decompose
>>> from sympy.abc import x
>>> decompose(x**4 + 2*x**3 - x - 1)
[x**2 - x - 1, x**2 + x]
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('decompose', 1, exc)
result = F.decompose()
if not opt.polys:
return [r.as_expr() for r in result]
else:
return result
@public
def sturm(f, *gens, **args):
"""
Compute Sturm sequence of ``f``.
Examples
========
>>> from sympy import sturm
>>> from sympy.abc import x
>>> sturm(x**3 - 2*x**2 + x - 3)
[x**3 - 2*x**2 + x - 3, 3*x**2 - 4*x + 1, 2*x/9 + 25/9, -2079/4]
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('sturm', 1, exc)
result = F.sturm(auto=opt.auto)
if not opt.polys:
return [r.as_expr() for r in result]
else:
return result
@public
def gff_list(f, *gens, **args):
"""
Compute a list of greatest factorial factors of ``f``.
Note that the input to ff() and rf() should be Poly instances to use the
definitions here.
Examples
========
>>> from sympy import gff_list, ff, Poly
>>> from sympy.abc import x
>>> f = Poly(x**5 + 2*x**4 - x**3 - 2*x**2, x)
>>> gff_list(f)
[(Poly(x, x, domain='ZZ'), 1), (Poly(x + 2, x, domain='ZZ'), 4)]
>>> (ff(Poly(x), 1)*ff(Poly(x + 2), 4)).expand() == f
True
>>> f = Poly(x**12 + 6*x**11 - 11*x**10 - 56*x**9 + 220*x**8 + 208*x**7 - \
1401*x**6 + 1090*x**5 + 2715*x**4 - 6720*x**3 - 1092*x**2 + 5040*x, x)
>>> gff_list(f)
[(Poly(x**3 + 7, x, domain='ZZ'), 2), (Poly(x**2 + 5*x, x, domain='ZZ'), 3)]
>>> ff(Poly(x**3 + 7, x), 2)*ff(Poly(x**2 + 5*x, x), 3) == f
True
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('gff_list', 1, exc)
factors = F.gff_list()
if not opt.polys:
return [(g.as_expr(), k) for g, k in factors]
else:
return factors
@public
def gff(f, *gens, **args):
"""Compute greatest factorial factorization of ``f``. """
raise NotImplementedError('symbolic falling factorial')
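# Editorial note (not upstream code): a symbolic ``gff`` would assemble the
# output of ``gff_list`` back into a product of falling factorials, roughly
# Mul(*[ff(g, k) for g, k in gff_list(f)]), as the doctests of ``gff_list``
# above illustrate; only the list form is currently implemented.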
@public
def sqf_norm(f, *gens, **args):
"""
Compute square-free norm of ``f``.
Returns ``s``, ``g``, ``r``, such that ``g(x) = f(x - s*a)`` and
``r(x) = Norm(g(x))`` is a square-free polynomial over ``K``,
where ``a`` is the generator of the algebraic extension of the ground domain.
Examples
========
>>> from sympy import sqf_norm, sqrt
>>> from sympy.abc import x
>>> sqf_norm(x**2 + 1, extension=[sqrt(3)])
(1, x**2 - 2*sqrt(3)*x + 4, x**4 - 4*x**2 + 16)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('sqf_norm', 1, exc)
s, g, r = F.sqf_norm()
if not opt.polys:
return Integer(s), g.as_expr(), r.as_expr()
else:
return Integer(s), g, r
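# Editorial note (not upstream code): in the doctest above ``s = 1`` encodes the
# substitution x -> x - 1*sqrt(3); indeed (x - sqrt(3))**2 + 1 expands to
# x**2 - 2*sqrt(3)*x + 4, and multiplying by the conjugate factor
# x**2 + 2*sqrt(3)*x + 4 gives the rational norm x**4 - 4*x**2 + 16.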
@public
def sqf_part(f, *gens, **args):
"""
Compute square-free part of ``f``.
Examples
========
>>> from sympy import sqf_part
>>> from sympy.abc import x
>>> sqf_part(x**3 - 3*x - 2)
x**2 - x - 2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('sqf_part', 1, exc)
result = F.sqf_part()
if not opt.polys:
return result.as_expr()
else:
return result
def _sorted_factors(factors, method):
"""Sort a list of ``(expr, exp)`` pairs. """
if method == 'sqf':
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (exp, len(rep), len(poly.gens), rep)
else:
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (len(rep), len(poly.gens), exp, rep)
return sorted(factors, key=key)
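# Editorial note (not upstream code): with method='sqf' the factors are ordered
# primarily by their exponent (multiplicity), while for ordinary factorization
# they are ordered primarily by the length of the dense coefficient list, i.e.
# roughly by degree; this only fixes the output order of sqf_list/factor_list
# and has no mathematical significance.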
def _factors_product(factors):
"""Multiply a list of ``(expr, exp)`` pairs. """
return Mul(*[f.as_expr()**k for f, k in factors])
def _symbolic_factor_list(expr, opt, method):
"""Helper function for :func:`_symbolic_factor`. """
coeff, factors = S.One, []
args = [i._eval_factor() if hasattr(i, '_eval_factor') else i
for i in Mul.make_args(expr)]
for arg in args:
if arg.is_Number:
coeff *= arg
continue
if arg.is_Mul:
args.extend(arg.args)
continue
if arg.is_Pow:
base, exp = arg.args
if base.is_Number and exp.is_Number:
coeff *= arg
continue
if base.is_Number:
factors.append((base, exp))
continue
else:
base, exp = arg, S.One
try:
poly, _ = _poly_from_expr(base, opt)
except PolificationFailed as exc:
factors.append((exc.expr, exp))
else:
func = getattr(poly, method + '_list')
_coeff, _factors = func()
if _coeff is not S.One:
if exp.is_Integer:
coeff *= _coeff**exp
elif _coeff.is_positive:
factors.append((_coeff, exp))
else:
_factors.append((_coeff, S.One))
if exp is S.One:
factors.extend(_factors)
elif exp.is_integer:
factors.extend([(f, k*exp) for f, k in _factors])
else:
other = []
for f, k in _factors:
if f.as_expr().is_positive:
factors.append((f, k*exp))
else:
other.append((f, k))
factors.append((_factors_product(other), exp))
return coeff, factors
def _symbolic_factor(expr, opt, method):
"""Helper function for :func:`_factor`. """
if isinstance(expr, Expr) and not expr.is_Relational:
if hasattr(expr,'_eval_factor'):
return expr._eval_factor()
coeff, factors = _symbolic_factor_list(together(expr, fraction=opt['fraction']), opt, method)
return _keep_coeff(coeff, _factors_product(factors))
elif hasattr(expr, 'args'):
return expr.func(*[_symbolic_factor(arg, opt, method) for arg in expr.args])
elif hasattr(expr, '__iter__'):
return expr.__class__([_symbolic_factor(arg, opt, method) for arg in expr])
else:
return expr
def _generic_factor_list(expr, gens, args, method):
"""Helper function for :func:`sqf_list` and :func:`factor_list`. """
options.allowed_flags(args, ['frac', 'polys'])
opt = options.build_options(gens, args)
expr = sympify(expr)
if isinstance(expr, Expr) and not expr.is_Relational:
numer, denom = together(expr).as_numer_denom()
cp, fp = _symbolic_factor_list(numer, opt, method)
cq, fq = _symbolic_factor_list(denom, opt, method)
if fq and not opt.frac:
raise PolynomialError("a polynomial expected, got %s" % expr)
_opt = opt.clone(dict(expand=True))
for factors in (fp, fq):
for i, (f, k) in enumerate(factors):
if not f.is_Poly:
f, _ = _poly_from_expr(f, _opt)
factors[i] = (f, k)
fp = _sorted_factors(fp, method)
fq = _sorted_factors(fq, method)
if not opt.polys:
fp = [(f.as_expr(), k) for f, k in fp]
fq = [(f.as_expr(), k) for f, k in fq]
coeff = cp/cq
if not opt.frac:
return coeff, fp
else:
return coeff, fp, fq
else:
raise PolynomialError("a polynomial expected, got %s" % expr)
def _generic_factor(expr, gens, args, method):
"""Helper function for :func:`sqf` and :func:`factor`. """
fraction = args.pop('fraction', True)
options.allowed_flags(args, [])
opt = options.build_options(gens, args)
opt['fraction'] = fraction
return _symbolic_factor(sympify(expr), opt, method)
def to_rational_coeffs(f):
"""
Try to transform a polynomial so that it has rational coefficients.
First a rescaling ``x = alpha*y`` is attempted, giving
``f(x) = lc*alpha**n * g(y)`` where ``g`` is a polynomial with
rational coefficients and ``lc`` is the leading coefficient of ``f``.
If this fails, a translation ``x = y + beta`` is tried, giving ``f(x) = g(y)``.
Returns ``None`` if no such ``g`` is found;
``(lc, alpha, None, g)`` in case of rescaling,
``(None, None, beta, g)`` in case of translation.
Notes
=====
Currently only polynomials whose coefficients involve square roots
(and no radicals of higher index) are transformed.
Examples
========
>>> from sympy import sqrt, Poly, simplify
>>> from sympy.polys.polytools import to_rational_coeffs
>>> from sympy.abc import x
>>> p = Poly(((x**2-1)*(x-2)).subs({x:x*(1 + sqrt(2))}), x, domain='EX')
>>> lc, r, _, g = to_rational_coeffs(p)
>>> lc, r
(7 + 5*sqrt(2), 2 - 2*sqrt(2))
>>> g
Poly(x**3 + x**2 - 1/4*x - 1/4, x, domain='QQ')
>>> r1 = simplify(1/r)
>>> Poly(lc*r**3*(g.as_expr()).subs({x:x*r1}), x, domain='EX') == p
True
"""
from sympy.simplify.simplify import simplify
def _try_rescale(f, f1=None):
"""
try rescaling ``x -> alpha*x`` to convert f to a polynomial
with rational coefficients.
Returns ``lc, alpha, f`` if the rescaling is successful, where ``lc``
is the leading coefficient, ``alpha`` is the rescaling factor and ``f``
is the rescaled polynomial; otherwise returns ``None``.
"""
from sympy.core.add import Add
if not len(f.gens) == 1 or not (f.gens[0]).is_Atom:
return None, f
n = f.degree()
lc = f.LC()
f1 = f1 or f.monic()
coeffs = f1.all_coeffs()[1:]
coeffs = [simplify(coeffx) for coeffx in coeffs]
if coeffs[-2]:
rescale1_x = simplify(coeffs[-2]/coeffs[-1])
coeffs1 = []
for i in range(len(coeffs)):
coeffx = simplify(coeffs[i]*rescale1_x**(i + 1))
if not coeffx.is_rational:
break
coeffs1.append(coeffx)
else:
rescale_x = simplify(1/rescale1_x)
x = f.gens[0]
v = [x**n]
for i in range(1, n + 1):
v.append(coeffs1[i - 1]*x**(n - i))
f = Add(*v)
f = Poly(f)
return lc, rescale_x, f
return None
def _try_translate(f, f1=None):
"""
try translating ``x -> x + alpha`` to convert f to a polynomial
with rational coefficients.
Returns ``alpha, f`` if the translation is successful, where ``alpha``
is the translation amount and ``f`` is the shifted polynomial;
otherwise returns ``None``.
"""
from sympy.core.add import Add
if not len(f.gens) == 1 or not (f.gens[0]).is_Atom:
return None, f
n = f.degree()
f1 = f1 or f.monic()
coeffs = f1.all_coeffs()[1:]
c = simplify(coeffs[0])
if c and not c.is_rational:
func = Add
if c.is_Add:
args = c.args
func = c.func
else:
args = [c]
c1, c2 = sift(args, lambda z: z.is_rational, binary=True)
alpha = -func(*c2)/n
f2 = f1.shift(alpha)
return alpha, f2
return None
def _has_square_roots(p):
"""
Return True if the coefficients of ``p`` involve square roots but no radicals of higher index.
"""
from sympy.core.exprtools import Factors
coeffs = p.coeffs()
has_sq = False
for y in coeffs:
for x in Add.make_args(y):
f = Factors(x).factors
r = [wx.q for b, wx in f.items() if
b.is_number and wx.is_Rational and wx.q >= 2]
if not r:
continue
if min(r) == 2:
has_sq = True
if max(r) > 2:
return False
return has_sq
if f.get_domain().is_EX and _has_square_roots(f):
f1 = f.monic()
r = _try_rescale(f, f1)
if r:
return r[0], r[1], None, r[2]
else:
r = _try_translate(f, f1)
if r:
return None, None, r[0], r[1]
return None
def _torational_factor_list(p, x):
"""
helper function to factor polynomial using to_rational_coeffs
Examples
========
>>> from sympy.polys.polytools import _torational_factor_list
>>> from sympy.abc import x
>>> from sympy import sqrt, expand, Mul
>>> p = expand(((x**2-1)*(x-2)).subs({x:x*(1 + sqrt(2))}))
>>> factors = _torational_factor_list(p, x); factors
(-2, [(-x*(1 + sqrt(2))/2 + 1, 1), (-x*(1 + sqrt(2)) - 1, 1), (-x*(1 + sqrt(2)) + 1, 1)])
>>> expand(factors[0]*Mul(*[z[0] for z in factors[1]])) == p
True
>>> p = expand(((x**2-1)*(x-2)).subs({x:x + sqrt(2)}))
>>> factors = _torational_factor_list(p, x); factors
(1, [(x - 2 + sqrt(2), 1), (x - 1 + sqrt(2), 1), (x + 1 + sqrt(2), 1)])
>>> expand(factors[0]*Mul(*[z[0] for z in factors[1]])) == p
True
"""
from sympy.simplify.simplify import simplify
p1 = Poly(p, x, domain='EX')
n = p1.degree()
res = to_rational_coeffs(p1)
if not res:
return None
lc, r, t, g = res
factors = factor_list(g.as_expr())
if lc:
c = simplify(factors[0]*lc*r**n)
r1 = simplify(1/r)
a = []
for z in factors[1:][0]:
a.append((simplify(z[0].subs({x: x*r1})), z[1]))
else:
c = factors[0]
a = []
for z in factors[1:][0]:
a.append((z[0].subs({x: x - t}), z[1]))
return (c, a)
@public
def sqf_list(f, *gens, **args):
"""
Compute a list of square-free factors of ``f``.
Examples
========
>>> from sympy import sqf_list
>>> from sympy.abc import x
>>> sqf_list(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
(2, [(x + 1, 2), (x + 2, 3)])
"""
return _generic_factor_list(f, gens, args, method='sqf')
@public
def sqf(f, *gens, **args):
"""
Compute square-free factorization of ``f``.
Examples
========
>>> from sympy import sqf
>>> from sympy.abc import x
>>> sqf(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
2*(x + 1)**2*(x + 2)**3
"""
return _generic_factor(f, gens, args, method='sqf')
@public
def factor_list(f, *gens, **args):
"""
Compute a list of irreducible factors of ``f``.
Examples
========
>>> from sympy import factor_list
>>> from sympy.abc import x, y
>>> factor_list(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
(2, [(x + y, 1), (x**2 + 1, 2)])
"""
return _generic_factor_list(f, gens, args, method='factor')
@public
def factor(f, *gens, **args):
"""
Compute the factorization of expression, ``f``, into irreducibles. (To
factor an integer into primes, use ``factorint``.)
There are two modes implemented: symbolic and formal. If ``f`` is not an
instance of :class:`Poly` and generators are not specified, then the
former mode is used. Otherwise, the formal mode is used.
In symbolic mode, :func:`factor` will traverse the expression tree and
factor its components without any prior expansion, unless an instance
of :class:`~.Add` is encountered (in this case formal factorization is
used). This way :func:`factor` can handle large or symbolic exponents.
By default, the factorization is computed over the rationals. To factor
over another domain, e.g. an algebraic or finite field, use the appropriate
options: ``extension``, ``modulus`` or ``domain``.
Examples
========
>>> from sympy import factor, sqrt, exp
>>> from sympy.abc import x, y
>>> factor(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
2*(x + y)*(x**2 + 1)**2
>>> factor(x**2 + 1)
x**2 + 1
>>> factor(x**2 + 1, modulus=2)
(x + 1)**2
>>> factor(x**2 + 1, gaussian=True)
(x - I)*(x + I)
>>> factor(x**2 - 2, extension=sqrt(2))
(x - sqrt(2))*(x + sqrt(2))
>>> factor((x**2 - 1)/(x**2 + 4*x + 4))
(x - 1)*(x + 1)/(x + 2)**2
>>> factor((x**2 + 4*x + 4)**10000000*(x**2 + 1))
(x + 2)**20000000*(x**2 + 1)
By default, factor deals with an expression as a whole:
>>> eq = 2**(x**2 + 2*x + 1)
>>> factor(eq)
2**(x**2 + 2*x + 1)
If the ``deep`` flag is True then subexpressions will
be factored:
>>> factor(eq, deep=True)
2**((x + 1)**2)
If the ``fraction`` flag is False then rational expressions
won't be combined. By default it is True.
>>> factor(5*x + 3*exp(2 - 7*x), deep=True)
(5*x*exp(7*x) + 3*exp(2))*exp(-7*x)
>>> factor(5*x + 3*exp(2 - 7*x), deep=True, fraction=False)
5*x + 3*exp(2)*exp(-7*x)
See Also
========
sympy.ntheory.factor_.factorint
"""
f = sympify(f)
if args.pop('deep', False):
from sympy.simplify.simplify import bottom_up
def _try_factor(expr):
"""
Factor, but avoid changing the expression when unable to.
"""
fac = factor(expr, *gens, **args)
if fac.is_Mul or fac.is_Pow:
return fac
return expr
f = bottom_up(f, _try_factor)
# clean up any subexpressions that may have been expanded
# while factoring out a larger expression
partials = {}
muladd = f.atoms(Mul, Add)
for p in muladd:
fac = factor(p, *gens, **args)
if (fac.is_Mul or fac.is_Pow) and fac != p:
partials[p] = fac
return f.xreplace(partials)
try:
return _generic_factor(f, gens, args, method='factor')
except PolynomialError as msg:
if not f.is_commutative:
from sympy.core.exprtools import factor_nc
return factor_nc(f)
else:
raise PolynomialError(msg)
@public
def intervals(F, all=False, eps=None, inf=None, sup=None, strict=False, fast=False, sqf=False):
"""
Compute isolating intervals for roots of ``f``.
Examples
========
>>> from sympy import intervals
>>> from sympy.abc import x
>>> intervals(x**2 - 3)
[((-2, -1), 1), ((1, 2), 1)]
>>> intervals(x**2 - 3, eps=1e-2)
[((-26/15, -19/11), 1), ((19/11, 26/15), 1)]
"""
if not hasattr(F, '__iter__'):
try:
F = Poly(F)
except GeneratorsNeeded:
return []
return F.intervals(all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)
else:
polys, opt = parallel_poly_from_expr(F, domain='QQ')
if len(opt.gens) > 1:
raise MultivariatePolynomialError
for i, poly in enumerate(polys):
polys[i] = poly.rep.rep
if eps is not None:
eps = opt.domain.convert(eps)
if eps <= 0:
raise ValueError("'eps' must be a positive rational")
if inf is not None:
inf = opt.domain.convert(inf)
if sup is not None:
sup = opt.domain.convert(sup)
intervals = dup_isolate_real_roots_list(polys, opt.domain,
eps=eps, inf=inf, sup=sup, strict=strict, fast=fast)
result = []
for (s, t), indices in intervals:
s, t = opt.domain.to_sympy(s), opt.domain.to_sympy(t)
result.append(((s, t), indices))
return result
@public
def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False):
"""
Refine an isolating interval of a root to the given precision.
Examples
========
>>> from sympy import refine_root
>>> from sympy.abc import x
>>> refine_root(x**2 - 3, 1, 2, eps=1e-2)
(19/11, 26/15)
"""
try:
F = Poly(f)
except GeneratorsNeeded:
raise PolynomialError(
"can't refine a root of %s, not a polynomial" % f)
return F.refine_root(s, t, eps=eps, steps=steps, fast=fast, check_sqf=check_sqf)
@public
def count_roots(f, inf=None, sup=None):
"""
Return the number of roots of ``f`` in the ``[inf, sup]`` interval.
If one of ``inf`` or ``sup`` is complex, it will return the number of roots
in the complex rectangle with corners at ``inf`` and ``sup``.
Examples
========
>>> from sympy import count_roots, I
>>> from sympy.abc import x
>>> count_roots(x**4 - 4, -3, 3)
2
>>> count_roots(x**4 - 4, 0, 1 + 3*I)
1
"""
try:
F = Poly(f, greedy=False)
except GeneratorsNeeded:
raise PolynomialError("can't count roots of %s, not a polynomial" % f)
return F.count_roots(inf=inf, sup=sup)
@public
def real_roots(f, multiple=True):
"""
Return a list of real roots with multiplicities of ``f``.
Examples
========
>>> from sympy import real_roots
>>> from sympy.abc import x
>>> real_roots(2*x**3 - 7*x**2 + 4*x + 4)
[-1/2, 2, 2]
"""
try:
F = Poly(f, greedy=False)
except GeneratorsNeeded:
raise PolynomialError(
"can't compute real roots of %s, not a polynomial" % f)
return F.real_roots(multiple=multiple)
@public
def nroots(f, n=15, maxsteps=50, cleanup=True):
"""
Compute numerical approximations of roots of ``f``.
Examples
========
>>> from sympy import nroots
>>> from sympy.abc import x
>>> nroots(x**2 - 3, n=15)
[-1.73205080756888, 1.73205080756888]
>>> nroots(x**2 - 3, n=30)
[-1.73205080756887729352744634151, 1.73205080756887729352744634151]
"""
try:
F = Poly(f, greedy=False)
except GeneratorsNeeded:
raise PolynomialError(
"can't compute numerical roots of %s, not a polynomial" % f)
return F.nroots(n=n, maxsteps=maxsteps, cleanup=cleanup)
@public
def ground_roots(f, *gens, **args):
"""
Compute roots of ``f`` by factorization in the ground domain.
Examples
========
>>> from sympy import ground_roots
>>> from sympy.abc import x
>>> ground_roots(x**6 - 4*x**4 + 4*x**3 - x**2)
{0: 2, 1: 2}
"""
options.allowed_flags(args, [])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('ground_roots', 1, exc)
return F.ground_roots()
@public
def nth_power_roots_poly(f, n, *gens, **args):
"""
Construct a polynomial with n-th powers of roots of ``f``.
Examples
========
>>> from sympy import nth_power_roots_poly, factor, roots
>>> from sympy.abc import x
>>> f = x**4 - x**2 + 1
>>> g = factor(nth_power_roots_poly(f, 2))
>>> g
(x**2 - x + 1)**2
>>> R_f = [ (r**2).expand() for r in roots(f) ]
>>> R_g = roots(g).keys()
>>> set(R_f) == set(R_g)
True
"""
options.allowed_flags(args, [])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('nth_power_roots_poly', 1, exc)
result = F.nth_power_roots_poly(n)
if not opt.polys:
return result.as_expr()
else:
return result
@public
def cancel(f, *gens, **args):
"""
Cancel common factors in a rational function ``f``.
Examples
========
>>> from sympy import cancel, sqrt, Symbol
>>> from sympy.abc import x
>>> A = Symbol('A', commutative=False)
>>> cancel((2*x**2 - 2)/(x**2 - 2*x + 1))
(2*x + 2)/(x - 1)
>>> cancel((sqrt(3) + sqrt(15)*A)/(sqrt(2) + sqrt(10)*A))
sqrt(6)/2
"""
from sympy.core.exprtools import factor_terms
from sympy.functions.elementary.piecewise import Piecewise
options.allowed_flags(args, ['polys'])
f = sympify(f)
if not isinstance(f, (tuple, Tuple)):
if f.is_Number or isinstance(f, Relational) or not isinstance(f, Expr):
return f
f = factor_terms(f, radical=True)
p, q = f.as_numer_denom()
elif len(f) == 2:
p, q = f
elif isinstance(f, Tuple):
return factor_terms(f)
else:
raise ValueError('unexpected argument: %s' % f)
try:
(F, G), opt = parallel_poly_from_expr((p, q), *gens, **args)
except PolificationFailed:
if not isinstance(f, (tuple, Tuple)):
return f
else:
return S.One, p, q
except PolynomialError as msg:
if f.is_commutative and not f.has(Piecewise):
raise PolynomialError(msg)
# Handling of noncommutative and/or piecewise expressions
if f.is_Add or f.is_Mul:
c, nc = sift(f.args, lambda x:
x.is_commutative is True and not x.has(Piecewise),
binary=True)
nc = [cancel(i) for i in nc]
return f.func(cancel(f.func(*c)), *nc)
else:
reps = []
pot = preorder_traversal(f)
next(pot)
for e in pot:
# XXX: This should really skip anything that's not Expr.
if isinstance(e, (tuple, Tuple, BooleanAtom)):
continue
try:
reps.append((e, cancel(e)))
pot.skip() # this was handled successfully
except NotImplementedError:
pass
return f.xreplace(dict(reps))
c, P, Q = F.cancel(G)
if not isinstance(f, (tuple, Tuple)):
return c*(P.as_expr()/Q.as_expr())
else:
if not opt.polys:
return c, P.as_expr(), Q.as_expr()
else:
return c, P, Q
@public
def reduced(f, G, *gens, **args):
"""
Reduces a polynomial ``f`` modulo a set of polynomials ``G``.
Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``,
computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r``
such that ``f = q_1*g_1 + ... + q_n*g_n + r``, where ``r`` vanishes or ``r``
is a completely reduced polynomial with respect to ``G``.
Examples
========
>>> from sympy import reduced
>>> from sympy.abc import x, y
>>> reduced(2*x**4 + y**2 - x**2 + y**3, [x**3 - x, y**3 - y])
([2*x, 1], x**2 + y**2 + y)
"""
options.allowed_flags(args, ['polys', 'auto'])
try:
polys, opt = parallel_poly_from_expr([f] + list(G), *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('reduced', 0, exc)
domain = opt.domain
retract = False
if opt.auto and domain.is_Ring and not domain.is_Field:
opt = opt.clone(dict(domain=domain.get_field()))
retract = True
from sympy.polys.rings import xring
_ring, _ = xring(opt.gens, opt.domain, opt.order)
for i, poly in enumerate(polys):
poly = poly.set_domain(opt.domain).rep.to_dict()
polys[i] = _ring.from_dict(poly)
Q, r = polys[0].div(polys[1:])
Q = [Poly._from_dict(dict(q), opt) for q in Q]
r = Poly._from_dict(dict(r), opt)
if retract:
try:
_Q, _r = [q.to_ring() for q in Q], r.to_ring()
except CoercionFailed:
pass
else:
Q, r = _Q, _r
if not opt.polys:
return [q.as_expr() for q in Q], r.as_expr()
else:
return Q, r
@public
def groebner(F, *gens, **args):
"""
Computes the reduced Groebner basis for a set of polynomials.
Use the ``order`` argument to set the monomial ordering that will be
used to compute the basis. Allowed orders are ``lex``, ``grlex`` and
``grevlex``. If no order is specified, it defaults to ``lex``.
For more information on Groebner bases, see the references and the docstring
of :func:`~.solve_poly_system`.
Examples
========
Example taken from [1].
>>> from sympy import groebner
>>> from sympy.abc import x, y
>>> F = [x*y - 2*y, 2*y**2 - x**2]
>>> groebner(F, x, y, order='lex')
GroebnerBasis([x**2 - 2*y**2, x*y - 2*y, y**3 - 2*y], x, y,
domain='ZZ', order='lex')
>>> groebner(F, x, y, order='grlex')
GroebnerBasis([y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y], x, y,
domain='ZZ', order='grlex')
>>> groebner(F, x, y, order='grevlex')
GroebnerBasis([y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y], x, y,
domain='ZZ', order='grevlex')
By default, an improved implementation of the Buchberger algorithm is
used. Optionally, an implementation of the F5B algorithm can be used. The
algorithm can be set using the ``method`` flag or with the
:func:`sympy.polys.polyconfig.setup` function.
>>> F = [x**2 - x - 1, (2*x - 1) * y - (x**10 - (1 - x)**10)]
>>> groebner(F, x, y, method='buchberger')
GroebnerBasis([x**2 - x - 1, y - 55], x, y, domain='ZZ', order='lex')
>>> groebner(F, x, y, method='f5b')
GroebnerBasis([x**2 - x - 1, y - 55], x, y, domain='ZZ', order='lex')
References
==========
1. [Buchberger01]_
2. [Cox97]_
"""
return GroebnerBasis(F, *gens, **args)
@public
def is_zero_dimensional(F, *gens, **args):
"""
Checks if the ideal generated by a Groebner basis is zero-dimensional.
The algorithm checks if the set of monomials not divisible by the
leading monomial of any element of ``F`` is bounded.
References
==========
David A. Cox, John B. Little, Donal O'Shea. Ideals, Varieties and
Algorithms, 3rd edition, p. 230
"""
return GroebnerBasis(F, *gens, **args).is_zero_dimensional
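# Editorial sketch (not upstream code; assumes import through the module path):
#     >>> from sympy.polys.polytools import is_zero_dimensional
#     >>> from sympy.abc import x, y
#     >>> is_zero_dimensional([x**2 - 1, y - 2], x, y)   # finitely many common zeros
#     True
#     >>> is_zero_dimensional([x**2 + y**2 - 1], x, y)   # a whole curve of zeros
#     False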
@public
class GroebnerBasis(Basic):
"""Represents a reduced Groebner basis. """
def __new__(cls, F, *gens, **args):
"""Compute a reduced Groebner basis for a system of polynomials. """
options.allowed_flags(args, ['polys', 'method'])
try:
polys, opt = parallel_poly_from_expr(F, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('groebner', len(F), exc)
from sympy.polys.rings import PolyRing
ring = PolyRing(opt.gens, opt.domain, opt.order)
polys = [ring.from_dict(poly.rep.to_dict()) for poly in polys if poly]
G = _groebner(polys, ring, method=opt.method)
G = [Poly._from_dict(g, opt) for g in G]
return cls._new(G, opt)
@classmethod
def _new(cls, basis, options):
obj = Basic.__new__(cls)
obj._basis = tuple(basis)
obj._options = options
return obj
@property
def args(self):
return (Tuple(*self._basis), Tuple(*self._options.gens))
@property
def exprs(self):
return [poly.as_expr() for poly in self._basis]
@property
def polys(self):
return list(self._basis)
@property
def gens(self):
return self._options.gens
@property
def domain(self):
return self._options.domain
@property
def order(self):
return self._options.order
def __len__(self):
return len(self._basis)
def __iter__(self):
if self._options.polys:
return iter(self.polys)
else:
return iter(self.exprs)
def __getitem__(self, item):
if self._options.polys:
basis = self.polys
else:
basis = self.exprs
return basis[item]
def __hash__(self):
return hash((self._basis, tuple(self._options.items())))
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._basis == other._basis and self._options == other._options
elif iterable(other):
return self.polys == list(other) or self.exprs == list(other)
else:
return False
def __ne__(self, other):
return not self == other
@property
def is_zero_dimensional(self):
"""
Checks if the ideal generated by a Groebner basis is zero-dimensional.
The algorithm checks if the set of monomials not divisible by the
leading monomial of any element of ``F`` is bounded.
References
==========
David A. Cox, John B. Little, Donal O'Shea. Ideals, Varieties and
Algorithms, 3rd edition, p. 230
"""
def single_var(monomial):
return sum(map(bool, monomial)) == 1
exponents = Monomial([0]*len(self.gens))
order = self._options.order
for poly in self.polys:
monomial = poly.LM(order=order)
if single_var(monomial):
exponents *= monomial
# If any element of the exponents vector is zero, then there's
# a variable for which there's no degree bound and the ideal
# generated by this Groebner basis isn't zero-dimensional.
return all(exponents)
def fglm(self, order):
"""
Convert a Groebner basis from one ordering to another.
The FGLM algorithm converts reduced Groebner bases of zero-dimensional
ideals from one ordering to another. This method is often used when it
is infeasible to compute a Groebner basis with respect to a particular
ordering directly.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import groebner
>>> F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1]
>>> G = groebner(F, x, y, order='grlex')
>>> list(G.fglm('lex'))
[2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]
>>> list(groebner(F, x, y, order='lex'))
[2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]
References
==========
.. [1] J.C. Faugere, P. Gianni, D. Lazard, T. Mora (1994). Efficient
Computation of Zero-dimensional Groebner Bases by Change of
Ordering
"""
opt = self._options
src_order = opt.order
dst_order = monomial_key(order)
if src_order == dst_order:
return self
if not self.is_zero_dimensional:
raise NotImplementedError("can't convert Groebner bases of ideals with positive dimension")
polys = list(self._basis)
domain = opt.domain
opt = opt.clone(dict(
domain=domain.get_field(),
order=dst_order,
))
from sympy.polys.rings import xring
_ring, _ = xring(opt.gens, opt.domain, src_order)
for i, poly in enumerate(polys):
poly = poly.set_domain(opt.domain).rep.to_dict()
polys[i] = _ring.from_dict(poly)
G = matrix_fglm(polys, _ring, dst_order)
G = [Poly._from_dict(dict(g), opt) for g in G]
if not domain.is_Field:
G = [g.clear_denoms(convert=True)[1] for g in G]
opt.domain = domain
return self._new(G, opt)
def reduce(self, expr, auto=True):
"""
Reduces a polynomial modulo a Groebner basis.
Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``,
computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r``
such that ``f = q_1*g_1 + ... + q_n*g_n + r``, where ``r`` vanishes or ``r``
is a completely reduced polynomial with respect to ``G``.
Examples
========
>>> from sympy import groebner, expand
>>> from sympy.abc import x, y
>>> f = 2*x**4 - x**2 + y**3 + y**2
>>> G = groebner([x**3 - x, y**3 - y])
>>> G.reduce(f)
([2*x, 1], x**2 + y**2 + y)
>>> Q, r = _
>>> expand(sum(q*g for q, g in zip(Q, G)) + r)
2*x**4 - x**2 + y**3 + y**2
>>> _ == f
True
"""
poly = Poly._from_expr(expr, self._options)
polys = [poly] + list(self._basis)
opt = self._options
domain = opt.domain
retract = False
if auto and domain.is_Ring and not domain.is_Field:
opt = opt.clone(dict(domain=domain.get_field()))
retract = True
from sympy.polys.rings import xring
_ring, _ = xring(opt.gens, opt.domain, opt.order)
for i, poly in enumerate(polys):
poly = poly.set_domain(opt.domain).rep.to_dict()
polys[i] = _ring.from_dict(poly)
Q, r = polys[0].div(polys[1:])
Q = [Poly._from_dict(dict(q), opt) for q in Q]
r = Poly._from_dict(dict(r), opt)
if retract:
try:
_Q, _r = [q.to_ring() for q in Q], r.to_ring()
except CoercionFailed:
pass
else:
Q, r = _Q, _r
if not opt.polys:
return [q.as_expr() for q in Q], r.as_expr()
else:
return Q, r
def contains(self, poly):
"""
Check if ``poly`` belongs to the ideal generated by ``self``.
Examples
========
>>> from sympy import groebner
>>> from sympy.abc import x, y
>>> f = 2*x**3 + y**3 + 3*y
>>> G = groebner([x**2 + y**2 - 1, x*y - 2])
>>> G.contains(f)
True
>>> G.contains(f + 1)
False
"""
return self.reduce(poly)[1] == 0
@public
def poly(expr, *gens, **args):
"""
Efficiently transform an expression into a polynomial.
Examples
========
>>> from sympy import poly
>>> from sympy.abc import x
>>> poly(x*(x**2 + x - 1)**2)
Poly(x**5 + 2*x**4 - x**3 - 2*x**2 + x, x, domain='ZZ')
"""
options.allowed_flags(args, [])
def _poly(expr, opt):
terms, poly_terms = [], []
for term in Add.make_args(expr):
factors, poly_factors = [], []
for factor in Mul.make_args(term):
if factor.is_Add:
poly_factors.append(_poly(factor, opt))
elif factor.is_Pow and factor.base.is_Add and \
factor.exp.is_Integer and factor.exp >= 0:
poly_factors.append(
_poly(factor.base, opt).pow(factor.exp))
else:
factors.append(factor)
if not poly_factors:
terms.append(term)
else:
product = poly_factors[0]
for factor in poly_factors[1:]:
product = product.mul(factor)
if factors:
factor = Mul(*factors)
if factor.is_Number:
product = product.mul(factor)
else:
product = product.mul(Poly._from_expr(factor, opt))
poly_terms.append(product)
if not poly_terms:
result = Poly._from_expr(expr, opt)
else:
result = poly_terms[0]
for term in poly_terms[1:]:
result = result.add(term)
if terms:
term = Add(*terms)
if term.is_Number:
result = result.add(term)
else:
result = result.add(Poly._from_expr(term, opt))
return result.reorder(*opt.get('gens', ()), **args)
expr = sympify(expr)
if expr.is_Poly:
return Poly(expr, *gens, **args)
if 'expand' not in args:
args['expand'] = False
opt = options.build_options(gens, args)
return _poly(expr, opt)
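# Editorial note (not upstream code): unlike ``Poly(expr)``, which expands the
# expression up front, ``poly`` builds the result bottom-up by multiplying and
# adding Poly objects for the already-polynomial sub-parts (note the default
# expand=False set above), so powers of sums such as (x**2 + x - 1)**2 in the
# doctest never go through a full symbolic expansion.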
| kaushik94/sympy | sympy/polys/polytools.py | Python | bsd-3-clause | 182,487 | ["Gaussian"] | ccc55db8287f0c1c83e35929b55b8baabc4cf31dca20b15f3432827e285f2f57 |
# Copyright (c) 2010 Howard Hughes Medical Institute.
# All rights reserved.
# Use is subject to Janelia Farm Research Campus Software Copyright 1.1 license terms.
# http://license.janelia.org/license/jfrc_copyright_1_1.html
import neuroptikon
from os import path
from neuro_object import NeuroObject
from neurite import Neurite
from arborization import Arborization
from gap_junction import GapJunction
from innervation import Innervation
from stimulus import Stimulus
from synapse import Synapse
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
import xml.etree.ElementTree as ElementTree
from pydispatch import dispatcher
class Neuron(NeuroObject):
class Polarity: # pylint: disable=W0232
UNIPOLAR = 'UNIPOLAR'
BIPOLAR = 'BIPOLAR'
PSEUDOUNIPOLAR = 'PSEUDOUNIPOLAR'
MULTIPOLAR = 'MULTIPOLAR'
class Function: # pylint: disable=W0232
SENSORY = 'SENSORY'
INTERNEURON = 'INTERNEURON'
MOTOR = 'MOTOR'
#TODO Refactor neuron image into an object
Functions = [Function.SENSORY, Function.INTERNEURON, Function.MOTOR]
def __init__(self, network, neuronClass = None, *args, **keywordArgs):
"""
Neurons represent individual neural cells in the network.
You create a neuron by messaging the network:
>>> neuron1 = network.createNeuron(...)
"""
# Upconvert old 'function' singleton param to list expected by new 'functions' param.
if 'function' in keywordArgs:
keywordArgs['functions'] = set([keywordArgs['function']])
del keywordArgs['function']
# Upconvert old 'neurotransmitter' singleton param to list expected by new 'neurotransmitters' param.
if 'neurotransmitter' in keywordArgs:
keywordArgs['neurotransmitters'] = [keywordArgs['neurotransmitter']]
del keywordArgs['neurotransmitter']
# Pull out the keyword arguments specific to this class before we call super.
# We need to do this so we can know if the caller specified an argument or not.
# For example, the caller might specify a neuron class and one attribute to override. We need to know which attributes _not_ to set.
localAttrNames = ['activation', 'functions', 'neurotransmitters', 'polarity', 'region', 'neuronImage', 'links']
localKeywordArgs = {}
for attrName in localAttrNames:
if attrName in keywordArgs:
localKeywordArgs[attrName] = keywordArgs[attrName]
del keywordArgs[attrName]
NeuroObject.__init__(self, network, *args, **keywordArgs)
self._neurites = []
self.neuronClass = neuronClass
self.activation = None
self._functions = set()
self.neurotransmitters = []
self.polarity = None
self.region = None
self._synapses = []
self.neuronImage = []
self.links = []
for attrName in localAttrNames:
if attrName == 'functions':
attrValue = set()
elif attrName in ('neurotransmitters', 'links', 'neuronImage'):
attrValue = []
else:
attrValue = None
if attrName in localKeywordArgs:
# The user has explicitly set the attribute.
if attrName == 'functions':
attrValue = set(localKeywordArgs[attrName])
elif attrName == 'neuronImage':
for img in localKeywordArgs[attrName]:
if img['path']:
imageLabel = img['label']
imageLocation = img['path']
myImage = self.Img(imageLabel, imageLocation)
attrValue.append(myImage)
else:
attrValue = localKeywordArgs[attrName]
elif self.neuronClass:
attrValue = getattr(self.neuronClass, attrName) # Inherit the value from the class
if attrName == 'functions':
attrName = '_functions'
setattr(self, attrName, attrValue)
if self.region is not None:
self.region.neurons.append(self)
def defaultName(self):
# Try to build a name based on connections.
# TODO: should send/received be ignored, i.e. should the connector always be '- '?
connections = []
for connection in self.connections():
sends = receives = False
if isinstance(connection, Arborization):
otherName = connection.region.name
sends = connection.sendsOutput
receives = connection.receivesInput
elif isinstance(connection, GapJunction):
neurons = [neurite.neuron() for neurite in connection.neurites()]
neurons.remove(self)
otherName = neurons[0].name
sends = receives = True
elif isinstance(connection, Innervation):
otherName = connection.muscle.name
sends = True
elif isinstance(connection, Stimulus):
otherName = connection.name
receives = True
elif isinstance(connection, Synapse):
if connection.preSynapticNeurite.neuron() == self:
# TODO: check if other neuron names are nameless
otherName = ', '.join([(partner.name if isinstance(partner, Neuron) else partner.neuron().name) for partner in connection.postSynapticPartners])
sends = True
else:
otherName = connection.preSynapticNeurite.neuron().name
receives = True
if otherName is None:
return None
if sends and receives:
connector = '<->'
elif sends:
connector = '->'
elif receives:
connector = '<-'
else:
connector = '-'
connections += [connector + otherName]
return 'Neuron ' + ' & '.join(connections)
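# Editorial note (not part of the original source): the generated name is the
# word 'Neuron' followed by one token per connection, so a cell that exchanges
# signals with region "AL" and drives muscle "M1" would be named something like
# "Neuron <->AL & ->M1" (hypothetical names, purely for illustration).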
@classmethod
def _fromXMLElement(cls, network, xmlElement):
#TODO need to add links and images when I get this working
neuron = super(Neuron, cls)._fromXMLElement(network, xmlElement)
classId = xmlElement.findtext('Class')
if classId is None:
classId = xmlElement.findtext('class')
neuron.neuronClass = neuroptikon.library.neuronClass(classId)
if classId is not None and neuron.neuronClass is None:
raise ValueError, gettext('Neuron class "%s" does not exist') % (classId)
neuron.neurotransmitters = []
for ntName in ['Neurotransmitter', 'neurotransmitter']:
for ntElement in xmlElement.findall(ntName):
ntId = ntElement.text
if ntId is not None:
nt = neuroptikon.library.neurotransmitter(ntId)
if nt is None:
raise ValueError, gettext('Neurotransmitter "%s" does not exist') % (ntId)
else:
neuron.neurotransmitters.append(nt)
neuron.activation = xmlElement.findtext('Activation')
if neuron.activation is None:
neuron.activation = xmlElement.findtext('activation')
neuron._functions = set()
for functionName in ['Function', 'function']:
for functionElement in xmlElement.findall(functionName):
if functionElement.text in Neuron.Functions:
neuron.setHasFunction(functionElement.text, True)
neuron.polarity = xmlElement.findtext('Polarity')
if neuron.polarity is None:
neuron.polarity = xmlElement.findtext('polarity')
regionId = xmlElement.get('somaRegionId')
neuron.region = network.objectWithId(regionId)
if regionId is not None and neuron.region is None:
raise ValueError, gettext('Region with id "%s" does not exist') % (regionId)
if neuron.region is not None:
neuron.region.neurons.append(neuron)
neuron._synapses = []
neuron._neurites = []
for neuriteElement in xmlElement.findall('Neurite'):
neurite = Neurite._fromXMLElement(network, neuriteElement)
if neurite is None:
raise ValueError, gettext('Could not create neurite')
neurite.root = neuron
neuron._neurites.append(neurite)
network.addObject(neurite)
return neuron
def _toXMLElement(self, parentElement):
#TODO need to add links and images when I get this working
neuronElement = NeuroObject._toXMLElement(self, parentElement)
if self.neuronClass is not None:
ElementTree.SubElement(neuronElement, 'Class').text = self.neuronClass.identifier
for neurotransmitter in self.neurotransmitters:
ElementTree.SubElement(neuronElement, 'Neurotransmitter').text = neurotransmitter.identifier
if self.activation is not None:
ElementTree.SubElement(neuronElement, 'Activation').text = self.activation
for function in self._functions:
ElementTree.SubElement(neuronElement, 'Function').text = function
if self.polarity is not None:
ElementTree.SubElement(neuronElement, 'Polarity').text = self.polarity
if self.region is not None:
ElementTree.SubElement(neuronElement, 'SomaRegionId').text = str(self.region.networkId)
for neurite in self._neurites:
neurite._toXMLElement(neuronElement)
return neuronElement
def _needsScriptRef(self):
return len(self._neurites) > 0 or NeuroObject._needsScriptRef(self)
def _creationScriptParams(self, scriptRefs):
args, keywords = NeuroObject._creationScriptParams(self, scriptRefs)
if self.neuronClass is not None:
keywords['neuronClass'] = 'library.neuronClass(\'' + self.neuronClass.identifier + '\')'
if len(self.neurotransmitters) > 0:
ntCalls = []
for neurotransmitter in self.neurotransmitters:
ntCalls.append('library.neurotransmitter(\'' + neurotransmitter.identifier + '\')')
keywords['neurotransmitters'] = '[' + ', '.join(ntCalls) + ']'
if self.activation is not None:
keywords['activation'] = '\'' + self.activation + '\'' # TODO: this should be 'NeuralActivation.' + self.activation
if len(self._functions) > 0:
keywords['functions'] = '[Neuron.Function.' + ', Neuron.Function.'.join(self._functions) + ']'
if self.polarity is not None:
keywords['polarity'] = 'Neuron.Polarity.' + self.polarity
if self.region is not None:
keywords['region'] = scriptRefs[self.region.networkId]
return (args, keywords)
def _creationScriptChildren(self):
return NeuroObject._creationScriptChildren(self) + self._neurites
def createNeurite(self, *args, **keywords):
"""
DEPRECATED: Please use :meth:`extendNeurite() <Network.Neuron.Neuron.extendNeurite>` instead.
"""
return self.extendNeurite(*args, **keywords)
def extendNeurite(self, *args, **keywords):
"""
Create and return a :class:`neurite <Network.Neurite.Neurite>` object that extends from the soma of this neuron.
"""
neurite = Neurite(self.network, self, *args, **keywords)
self._neurites.append(neurite)
self.network.addObject(neurite)
return neurite
def neurites(self, recurse = True):
"""
Return a list of all :class:`neurites <Network.Neurite.Neurite>` extending from this neuron.
If recurse is True then all subsequently extending neurites will be included with the neurites that extend from the soma.
If no neurites extend from the soma of this neuron then an empty list will be returned.
"""
neurites = list(self._neurites)
if recurse:
for neurite in self._neurites:
neurites += neurite.neurites()
return neurites
def arborize(self, region, sendsOutput = True, receivesInput = True, *args, **keywordArgs):
"""
Convenience method for creating a :class:`neurite <Network.Neurite.Neurite>` and having it :class:`arborize <Network.Neurite.Neurite.arborize>` a :class:`region <Network.Region.Region>`.
Returns the arborization object that is created.
"""
return self.extendNeurite().arborize(region, sendsOutput, receivesInput, *args, **keywordArgs)
def arborizations(self):
"""
Return a list of all :class:`arborizations <Network.Arborization.Arborization>` extending from this neuron.
If this neuron does not arborize any regions then an empty list will be returned.
"""
arborizations = []
for neurite in self._neurites:
arborizations += neurite.arborizations()
return arborizations
def synapseOn(self, otherObject, *args, **keywordArgs):
"""
Convenience method that creates a :class:`neurite <Network.Neurite.Neurite>` for this neuron and then creates a :class:`synapse <Network.Synapse.Synapse>` with the other object.
Returns the synapse object that is created.
"""
neurite = self.extendNeurite()
return neurite.synapseOn(otherObject, activation = self.activation, *args, **keywordArgs)
def synapses(self, includePre = True, includePost = True):
"""
Return a list of all :class:`synapses <Network.Synapse.Synapse>` in which the :class:`neurites <Network.Neurite.Neurite>` of this neuron are pre- or post-synaptic.
If includePre is False then synapses where this neuron is pre-synaptic will be excluded. If includePost is False then synapses where this neuron is post-synaptic will be excluded.
If this neuron does not form a synapse with any other neurons then an empty list will be returned.
"""
synapses = []
if includePost:
synapses += self._synapses
for neurite in self._neurites:
synapses += neurite.synapses(includePre = includePre, includePost = includePost)
return synapses
def gapJunctionWith(self, otherObject, *args, **keywordArgs):
"""
Convenience method that creates a :class:`neurite <Network.Neurite.Neurite>` for this neuron and then creates a :class:`gap junction <Network.GapJunction.GapJunction>` with the other object.
Returns the gap junction object that is created.
"""
neurite = self.extendNeurite()
return neurite.gapJunctionWith(otherObject, *args, **keywordArgs)
def gapJunctions(self):
"""
Return a list of all :class:`gap junctions <Network.GapJunction.GapJunction>` in which the :class:`neurites <Network.Neurite.Neurite>` of this neuron are involved.
If this neuron does not form a gap junction with any other neurons then an empty list will be returned.
"""
junctions = []
for neurite in self._neurites:
junctions += neurite.gapJunctions()
return junctions
def innervate(self, muscle, *args, **keywordArgs):
"""
Convenience method that creates a :class:`neurite <Network.Neurite.Neurite>` and has it innervate the :class:`muscle <Network.Muscle.Muscle>`.
Returns the :class:`innervation <Network.Innervation.Innervation>` object that is created.
"""
neurite = self.extendNeurite()
return neurite.innervate(muscle, *args, **keywordArgs)
def innervations(self):
"""
Return a list of all :class:`innervations <Network.Innervation.Innervation>` involving this neuron's :class:`neurites <Network.Neurite.Neurite>`.
If this neuron does not innervate any :class:`muscles <Network.Muscle.Muscle>` then an empty list will be returned.
"""
innervations = []
for neurite in self._neurites:
innervations += neurite.innervations()
return innervations
def connections(self, recurse = True):
"""
Return a list of all objects that connect to this neuron and optionally any extending :class:`neurites <Network.Neurite.Neurite>`.
The list may contain any number of :class:`arborizations <Network.Arborization.Arborization>`, :class:`gap junctions <Network.GapJunction.GapJunction>`, :class:`innervations <Network.Innervation.Innervation>`, :class:`stimuli <Network.Stimulus.Stimulus>` or :class:`synapses <Network.Synapse.Synapse>`.
"""
return NeuroObject.connections(self, recurse) + self._synapses
def inputs(self, recurse = True):
"""
Return a list of all objects that send information into this neuron and optionally any extending :class:`neurites <Network.Neurite.Neurite>`.
The list may contain any number of :class:`arborizations <Network.Arborization.Arborization>`, :class:`gap junctions <Network.GapJunction.GapJunction>`, :class:`stimuli <Network.Stimulus.Stimulus>` or :class:`synapses <Network.Synapse.Synapse>`.
"""
return NeuroObject.inputs(self, recurse) + self._synapses
def outputs(self, recurse = True):
"""
Return a list of all objects that receive information from this neuron and optionally any extending :class:`neurites <Network.Neurite.Neurite>`.
The list may contain any number of :class:`arborizations <Network.Arborization.Arborization>`, :class:`gap junctions <Network.GapJunction.GapJunction>`, :class:`innervations <Network.Innervation.Innervation>` or :class:`synapses <Network.Synapse.Synapse>`.
"""
return NeuroObject.outputs(self, recurse)
def childObjects(self):
return list(self._neurites)
def dependentObjects(self):
return NeuroObject.dependentObjects(self) + self._synapses + self.neurites()
def disconnectFromNetwork(self):
if self.region:
self.region.neurons.remove(self)
def setHasFunction(self, function, hasFunction):
"""
Set whether or not this neuron has the indicated function.
>>> neuron1.setHasFunction(Neuron.Function.SENSORY, True)
The function argument should be one of the attributes of Neuron.Function.
The hasFunction argument should indicate whether or not this neuron has the indicated function.
"""
if hasFunction and function not in self._functions:
self._functions.add(function)
dispatcher.send(('set', 'functions'), self)
elif not hasFunction and function in self._functions:
self._functions.remove(function)
dispatcher.send(('set', 'functions'), self)
def hasFunction(self, function):
"""
Return whether or not this neuron has the indicated function.
>>> # Show all sensory neurons in red.
>>> if neuron.hasFunction(Neuron.Function.SENSORY):
... display.setVisibleColor(neuron, (1.0, 0.0, 0.0))
The function argument should be one of the attributes of Neuron.Function.
"""
return function in self._functions
def searchPost(self, preRegion = None, postRegion = None, postNeuron = None, activation = None, neurotransmitter = None):
"""
Searches for post-synaptic sites
"""
# limit synapse to where neuron is presynaptic
synapses = [connection for connection in self.synapses() if connection.preSynapticNeuron() == self]
synapses = self._filterSynapsesForSearch(synapses, preRegion = preRegion, postRegion = postRegion, activation = activation)
# get post synaptic neurons
neurons = []
for synapse in synapses:
neurons.extend(synapse.postSynapticNeurons())
neurons = self._filterNeuronsForSearch(neurons, neurotransmitter=neurotransmitter, name=postNeuron)
return neurons
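# Editorial sketch (not part of the original source; the identifiers below are
# assumed): a typical query asks which neurons this cell synapses onto,
# optionally filtered by post-synaptic region, partner name or transmitter:
#     targets = neuron.searchPost(postRegion = 'antennal lobe',
#                                 neurotransmitter = library.neurotransmitter('GABA'))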
def searchPre(self, preRegion = None, postRegion = None, preNeuron = None, activation = None, neurotransmitter = None):
"""
Searches for pre-synaptic sites
"""
# limit synapse to where neuron is postsynaptic
synapses = [connection for connection in self.synapses() if self in connection.postSynapticNeurons()]
synapses = self._filterSynapsesForSearch(synapses, preRegion = preRegion, postRegion = postRegion, activation = activation)
# get pre synaptic neurons
neurons = []
for synapse in synapses:
neurons.append(synapse.preSynapticNeuron())
neurons = self._filterNeuronsForSearch(neurons, neurotransmitter = neurotransmitter, name = preNeuron)
return neurons
def _filterNeuronsForSearch(self, neurons, neurotransmitter = None, name = None):
from re import search
if neurotransmitter:
neurons = [neuron for neuron in neurons if neurotransmitter in neuron.neurotransmitters]
if name:
neurons = [neuron for neuron in neurons if search(name, neuron.name)]
return neurons
def _filterSynapsesForSearch(self, synapses, preRegion = None, postRegion = None, activation = None):
from re import search
if preRegion:
synapses = [synapse for synapse in synapses if synapse.preSynapticRegion and search(preRegion, synapse.preSynapticRegion.name)]
if postRegion:
synapses = [synapse for synapse in synapses if synapse.postSynapticRegion and search(postRegion, synapse.postSynapticRegion.name)]
if activation:
synapses = [synapse for synapse in synapses if synapse.activation and synapse.activation == activation]
return synapses
@classmethod
def _defaultVisualizationParams(cls):
params = NeuroObject._defaultVisualizationParams()
params['shape'] = 'Ball'
params['size'] = (.01, .01, .01)
params['sizeIsAbsolute'] = True
return params
def defaultVisualizationParams(self):
params = self.__class__._defaultVisualizationParams()
if self.region:
params['parent'] = self.region
return params
| JaneliaSciComp/Neuroptikon | Source/network/neuron.py | Python | bsd-3-clause | 22,759 | ["NEURON"] | 2dfd678c57edcf56b44427357566c2099584c9e2814cedcd6e4a66edaec66ee1 |
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import os,unittest,numpy as np
from pyscf.nao import tddft_iter
from pyscf import gto, dft, scf, tddft
from pyscf.data.nist import HARTREE2EV
class KnowValues(unittest.TestCase):
def test_144_h2b_uks_pz_pb(self):
""" This """
mol = gto.M(verbose=1,atom='B 0 0 0; H 0 0.489 1.074; H 0 0.489 -1.074',basis='cc-pvdz',spin=3)
gto_mf = dft.UKS(mol)
gto_mf.kernel()
vhf = gto_mf.get_veff(mol=mol); #print(__name__, 'exc gto', vhf.exc)
nao_mf = tddft_iter(gto=mol, mf=gto_mf, tol_loc=1e-5, tol_biloc=1e-7)
#print(__name__, 'exc nao', nao_mf.exc())
comega = np.arange(0.0, 2.0, 0.01) + 1j*0.03
polave = -nao_mf.polariz_inter_ave(comega, verbosity=0).imag
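# editorial note (not part of the original test): polariz_inter_ave is taken to
# return the orientation-averaged interacting dipole polarizability on the
# complex frequency grid comega; its negative imaginary part (computed above)
# is proportional to the absorption spectrum that is compared with the stored
# reference curve below.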
data = np.array([comega.real*HARTREE2EV, polave])
np.savetxt('test_144_h2b_uks_pz_pb.txt', data.T, fmt=['%f','%f'])
data_ref = np.loadtxt('test_144_h2b_uks_pz_pb.txt-ref').T
self.assertTrue(np.allclose(data_ref, data, atol=1e-6, rtol=1e-3))
if __name__ == "__main__": unittest.main()
| gkc1000/pyscf | pyscf/nao/test/test_0144_h2b_uks_pz_pb.py | Python | apache-2.0 | 1,654 | ["PySCF"] | ae4479de2f8504579e7181602189af6cfa791621296836b69455848e87194f87 |
from setuptools import find_packages, setup
LONG_DESCRIPTION = ''
try:
with open('./README.md', 'r') as f:
LONG_DESCRIPTION = f.read()
except FileNotFoundError:
pass
setup(
name='raspapreco',
description='Pesquisador de preços',
long_description=LONG_DESCRIPTION,
version='0.0.1',
url='https://github.com/IvanBrasilico/raspa-preco',
license='MIT',
author='Ivan Brasilico',
author_email='brasilico.ivan@gmail.com',
packages=find_packages(),
install_requires=[
'bs4',
'celery',
'certifi',
'chardet',
'flask',
'flask_cors',
'Flask-JWT',
'flask_restless',
'flask_sqlalchemy',
'gunicorn',
'idna',
'json-tricks',
'requests',
'SQLAlchemy',
'urllib3'
],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
test_suite="tests",
package_data={
'raspapreco': ['locale/*'],
},
extras_require={
'dev': [
'coverage',
'flake8',
'isort',
'pytest',
'pytest-cov',
'pytest-mock',
'requests-mock',
'Sphinx',
'testfixtures',
],
},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: User Interfaces',
'Topic :: Utilities',
'Programming Language :: Python :: 3.5',
],
)
| IvanBrasilico/raspa-preco | setup.py | Python | gpl-3.0 | 1,739 | ["RASPA"] | ae3ac3f6b6ac1737bd669ef1034e792a998515969400472087ab0621f2dffea0 |
#!/usr/bin/env python
'''
ViSAPy example script for generating a benchmark corresponding to an in vivo
tetrode recording
'''
#import modules
import uuid
import urllib2
import zipfile
import numpy as np
import h5py
import os
import glob
#workaround for plots on cluster
if not os.environ.has_key('DISPLAY'):
import matplotlib
matplotlib.use('Agg')
from scipy.signal import filtfilt, butter, lfilter
from time import asctime, time
import ViSAPy
import neuron
from mpi4py import MPI
#tic - toc
tic = time()
######## set random number generator seed ######################################
SEED = 1234567
POPULATIONSEED = SEED
np.random.seed(SEED)
################# Initialization of MPI stuff ##################################
COMM = MPI.COMM_WORLD
SIZE = COMM.Get_size()
RANK = COMM.Get_rank()
######## create unique output folder and copy simulation script ################
if RANK == 0:
#savefolder = glob.glob('savedata_polytrode*')[-1]
string = asctime().split()
savefolder = os.path.join(os.path.split(__file__)[0], 'savedata_polytrode_')
for s in string:
for ss in s.split(':'):
savefolder += ss + '_'
savefolder += uuid.uuid4().hex
os.mkdir(savefolder)
os.system("cp %s '%s'" % (__file__, savefolder + '/.'))
else:
savefolder = None
savefolder = COMM.bcast(savefolder, root=0)
######### Fetch Hay et al. 2011 model files, unzip locally #####################
if not os.path.isfile('L5bPCmodelsEH/morphologies/cell1.asc'):
if RANK == 0:
#get the model files:
u = urllib2.urlopen('http://senselab.med.yale.edu/ModelDB/' +
'eavBinDown.asp?o=139653&a=23&mime=application/zip')
localFile = open('L5bPCmodelsEH.zip', 'w')
localFile.write(u.read())
localFile.close()
#unzip:
myzip = zipfile.ZipFile('L5bPCmodelsEH.zip', 'r')
myzip.extractall('.')
myzip.close()
#compile NMODL language files
os.system('''
cd L5bPCmodelsEH/mod/
nrnivmodl
cd -
''')
COMM.Barrier()
##### load NEURON mechanisms from Hay et al. 2011 ##############################
neuron.load_mechanisms("L5bPCmodelsEH/mod")
################################################################################
# PARAMETERS
################################################################################
#set up base parameter file for the LFPy.Cell or LFPy.TemplateCell class,
#without specifying cell model.
cellParameters = {
'v_init' : -80,
'passive' : False,
'nsegs_method' : None,
'timeres_NEURON' : 2**-5,
'timeres_python' : 2**-5,
'tstartms' : 0.,
'tstopms' : 120500, #2 minutes without 500 ms startup transient
'verbose' : False,
'pt3d' : False,
}
#in this particular set up, each cell will be a random permutation of each
#morphology and templatefile specification of Hay et al 2011.
morphologies = [
'L5bPCmodelsEH/morphologies/cell1.asc',
'L5bPCmodelsEH/morphologies/cell2.asc',
]
templatefiles = [
['L5bPCmodelsEH/models/L5PCbiophys2.hoc',
'L5bPCmodelsEH/models/L5PCtemplate.hoc'],
['L5bPCmodelsEH/models/L5PCbiophys3.hoc',
'L5bPCmodelsEH/models/L5PCtemplate.hoc'],
['L5bPCmodelsEH/models/L5PCbiophys3.hoc',
'L5bPCmodelsEH/models/L5PCtemplate.hoc'],
]
#pointer to template specification name, cf. Linden et al. 2014
cellParameters.update(dict(templatename = 'L5PCtemplate'))
# set the default rotation of the cells
defaultrotation = {}
#LFPy can simulate directly to file, but for performance reasons, this
#feature should be avoided
simulationParameters = {
#'to_file' : True, #file_name set in cellsim()
}
#parameters for the signal-generating model population.
populationParameters = {
'POPULATION_SIZE' : 16,
'radius' : 50,
'killzone' : 25,
'z_min' : 0,
'z_max' : 750,
'X' : np.array([ [0, 0, 0, -40, -40, 0, 0],
[0, 0, 0, 40, 40, 0, 0]]),
'Y' : np.array([ [0, 0, -50, -50, -50, 0, 0],
[0, 0, 0, 0, 0, 0, 0]]),
'Z' : np.array([-np.inf, -50.01, -50, 0, 2000, 2000.01, np.inf]),
'min_cell_interdist' : 25.,
}
#Recording electrode emulating a 16-contact NeuroNexus laminar polytrode array,
#first contact superficial
x, y, z = -np.mgrid[0:1, 0:1, -15:1] * 50
N = np.empty((x.size, 3))
for i in xrange(N.shape[0]): N[i,] = [1, 0, 0]
#dictionary passed to class LFPy.RecExtElectrode
electrodeParameters = {
'x' : x.flatten(),
'y' : y.flatten(),
'z' : z.flatten(),
'sigma' : 0.3, #extracellular conductivity
'N' : N, #electrode contact surface normal
'r' : 7.5, #contact radius
'n' : 100, #number of points averaged over on contact
'method' : 'som_as_point' #soma segment assumed spherical, dendrites lines
}
##one-dimensional drift; here, shifting electrode z by +5 um every 1000 ms
#driftParameters = {
# 'driftInterval' : 1000,
# 'driftShift' : 5,
# 'driftOnset' : 0,
#}
##but we're not using that feature here
driftParameters = None
#synaptic parameters: AMPA - excitatory, GABA_A - inhibitory
synparams_AMPA = { #Excitatory synapse parameters
'e' : 0, #reversal potential
'syntype' : 'Exp2Syn', #conductance based two-exponential synapse
'tau1' : 1., #Time constant, rise
'tau2' : 3., #Time constant, decay
'weight' : 0.0125, #Synaptic weight
'section' : ['apic', 'dend'],
'nPerArea' : [45E-3, 45E-4], #mean +- std
}
synparams_GABA_A = { #Inhibitory synapse parameters
'e' : -80,
'syntype' : 'Exp2Syn',
'tau1' : 1.,
'tau2' : 12.,
'weight' : 0.025,
'section' : ['soma', 'apic', 'dend'],
'nPerArea' : [20E-3, 20E-4],
}
#parameters for ViSAPy.*Network instance
networkParameters = {
#class Network
'simtime' : cellParameters['tstopms']-cellParameters['tstartms'],
'dt' : cellParameters['timeres_NEURON'],
'total_num_virtual_procs' : SIZE,
'savefolder' : savefolder,
'label' : 'spikes',
'to_file' : True,
'to_memory' : False,
'print_time' : False,
#class RingNetwork
'N' : 12500,
'theta' : 20.,
'tauMem' : 20.,
'delay' : 2.,
'J_ex' : 0.05,
'g' : 5.0,
'eta' : 0.9,
}
#the class ExternalNoiseRingNetwork we use below needs a few extra arguments
ExternalNoiseRingNetworkParameters = {
'tstopms' : cellParameters['tstopms'],
'invertnoise_ex' : True,
'invertnoise_in' : False,
'rate' : 40,
'projection': ['exc', 'inh'],
'weight' : 0.5,
}
#nyquist frequency of simulation output
nyquist = 1000. / cellParameters['timeres_python'] / 2
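#with timeres_python = 2**-5 ms this evaluates to 1000 / 2**-5 / 2 = 16000 Hz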
#Parameters for class ViSAPy.LogBumpFilterBank that sets up
#series of cosine log-bump filters:
logBumpParameters = dict(
n = 16,
taps = 401,
alpha = 0.01,
nyquist=nyquist,
)
#set up filtering steps of extracellular potentials
filters = []
#presample filter to avoid aliasing
b, a = butter(1, np.array([0.5, 8000.]) / nyquist, btype='pass')
filters.append({
'b' : b,
'a' : a,
'filterFun' : lfilter
})
b, a = butter(4, np.array([300., 5000.]) / nyquist, btype='pass')
filters.append({
'b' : b,
'a' : a,
'filterFun' : filtfilt
})
#note, filterFun should be either scipy.signal.lfilter or filtfilt
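#illustratively, each filter stage would be applied to the data along the
#lines of (the actual call is made internally by ViSAPy):
# for filt in filters:
#     data = filt['filterFun'](filt['b'], filt['a'], data)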
#download experimental data for use in generation of noise
fname = os.path.join('data', '08_2012101909.bin_raw_cleaned.h5')
if RANK == 0:
if not os.path.isdir('data'):
os.mkdir('data')
if not os.path.isfile(fname):
u = urllib2.urlopen('https://www.dropbox.com/s/sttueav1fs18esz/' +
'08_2012101909.bin_raw_cleaned.h5?dl=1')
        f = open(fname, 'wb')
f.write(u.read())
f.close()
COMM.Barrier()
#Noise parameters including noise covariance matrix
noiseParameters = None
#parameters for extracting noise covariances from the experimental tetrode recording
noiseFeaturesParameters = dict(logBumpParameters)
noiseFeaturesParameters.update({
'fname' : fname,
'outputfile' : os.path.join(savefolder, 'ViSAPy_noise.h5'),
'T' : 15000.,
'srate_in' : 48000.,
'srate_out' : 2 * nyquist,
'NFFT' : 2**16,
'psdmethod': 'mlab',
'remove_spikes' : True,
#parameters passed to class SpikeACut, only used if remove_spikes == True
'remove_spikes_args' : {
'TEMPLATELEN' : 32, #1 ms
'TEMPLATEOFFS' : 0.5,
'threshold' : 5, #standard deviations
'data_filter' : {
'filter_design' : butter,
'filter_design_args' : {
'N' : 2,
'Wn' : np.array([300., 5000.]) / nyquist,
'btype' : 'pass',
},
'filter' : filtfilt
},
},
})
#container file for noise output etc.
noise_output_file = os.path.join(savefolder, 'ViSAPy_noise.h5')
setup_time = time()-tic
################################################################################
## MAIN SIMULATION PROCEDURE
################################################################################
tic = time()
################################################################################
## Step 1: Estimate PSD and covariance between channels, here using
## an experimental dataset.
##
## In the present ViSAPy, only a single RANK should be used for this and
## subsequent steps. We also skip regenerating noise and spike events if they
## already exist, as this can take some time for long simulation durations
##
if RANK == 0:
if not os.path.isfile(noise_output_file):
noise_features = ViSAPy.NoiseFeatures(**noiseFeaturesParameters)
################################################################################
## Step 2: Generate synthetic noise with the PSD and channel covariance
##         extracted by class NoiseFeatures, preserving the overall amplitude.
## We choose to save directly to file, as it will be used in
## later steps
##
noise_generator = ViSAPy.CorrelatedNoise(psd=noise_features.psd,
C=noise_features.C,
amplitude_scaling=1.,
**logBumpParameters)
#file object containing extracellular noise and related data
f = h5py.File(noise_output_file)
f['data'] = noise_generator.correlated_noise(T = cellParameters['tstopms'])
################################################################################
## Step 3: Create a rate expectation envelope lambda_t for generating
## non-stationary Poisson spike trains
##
#band-pass filter mean noise before non-stat Poisson generation
b, a = butter(N=2, Wn=np.array([1., 25.]) / nyquist, btype='pass')
#compute lambda function, use signal averaged over space. It will be
#normalized by the ViSAPy.NonStationaryPoisson instance later.
lambda_t = filtfilt(b, a, f['data'].value.mean(axis=0))
f['lambda_t'] = lambda_t
f.close()
else:
        #file exists, so load the previously computed rate envelope (non-stationarity)
f = h5py.File(noise_output_file)
lambda_t = f['lambda_t'].value
f.close()
else:
lambda_t = None
#communicate rate envelope across ranks
lambda_t = COMM.bcast(lambda_t, root=0)
noise_time = time() - tic
################################################################################
## Step 4: Run network simulation, generating a pool of synaptic activation
## times used by the ViSAPy.Testdata instance
##
tic = time()
#if database files exist, skip regenerating spike events
if not os.path.isfile(os.path.join(savefolder, 'SpTimesEx.db')) \
and not os.path.isfile(os.path.join(savefolder, 'SpTimesIn.db')):
#create an instance of our network
networkInstance = ViSAPy.ExternalNoiseRingNetwork(lambda_t=lambda_t,
**{k : v for k, v in networkParameters.items() +
ExternalNoiseRingNetworkParameters.items()})
networkInstance.run()
networkInstance.get_results()
networkInstance.process_gdf_files()
else:
#create instance of parent RingNetwork
networkInstance = ViSAPy.RingNetwork(**networkParameters)
network_time = time() - tic
################################################################################
## Step 5: Fix seed and set up Testdata object, generating a model cell
## population, find and distribute synapse inputs with spiketrains from
## network, run simulations for extracellular potentials,
## collect data and generate final benchmark data
##
tic = time()
#set some seeds AFTER the network simulation; noise and spiking may differ
#between runs, but the cell populations should be identical. We are not
#explicitly setting the seed for NEST.
np.random.seed(POPULATIONSEED)
#Create BenchmarkData object
benchmark_data = ViSAPy.BenchmarkDataRing(
cellParameters = cellParameters,
morphologies = morphologies,
templatefiles = templatefiles,
defaultrotation = defaultrotation,
simulationParameters = simulationParameters,
populationParameters = populationParameters,
electrodeParameters = electrodeParameters,
noiseFile = noise_output_file,
filters = filters,
savefolder = savefolder,
default_h5_file = 'lfp_cell_%.3i.h5',
nPCA = 2,
TEMPLATELEN = 100,
TEMPLATEOFFS = 0.3,
spikethreshold = 3.,
networkInstance = networkInstance,
synapseParametersEx = synparams_AMPA,
synapseParametersIn = synparams_GABA_A,
driftParameters = driftParameters)
print 'setup ok!'
#run simulations and gather results
benchmark_data.run()
benchmark_data.collect_data()
bench_time = time() - tic
################################################################################
## Step 6: Plot simulation output to the default simulation output folder
tic = time()
#utilize plot methods provided by class plotBenchmarkData. We are
#removing a startup transient of 500 ms.
myplot = ViSAPy.plotBenchmarkData(benchmark_data, TRANSIENT=500.)
myplot.run()
plot_time = time() - tic
################################################################################
## print out some stats.
if RANK == 0:
print '\nsimulation times:\n'
print 'setup: \t\t%.1f seconds' % setup_time
print 'noise: \t\t%.1f seconds' % noise_time
print 'network: \t%.1f seconds' % network_time
print 'benchmark: \t%.1f seconds' % bench_time
print 'plots: \t\t%.1f seconds\n' % plot_time
| espenhgn/ViSAPy | examples/example_in_vivo_polytrode.py | Python | gpl-2.0 | 14,774 | [
"NEURON"
] | 3cec00b3f2f4bdea3fe47ee191dd11c6c3d8ca4e3bbba6479abf5c371e54c5bf |
""" Test for Profiler.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from os.path import dirname, join
from subprocess import Popen
import pytest
from flaky import flaky
import DIRAC
from DIRAC.Core.Utilities.Profiler import Profiler
# Mark this entire module as slow
pytestmark = pytest.mark.slow
def test_base():
p = Profiler()
res = p.pid()
assert res['OK'] is False
res = p.status()
assert res['OK'] is False
mainProcess = Popen([
'python',
join(dirname(DIRAC.__file__), 'tests/Utilities/ProcessesCreator_withChildren.py'),
])
time.sleep(1)
p = Profiler(mainProcess.pid)
res = p.pid()
assert res['OK'] is True
res = p.status()
assert res['OK'] is True
res = p.runningTime()
assert res['OK'] is True
assert res['Value'] > 0
res = p.memoryUsage()
assert res['OK'] is True
assert res['Value'] > 0
resWC = p.memoryUsage(withChildren=True)
assert resWC['OK'] is True
assert resWC['Value'] > 0
assert resWC['Value'] >= res['Value']
res = p.vSizeUsage()
assert res['OK'] is True
assert res['Value'] > 0
resWC = p.vSizeUsage(withChildren=True)
assert resWC['OK'] is True
assert resWC['Value'] > 0
assert resWC['Value'] >= res['Value']
res = p.vSizeUsage()
assert res['OK'] is True
assert res['Value'] > 0
resWC = p.vSizeUsage(withChildren=True)
assert resWC['OK'] is True
assert resWC['Value'] > 0
assert resWC['Value'] >= res['Value']
res = p.numThreads()
assert res['OK'] is True
assert res['Value'] > 0
resWC = p.numThreads(withChildren=True)
assert resWC['OK'] is True
assert resWC['Value'] > 0
assert resWC['Value'] >= res['Value']
res = p.cpuPercentage()
assert res['OK'] is True
assert res['Value'] >= 0
resWC = p.cpuPercentage(withChildren=True)
assert resWC['OK'] is True
assert resWC['Value'] >= 0
assert resWC['Value'] >= res['Value']
@flaky(max_runs=10, min_passes=2)
def test_cpuUsage():
mainProcess = Popen([
'python',
join(dirname(DIRAC.__file__), 'tests/Utilities/ProcessesCreator_withChildren.py'),
])
time.sleep(2)
p = Profiler(mainProcess.pid)
res = p.pid()
assert res['OK'] is True
res = p.status()
assert res['OK'] is True
# user
res = p.cpuUsageUser()
assert res['OK'] is True
assert res['Value'] > 0
resC = p.cpuUsageUser(withChildren=True)
assert resC['OK'] is True
assert resC['Value'] > 0
assert resC['Value'] >= res['Value']
res = p.cpuUsageUser()
assert res['OK'] is True
assert res['Value'] > 0
resC = p.cpuUsageUser(withChildren=True)
assert resC['OK'] is True
assert resC['Value'] > 0
assert resC['Value'] >= res['Value']
resT = p.cpuUsageUser(withTerminatedChildren=True)
assert resT['OK'] is True
assert resT['Value'] > 0
assert resT['Value'] >= res['Value']
resTC = p.cpuUsageUser(withChildren=True, withTerminatedChildren=True)
assert resTC['OK'] is True
assert resTC['Value'] > 0
assert resTC['Value'] >= res['Value']
# system
res = p.cpuUsageSystem()
assert res['OK'] is True
assert res['Value'] >= 0
resWC = p.cpuUsageSystem(withChildren=True)
assert resWC['OK'] is True
assert resWC['Value'] >= 0
assert resWC['Value'] >= res['Value']
res = p.cpuUsageSystem()
assert res['OK'] is True
assert res['Value'] > 0
resC = p.cpuUsageSystem(withChildren=True)
assert resC['OK'] is True
assert resC['Value'] > 0
assert resC['Value'] >= res['Value']
resT = p.cpuUsageSystem(withTerminatedChildren=True)
assert resT['OK'] is True
assert resT['Value'] > 0
assert resT['Value'] >= res['Value']
resTC = p.cpuUsageSystem(withChildren=True, withTerminatedChildren=True)
assert resTC['OK'] is True
assert resTC['Value'] > 0
assert resTC['Value'] >= res['Value']
  # After this the main process will no longer exist
mainProcess.wait()
res = p.cpuUsageUser()
assert res['OK'] is False
assert res['Errno'] == 3
| yujikato/DIRAC | src/DIRAC/Core/Utilities/test/Test_Profiler.py | Python | gpl-3.0 | 3,974 | [
"DIRAC"
] | 43beb6ea9427317a793a76a9f51fca93ab29a1a7084fb49d0aecf657d111fe4c |
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import sys
import gtk
from dialog import Template
from zeroinstall import _
from zeroinstall.gtkui import help_box
from zeroinstall.injector.model import network_levels
from zeroinstall.injector import trust, gpg
from freshness import freshness_levels, Freshness
SHOW_CACHE = 0
class Preferences(object):
def __init__(self, config, notify_cb = None):
if notify_cb is None:
notify_cb = lambda: None
def connect_toggle(widget_name, setting_name):
widget = widgets.get_widget(widget_name)
widget.set_active(getattr(config, setting_name))
def toggle(w, config = config, setting_name = setting_name):
setattr(config, setting_name, w.get_active())
config.save_globals()
notify_cb()
widget.connect('toggled', toggle)
widgets = Template('preferences_box')
self.window = widgets.get_widget('preferences_box')
self.window.connect('destroy', lambda w: self.destroyed())
# (attribute to avoid: free variable 'network' referenced before assignment in enclosing scope)
self.network = widgets.get_widget('network_use')
self.network.set_active(list(network_levels).index(config.network_use))
def set_network_use(combo):
config.network_use = network_levels[self.network.get_active()]
config.save_globals()
notify_cb()
self.network.connect('changed', set_network_use)
# Freshness
times = [x.time for x in freshness_levels]
if config.freshness not in times:
freshness_levels.append(Freshness(config.freshness,
'%d seconds' % config.freshness))
times.append(config.freshness)
freshness = widgets.get_widget('freshness')
freshness_model = freshness.get_model()
for level in freshness_levels:
i = freshness_model.append()
freshness_model.set_value(i, 0, str(level))
freshness.set_active(times.index(config.freshness))
def set_freshness(combo, freshness = freshness): # (pygtk bug?)
config.freshness = freshness_levels[freshness.get_active()].time
config.save_globals()
notify_cb()
freshness.connect('changed', set_freshness)
connect_toggle('help_test', 'help_with_testing')
# Keys
keys_view = widgets.get_widget('trusted_keys')
KeyList(keys_view)
connect_toggle('auto_approve', 'auto_approve_keys')
# Responses
self.window.set_default_response(gtk.RESPONSE_CLOSE)
self.window.get_default_widget().grab_focus()
def response(dialog, resp):
if resp in (gtk.RESPONSE_CLOSE, gtk.RESPONSE_DELETE_EVENT):
self.window.destroy()
elif resp == gtk.RESPONSE_HELP:
gui_help.display()
self.window.connect('response', response)
self.window.set_default_size(-1, gtk.gdk.screen_height() / 3)
def destroyed(self):
global preferences_box
preferences_box = None
class KeyList(object):
def __init__(self, tv):
self.trusted_keys = gtk.TreeStore(str, object)
tv.set_model(self.trusted_keys)
tc = gtk.TreeViewColumn(_('Trusted keys'), gtk.CellRendererText(), text = 0)
tv.append_column(tc)
trust.trust_db.ensure_uptodate()
def update_keys():
# Remember which ones are open
expanded_elements = set()
def add_row(tv, path, unused = None):
if len(path) == 1:
domain = self.trusted_keys[path][0]
expanded_elements.add(domain)
tv.map_expanded_rows(add_row, None)
self.trusted_keys.clear()
domains = {}
keys = gpg.load_keys(list(trust.trust_db.keys.keys()))
for fingerprint in keys:
for domain in trust.trust_db.keys[fingerprint]:
if domain not in domains:
domains[domain] = set()
domains[domain].add(keys[fingerprint])
for domain in sorted(domains):
iter = self.trusted_keys.append(None, [domain, None])
for key in domains[domain]:
self.trusted_keys.append(iter, [key.name, key])
def may_expand(model, path, iter, unused):
if len(path) == 1:
if model[iter][0] in expanded_elements:
tv.expand_row(path, False)
self.trusted_keys.foreach(may_expand, None)
trust.trust_db.watchers.append(update_keys)
tv.connect('destroy', lambda w: trust.trust_db.watchers.remove(update_keys))
update_keys()
def remove_key(fingerprint, domain):
trust.trust_db.untrust_key(fingerprint, domain)
trust.trust_db.notify()
def trusted_keys_button_press(tv, bev):
if bev.type == gtk.gdk.BUTTON_PRESS and bev.button == 3:
pos = tv.get_path_at_pos(int(bev.x), int(bev.y))
if not pos:
return False
path, col, x, y = pos
if len(path) != 2:
return False
key = self.trusted_keys[path][1]
if isinstance(path, tuple):
path = path[:-1] # PyGTK
else:
path.up() # PyGObject
domain = self.trusted_keys[path][0]
global menu # Needed to stop Python 3 GCing the menu and closing it instantly
menu = gtk.Menu()
item = gtk.MenuItem()
item.set_label(_('Remove key for "%s"') % key.get_short_name())
item.connect('activate',
lambda item, fp = key.fingerprint, d = domain: remove_key(fp, d))
item.show()
menu.append(item)
if sys.version_info[0] > 2:
menu.popup(None, None, None, None, bev.button, bev.time)
else:
menu.popup(None, None, None, bev.button, bev.time)
return True
return False
tv.connect('button-press-event', trusted_keys_button_press)
preferences_box = None
def show_preferences(config, notify_cb = None):
global preferences_box
if preferences_box:
preferences_box.window.destroy()
preferences_box = Preferences(config, notify_cb)
preferences_box.window.show()
return preferences_box.window
gui_help = help_box.HelpBox(_("Zero Install Preferences Help"),
(_('Overview'), '\n\n' +
_("""There are three ways to control which implementations are chosen. You can adjust the \
network policy and the overall stability policy, which affect all interfaces, or you \
can edit the policy of individual interfaces.""")),
(_('Network use'), '\n' +
_("""The 'Network use' option controls how the injector uses the network. If off-line, \
the network is not used at all. If 'Minimal' is selected then the injector will use \
the network if needed, but only if it has no choice. It will run an out-of-date \
version rather than download a newer one. If 'Full' is selected, the injector won't \
worry about how much it downloads, but will always pick the version it thinks is best.""")),
(_('Freshness'), '\n' +
_("""The feed files, which provide the information about which versions are \
available, are also cached. To update them, click on 'Refresh all now'. You can also \
get the injector to check for new versions automatically from time to time using \
the Freshness setting.""")),
(_('Help test new versions'), '\n' +
_("""The overall stability policy can either be to prefer stable versions, or to help test \
new versions. Choose whichever suits you. Since different programmers have different \
ideas of what 'stable' means, you may wish to override this on a per-interface basis.
To set the policy for an interface individually, select it in the main window and \
click on 'Interface Properties'. See that dialog's help text for more information.""")),
(_('Security'), '\n' +
_("""This section lists all keys which you currently trust. When fetching a new program or \
updates for an existing one, the feed must be signed by one of these keys. If not, \
you will be prompted to confirm that you trust the new key, and it will then be added \
to this list.
If "Automatic approval for new feeds" is on, new keys will be automatically approved if \
you haven't used the program before and the key is known to the key information server. \
When updating feeds, confirmation for new keys is always required.
To remove a key, right-click on it and choose 'Remove' from the menu.""")),
)
| rammstein/0install | zeroinstall/0launch-gui/preferences.py | Python | lgpl-2.1 | 7,731 | [
"VisIt"
] | e2868c362908c4cdad7f7d8d9cef8e83f890e89dbf08eb9b47556781990c2d2e |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014, 2015 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import
import json
import os
import requests
from flask import abort, \
Blueprint, \
jsonify, \
make_response, \
render_template, \
request, \
url_for, \
redirect
from flask_breadcrumbs import default_breadcrumb_root, register_breadcrumb
from werkzeug.datastructures import MultiDict
from flask_login import login_required
from invenio.base.decorators import wash_arguments
from invenio.base.globals import cfg
from invenio.base.i18n import _
from invenio.ext.principal import permission_required
from invenio_workflows.models import BibWorkflowObject
from .acl import viewauthorreview
from .forms import AuthorUpdateForm
blueprint = Blueprint(
'inspire_authors',
__name__,
url_prefix='/author',
template_folder='templates',
static_folder="static",
)
default_breadcrumb_root(blueprint, '.')
def convert_for_form(data):
"""Convert jsonalchemy keys to form field names."""
if "name" in data:
data["full_name"] = data["name"].get("value")
try:
data["given_names"] = data["name"].get("value").split(",")[1].strip()
except IndexError:
data["given_names"] = ""
data["family_name"] = data["name"].get("value").split(",")[0].strip()
data["display_name"] = data["name"].get("preferred_name")
data["status"] = data["name"].get("status").lower()
if "url" in data:
data["websites"] = []
for url in data["url"]:
if "description" not in url:
data["websites"].append({"webpage": url["url"]})
else:
if url["description"].lower() == "twitter":
data["twitter_url"] = url["url"]
elif url["description"].lower() == "blog":
data["blog_url"] = url["url"]
elif url["description"].lower() == "linkedin":
data["linkedin_url"] = url["url"]
del data["url"]
if "field_categories" in data:
data["research_field"] = [field["name"].lower() for
field in data["field_categories"]]
if "positions" in data:
data["institution_history"] = []
for position in data["positions"]:
if not any(
[
key in position for key in ('name', 'rank',
'start_year', 'end_year')
]
):
continue
pos = {}
pos["name"] = position.get("institution", {}).get("name")
pos["rank"] = position.get("rank", "")
pos["start_year"] = position.get("start_date", "")
pos["end_year"] = position.get("end_date", "")
pos["current"] = True if position.get("status") else False
data["institution_history"].append(pos)
if position.get("email"):
data["public_email"] = position.get("email")
data["institution_history"].reverse()
if "phd_advisors" in data:
phd_advisors = data["phd_advisors"]
data["advisors"] = []
for advisor in phd_advisors:
adv = {}
adv["name"] = advisor.get("name", "")
adv["degree_type"] = advisor.get("degree_type", "")
data["advisors"].append(adv)
if "ids" in data:
for id in data["ids"]:
try:
if id["type"] == "ORCID":
data["orcid"] = id["value"]
elif id["type"] == "BAI":
data["bai"] = id["value"]
elif id["type"] == "INSPIRE":
data["inspireid"] = id["value"]
except KeyError:
# Protect against cases when there is no value in metadata
pass
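# Illustrative example of convert_for_form (not part of the original module):
# a record containing
#   {"name": {"value": "Doe, John", "preferred_name": "J. Doe", "status": "ACTIVE"}}
# is extended in place with
#   {"full_name": "Doe, John", "given_names": "John", "family_name": "Doe",
#    "display_name": "J. Doe", "status": "active"}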
@blueprint.route('/validate', methods=['POST'])
def validate():
"""Validate form and return validation errors."""
if request.method != 'POST':
abort(400)
data = request.json or MultiDict({})
formdata = MultiDict(data or {})
form = AuthorUpdateForm(formdata=formdata)
form.validate()
result = {}
changed_msgs = dict(
(name, messages) for name, messages in form.messages.items()
if name in formdata.keys()
)
result['messages'] = changed_msgs
return jsonify(result)
@blueprint.route('/update', methods=['GET', 'POST'])
@register_breadcrumb(blueprint, '.update', _('Update author information'))
@login_required
@wash_arguments({'recid': (int, 0)})
def update(recid):
"""View for INSPIRE author update form."""
from dojson.contrib.marc21.utils import create_record
from inspire.dojson.hepnames import hepnames
data = {}
if recid:
try:
url = os.path.join(cfg["AUTHORS_UPDATE_BASE_URL"], "record",
str(recid), "export", "xm")
xml = requests.get(url)
data = hepnames.do(create_record(xml.content.encode("utf-8")))
convert_for_form(data)
except requests.exceptions.RequestException:
pass
data["recid"] = recid
else:
return redirect(url_for("inspire_authors.new"))
form = AuthorUpdateForm(data=data)
ctx = {
"action": url_for('.submitupdate'),
"name": "authorUpdateForm",
"id": "authorUpdateForm",
}
return render_template('authors/forms/update_form.html', form=form, **ctx)
@blueprint.route('/new', methods=['GET', 'POST'])
@register_breadcrumb(blueprint, '.new', _('New author information'))
@login_required
@wash_arguments({'bai': (unicode, u"")})
def new(bai):
"""View for INSPIRE author new form."""
data = {}
if bai:
# Add BAI information to form in order to keep connection between
# a HEPName and an author profile.
data["bai"] = bai
form = AuthorUpdateForm(data=data)
ctx = {
"action": url_for('.submitnew'),
"name": "authorUpdateForm",
"id": "authorUpdateForm",
}
return render_template('authors/forms/new_form.html', form=form, **ctx)
@blueprint.route('/newreview', methods=['GET', 'POST'])
@login_required
@permission_required(viewauthorreview.name)
@wash_arguments({'objectid': (int, 0)})
def newreview(objectid):
"""View for INSPIRE author new form review by a cataloger."""
if not objectid:
abort(400)
workflow_object = BibWorkflowObject.query.get(objectid)
extra_data = workflow_object.get_extra_data()
form = AuthorUpdateForm(data=extra_data["formdata"], is_review=True)
ctx = {
"action": url_for('.reviewaccepted', objectid=objectid),
"name": "authorUpdateForm",
"id": "authorUpdateForm",
"objectid": objectid
}
return render_template('authors/forms/review_form.html', form=form, **ctx)
@blueprint.route('/reviewaccepted', methods=['GET', 'POST'])
@login_required
@permission_required(viewauthorreview.name)
@wash_arguments({'objectid': (int, 0),
'approved': (bool, False),
'ticket': (bool, False)})
def reviewaccepted(objectid, approved, ticket):
"""Form handler when a cataloger accepts a new author update"""
if not objectid:
abort(400)
workflow_object = BibWorkflowObject.query.get(objectid)
extra_data = workflow_object.get_extra_data()
extra_data["approved"] = approved
extra_data["ticket"] = ticket
workflow_object.set_extra_data(extra_data)
workflow_object.save()
workflow_object.continue_workflow(delayed=True)
return render_template('authors/forms/new_review_accepted.html',
approved=approved)
@blueprint.route('/submitupdate', methods=['POST'])
@login_required
def submitupdate():
"""Form action handler for INSPIRE author update form."""
from inspire.modules.forms.utils import DataExporter
from invenio_workflows.models import BibWorkflowObject
from flask.ext.login import current_user
form = AuthorUpdateForm(formdata=request.form)
visitor = DataExporter()
visitor.visit(form)
myobj = BibWorkflowObject.create_object(id_user=current_user.get_id())
myobj.set_data(visitor.data)
# Start workflow. delayed=True will execute the workflow in the
# background using, for example, Celery.
myobj.start_workflow("authorupdate", delayed=True)
ctx = {
"inspire_url": get_inspire_url(visitor.data)
}
return render_template('authors/forms/update_success.html', **ctx)
@blueprint.route('/submitnew', methods=['POST'])
@login_required
def submitnew():
"""Form action handler for INSPIRE author new form."""
from inspire.modules.forms.utils import DataExporter
from invenio_workflows.models import BibWorkflowObject
from flask.ext.login import current_user
form = AuthorUpdateForm(formdata=request.form)
visitor = DataExporter()
visitor.visit(form)
myobj = BibWorkflowObject.create_object(id_user=current_user.get_id())
myobj.set_data(visitor.data)
# Start workflow. delayed=True will execute the workflow in the
# background using, for example, Celery.
myobj.start_workflow("authornew", delayed=True)
ctx = {
"inspire_url": get_inspire_url(visitor.data)
}
return render_template('authors/forms/new_success.html', **ctx)
@blueprint.route(
'/<field_name>/',
methods=['GET', 'POST'])
@login_required
def autocomplete(field_name=None):
"""Auto-complete a form field."""
term = request.args.get('term') # value
limit = request.args.get('limit', 50, type=int)
form = AuthorUpdateForm()
result = form.autocomplete(field_name, term, limit=limit)
result = result if result is not None else []
# jsonify doesn't return lists as top-level items.
resp = make_response(
json.dumps(result, indent=None if request.is_xhr else 2)
)
resp.mimetype = "application/json"
return resp
def get_inspire_url(data):
""" Generate url for the user to go back to INSPIRE. """
url = ""
if "bai" in data and data["bai"]:
url = "http://inspirehep.net/author/profile/" + data["bai"]
elif "recid" in data and data["recid"]:
url = "http://inspirehep.net/record/" + str(data["recid"])
else:
url = "http://inspirehep.net/hepnames"
return url
| Dziolas/inspire-next | inspire/modules/authors/views.py | Python | gpl-2.0 | 11,226 | [
"VisIt"
] | 7fd4a0066b84163c990042491c7a73a052ce2cd4b7c8711a5f9f4750e9953bfe |
from builtins import object
from future.utils import with_metaclass
import abc
class ActionMapping(with_metaclass(abc.ABCMeta, object)):
"""
Action Mapping abc for discrete and continuous actions
"""
def __init__(self, belief_node):
self.owner = belief_node
@abc.abstractmethod
def get_action_node(self, action):
"""
Retrieves the action node (if any) corresponding to the given action.
:param action:
:return action_node:
"""
@abc.abstractmethod
def create_action_node(self, action):
"""
Creates a new action node for the given action.
:param action:
:return action_node:
"""
@abc.abstractmethod
def delete_child(self, action_mapping_entry):
"""
Deletes the child in the given entry, as well as the entire corresponding subtree
:param action_mapping_entry:
:return:
"""
@abc.abstractmethod
def get_child_entries(self):
"""
Returns all entries in this mapping that have a child node associated with them
:return: List of ActionMappingEntry's
"""
@abc.abstractmethod
def get_visited_entries(self):
"""
Returns a vector of all of the visited entries in this mapping
Some of those entries might have null action nodes if the visit counts were initialized
with nonzero values.
:return:
"""
@abc.abstractmethod
def get_entry(self, action):
"""
Returns the mapping entry associated with the given action, or None if there is none.
:param action:
:return:
"""
@abc.abstractmethod
def get_next_action_to_try(self):
"""
Returns the next unvisited action to be tried for this node, or None if there are no
more unvisited actions (that are legal).
:param:
:return action:
"""
class ActionMappingEntry(with_metaclass(abc.ABCMeta, object)):
"""
    An interface for discrete and continuous actions that represents a
    (belief, action) edge in the belief tree.
    There are two core pieces of functionality: a number of getter methods
    returning various properties of this edge, and, more importantly,
    update_visit_count() and update_q_value(), which update the visit count
    and/or Q-value of this edge.
"""
@abc.abstractmethod
def update_visit_count(self, delta_n_visits):
"""
:param delta_n_visits:
:return: visit count
"""
@abc.abstractmethod
def update_q_value(self, delta_total_q):
"""
        :param delta_total_q:
        :return: True iff the Q value changed
"""
@abc.abstractmethod
def set_legal(self, legal):
"""
:param legal: bool
Sets the legality of this action - this determines whether or not it will be taken in the
course of *future* searches.
In and of itself, making this action illegal will not delete previous histories that have
already taken this action. In order to achieve this the associated history entries also
need to be marked for updating via the model-changing interface.
This feature is currently not used
:return:
"""
@abc.abstractmethod
def get_action(self):
"""
Returns the action for this entry.
:return Action:
"""
| pemami4911/POMDPy | pomdpy/pomdp/action_mapping.py | Python | mit | 3,473 | [
"VisIt"
] | e4777f5f4c148d5d35839c0dbd179e90bbdf19d08ee289d5c366963c74854e6f |
#!/usr/bin/env python
"""
Allows to add a specified extension to an already existing DIRAC installation.
The extension can come from another project than the one installed.
No new version directory is created. The command is based on the main DIRAC installer dirac-install.py.
Usage:
dirac-install-extension [options] ...
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import sys
import six
import getopt
import importlib
cmdOpts = (('r:', 'release=', 'Release version to install'),
('l:', 'project=', 'Project to install'),
('e:', 'extensions=', 'Extensions to install (comma separated). Several -e options can be given'),
('M:', 'defaultsURL=', 'Where to retrieve the global defaults from'),
('h', 'help', 'help doc string'))
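# Example invocation (release and extension names below are hypothetical):
#   dirac-install-extension -r v7r0 -l DIRAC -e WebAppDIRAC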
def usage():
""" Usage printout
"""
print(__doc__)
print('Options::\n\n')
for cmdOpt in cmdOpts:
print(" %s %s : %s" % (cmdOpt[0].ljust(3), cmdOpt[1].ljust(20), cmdOpt[2]))
sys.exit(0)
# Import dirac-install.py as a module
installFile = ''
for basePath in ('pro', '.'):
if os.path.exists(os.path.join(basePath, 'DIRAC/Core/scripts/dirac-install.py')):
installFile = os.path.join(basePath, 'DIRAC/Core/scripts/dirac-install.py')
break
if installFile:
sys.path.append(os.path.dirname(installFile))
diracInstall = importlib.import_module("dirac-install")
else:
usage()
sys.exit(-1)
def loadConfiguration():
"""
It loads the configuration file
"""
optList, args = getopt.getopt(sys.argv[1:],
"".join([opt[0] for opt in cmdOpts]),
[opt[1] for opt in cmdOpts])
  # First handle options that must be processed before loading the defaults
for opt, value in optList:
if opt in ('-h', '--help'):
usage()
elif opt in ("-M", "--defaultsURL"):
diracInstall.cliParams.globalDefaults = value
rConfig = diracInstall.ReleaseConfig(
instName=diracInstall.cliParams.installation,
globalDefaultsURL=diracInstall.cliParams.globalDefaults)
if diracInstall.cliParams.debug:
rConfig.debugCB = diracInstall.logDEBUG
res = rConfig.loadInstallationDefaults()
if not res['OK']:
diracInstall.logERROR("Could not load defaults: %s" % res['Message'])
rConfig.loadInstallationLocalDefaults(args)
for opName in ('release', 'globalDefaults', 'useVersionsDir'):
try:
opVal = rConfig.getInstallationConfig(
"LocalInstallation/%s" % (opName[0].upper() + opName[1:]))
except KeyError:
continue
if isinstance(getattr(diracInstall.cliParams, opName), six.string_types):
setattr(diracInstall.cliParams, opName, opVal)
elif isinstance(getattr(diracInstall.cliParams, opName), bool):
setattr(diracInstall.cliParams, opName, opVal.lower() in ("y", "yes", "true", "1"))
# Now parse the ops
for opt, value in optList:
if opt in ('-r', '--release'):
diracInstall.cliParams.release = value
elif opt in ('-l', '--project'):
diracInstall.cliParams.project = value
elif opt in ('-e', '--extensions'):
for pkg in [p.strip() for p in value.split(",") if p.strip()]:
if pkg not in diracInstall.cliParams.extensions:
diracInstall.cliParams.extensions.append(pkg)
if not diracInstall.cliParams.release and not diracInstall.cliParams.modules:
diracInstall.logERROR("Missing release to install")
usage()
diracInstall.cliParams.basePath = diracInstall.cliParams.targetPath
if diracInstall.cliParams.useVersionsDir:
# install under the pro directory
diracInstall.cliParams.targetPath = os.path.join(diracInstall.cliParams.targetPath, 'pro')
diracInstall.logNOTICE("Destination path for installation is %s" % diracInstall.cliParams.targetPath)
rConfig.projectName = diracInstall.cliParams.project
res = rConfig.loadProjectRelease(diracInstall.cliParams.release,
project=diracInstall.cliParams.project,
sourceURL=diracInstall.cliParams.installSource)
if not res['OK']:
return res
  # Reload the local configuration to ensure it takes precedence
rConfig.loadInstallationLocalDefaults(args)
return diracInstall.S_OK(rConfig)
if __name__ == "__main__":
result = loadConfiguration()
if result['OK']:
releaseConfig = result['Value']
else:
diracInstall.logERROR('Can not load the configuration: %s' % result['Message'])
sys.exit(-1)
result = releaseConfig.getModulesToInstall(diracInstall.cliParams.release, diracInstall.cliParams.extensions)
for extension in diracInstall.cliParams.extensions:
if ":" in extension:
extension = extension.split(":")[0]
extUrl = result['Value'][1][extension][0]
extVersion = result['Value'][1][extension][1]
diracInstall.logNOTICE('Installing extension %s:%s' % (extension, extVersion))
if not diracInstall.downloadAndExtractTarball(extUrl, extension, extVersion):
diracInstall.logERROR('Failed to install %s' % extension)
sys.exit(-1)
# (Re)deploy scripts now taking into account the newly installed extensions
ddeLocation = os.path.join(diracInstall.cliParams.targetPath, "DIRAC", "Core",
"scripts", "dirac-deploy-scripts.py")
if not os.path.isfile(ddeLocation):
ddeLocation = os.path.join(diracInstall.cliParams.targetPath, "DIRAC", "Core",
"scripts", "dirac_deploy_scripts.py")
if os.path.isfile(ddeLocation):
cmd = ddeLocation
# if specified, create symlink instead of wrapper.
if diracInstall.cliParams.scriptSymlink:
cmd += ' --symlink'
extensionList = []
for extension in diracInstall.cliParams.extensions:
if ":" in extension:
extension = extension.split(":")[0]
extensionList.append(extension.split(":")[0])
cmd += " --module " + ','.join(extensionList)
os.system(cmd)
| yujikato/DIRAC | src/DIRAC/Core/scripts/dirac_install_extension.py | Python | gpl-3.0 | 5,929 | [
"DIRAC"
] | 59771aab9c4dfbd508243aad21360fb77ce3c93167edcfc444a95869a0f9c916 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides an interface to communicate with the device via the adb command.
Assumes adb binary is currently on system path.
"""
# pylint: skip-file
import collections
import datetime
import inspect
import logging
import os
import random
import re
import shlex
import signal
import subprocess
import sys
import tempfile
import time
import cmd_helper
import constants
import system_properties
from utils import host_utils
try:
from pylib import pexpect
except ImportError:
pexpect = None
sys.path.append(os.path.join(
constants.DIR_SOURCE_ROOT, 'third_party', 'android_testrunner'))
import adb_interface
import am_instrument_parser
import errors
from pylib.device import device_blacklist
from pylib.device import device_errors
# Pattern to search for the next whole line of pexpect output and capture it
# into a match group. We can't use ^ and $ for line start end with pexpect,
# see http://www.noah.org/python/pexpect/#doc for explanation why.
PEXPECT_LINE_RE = re.compile('\n([^\r]*)\r')
# Set the adb shell prompt to be a unique marker that will [hopefully] not
# appear at the start of any line of a command's output.
SHELL_PROMPT = '~+~PQ\x17RS~+~'
# Java properties file
LOCAL_PROPERTIES_PATH = constants.DEVICE_LOCAL_PROPERTIES_PATH
# Property in /data/local.prop that controls Java assertions.
JAVA_ASSERT_PROPERTY = 'dalvik.vm.enableassertions'
# Keycode "enum" suitable for passing to AndroidCommands.SendKey().
KEYCODE_HOME = 3
KEYCODE_BACK = 4
KEYCODE_DPAD_UP = 19
KEYCODE_DPAD_DOWN = 20
KEYCODE_DPAD_RIGHT = 22
KEYCODE_ENTER = 66
KEYCODE_MENU = 82
MD5SUM_DEVICE_FOLDER = constants.TEST_EXECUTABLE_DIR + '/md5sum/'
MD5SUM_DEVICE_PATH = MD5SUM_DEVICE_FOLDER + 'md5sum_bin'
PIE_WRAPPER_PATH = constants.TEST_EXECUTABLE_DIR + '/run_pie'
CONTROL_USB_CHARGING_COMMANDS = [
{
# Nexus 4
'witness_file': '/sys/module/pm8921_charger/parameters/disabled',
'enable_command': 'echo 0 > /sys/module/pm8921_charger/parameters/disabled',
'disable_command':
'echo 1 > /sys/module/pm8921_charger/parameters/disabled',
},
{
# Nexus 5
# Setting the HIZ bit of the bq24192 causes the charger to actually ignore
# energy coming from USB. Setting the power_supply offline just updates the
# Android system to reflect that.
'witness_file': '/sys/kernel/debug/bq24192/INPUT_SRC_CONT',
'enable_command': (
'echo 0x4A > /sys/kernel/debug/bq24192/INPUT_SRC_CONT && '
'echo 1 > /sys/class/power_supply/usb/online'),
'disable_command': (
'echo 0xCA > /sys/kernel/debug/bq24192/INPUT_SRC_CONT && '
'chmod 644 /sys/class/power_supply/usb/online && '
'echo 0 > /sys/class/power_supply/usb/online'),
},
]
class DeviceTempFile(object):
def __init__(self, android_commands, prefix='temp_file', suffix=''):
"""Find an unused temporary file path in the devices external directory.
When this object is closed, the file will be deleted on the device.
"""
self.android_commands = android_commands
while True:
# TODO(cjhopman): This could actually return the same file in multiple
# calls if the caller doesn't write to the files immediately. This is
# expected to never happen.
i = random.randint(0, 1000000)
self.name = '%s/%s-%d-%010d%s' % (
android_commands.GetExternalStorage(),
prefix, int(time.time()), i, suffix)
if not android_commands.FileExistsOnDevice(self.name):
break
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
self.android_commands.RunShellCommand('rm ' + self.name)
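# Illustrative usage (see RestartAdbdOnDevice below for a real call site):
#   with DeviceTempFile(android_commands, suffix='.sh') as tmp:
#     android_commands.Adb().Push(host_script_path, tmp.name)
#     android_commands.RunShellCommand('. %s' % tmp.name)
# The temporary file is removed from the device when the with-block exits.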
def GetAVDs():
"""Returns a list of AVDs."""
re_avd = re.compile('^[ ]+Name: ([a-zA-Z0-9_:.-]+)', re.MULTILINE)
avds = re_avd.findall(cmd_helper.GetCmdOutput(['android', 'list', 'avd']))
return avds
def ResetBadDevices():
"""Removes the blacklist that keeps track of bad devices for a current
build.
"""
device_blacklist.ResetBlacklist()
def ExtendBadDevices(devices):
"""Adds devices to the blacklist that keeps track of bad devices for a
current build.
The devices listed in the bad devices file will not be returned by
GetAttachedDevices.
Args:
devices: list of bad devices to be added to the bad devices file.
"""
device_blacklist.ExtendBlacklist(devices)
def GetAttachedDevices(hardware=True, emulator=True, offline=False):
"""Returns a list of attached, android devices and emulators.
If a preferred device has been set with ANDROID_SERIAL, it will be first in
the returned list. The arguments specify what devices to include in the list.
Example output:
* daemon not running. starting it now on port 5037 *
* daemon started successfully *
List of devices attached
027c10494100b4d7 device
emulator-5554 offline
Args:
hardware: Include attached actual devices that are online.
emulator: Include emulators (i.e. AVD's) currently on host.
offline: Include devices and emulators that are offline.
Returns: List of devices.
"""
adb_devices_output = cmd_helper.GetCmdOutput([constants.GetAdbPath(),
'devices'])
re_device = re.compile('^([a-zA-Z0-9_:.-]+)\tdevice$', re.MULTILINE)
online_devices = re_device.findall(adb_devices_output)
re_device = re.compile('^(emulator-[0-9]+)\tdevice', re.MULTILINE)
emulator_devices = re_device.findall(adb_devices_output)
re_device = re.compile('^([a-zA-Z0-9_:.-]+)\t(?:offline|unauthorized)$',
re.MULTILINE)
offline_devices = re_device.findall(adb_devices_output)
devices = []
# First determine list of online devices (e.g. hardware and/or emulator).
if hardware and emulator:
devices = online_devices
elif hardware:
devices = [device for device in online_devices
if device not in emulator_devices]
elif emulator:
devices = emulator_devices
# Now add offline devices if offline is true
if offline:
devices = devices + offline_devices
# Remove any devices in the blacklist.
blacklist = device_blacklist.ReadBlacklist()
if len(blacklist):
logging.info('Avoiding bad devices %s', ' '.join(blacklist))
devices = [device for device in devices if device not in blacklist]
preferred_device = os.environ.get('ANDROID_SERIAL')
if preferred_device in devices:
devices.remove(preferred_device)
devices.insert(0, preferred_device)
return devices
def IsDeviceAttached(device):
"""Return true if the device is attached and online."""
return device in GetAttachedDevices()
def _GetFilesFromRecursiveLsOutput(path, ls_output, re_file, utc_offset=None):
"""Gets a list of files from `ls` command output.
Python's os.walk isn't used because it doesn't work over adb shell.
Args:
path: The path to list.
ls_output: A list of lines returned by an `ls -lR` command.
re_file: A compiled regular expression which parses a line into named groups
consisting of at minimum "filename", "date", "time", "size" and
optionally "timezone".
utc_offset: A 5-character string of the form +HHMM or -HHMM, where HH is a
2-digit string giving the number of UTC offset hours, and MM is a
2-digit string giving the number of UTC offset minutes. If the input
utc_offset is None, will try to look for the value of "timezone" if it
is specified in re_file.
Returns:
A dict of {"name": (size, lastmod), ...} where:
name: The file name relative to |path|'s directory.
size: The file size in bytes (0 for directories).
lastmod: The file last modification date in UTC.
"""
re_directory = re.compile('^%s/(?P<dir>[^:]+):$' % re.escape(path))
path_dir = os.path.dirname(path)
current_dir = ''
files = {}
for line in ls_output:
directory_match = re_directory.match(line)
if directory_match:
current_dir = directory_match.group('dir')
continue
file_match = re_file.match(line)
if file_match:
filename = os.path.join(current_dir, file_match.group('filename'))
if filename.startswith(path_dir):
filename = filename[len(path_dir) + 1:]
lastmod = datetime.datetime.strptime(
file_match.group('date') + ' ' + file_match.group('time')[:5],
'%Y-%m-%d %H:%M')
if not utc_offset and 'timezone' in re_file.groupindex:
utc_offset = file_match.group('timezone')
if isinstance(utc_offset, str) and len(utc_offset) == 5:
utc_delta = datetime.timedelta(hours=int(utc_offset[1:3]),
minutes=int(utc_offset[3:5]))
if utc_offset[0:1] == '-':
utc_delta = -utc_delta
lastmod -= utc_delta
files[filename] = (int(file_match.group('size')), lastmod)
return files
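# Illustrative example (the concrete |re_file| patterns live at the call
# sites): given path='/sdcard/foo', an `ls -lR` block headed by
# '/sdcard/foo/sub:' that lists a 1024-byte file 'bar.txt' dated
# '2013-04-10 16:02' contributes
#   {'sub/bar.txt': (1024, datetime.datetime(2013, 4, 10, 16, 2))}
# assuming utc_offset is None and the pattern has no 'timezone' group.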
def _ParseMd5SumOutput(md5sum_output):
"""Returns a list of tuples from the provided md5sum output.
Args:
md5sum_output: output directly from md5sum binary.
Returns:
List of namedtuples with attributes |hash| and |path|, where |path| is the
absolute path to the file with an Md5Sum of |hash|.
"""
HashAndPath = collections.namedtuple('HashAndPath', ['hash', 'path'])
split_lines = [line.split(' ') for line in md5sum_output]
return [HashAndPath._make(s) for s in split_lines if len(s) == 2]
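# For example, a line consisting of an md5 hash and an absolute device path,
# separated by the delimiter expected by the split above, becomes
# HashAndPath(hash=<md5>, path=<device path>); malformed lines are skipped.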
def _HasAdbPushSucceeded(command_output):
"""Returns whether adb push has succeeded from the provided output."""
# TODO(frankf): We should look at the return code instead of the command
# output for many of the commands in this file.
if not command_output:
return True
# Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)"
# Errors look like this: "failed to copy ... "
if not re.search('^[0-9]', command_output.splitlines()[-1]):
logging.critical('PUSH FAILED: ' + command_output)
return False
return True
def GetLogTimestamp(log_line, year):
"""Returns the timestamp of the given |log_line| in the given year."""
try:
return datetime.datetime.strptime('%s-%s' % (year, log_line[:18]),
'%Y-%m-%d %H:%M:%S.%f')
except (ValueError, IndexError):
logging.critical('Error reading timestamp from ' + log_line)
return None
class AndroidCommands(object):
"""Helper class for communicating with Android device via adb."""
def __init__(self, device=None):
"""Constructor.
Args:
device: If given, adb commands are only send to the device of this ID.
Otherwise commands are sent to all attached devices.
"""
self._adb = adb_interface.AdbInterface(constants.GetAdbPath())
if device:
self._adb.SetTargetSerial(device)
self._device = device
self._logcat = None
self.logcat_process = None
self._logcat_tmpoutfile = None
self._pushed_files = []
self._device_utc_offset = None
self._potential_push_size = 0
self._actual_push_size = 0
self._external_storage = ''
self._util_wrapper = ''
self._system_properties = system_properties.SystemProperties(self.Adb())
self._push_if_needed_cache = {}
self._control_usb_charging_command = {
'command': None,
'cached': False,
}
self._protected_file_access_method_initialized = None
self._privileged_command_runner = None
self._pie_wrapper = None
@property
def system_properties(self):
return self._system_properties
def _LogShell(self, cmd):
"""Logs the adb shell command."""
if self._device:
device_repr = self._device[-4:]
else:
device_repr = '????'
logging.info('[%s]> %s', device_repr, cmd)
def Adb(self):
"""Returns our AdbInterface to avoid us wrapping all its methods."""
# TODO(tonyg): Goal should be to git rid of this method by making this API
# complete and alleviating the need.
return self._adb
def GetDevice(self):
"""Returns the device serial."""
return self._device
def IsOnline(self):
"""Checks whether the device is online.
Returns:
True if device is in 'device' mode, False otherwise.
"""
# TODO(aurimas): revert to using adb get-state when android L adb is fixed.
#out = self._adb.SendCommand('get-state')
#return out.strip() == 'device'
out = self._adb.SendCommand('devices')
for line in out.split('\n'):
if self._device in line and 'device' in line:
return True
return False
def IsRootEnabled(self):
"""Checks if root is enabled on the device."""
root_test_output = self.RunShellCommand('ls /root') or ['']
return not 'Permission denied' in root_test_output[0]
def EnableAdbRoot(self):
"""Enables adb root on the device.
Returns:
True: if output from executing adb root was as expected.
False: otherwise.
"""
if self.GetBuildType() == 'user':
logging.warning("Can't enable root in production builds with type user")
return False
else:
return_value = self._adb.EnableAdbRoot()
# EnableAdbRoot inserts a call for wait-for-device only when adb logcat
# output matches what is expected. Just to be safe add a call to
# wait-for-device.
self._adb.SendCommand('wait-for-device')
return return_value
def GetDeviceYear(self):
"""Returns the year information of the date on device."""
return self.RunShellCommand('date +%Y')[0]
def GetExternalStorage(self):
if not self._external_storage:
self._external_storage = self.RunShellCommand('echo $EXTERNAL_STORAGE')[0]
if not self._external_storage:
raise device_errors.CommandFailedError(
['shell', "'echo $EXTERNAL_STORAGE'"],
'Unable to find $EXTERNAL_STORAGE')
return self._external_storage
def WaitForDevicePm(self, timeout=120):
"""Blocks until the device's package manager is available.
    To work around http://b/5201039, we restart the shell and retry if the
package manager isn't back after 120 seconds.
Raises:
errors.WaitForResponseTimedOutError after max retries reached.
"""
last_err = None
retries = 3
while retries:
try:
self._adb.WaitForDevicePm(wait_time=timeout)
return # Success
except errors.WaitForResponseTimedOutError as e:
last_err = e
logging.warning('Restarting and retrying after timeout: %s', e)
retries -= 1
self.RestartShell()
raise last_err # Only reached after max retries, re-raise the last error.
def RestartShell(self):
"""Restarts the shell on the device. Does not block for it to return."""
self.RunShellCommand('stop')
self.RunShellCommand('start')
def Reboot(self, full_reboot=True):
"""Reboots the device and waits for the package manager to return.
Args:
full_reboot: Whether to fully reboot the device or just restart the shell.
"""
# TODO(torne): hive can't reboot the device either way without breaking the
# connection; work out if we can handle this better
if os.environ.get('USING_HIVE'):
logging.warning('Ignoring reboot request as we are on hive')
return
if full_reboot or not self.IsRootEnabled():
self._adb.SendCommand('reboot')
self._system_properties = system_properties.SystemProperties(self.Adb())
timeout = 300
retries = 1
# Wait for the device to disappear.
while retries < 10 and self.IsOnline():
time.sleep(1)
retries += 1
else:
self.RestartShell()
timeout = 120
# To run tests we need at least the package manager and the sd card (or
# other external storage) to be ready.
self.WaitForDevicePm(timeout)
self.WaitForSdCardReady(timeout)
def Shutdown(self):
"""Shuts down the device."""
self._adb.SendCommand('reboot -p')
self._system_properties = system_properties.SystemProperties(self.Adb())
def Uninstall(self, package):
"""Uninstalls the specified package from the device.
Args:
package: Name of the package to remove.
Returns:
A status string returned by adb uninstall
"""
uninstall_command = 'uninstall %s' % package
self._LogShell(uninstall_command)
return self._adb.SendCommand(uninstall_command, timeout_time=60)
def Install(self, package_file_path, reinstall=False):
"""Installs the specified package to the device.
Args:
package_file_path: Path to .apk file to install.
reinstall: Reinstall an existing apk, keeping the data.
Returns:
A status string returned by adb install
"""
assert os.path.isfile(package_file_path), ('<%s> is not file' %
package_file_path)
install_cmd = ['install']
if reinstall:
install_cmd.append('-r')
install_cmd.append(package_file_path)
install_cmd = ' '.join(install_cmd)
self._LogShell(install_cmd)
return self._adb.SendCommand(install_cmd,
timeout_time=2 * 60,
retry_count=0)
def ManagedInstall(self, apk_path, keep_data=False, package_name=None,
reboots_on_timeout=2):
"""Installs specified package and reboots device on timeouts.
If package_name is supplied, checks if the package is already installed and
doesn't reinstall if the apk md5sums match.
Args:
apk_path: Path to .apk file to install.
keep_data: Reinstalls instead of uninstalling first, preserving the
application data.
package_name: Package name (only needed if keep_data=False).
reboots_on_timeout: number of time to reboot if package manager is frozen.
"""
# Check if package is already installed and up to date.
if package_name:
installed_apk_path = self.GetApplicationPath(package_name)
if (installed_apk_path and
not self.GetFilesChanged(apk_path, installed_apk_path,
ignore_filenames=True)):
logging.info('Skipped install: identical %s APK already installed' %
package_name)
return
# Install.
reboots_left = reboots_on_timeout
while True:
try:
if not keep_data:
assert package_name
self.Uninstall(package_name)
install_status = self.Install(apk_path, reinstall=keep_data)
if 'Success' in install_status:
return
else:
raise Exception('Install failure: %s' % install_status)
except errors.WaitForResponseTimedOutError:
print '@@@STEP_WARNINGS@@@'
logging.info('Timeout on installing %s on device %s', apk_path,
self._device)
if reboots_left <= 0:
raise Exception('Install timed out')
# Force a hard reboot on last attempt
self.Reboot(full_reboot=(reboots_left == 1))
reboots_left -= 1
def MakeSystemFolderWritable(self):
"""Remounts the /system folder rw."""
out = self._adb.SendCommand('remount')
if out.strip() != 'remount succeeded':
raise errors.MsgException('Remount failed: %s' % out)
def RestartAdbdOnDevice(self):
logging.info('Restarting adbd on the device...')
with DeviceTempFile(self, suffix=".sh") as temp_script_file:
host_script_path = os.path.join(constants.DIR_SOURCE_ROOT,
'build',
'android',
'pylib',
'restart_adbd.sh')
self._adb.Push(host_script_path, temp_script_file.name)
self.RunShellCommand('. %s' % temp_script_file.name)
self._adb.SendCommand('wait-for-device')
def RestartAdbServer(self):
"""Restart the adb server."""
ret = self.KillAdbServer()
if ret != 0:
raise errors.MsgException('KillAdbServer: %d' % ret)
ret = self.StartAdbServer()
if ret != 0:
raise errors.MsgException('StartAdbServer: %d' % ret)
@staticmethod
def KillAdbServer():
"""Kill adb server."""
adb_cmd = [constants.GetAdbPath(), 'kill-server']
ret = cmd_helper.RunCmd(adb_cmd)
retry = 0
while retry < 3:
ret, _ = cmd_helper.GetCmdStatusAndOutput(['pgrep', 'adb'])
if ret != 0:
# pgrep didn't find adb, kill-server succeeded.
return 0
retry += 1
time.sleep(retry)
return ret
def StartAdbServer(self):
"""Start adb server."""
adb_cmd = ['taskset', '-c', '0', constants.GetAdbPath(), 'start-server']
ret, _ = cmd_helper.GetCmdStatusAndOutput(adb_cmd)
retry = 0
while retry < 3:
ret, _ = cmd_helper.GetCmdStatusAndOutput(['pgrep', 'adb'])
if ret == 0:
# pgrep found adb, start-server succeeded.
# Waiting for device to reconnect before returning success.
self._adb.SendCommand('wait-for-device')
return 0
retry += 1
time.sleep(retry)
return ret
def WaitForSystemBootCompleted(self, wait_time):
"""Waits for targeted system's boot_completed flag to be set.
Args:
wait_time: time in seconds to wait
Raises:
WaitForResponseTimedOutError if wait_time elapses and flag still not
set.
"""
logging.info('Waiting for system boot completed...')
self._adb.SendCommand('wait-for-device')
    # Now the device is there, but the system has not finished booting.
    # Query the sys.boot_completed flag with a basic command.
boot_completed = False
attempts = 0
wait_period = 5
while not boot_completed and (attempts * wait_period) < wait_time:
output = self.system_properties['sys.boot_completed']
output = output.strip()
if output == '1':
boot_completed = True
else:
        # If 'error: xxx' is returned when querying the flag, it means the
        # adb server lost the connection to the emulator, so restart the adb
        # server.
if 'error:' in output:
self.RestartAdbServer()
time.sleep(wait_period)
attempts += 1
if not boot_completed:
raise errors.WaitForResponseTimedOutError(
'sys.boot_completed flag was not set after %s seconds' % wait_time)
def WaitForSdCardReady(self, timeout_time):
"""Wait for the SD card ready before pushing data into it."""
logging.info('Waiting for SD card ready...')
sdcard_ready = False
attempts = 0
wait_period = 5
external_storage = self.GetExternalStorage()
while not sdcard_ready and attempts * wait_period < timeout_time:
output = self.RunShellCommand('ls ' + external_storage)
if output:
sdcard_ready = True
else:
time.sleep(wait_period)
attempts += 1
if not sdcard_ready:
raise errors.WaitForResponseTimedOutError(
'SD card not ready after %s seconds' % timeout_time)
def GetAndroidToolStatusAndOutput(self, command, lib_path=None, *args, **kw):
"""Runs a native Android binary, wrapping the command as necessary.
    This is a specialization of GetShellCommandStatusAndOutput, which is meant
    for running tools/android/ binaries and properly handles: (1) setting the
    lib path (for component=shared_library), (2) using the PIE wrapper on ICS.
See crbug.com/373219 for more context.
Args:
command: String containing the command to send.
lib_path: (optional) path to the folder containing the dependent libs.
    The remaining arguments are the same as for GetShellCommandStatusAndOutput.
"""
    # The first time this command is run the device is inspected to check
    # whether a wrapper for running PIE executables is needed (only on Android
    # ICS) or not. The result is cached, so the wrapper is pushed only once.
if self._pie_wrapper is None:
# None: did not check; '': did check and not needed; '/path': use /path.
self._pie_wrapper = ''
if self.GetBuildId().startswith('I'): # Ixxxx = Android ICS.
run_pie_dist_path = os.path.join(constants.GetOutDirectory(), 'run_pie')
assert os.path.exists(run_pie_dist_path), 'Please build run_pie'
# The PIE loader must be pushed manually (i.e. no PushIfNeeded) because
# PushIfNeeded requires md5sum and md5sum requires the wrapper as well.
adb_command = 'push %s %s' % (run_pie_dist_path, PIE_WRAPPER_PATH)
assert _HasAdbPushSucceeded(self._adb.SendCommand(adb_command))
self._pie_wrapper = PIE_WRAPPER_PATH
if self._pie_wrapper:
command = '%s %s' % (self._pie_wrapper, command)
if lib_path:
command = 'LD_LIBRARY_PATH=%s %s' % (lib_path, command)
return self.GetShellCommandStatusAndOutput(command, *args, **kw)
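  # Illustrative sketch (not part of the original file): assuming `ac` is an
  # AndroidCommands instance and the device runs ICS with a shared-library
  # build, the command assembled above takes roughly the shape shown in the
  # trailing comment. The tool path and lib directory are made-up values.
  #
  #   status, output = ac.GetAndroidToolStatusAndOutput(
  #       '/data/local/tmp/chrome_test_tool', lib_path='/data/local/tmp/libs')
  #   # effectively runs on the device:
  #   #   LD_LIBRARY_PATH=/data/local/tmp/libs <PIE_WRAPPER_PATH> \
  #   #       /data/local/tmp/chrome_test_tool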
# It is tempting to turn this function into a generator, however this is not
# possible without using a private (local) adb_shell instance (to ensure no
# other command interleaves usage of it), which would defeat the main aim of
# being able to reuse the adb shell instance across commands.
def RunShellCommand(self, command, timeout_time=20, log_result=False):
"""Send a command to the adb shell and return the result.
Args:
command: String containing the shell command to send.
timeout_time: Number of seconds to wait for command to respond before
retrying, used by AdbInterface.SendShellCommand.
log_result: Boolean to indicate whether we should log the result of the
shell command.
Returns:
list containing the lines of output received from running the command
"""
self._LogShell(command)
if "'" in command:
command = command.replace('\'', '\'\\\'\'')
result = self._adb.SendShellCommand(
"'%s'" % command, timeout_time).splitlines()
# TODO(b.kelemen): we should really be able to drop the stderr of the
# command or raise an exception based on what the caller wants.
result = [ l for l in result if not l.startswith('WARNING') ]
if ['error: device not found'] == result:
raise errors.DeviceUnresponsiveError('device not found')
if log_result:
self._LogShell('\n'.join(result))
return result
def GetShellCommandStatusAndOutput(self, command, timeout_time=20,
log_result=False):
"""See RunShellCommand() above.
Returns:
The tuple (exit code, list of output lines).
"""
lines = self.RunShellCommand(
command + '; echo %$?', timeout_time, log_result)
last_line = lines[-1]
status_pos = last_line.rfind('%')
assert status_pos >= 0
status = int(last_line[status_pos + 1:])
if status_pos == 0:
lines = lines[:-1]
else:
lines = lines[:-1] + [last_line[:status_pos]]
return (status, lines)
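  # Hedged usage sketch (not in the original source): assuming `ac` is an
  # AndroidCommands instance for an attached device, the exit status recovered
  # from the trailing '%<code>' marker can be checked like this.
  #
  #   status, lines = ac.GetShellCommandStatusAndOutput('ls /data/local/tmp')
  #   if status != 0:
  #     logging.error('Command failed (%d):\n%s', status, '\n'.join(lines))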
def KillAll(self, process, signum=9, with_su=False):
"""Android version of killall, connected via adb.
Args:
process: name of the process to kill off.
signum: signal to use, 9 (SIGKILL) by default.
      with_su: whether or not to use su to kill the processes.
Returns:
the number of processes killed
"""
pids = self.ExtractPid(process)
if pids:
cmd = 'kill -%d %s' % (signum, ' '.join(pids))
if with_su:
self.RunShellCommandWithSU(cmd)
else:
self.RunShellCommand(cmd)
return len(pids)
def KillAllBlocking(self, process, timeout_sec, signum=9, with_su=False):
"""Blocking version of killall, connected via adb.
This waits until no process matching the corresponding name appears in ps'
output anymore.
Args:
process: name of the process to kill off
timeout_sec: the timeout in seconds
signum: same as |KillAll|
with_su: same as |KillAll|
Returns:
the number of processes killed
"""
processes_killed = self.KillAll(process, signum=signum, with_su=with_su)
if processes_killed:
elapsed = 0
wait_period = 0.1
# Note that this doesn't take into account the time spent in ExtractPid().
while self.ExtractPid(process) and elapsed < timeout_sec:
time.sleep(wait_period)
elapsed += wait_period
if elapsed >= timeout_sec:
        return processes_killed - len(self.ExtractPid(process))
return processes_killed
@staticmethod
def _GetActivityCommand(package, activity, wait_for_completion, action,
category, data, extras, trace_file_name, force_stop,
flags):
"""Creates command to start |package|'s activity on the device.
Args - as for StartActivity
Returns:
the command to run on the target to start the activity
"""
cmd = 'am start -a %s' % action
if force_stop:
cmd += ' -S'
if wait_for_completion:
cmd += ' -W'
if category:
cmd += ' -c %s' % category
if package and activity:
cmd += ' -n %s/%s' % (package, activity)
if data:
cmd += ' -d "%s"' % data
if extras:
for key in extras:
value = extras[key]
if isinstance(value, str):
cmd += ' --es'
elif isinstance(value, bool):
cmd += ' --ez'
elif isinstance(value, int):
cmd += ' --ei'
else:
raise NotImplementedError(
'Need to teach StartActivity how to pass %s extras' % type(value))
cmd += ' %s %s' % (key, value)
if trace_file_name:
cmd += ' --start-profiler ' + trace_file_name
if flags:
cmd += ' -f %s' % flags
return cmd
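  # Illustrative example (hypothetical package, activity and URL): with
  # force_stop and wait_for_completion set, the command built above would look
  # roughly like:
  #   am start -a android.intent.action.VIEW -S -W \
  #       -n com.example.app/.MainActivity -d "http://example.com/"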
def StartActivity(self, package, activity, wait_for_completion=False,
action='android.intent.action.VIEW',
category=None, data=None,
extras=None, trace_file_name=None,
force_stop=False, flags=None):
"""Starts |package|'s activity on the device.
Args:
package: Name of package to start (e.g. 'com.google.android.apps.chrome').
activity: Name of activity (e.g. '.Main' or
'com.google.android.apps.chrome.Main').
wait_for_completion: wait for the activity to finish launching (-W flag).
action: string (e.g. "android.intent.action.MAIN"). Default is VIEW.
category: string (e.g. "android.intent.category.HOME")
data: Data string to pass to activity (e.g. 'http://www.example.com/').
extras: Dict of extras to pass to activity. Values are significant.
trace_file_name: If used, turns on and saves the trace to this file name.
      force_stop: force stop the target app before starting the activity (-S
        flag).
      flags: Optional intent flags to pass to the activity manager (-f flag).
Returns:
The output of the underlying command as a list of lines.
"""
cmd = self._GetActivityCommand(package, activity, wait_for_completion,
action, category, data, extras,
trace_file_name, force_stop, flags)
return self.RunShellCommand(cmd)
def StartActivityTimed(self, package, activity, wait_for_completion=False,
action='android.intent.action.VIEW',
category=None, data=None,
extras=None, trace_file_name=None,
force_stop=False, flags=None):
"""Starts |package|'s activity on the device, returning the start time
Args - as for StartActivity
Returns:
A tuple containing:
- the output of the underlying command as a list of lines, and
- a timestamp string for the time at which the activity started
"""
cmd = self._GetActivityCommand(package, activity, wait_for_completion,
action, category, data, extras,
trace_file_name, force_stop, flags)
self.StartMonitoringLogcat()
out = self.RunShellCommand('log starting activity; ' + cmd)
activity_started_re = re.compile('.*starting activity.*')
m = self.WaitForLogMatch(activity_started_re, None)
assert m
start_line = m.group(0)
return (out, GetLogTimestamp(start_line, self.GetDeviceYear()))
def StartCrashUploadService(self, package):
# TODO(frankf): We really need a python wrapper around Intent
# to be shared with StartActivity/BroadcastIntent.
cmd = (
'am startservice -a %s.crash.ACTION_FIND_ALL -n '
'%s/%s.crash.MinidumpUploadService' %
(constants.PACKAGE_INFO['chrome'].package,
package,
constants.PACKAGE_INFO['chrome'].package))
am_output = self.RunShellCommandWithSU(cmd)
assert am_output and 'Starting' in am_output[-1], (
'Service failed to start: %s' % am_output)
time.sleep(15)
def BroadcastIntent(self, package, intent, *args):
"""Send a broadcast intent.
Args:
package: Name of package containing the intent.
intent: Name of the intent.
args: Optional extra arguments for the intent.
"""
cmd = 'am broadcast -a %s.%s %s' % (package, intent, ' '.join(args))
self.RunShellCommand(cmd)
def GoHome(self):
"""Tell the device to return to the home screen. Blocks until completion."""
self.RunShellCommand('am start -W '
'-a android.intent.action.MAIN -c android.intent.category.HOME')
def CloseApplication(self, package):
"""Attempt to close down the application, using increasing violence.
Args:
package: Name of the process to kill off, e.g.
com.google.android.apps.chrome
"""
self.RunShellCommand('am force-stop ' + package)
def GetApplicationPath(self, package):
"""Get the installed apk path on the device for the given package.
Args:
package: Name of the package.
Returns:
Path to the apk on the device if it exists, None otherwise.
"""
pm_path_output = self.RunShellCommand('pm path ' + package)
# The path output contains anything if and only if the package
# exists.
if pm_path_output:
# pm_path_output is of the form: "package:/path/to/foo.apk"
return pm_path_output[0].split(':')[1]
else:
return None
def ClearApplicationState(self, package):
"""Closes and clears all state for the given |package|."""
# Check that the package exists before clearing it. Necessary because
# calling pm clear on a package that doesn't exist may never return.
pm_path_output = self.RunShellCommand('pm path ' + package)
    # The path output contains anything if and only if the package exists.
if pm_path_output:
self.RunShellCommand('pm clear ' + package)
def SendKeyEvent(self, keycode):
"""Sends keycode to the device.
Args:
keycode: Numeric keycode to send (see "enum" at top of file).
"""
self.RunShellCommand('input keyevent %d' % keycode)
def _RunMd5Sum(self, host_path, device_path):
"""Gets the md5sum of a host path and device path.
Args:
host_path: Path (file or directory) on the host.
device_path: Path on the device.
Returns:
A tuple containing lists of the host and device md5sum results as
created by _ParseMd5SumOutput().
"""
md5sum_dist_path = os.path.join(constants.GetOutDirectory(),
'md5sum_dist')
assert os.path.exists(md5sum_dist_path), 'Please build md5sum.'
md5sum_dist_mtime = os.stat(md5sum_dist_path).st_mtime
if (md5sum_dist_path not in self._push_if_needed_cache or
self._push_if_needed_cache[md5sum_dist_path] != md5sum_dist_mtime):
command = 'push %s %s' % (md5sum_dist_path, MD5SUM_DEVICE_FOLDER)
assert _HasAdbPushSucceeded(self._adb.SendCommand(command))
self._push_if_needed_cache[md5sum_dist_path] = md5sum_dist_mtime
(_, md5_device_output) = self.GetAndroidToolStatusAndOutput(
self._util_wrapper + ' ' + MD5SUM_DEVICE_PATH + ' ' + device_path,
lib_path=MD5SUM_DEVICE_FOLDER,
timeout_time=2 * 60)
device_hash_tuples = _ParseMd5SumOutput(md5_device_output)
assert os.path.exists(host_path), 'Local path not found %s' % host_path
md5sum_output = cmd_helper.GetCmdOutput(
[os.path.join(constants.GetOutDirectory(), 'md5sum_bin_host'),
host_path])
host_hash_tuples = _ParseMd5SumOutput(md5sum_output.splitlines())
return (host_hash_tuples, device_hash_tuples)
def GetFilesChanged(self, host_path, device_path, ignore_filenames=False):
"""Compares the md5sum of a host path against a device path.
Note: Ignores extra files on the device.
Args:
host_path: Path (file or directory) on the host.
device_path: Path on the device.
ignore_filenames: If True only the file contents are considered when
checking whether a file has changed, otherwise the relative path
must also match.
Returns:
A list of tuples of the form (host_path, device_path) for files whose
md5sums do not match.
"""
# Md5Sum resolves symbolic links in path names so the calculation of
# relative path names from its output will need the real path names of the
# base directories. Having calculated these they are used throughout the
# function since this makes us less subject to any future changes to Md5Sum.
real_host_path = os.path.realpath(host_path)
real_device_path = self.RunShellCommand('realpath "%s"' % device_path)[0]
host_hash_tuples, device_hash_tuples = self._RunMd5Sum(
real_host_path, real_device_path)
if len(host_hash_tuples) > len(device_hash_tuples):
logging.info('%s files do not exist on the device' %
(len(host_hash_tuples) - len(device_hash_tuples)))
host_rel = [(os.path.relpath(os.path.normpath(t.path), real_host_path),
t.hash)
for t in host_hash_tuples]
if os.path.isdir(real_host_path):
def RelToRealPaths(rel_path):
return (os.path.join(real_host_path, rel_path),
os.path.join(real_device_path, rel_path))
else:
assert len(host_rel) == 1
def RelToRealPaths(_):
return (real_host_path, real_device_path)
if ignore_filenames:
# If we are ignoring file names, then we want to push any file for which
# a file with an equivalent MD5 sum does not exist on the device.
device_hashes = set([h.hash for h in device_hash_tuples])
ShouldPush = lambda p, h: h not in device_hashes
else:
# Otherwise, we want to push any file on the host for which a file with
# an equivalent MD5 sum does not exist at the same relative path on the
# device.
device_rel = dict([(os.path.relpath(os.path.normpath(t.path),
real_device_path),
t.hash)
for t in device_hash_tuples])
ShouldPush = lambda p, h: p not in device_rel or h != device_rel[p]
return [RelToRealPaths(path) for path, host_hash in host_rel
if ShouldPush(path, host_hash)]
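  # Hedged sketch (not from the original file): each returned tuple pairs a
  # changed host file with its device-side destination; `ac` is an assumed
  # AndroidCommands instance and the paths are made-up examples.
  #   changed = ac.GetFilesChanged('/tmp/test_data', '/sdcard/test_data')
  #   for host, device in changed:
  #     print '%s still needs to be pushed to %s' % (host, device)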
def PushIfNeeded(self, host_path, device_path):
"""Pushes |host_path| to |device_path|.
    Works for files and directories. This method skips copying any paths that
    already exist on the device with the same hash.
All pushed files can be removed by calling RemovePushedFiles().
"""
MAX_INDIVIDUAL_PUSHES = 50
if not os.path.exists(host_path):
raise device_errors.CommandFailedError(
'Local path not found %s' % host_path, device=str(self))
# See if the file on the host changed since the last push (if any) and
# return early if it didn't. Note that this shortcut assumes that the tests
# on the device don't modify the files.
if not os.path.isdir(host_path):
if host_path in self._push_if_needed_cache:
host_path_mtime = self._push_if_needed_cache[host_path]
if host_path_mtime == os.stat(host_path).st_mtime:
return
size = host_utils.GetRecursiveDiskUsage(host_path)
self._pushed_files.append(device_path)
self._potential_push_size += size
if os.path.isdir(host_path):
self.RunShellCommand('mkdir -p "%s"' % device_path)
changed_files = self.GetFilesChanged(host_path, device_path)
logging.info('Found %d files that need to be pushed to %s',
len(changed_files), device_path)
if not changed_files:
return
def Push(host, device):
# NOTE: We can't use adb_interface.Push() because it hardcodes a timeout
# of 60 seconds which isn't sufficient for a lot of users of this method.
push_command = 'push %s %s' % (host, device)
self._LogShell(push_command)
# Retry push with increasing backoff if the device is busy.
retry = 0
while True:
output = self._adb.SendCommand(push_command, timeout_time=30 * 60)
if _HasAdbPushSucceeded(output):
if not os.path.isdir(host_path):
self._push_if_needed_cache[host] = os.stat(host).st_mtime
return
if retry < 3:
retry += 1
wait_time = 5 * retry
logging.error('Push failed, retrying in %d seconds: %s' %
(wait_time, output))
time.sleep(wait_time)
else:
raise Exception('Push failed: %s' % output)
diff_size = 0
if len(changed_files) <= MAX_INDIVIDUAL_PUSHES:
diff_size = sum(host_utils.GetRecursiveDiskUsage(f[0])
for f in changed_files)
# TODO(craigdh): Replace this educated guess with a heuristic that
# approximates the push time for each method.
if len(changed_files) > MAX_INDIVIDUAL_PUSHES or diff_size > 0.5 * size:
self._actual_push_size += size
Push(host_path, device_path)
else:
for f in changed_files:
Push(f[0], f[1])
self._actual_push_size += diff_size
def GetPushSizeInfo(self):
"""Get total size of pushes to the device done via PushIfNeeded()
Returns:
A tuple:
1. Total size of push requests to PushIfNeeded (MB)
2. Total size that was actually pushed (MB)
"""
return (self._potential_push_size, self._actual_push_size)
def GetFileContents(self, filename, log_result=False):
"""Gets contents from the file specified by |filename|."""
return self.RunShellCommand('cat "%s" 2>/dev/null' % filename,
log_result=log_result)
def SetFileContents(self, filename, contents):
"""Writes |contents| to the file specified by |filename|."""
with tempfile.NamedTemporaryFile() as f:
f.write(contents)
f.flush()
self._adb.Push(f.name, filename)
def RunShellCommandWithSU(self, command, timeout_time=20, log_result=False):
return self.RunShellCommand('su -c %s' % command, timeout_time, log_result)
def CanAccessProtectedFileContents(self):
"""Returns True if Get/SetProtectedFileContents would work via "su" or adb
shell running as root.
Devices running user builds don't have adb root, but may provide "su" which
can be used for accessing protected files.
"""
return (self._GetProtectedFileCommandRunner() != None)
def _GetProtectedFileCommandRunner(self):
"""Finds the best method to access protected files on the device.
Returns:
1. None when privileged files cannot be accessed on the device.
2. Otherwise: A function taking a single parameter: a string with command
line arguments. Running that function executes the command with
the appropriate method.
"""
if self._protected_file_access_method_initialized:
return self._privileged_command_runner
self._privileged_command_runner = None
self._protected_file_access_method_initialized = True
for cmd in [self.RunShellCommand, self.RunShellCommandWithSU]:
# Get contents of the auxv vector for the init(8) process from a small
# binary file that always exists on linux and is always read-protected.
contents = cmd('cat /proc/1/auxv')
      # The leading 4 or 8 bytes of each auxv entry are the a_type. There are
      # not many reserved a_type values, hence byte 2 must always be '\0' for a
      # realistic auxv. See /usr/include/elf.h.
if len(contents) > 0 and (contents[0][2] == '\0'):
self._privileged_command_runner = cmd
break
return self._privileged_command_runner
def GetProtectedFileContents(self, filename):
"""Gets contents from the protected file specified by |filename|.
This is potentially less efficient than GetFileContents.
"""
command = 'cat "%s" 2> /dev/null' % filename
command_runner = self._GetProtectedFileCommandRunner()
if command_runner:
return command_runner(command)
else:
logging.warning('Could not access protected file: %s' % filename)
return []
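  # Usage sketch (assumption, not part of the original source): callers can
  # guard on CanAccessProtectedFileContents() before reading root-only files.
  #   if ac.CanAccessProtectedFileContents():
  #     cmdline = ac.GetProtectedFileContents('/proc/1/cmdline')
  #   else:
  #     cmdline = []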
def SetProtectedFileContents(self, filename, contents):
"""Writes |contents| to the protected file specified by |filename|.
This is less efficient than SetFileContents.
"""
with DeviceTempFile(self) as temp_file:
with DeviceTempFile(self, suffix=".sh") as temp_script:
# Put the contents in a temporary file
self.SetFileContents(temp_file.name, contents)
# Create a script to copy the file contents to its final destination
self.SetFileContents(temp_script.name,
'cat %s > %s' % (temp_file.name, filename))
command = 'sh %s' % temp_script.name
command_runner = self._GetProtectedFileCommandRunner()
if command_runner:
return command_runner(command)
else:
logging.warning(
'Could not set contents of protected file: %s' % filename)
def RemovePushedFiles(self):
"""Removes all files pushed with PushIfNeeded() from the device."""
for p in self._pushed_files:
self.RunShellCommand('rm -r %s' % p, timeout_time=2 * 60)
def ListPathContents(self, path):
"""Lists files in all subdirectories of |path|.
Args:
path: The path to list.
Returns:
A dict of {"name": (size, lastmod), ...}.
"""
# Example output:
# /foo/bar:
# -rw-r----- user group 102 2011-05-12 12:29:54.131623387 +0100 baz.txt
re_file = re.compile('^-(?P<perms>[^\s]+)\s+'
'(?P<user>[^\s]+)\s+'
'(?P<group>[^\s]+)\s+'
'(?P<size>[^\s]+)\s+'
'(?P<date>[^\s]+)\s+'
'(?P<time>[^\s]+)\s+'
'(?P<filename>[^\s]+)$')
return _GetFilesFromRecursiveLsOutput(
path, self.RunShellCommand('ls -lR %s' % path), re_file,
self.GetUtcOffset())
def GetUtcOffset(self):
if not self._device_utc_offset:
self._device_utc_offset = self.RunShellCommand('date +%z')[0]
return self._device_utc_offset
def SetJavaAssertsEnabled(self, enable):
"""Sets or removes the device java assertions property.
Args:
enable: If True the property will be set.
Returns:
True if the file was modified (reboot is required for it to take effect).
"""
# First ensure the desired property is persisted.
temp_props_file = tempfile.NamedTemporaryFile()
properties = ''
if self._adb.Pull(LOCAL_PROPERTIES_PATH, temp_props_file.name):
with open(temp_props_file.name) as f:
properties = f.read()
re_search = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
r'\s*=\s*all\s*$', re.MULTILINE)
if enable != bool(re.search(re_search, properties)):
re_replace = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
r'\s*=\s*\w+\s*$', re.MULTILINE)
properties = re.sub(re_replace, '', properties)
if enable:
properties += '\n%s=all\n' % JAVA_ASSERT_PROPERTY
file(temp_props_file.name, 'w').write(properties)
self._adb.Push(temp_props_file.name, LOCAL_PROPERTIES_PATH)
# Next, check the current runtime value is what we need, and
# if not, set it and report that a reboot is required.
was_set = 'all' in self.system_properties[JAVA_ASSERT_PROPERTY]
if was_set == enable:
return False
self.system_properties[JAVA_ASSERT_PROPERTY] = enable and 'all' or ''
return True
def GetBuildId(self):
"""Returns the build ID of the system (e.g. JRM79C)."""
build_id = self.system_properties['ro.build.id']
assert build_id
return build_id
def GetBuildType(self):
"""Returns the build type of the system (e.g. eng)."""
build_type = self.system_properties['ro.build.type']
assert build_type
return build_type
def GetBuildProduct(self):
"""Returns the build product of the device (e.g. maguro)."""
build_product = self.system_properties['ro.build.product']
assert build_product
return build_product
def GetProductName(self):
"""Returns the product name of the device (e.g. takju)."""
name = self.system_properties['ro.product.name']
assert name
return name
def GetBuildFingerprint(self):
"""Returns the build fingerprint of the device."""
build_fingerprint = self.system_properties['ro.build.fingerprint']
assert build_fingerprint
return build_fingerprint
def GetDescription(self):
"""Returns the description of the system.
For example, "yakju-userdebug 4.1 JRN54F 364167 dev-keys".
"""
description = self.system_properties['ro.build.description']
assert description
return description
def GetProductModel(self):
"""Returns the name of the product model (e.g. "Galaxy Nexus") """
model = self.system_properties['ro.product.model']
assert model
return model
def GetWifiIP(self):
"""Returns the wifi IP on the device."""
wifi_ip = self.system_properties['dhcp.wlan0.ipaddress']
# Do not assert here. Devices (e.g. emulators) may not have a WifiIP.
return wifi_ip
def GetSubscriberInfo(self):
"""Returns the device subscriber info (e.g. GSM and device ID) as string."""
iphone_sub = self.RunShellCommand('dumpsys iphonesubinfo')
# Do not assert here. Devices (e.g. Nakasi on K) may not have iphonesubinfo.
return '\n'.join(iphone_sub)
def GetBatteryInfo(self):
"""Returns a {str: str} dict of battery info (e.g. status, level, etc)."""
battery = self.RunShellCommand('dumpsys battery')
assert battery
battery_info = {}
for line in battery[1:]:
k, _, v = line.partition(': ')
battery_info[k.strip()] = v.strip()
return battery_info
def GetSetupWizardStatus(self):
"""Returns the status of the device setup wizard (e.g. DISABLED)."""
status = self.system_properties['ro.setupwizard.mode']
# On some devices, the status is empty if not otherwise set. In such cases
# the caller should expect an empty string to be returned.
return status
def StartMonitoringLogcat(self, clear=True, logfile=None, filters=None):
"""Starts monitoring the output of logcat, for use with WaitForLogMatch.
Args:
      clear: If True the existing logcat output will be cleared, to avoid
        matching historical output lurking in the log.
      logfile: Optional file-like object that the logcat output will also be
        written to.
      filters: A list of logcat filters to be used.
"""
if clear:
self.RunShellCommand('logcat -c')
args = []
if self._adb._target_arg:
args += shlex.split(self._adb._target_arg)
args += ['logcat', '-v', 'threadtime']
if filters:
args.extend(filters)
else:
args.append('*:v')
if logfile:
logfile = NewLineNormalizer(logfile)
# Spawn logcat and synchronize with it.
for _ in range(4):
self._logcat = pexpect.spawn(constants.GetAdbPath(), args, timeout=10,
logfile=logfile)
if not clear or self.SyncLogCat():
break
self._logcat.close(force=True)
else:
logging.critical('Error reading from logcat: ' + str(self._logcat.match))
sys.exit(1)
def SyncLogCat(self):
"""Synchronize with logcat.
Synchronize with the monitored logcat so that WaitForLogMatch will only
    consider new messages that are received after this point in time.
Returns:
True if the synchronization succeeded.
"""
assert self._logcat
tag = 'logcat_sync_%s' % time.time()
self.RunShellCommand('log ' + tag)
return self._logcat.expect([tag, pexpect.EOF, pexpect.TIMEOUT]) == 0
def GetMonitoredLogCat(self):
"""Returns an "adb logcat" command as created by pexpected.spawn."""
if not self._logcat:
self.StartMonitoringLogcat(clear=False)
return self._logcat
def WaitForLogMatch(self, success_re, error_re, clear=False, timeout=10):
"""Blocks until a matching line is logged or a timeout occurs.
Args:
success_re: A compiled re to search each line for.
error_re: A compiled re which, if found, terminates the search for
|success_re|. If None is given, no error condition will be detected.
clear: If True the existing logcat output will be cleared, defaults to
false.
timeout: Timeout in seconds to wait for a log match.
Raises:
pexpect.TIMEOUT after |timeout| seconds without a match for |success_re|
or |error_re|.
Returns:
The re match object if |success_re| is matched first or None if |error_re|
is matched first.
"""
logging.info('<<< Waiting for logcat:' + str(success_re.pattern))
t0 = time.time()
while True:
if not self._logcat:
self.StartMonitoringLogcat(clear)
try:
while True:
          # Note this will block for up to the timeout _per log line_, so we need
# to calculate the overall timeout remaining since t0.
time_remaining = t0 + timeout - time.time()
if time_remaining < 0:
raise pexpect.TIMEOUT(self._logcat)
self._logcat.expect(PEXPECT_LINE_RE, timeout=time_remaining)
line = self._logcat.match.group(1)
if error_re:
error_match = error_re.search(line)
if error_match:
return None
success_match = success_re.search(line)
if success_match:
return success_match
logging.info('<<< Skipped Logcat Line:' + str(line))
except pexpect.TIMEOUT:
raise pexpect.TIMEOUT(
'Timeout (%ds) exceeded waiting for pattern "%s" (tip: use -vv '
'to debug)' %
(timeout, success_re.pattern))
except pexpect.EOF:
# It seems that sometimes logcat can end unexpectedly. This seems
# to happen during Chrome startup after a reboot followed by a cache
# clean. I don't understand why this happens, but this code deals with
# getting EOF in logcat.
logging.critical('Found EOF in adb logcat. Restarting...')
# Rerun spawn with original arguments. Note that self._logcat.args[0] is
# the path of adb, so we don't want it in the arguments.
self._logcat = pexpect.spawn(constants.GetAdbPath(),
self._logcat.args[1:],
timeout=self._logcat.timeout,
logfile=self._logcat.logfile)
def StartRecordingLogcat(self, clear=True, filters=None):
"""Starts recording logcat output to eventually be saved as a string.
This call should come before some series of tests are run, with either
StopRecordingLogcat or SearchLogcatRecord following the tests.
Args:
clear: True if existing log output should be cleared.
filters: A list of logcat filters to be used.
"""
if not filters:
filters = ['*:v']
if clear:
self._adb.SendCommand('logcat -c')
logcat_command = 'adb %s logcat -v threadtime %s' % (self._adb._target_arg,
' '.join(filters))
self._logcat_tmpoutfile = tempfile.NamedTemporaryFile(bufsize=0)
self.logcat_process = subprocess.Popen(logcat_command, shell=True,
stdout=self._logcat_tmpoutfile)
def GetCurrentRecordedLogcat(self):
"""Return the current content of the logcat being recorded.
Call this after StartRecordingLogcat() and before StopRecordingLogcat().
This can be useful to perform timed polling/parsing.
Returns:
Current logcat output as a single string, or None if
StopRecordingLogcat() was already called.
"""
if not self._logcat_tmpoutfile:
return None
with open(self._logcat_tmpoutfile.name) as f:
return f.read()
def StopRecordingLogcat(self):
"""Stops an existing logcat recording subprocess and returns output.
Returns:
The logcat output as a string or an empty string if logcat was not
being recorded at the time.
"""
if not self.logcat_process:
return ''
# Cannot evaluate directly as 0 is a possible value.
    # Better to read self.logcat_process.stdout before killing it,
    # otherwise communicate() may return incomplete output due to a pipe break.
if self.logcat_process.poll() is None:
self.logcat_process.kill()
self.logcat_process.wait()
self.logcat_process = None
self._logcat_tmpoutfile.seek(0)
output = self._logcat_tmpoutfile.read()
self._logcat_tmpoutfile.close()
self._logcat_tmpoutfile = None
return output
@staticmethod
def SearchLogcatRecord(record, message, thread_id=None, proc_id=None,
log_level=None, component=None):
"""Searches the specified logcat output and returns results.
This method searches through the logcat output specified by record for a
certain message, narrowing results by matching them against any other
specified criteria. It returns all matching lines as described below.
Args:
record: A string generated by Start/StopRecordingLogcat to search.
message: An output string to search for.
thread_id: The thread id that is the origin of the message.
proc_id: The process that is the origin of the message.
log_level: The log level of the message.
component: The name of the component that would create the message.
Returns:
      A list of dictionaries representing matching entries, each containing keys
thread_id, proc_id, log_level, component, and message.
"""
if thread_id:
thread_id = str(thread_id)
if proc_id:
proc_id = str(proc_id)
results = []
reg = re.compile('(\d+)\s+(\d+)\s+([A-Z])\s+([A-Za-z]+)\s*:(.*)$',
re.MULTILINE)
log_list = reg.findall(record)
for (tid, pid, log_lev, comp, msg) in log_list:
if ((not thread_id or thread_id == tid) and
(not proc_id or proc_id == pid) and
(not log_level or log_level == log_lev) and
(not component or component == comp) and msg.find(message) > -1):
match = dict({'thread_id': tid, 'proc_id': pid,
'log_level': log_lev, 'component': comp,
'message': msg})
results.append(match)
return results
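  # Illustrative usage (hypothetical message/component values): search a
  # recording produced by StartRecordingLogcat()/StopRecordingLogcat().
  #   ac.StartRecordingLogcat()
  #   ...  # exercise the app
  #   record = ac.StopRecordingLogcat()
  #   for hit in ac.SearchLogcatRecord(record, 'Displayed',
  #                                    component='ActivityManager'):
  #     print hit['proc_id'], hit['message']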
def ExtractPid(self, process_name):
"""Extracts Process Ids for a given process name from Android Shell.
Args:
process_name: name of the process on the device.
Returns:
List of all the process ids (as strings) that match the given name.
If the name of a process exactly matches the given name, the pid of
that process will be inserted to the front of the pid list.
"""
pids = []
for line in self.RunShellCommand('ps', log_result=False):
data = line.split()
try:
if process_name in data[-1]: # name is in the last column
if process_name == data[-1]:
pids.insert(0, data[1]) # PID is in the second column
else:
pids.append(data[1])
except IndexError:
pass
return pids
def GetIoStats(self):
"""Gets cumulative disk IO stats since boot (for all processes).
Returns:
Dict of {num_reads, num_writes, read_ms, write_ms} or None if there
was an error.
"""
IoStats = collections.namedtuple(
'IoStats',
['device',
'num_reads_issued',
'num_reads_merged',
'num_sectors_read',
'ms_spent_reading',
'num_writes_completed',
'num_writes_merged',
'num_sectors_written',
'ms_spent_writing',
'num_ios_in_progress',
'ms_spent_doing_io',
'ms_spent_doing_io_weighted',
])
for line in self.GetFileContents('/proc/diskstats', log_result=False):
fields = line.split()
stats = IoStats._make([fields[2]] + [int(f) for f in fields[3:]])
if stats.device == 'mmcblk0':
return {
'num_reads': stats.num_reads_issued,
'num_writes': stats.num_writes_completed,
'read_ms': stats.ms_spent_reading,
'write_ms': stats.ms_spent_writing,
}
logging.warning('Could not find disk IO stats.')
return None
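  # Hedged sketch (not in the original file): because the counters are
  # cumulative since boot, per-run I/O can be derived by sampling twice.
  #   before = ac.GetIoStats()
  #   ...  # run the workload
  #   after = ac.GetIoStats()
  #   if before and after:
  #     print 'reads: %d' % (after['num_reads'] - before['num_reads'])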
def GetMemoryUsageForPid(self, pid):
"""Returns the memory usage for given pid.
Args:
pid: The pid number of the specific process running on device.
Returns:
Dict of {metric:usage_kb}, for the process which has specified pid.
The metric keys which may be included are: Size, Rss, Pss, Shared_Clean,
Shared_Dirty, Private_Clean, Private_Dirty, VmHWM.
"""
showmap = self.RunShellCommand('showmap %d' % pid)
if not showmap or not showmap[-1].endswith('TOTAL'):
logging.warning('Invalid output for showmap %s', str(showmap))
return {}
items = showmap[-1].split()
if len(items) != 9:
logging.warning('Invalid TOTAL for showmap %s', str(items))
return {}
usage_dict = collections.defaultdict(int)
usage_dict.update({
'Size': int(items[0].strip()),
'Rss': int(items[1].strip()),
'Pss': int(items[2].strip()),
'Shared_Clean': int(items[3].strip()),
'Shared_Dirty': int(items[4].strip()),
'Private_Clean': int(items[5].strip()),
'Private_Dirty': int(items[6].strip()),
})
peak_value_kb = 0
for line in self.GetProtectedFileContents('/proc/%s/status' % pid):
if not line.startswith('VmHWM:'): # Format: 'VmHWM: +[0-9]+ kB'
continue
peak_value_kb = int(line.split(':')[1].strip().split(' ')[0])
break
usage_dict['VmHWM'] = peak_value_kb
if not peak_value_kb:
logging.warning('Could not find memory peak value for pid ' + str(pid))
return usage_dict
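  # Hedged example (hypothetical package name): combine with ExtractPid() to
  # sample memory for a named process; keys match the dict built above.
  #   pids = ac.ExtractPid('com.example.app')
  #   if pids:
  #     usage = ac.GetMemoryUsageForPid(int(pids[0]))
  #     logging.info('Pss: %d kB, VmHWM: %d kB', usage['Pss'], usage['VmHWM'])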
def ProcessesUsingDevicePort(self, device_port):
"""Lists processes using the specified device port on loopback interface.
Args:
device_port: Port on device we want to check.
Returns:
A list of (pid, process_name) tuples using the specified port.
"""
tcp_results = self.RunShellCommand('cat /proc/net/tcp', log_result=False)
tcp_address = '0100007F:%04X' % device_port
pids = []
for single_connect in tcp_results:
connect_results = single_connect.split()
# Column 1 is the TCP port, and Column 9 is the inode of the socket
if connect_results[1] == tcp_address:
socket_inode = connect_results[9]
socket_name = 'socket:[%s]' % socket_inode
lsof_results = self.RunShellCommand('lsof', log_result=False)
for single_process in lsof_results:
process_results = single_process.split()
# Ignore the line if it has less than nine columns in it, which may
# be the case when a process stops while lsof is executing.
if len(process_results) <= 8:
continue
# Column 0 is the executable name
# Column 1 is the pid
# Column 8 is the Inode in use
if process_results[8] == socket_name:
pids.append((int(process_results[1]), process_results[0]))
break
logging.info('PidsUsingDevicePort: %s', pids)
return pids
def FileExistsOnDevice(self, file_name):
"""Checks whether the given file exists on the device.
Args:
file_name: Full path of file to check.
Returns:
True if the file exists, False otherwise.
"""
assert '"' not in file_name, 'file_name cannot contain double quotes'
try:
status = self._adb.SendShellCommand(
'\'test -e "%s"; echo $?\'' % (file_name))
if 'test: not found' not in status:
return int(status) == 0
status = self._adb.SendShellCommand(
'\'ls "%s" >/dev/null 2>&1; echo $?\'' % (file_name))
return int(status) == 0
except ValueError:
if IsDeviceAttached(self._device):
raise errors.DeviceUnresponsiveError('Device may be offline.')
return False
def IsFileWritableOnDevice(self, file_name):
"""Checks whether the given file (or directory) is writable on the device.
Args:
file_name: Full path of file/directory to check.
Returns:
True if writable, False otherwise.
"""
assert '"' not in file_name, 'file_name cannot contain double quotes'
try:
status = self._adb.SendShellCommand(
'\'test -w "%s"; echo $?\'' % (file_name))
if 'test: not found' not in status:
return int(status) == 0
raise errors.AbortError('"test" binary not found. OS too old.')
except ValueError:
if IsDeviceAttached(self._device):
raise errors.DeviceUnresponsiveError('Device may be offline.')
return False
@staticmethod
def GetTimestamp():
return time.strftime('%Y-%m-%d-%H%M%S', time.localtime())
@staticmethod
def EnsureHostDirectory(host_file):
host_dir = os.path.dirname(os.path.abspath(host_file))
if not os.path.exists(host_dir):
os.makedirs(host_dir)
def TakeScreenshot(self, host_file=None):
"""Saves a screenshot image to |host_file| on the host.
Args:
host_file: Absolute path to the image file to store on the host or None to
use an autogenerated file name.
Returns:
Resulting host file name of the screenshot.
"""
host_file = os.path.abspath(host_file or
'screenshot-%s.png' % self.GetTimestamp())
self.EnsureHostDirectory(host_file)
device_file = '%s/screenshot.png' % self.GetExternalStorage()
self.RunShellCommand(
'/system/bin/screencap -p %s' % device_file)
self.PullFileFromDevice(device_file, host_file)
self.RunShellCommand('rm -f "%s"' % device_file)
return host_file
def PullFileFromDevice(self, device_file, host_file):
"""Download |device_file| on the device from to |host_file| on the host.
Args:
device_file: Absolute path to the file to retrieve from the device.
host_file: Absolute path to the file to store on the host.
"""
if not self._adb.Pull(device_file, host_file):
raise device_errors.AdbCommandFailedError(
['pull', device_file, host_file], 'Failed to pull file from device.')
assert os.path.exists(host_file)
def SetUtilWrapper(self, util_wrapper):
"""Sets a wrapper prefix to be used when running a locally-built
binary on the device (ex.: md5sum_bin).
"""
self._util_wrapper = util_wrapper
def RunUIAutomatorTest(self, test, test_package, timeout):
"""Runs a single uiautomator test.
Args:
test: Test class/method.
test_package: Name of the test jar.
timeout: Timeout time in seconds.
Returns:
An instance of am_instrument_parser.TestResult object.
"""
cmd = 'uiautomator runtest %s -e class %s' % (test_package, test)
self._LogShell(cmd)
output = self._adb.SendShellCommand(cmd, timeout_time=timeout)
    # uiautomator doesn't fully conform to the instrumentation test runner
# convention and doesn't terminate with INSTRUMENTATION_CODE.
# Just assume the first result is valid.
(test_results, _) = am_instrument_parser.ParseAmInstrumentOutput(output)
if not test_results:
raise errors.InstrumentationError(
'no test results... device setup correctly?')
return test_results[0]
def DismissCrashDialogIfNeeded(self):
"""Dismiss the error/ANR dialog if present.
Returns: Name of the crashed package if a dialog is focused,
None otherwise.
"""
re_focus = re.compile(
r'\s*mCurrentFocus.*Application (Error|Not Responding): (\S+)}')
def _FindFocusedWindow():
match = None
for line in self.RunShellCommand('dumpsys window windows'):
match = re.match(re_focus, line)
if match:
break
return match
match = _FindFocusedWindow()
if not match:
return
package = match.group(2)
logging.warning('Trying to dismiss %s dialog for %s' % match.groups())
self.SendKeyEvent(KEYCODE_DPAD_RIGHT)
self.SendKeyEvent(KEYCODE_DPAD_RIGHT)
self.SendKeyEvent(KEYCODE_ENTER)
match = _FindFocusedWindow()
if match:
logging.error('Still showing a %s dialog for %s' % match.groups())
return package
def EfficientDeviceDirectoryCopy(self, source, dest):
""" Copy a directory efficiently on the device
Uses a shell script running on the target to copy new and changed files the
source directory to the destination directory and remove added files. This
is in some cases much faster than cp -r.
Args:
source: absolute path of source directory
dest: absolute path of destination directory
"""
logging.info('In EfficientDeviceDirectoryCopy %s %s', source, dest)
with DeviceTempFile(self, suffix=".sh") as temp_script_file:
host_script_path = os.path.join(constants.DIR_SOURCE_ROOT,
'build',
'android',
'pylib',
'efficient_android_directory_copy.sh')
self._adb.Push(host_script_path, temp_script_file.name)
out = self.RunShellCommand(
'sh %s %s %s' % (temp_script_file.name, source, dest),
timeout_time=120)
if self._device:
device_repr = self._device[-4:]
else:
device_repr = '????'
for line in out:
logging.info('[%s]> %s', device_repr, line)
def _GetControlUsbChargingCommand(self):
if self._control_usb_charging_command['cached']:
return self._control_usb_charging_command['command']
self._control_usb_charging_command['cached'] = True
if not self.IsRootEnabled():
return None
for command in CONTROL_USB_CHARGING_COMMANDS:
# Assert command is valid.
assert 'disable_command' in command
assert 'enable_command' in command
assert 'witness_file' in command
witness_file = command['witness_file']
if self.FileExistsOnDevice(witness_file):
self._control_usb_charging_command['command'] = command
return command
return None
def CanControlUsbCharging(self):
return self._GetControlUsbChargingCommand() is not None
def DisableUsbCharging(self, timeout=10):
command = self._GetControlUsbChargingCommand()
if not command:
raise Exception('Unable to act on usb charging.')
disable_command = command['disable_command']
t0 = time.time()
# Do not loop directly on self.IsDeviceCharging to cut the number of calls
# to the device.
while True:
if t0 + timeout - time.time() < 0:
raise pexpect.TIMEOUT('Unable to disable USB charging in time: %s' % (
self.GetBatteryInfo()))
self.RunShellCommand(disable_command)
if not self.IsDeviceCharging():
break
def EnableUsbCharging(self, timeout=10):
command = self._GetControlUsbChargingCommand()
if not command:
raise Exception('Unable to act on usb charging.')
    enable_command = command['enable_command']
t0 = time.time()
# Do not loop directly on self.IsDeviceCharging to cut the number of calls
# to the device.
while True:
if t0 + timeout - time.time() < 0:
raise pexpect.TIMEOUT('Unable to enable USB charging in time.')
      self.RunShellCommand(enable_command)
if self.IsDeviceCharging():
break
def IsDeviceCharging(self):
for line in self.RunShellCommand('dumpsys battery'):
if 'powered: ' in line:
if line.split('powered: ')[1] == 'true':
return True
class NewLineNormalizer(object):
"""A file-like object to normalize EOLs to '\n'.
Pexpect runs adb within a pseudo-tty device (see
http://www.noah.org/wiki/pexpect), so any '\n' printed by adb is written
as '\r\n' to the logfile. Since adb already uses '\r\n' to terminate
lines, the log ends up having '\r\r\n' at the end of each line. This
filter replaces the above with a single '\n' in the data stream.
"""
def __init__(self, output):
self._output = output
def write(self, data):
data = data.replace('\r\r\n', '\n')
self._output.write(data)
def flush(self):
self._output.flush()
| CTSRD-SOAAP/chromium-42.0.2311.135 | build/android/pylib/android_commands.py | Python | bsd-3-clause | 73,018 | [
"Galaxy"
] | 26b5e96c1e86d6870b9df85c0f6d2801c875f085ee3bf9c84f18876040ab3310 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Run parser unit tests for cclib."""
import sys
import unittest
sys.path.append('parser')
from testdata import *
from testlogfileparser import *
from testutils import *
if __name__ == "__main__":
unittest.main()
| gaursagar/cclib | test/test_parser.py | Python | bsd-3-clause | 420 | [
"cclib"
] | f8ae637c26dbaf95b9e022488bd4d11290b4d2f3e9cbd3d241bda8796b1ea9d8 |
# Gambit scripts
#
# Copyright (C) USC Information Sciences Institute
# Author: Nibir Bora <nbora@usc.edu>
# URL: <http://cbg.isi.edu/>
# For license information, see LICENSE
import os
import sys
import csv
import math
import pickle
import anyjson
import psycopg2
import matplotlib
import numpy as np
import lib.geo as geo
import matplotlib.pyplot as plt
from pylab import *
from datetime import *
from pprint import pprint
from pytz import timezone, utc
from multiprocessing import Pool
import settings as my
sys.path.insert(0, os.path.abspath('..'))
#
# ACTIVITY POINTS
#
def calc_activity():
'''Calculate activity matrix'''
if not os.path.exists('data/' + my.DATA_FOLDER + 'activity/'):
os.makedirs('data/' + my.DATA_FOLDER + 'activity/')
activity_mat = _load_matrix('activity', 'activity_mat')
activity_mat__dist_norm = _load_matrix('activity', 'activity_mat__dist_norm')
if not activity_mat:
activity_mat = _calc_activity_mat()
_save_matrix('activity', 'activity_mat', activity_mat)
if not activity_mat__dist_norm:
activity_mat__dist_norm = _calc_activity_mat(dist_norm=True)
_save_matrix('activity', 'activity_mat__dist_norm', activity_mat__dist_norm)
activity_mat__dtf_norm = _apply_twfreq_norm(activity_mat__dist_norm, activity_mat)
_save_matrix('activity', 'activity_mat__dtf_norm', activity_mat__dtf_norm)
activity_mat__din_norm = _apply_infreq_norm(activity_mat__dist_norm, activity_mat)
_save_matrix('activity', 'activity_mat__din_norm', activity_mat__din_norm)
activity_mat__dtfin_norm = _apply_infreq_norm(activity_mat__dtf_norm, activity_mat)
_save_matrix('activity', 'activity_mat__dtfin_norm', activity_mat__dtfin_norm)
_show_matrix(activity_mat)
_show_matrix(activity_mat__dist_norm)
_show_matrix(activity_mat__dtf_norm)
_show_matrix(activity_mat__din_norm)
def _calc_activity_mat(dist_norm=False):
'''Calculate activity matrix for all neighborhoods in region'''
mat = {}
con = psycopg2.connect(my.DB_CONN_STRING)
cur = con.cursor()
if not dist_norm:
SQL = 'SELECT nh.id, count(*) \
FROM (SELECT * FROM {rel_tweet} \
WHERE user_id IN \
(SELECT user_id FROM {rel_home} \
WHERE ST_WithIN(geo, \
(SELECT pol FROM {rel_nhood} \
WHERE id = %s)))) AS tw, \
(SELECT * FROM {rel_nhood} \
WHERE id IN %s) AS nh \
WHERE ST_WithIN(tw.geo, nh.pol) \
GROUP BY nh.id'.format(rel_tweet=my.REL_TWEET, rel_home=my.REL_HOME, rel_nhood=my.REL_NHOOD)
for from_id in _load_nhoodIDs():
mat[from_id] = dict([(to_id, 0) for to_id in _load_nhoodIDs()])
cur.execute(SQL, (from_id, tuple(_load_nhoodIDs())))
records = cur.fetchall()
for rec in records:
mat[from_id][rec[0]] = rec[1]
else:
SQL = 'SELECT nh.id, ST_Distance_Sphere(ST_MakePoint(ST_Y(tw.geo), ST_X(tw.geo)), \
ST_MakePoint(ST_Y(h.geo), ST_X(h.geo))) AS dist \
FROM (SELECT * FROM {rel_tweet} \
WHERE user_id IN \
(SELECT user_id FROM {rel_home} \
WHERE ST_WithIN(geo, \
(SELECT pol FROM {rel_nhood} \
WHERE id = %s)))) AS tw, \
(SELECT * FROM {rel_nhood} \
WHERE id IN %s) AS nh, \
(SELECT * FROM {rel_home} \
WHERE ST_WithIN(geo, \
(SELECT pol FROM {rel_nhood} \
WHERE id = %s))) as h \
WHERE ST_WithIN(tw.geo, nh.pol) \
AND h.user_id = tw.user_id'.format(rel_tweet=my.REL_TWEET, rel_home=my.REL_HOME, rel_nhood=my.REL_NHOOD)
norm = _calc_dist_norm()
for from_id in _load_nhoodIDs():
mat[from_id] = dict([(to_id, 0) for to_id in _load_nhoodIDs()])
cur.execute(SQL, (from_id, tuple(_load_nhoodIDs()), from_id))
records = cur.fetchall()
visit_dists = [rec for rec in records]
visits = dict((v, 0) for v in list(set([vd[0] for vd in visit_dists])))
for vd in visit_dists:
if int(vd[1]) > 0 and int(vd[1]/100 + 1) < 150:
visits[vd[0]] += 1.0 / norm[int(round(vd[1]/100 + 1))]
for to_id in visits:
mat[from_id][to_id] = int(visits[to_id])
con.close()
return mat
#
# VISITS
#
def calc_visits():
'''Calculate visit matrix'''
if not os.path.exists('data/' + my.DATA_FOLDER + 'visits/'):
os.makedirs('data/' + my.DATA_FOLDER + 'visits/')
visit_mat = _load_matrix('visits', 'visit_mat')
visit_mat__dist_norm = _load_matrix('visits', 'visit_mat__dist_norm')
if not visit_mat:
visit_mat = _calc_visit_mat()
_save_matrix('visits', 'visit_mat', visit_mat)
if not visit_mat__dist_norm:
visit_mat__dist_norm = _calc_visit_mat(dist_norm=True)
_save_matrix('visits', 'visit_mat__dist_norm', visit_mat__dist_norm)
visit_mat__dtf_norm = _apply_twfreq_norm(visit_mat__dist_norm, visit_mat)
_save_matrix('visits', 'visit_mat__dtf_norm', visit_mat__dtf_norm)
visit_mat__din_norm = _apply_infreq_norm(visit_mat__dist_norm, visit_mat)
_save_matrix('visits', 'visit_mat__din_norm', visit_mat__din_norm)
visit_mat__dtfin_norm = _apply_infreq_norm(visit_mat__dtf_norm, visit_mat)
_save_matrix('visits', 'visit_mat__dtfin_norm', visit_mat__dtfin_norm)
_show_matrix(visit_mat)
_show_matrix(visit_mat__dist_norm)
_show_matrix(visit_mat__dtf_norm)
_show_matrix(visit_mat__din_norm)
def _calc_visit_mat(dist_norm=False):
'''Calculate visit matrix for all neighborhoods in region'''
mat = {}
con = psycopg2.connect(my.DB_CONN_STRING)
cur = con.cursor()
if not dist_norm:
SQL = 'SELECT pts.id, count(*) \
FROM (SELECT nh.id, user_id, (timestamp AT TIME ZONE \'{timezone}\')::date AS ds, count(*) \
FROM (SELECT * FROM {rel_tweet} \
WHERE user_id IN \
(SELECT user_id FROM {rel_home} \
WHERE ST_WithIN(geo, \
(SELECT pol FROM {rel_nhood} \
WHERE id = %s)))) AS tw, \
(SELECT * FROM {rel_nhood} \
WHERE id IN %s) AS nh \
WHERE ST_WithIN(tw.geo, nh.pol) \
GROUP BY nh.id, user_id, ds) AS pts \
GROUP BY (pts.id)'.format(rel_tweet=my.REL_TWEET, rel_home=my.REL_HOME, rel_nhood=my.REL_NHOOD, timezone=my.TIMEZONE)
for from_id in _load_nhoodIDs():
mat[from_id] = dict([(to_id, 0) for to_id in _load_nhoodIDs()])
cur.execute(SQL, (from_id, tuple(_load_nhoodIDs())))
records = cur.fetchall()
for rec in records:
mat[from_id][rec[0]] = rec[1]
else:
SQL = 'SELECT id, dist \
FROM (SELECT nh.id, tw.user_id, (timestamp AT TIME ZONE \'{timezone}\')::date AS ds, \
max(ST_Distance_Sphere(ST_MakePoint(ST_Y(tw.geo), ST_X(tw.geo)), ST_MakePoint(ST_Y(h.geo), ST_X(h.geo)))) as dist, count(*) \
FROM (SELECT * FROM {rel_tweet} \
WHERE user_id IN \
(SELECT user_id FROM {rel_home} \
WHERE ST_WithIN(geo, \
(SELECT pol FROM {rel_nhood} \
WHERE id = %s)))) AS tw, \
(SELECT * FROM {rel_nhood} \
WHERE id IN %s) AS nh, \
(SELECT * FROM {rel_home} \
WHERE ST_WithIN(geo, \
(SELECT pol FROM {rel_nhood} \
WHERE id = %s))) as h \
WHERE ST_WithIN(tw.geo, nh.pol) \
AND h.user_id = tw.user_id \
GROUP BY nh.id, tw.user_id, ds) AS foo'.format(rel_tweet=my.REL_TWEET, rel_home=my.REL_HOME, rel_nhood=my.REL_NHOOD, timezone=my.TIMEZONE)
norm = _calc_dist_norm()
for from_id in _load_nhoodIDs():
mat[from_id] = dict([(to_id, 0) for to_id in _load_nhoodIDs()])
cur.execute(SQL, (from_id, tuple(_load_nhoodIDs()), from_id))
records = cur.fetchall()
visit_dists = [rec for rec in records]
visits = dict((v, 0) for v in list(set([vd[0] for vd in visit_dists])))
for vd in visit_dists:
if int(vd[1]) > 0 and int(vd[1]/100 + 1) < 150:
visits[vd[0]] += 1.0 / norm[int(round(vd[1]/100 + 1))]
for to_id in visits:
mat[from_id][to_id] = int(visits[to_id])
con.close()
return mat
#
# VISITORS
#
def calc_visitors():
'''Calculate visitor matrix'''
if not os.path.exists('data/' + my.DATA_FOLDER + 'visitors/'):
os.makedirs('data/' + my.DATA_FOLDER + 'visitors/')
visitor_mat = _load_matrix('visitors', 'visitor_mat')
visitor_mat__dist_norm = _load_matrix('visitors', 'visitor_mat__dist_norm')
if not visitor_mat:
visitor_mat = _calc_visitor_mat()
_save_matrix('visitors', 'visitor_mat', visitor_mat)
_show_matrix(visitor_mat)
def _calc_visitor_mat():
'''Calculate visitor matrix for all neighborhoods in region'''
mat = {}
SQL = 'SELECT nh.id, count(DISTINCT user_id) \
FROM (SELECT * FROM {rel_tweet} \
WHERE user_id IN \
(SELECT user_id FROM {rel_home} \
WHERE ST_WithIN(geo, \
(SELECT pol FROM {rel_nhood} \
WHERE id = %s)))) AS tw, \
(SELECT * FROM {rel_nhood} \
WHERE id IN %s) AS nh \
WHERE ST_WithIN(tw.geo, nh.pol) \
GROUP BY nh.id'.format(rel_tweet=my.REL_TWEET, rel_home=my.REL_HOME, rel_nhood=my.REL_NHOOD)
con = psycopg2.connect(my.DB_CONN_STRING)
cur = con.cursor()
for from_id in _load_nhoodIDs():
mat[from_id] = dict([(to_id, 0) for to_id in _load_nhoodIDs()])
cur.execute(SQL, (from_id, tuple(_load_nhoodIDs())))
records = cur.fetchall()
for rec in records:
mat[from_id][rec[0]] = rec[1]
con.close()
return mat
#
# NORMALIZE
#
def _calc_dist_norm():
'''Calculate distance norm function [1-CDF]'''
frac = dict([(i, 0) for i in range(1, 151)])
count = 1
with open('data/' + my.DATA_FOLDER + 'user_disp.csv', 'rb') as fp:
csv_reader = csv.reader(fp, delimiter=',')
for row in csv_reader:
dist_i = int(int(row[1])/100)+1
if dist_i > 0 and dist_i <= 150:
frac[dist_i] += 1
count += 1
cdf = {}
for i in range(1, 151):
cdf[i] = sum([frac[j] for j in range(1, i+1)])/float(count)
norm = {}
for i in range(1, 151):
norm[i] = 1-cdf[i] # dist_norm_3
return norm
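# Illustrative sketch (made-up distances, not from the project data): the
# distance-normalized matrices weight each visit by 1/(1 - CDF(d)), so a tweet
# far from the user's home counts for more than one close by. The CSV read by
# the function is the same data/<DATA_FOLDER>/user_disp.csv used above.
#   norm = _calc_dist_norm()
#   weight_near = 1.0 / norm[int(round(500 / 100 + 1))]   # ~500 m from home
#   weight_far = 1.0 / norm[int(round(5000 / 100 + 1))]   # ~5 km from home
#   # weight_far > weight_near because fewer displacements reach 5 km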
def _apply_twfreq_norm(mat, mat__norm=None):
'''Apply Tweet frequency normalization'''
if mat__norm:
norm = dict((nid, sum(mat__norm[nid].values()) - mat__norm[nid][nid]) for nid in mat__norm)
norm = dict((nid, norm[nid]/float(sum(norm.values()))) for nid in norm)
else:
norm = dict((nid, sum(mat[nid].values()) - mat[nid][nid]) for nid in mat)
norm = dict((nid, norm[nid]/float(sum(norm.values()))) for nid in norm)
mat__out = dict((from_id, dict()) for from_id in mat)
for from_id in mat:
for to_id in mat:
if norm[from_id] != 0:
mat__out[from_id][to_id] = mat[from_id][to_id] / norm[from_id]
else:
mat__out[from_id][to_id] = 0
return mat__out
def _apply_infreq_norm(mat, mat__norm=None):
'''Apply incoming tw frequency normalization'''
if mat__norm:
norm = dict((to_id, sum([mat__norm[from_id][to_id] for from_id in mat__norm if from_id != to_id]))
for to_id in mat__norm)
norm = dict((nid, norm[nid]/float(sum(norm.values()))) for nid in norm)
else:
norm = dict((to_id, sum([mat[from_id][to_id] for from_id in mat if from_id != to_id]))
for to_id in mat)
norm = dict((nid, norm[nid]/float(sum(norm.values()))) for nid in norm)
mat__out = dict((from_id, dict()) for from_id in mat)
for from_id in mat:
for to_id in mat:
if norm[to_id] != 0:
mat__out[from_id][to_id] = mat[from_id][to_id] / norm[to_id]
else:
mat__out[from_id][to_id] = 0
return mat__out
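# Hedged illustration (not part of the original script): _apply_twfreq_norm
# rescales rows by each origin's share of outgoing activity, while
# _apply_infreq_norm rescales columns by each destination's share of incoming
# activity; chaining the two reproduces the *_dtfin_norm matrices saved above.
#   mat__dtf = _apply_twfreq_norm(mat__dist_norm, mat)
#   mat__dtfin = _apply_infreq_norm(mat__dtf, mat)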
#
# UTILITY FUNCTIONS
#
_load_nhoodIDs = lambda: [int(nid) for nid in anyjson.loads(
open('data/' + my.DATA_FOLDER + 'hood_ids.txt', 'rb').read())]
def _load_matrix(folder, file_name):
if os.path.exists('data/' + my.DATA_FOLDER + folder + '/' + file_name + '.pickle'):
with open('data/' + my.DATA_FOLDER + folder + '/' + file_name + '.pickle', 'rb') as fp1:
visit_mat = pickle.load(fp1)
return visit_mat
else:
return None
def _save_matrix(folder, file_name, mat):
with open('data/' + my.DATA_FOLDER + folder + '/' + file_name + '.pickle', 'wb') as fp1:
pickle.dump(mat, fp1)
def _show_matrix(mat):
line_str = '%5s |' + ' %5s' * (len(mat))
print '\n' + '_'*7 + '______' * (len(mat))
print line_str % tuple(['A->B'] + mat.keys())
print '_'*7 + '______' * (len(mat))
for nid in mat:
x = [nid]
vals = [str(val)[0:5] for val in mat[nid].values()]
x.extend(vals)
print line_str % tuple(x)
print '_'*7 + '______' * (len(mat))
'''def _save_visitMatJSONs():
# Stores all visit matrices in JSON format
# Reads from visit matrix pickles
if not os.path.exists('data/' + my.DATA_FOLDER + 'visits/' + 'json/'):
os.makedirs('data/' + my.DATA_FOLDER + 'visits/' + 'json/')
_save_visitMatJSON('visit_mat')
_save_visitMatJSON('visit_mat__twfreq_norm')
_save_visitMatJSON('visit_mat__dist_norm')
_save_visitMatJSON('visit_mat__dtf_norm')
'''
def _save_visitMatJSON(file_name):
with open('data/' + my.DATA_FOLDER + 'visits/' + file_name + '.pickle', 'rb') as fp1:
visit_mat = pickle.load(fp1)
with open('data/' + my.DATA_FOLDER + 'visits/json/' + file_name + '.json', 'wb') as fp1:
fp1.write(anyjson.dumps(visit_mat))
| nbir/gambit-scripts | scripts/calc_visits/src/calc_visits.py | Python | apache-2.0 | 12,535 | [
"VisIt"
] | dabd9d8300fce087c28e0fe12a4650ad01d35dcf5d4eec086a6c84d5329d3ff1 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Robin Schneider <ypid@riseup.net>
# Copyright (C) 2016 DebOps <https://debops.org/>
# SPDX-License-Identifier: AGPL-3.0-only
#
# debops-api is part of DebOps.
#
# debops-api is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, version 3 of the
# License.
#
# debops-api is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
import sys
import json
import re
import logging
import pprint
from distutils.version import StrictVersion
import shutil
import yaml
import git
from docutils import core
from docutils.writers.html4css1 import Writer, HTMLTranslator
__license__ = 'AGPL-3.0-only'
__author__ = 'Robin Schneider <ypid@riseup.net>'
__version__ = '0.1.0'
"""
debops-api - Machine readable metadata about the DebOps Project.
"""
class NoHeaderHTMLTranslator(HTMLTranslator):
def __init__(self, document):
HTMLTranslator.__init__(self, document)
self.head_prefix = ['', '', '', '', '']
self.body_prefix = []
self.body_suffix = []
self.stylesheet = []
def reSTify(string):
_w = Writer()
_w.translator_class = NoHeaderHTMLTranslator
return core.publish_string(string, writer=_w)
class DebOpsAPI:
def __init__(
self,
strict=True,
docs_url_pattern=None,
changelog_url_pattern=None,
role_owner=None,
test_mode=False,
):
self._strict = strict
self._docs_url_pattern = docs_url_pattern
self._changelog_url_pattern = changelog_url_pattern
self._role_owner = role_owner
self._test_mode = test_mode
self._metadata = {}
self._roles = {}
def _get_role_full_name(self, role_owner, role_name):
"""
Return the Ansible role name according to Ansible Galaxy naming
convention.
"""
return '{}.{}'.format(role_owner, role_name)
def _get_repo_url(self, role_owner, role_name):
"""
Return repository URL for the given Ansible role.
"""
github_base_url = 'https://github.com'
return '/'.join([
github_base_url,
role_owner,
'ansible-' + self._get_normalized_role_name(role_name),
])
def _read_github_repos_api_response(
self,
api_res_encoded_json,
role_owner=None
):
"""
Read GitHub API response for a /users/:username/repos or
/orgs/:org/repos query handed over as encoded JSON string.
Unfinished because relying on the GitHub API is discouraged for the
DebOps Project as long as there are other ways to do it.
"""
api_response = json.load(api_res_encoded_json)
for repo in api_response:
_re = re.match(r'ansible-(?P<role_name>[a-z0-9_-]+)+$',
repo['name'])
if not _re:
continue
role_name = _re.group('role_name')
role_owner = self._get_owner_from_vcs_url(repo['html_url'])
role_full_name = self._get_role_full_name(role_owner, role_name)
metadata_from_api = {
'role_owner': role_owner,
'role_name': role_name,
'vcs_url': repo['html_url'],
'role_format_version': '0.1.0',
}
self._metadata.setdefault(role_full_name, {})
self._metadata[role_full_name].update(metadata_from_api)
def read_github_repos_api_file(self, file_path, role_owner=None):
"""
Read GitHub API response file for a /users/:username/repos or
/orgs/:org/repos query passed as file path.
"""
with open(file_path) as gh_rsp_fh:
self._read_github_repos_api_response(
gh_rsp_fh,
role_owner=role_owner,
)
def _interpret_role_dir_name(self, role_dir_name):
"""
Extract and return information from the role directory name.
"""
version_by_pattern_map = {
'0.1.0': re.compile(
r'^(?P<role_owner>[^.]+)\.(?P<role_name>[a-z0-9_-]+)\.rst$'),
'0.2.0': re.compile(
r'^ansible-(?P<role_name>[a-z0-9_-]+)$'),
}
for role_format_version, pattern in version_by_pattern_map.items():
_re = pattern.search(role_dir_name)
if _re:
role_owner = None
if 'role_owner' in _re.groupdict():
role_owner = _re.group('role_owner')
role_name = _re.group('role_name')
logger.debug('Detected docs format version {} '
'for owner: {}, name: {} from {}'.format(
role_format_version,
role_owner,
role_name,
role_dir_name,
))
return {
'role_format_version': role_format_version,
'role_owner': role_owner,
'role_name': role_name,
}
return None
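# Illustrative examples of the matching above (made-up names):
#   'ansible-nginx'    -> version '0.2.0', role_name 'nginx'
#   'debops.nginx.rst' -> version '0.1.0', role_owner 'debops', role_name 'nginx'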
def _get_decoded_yaml(self, yaml_file_path):
"""
Get decoded YAML file.
"""
try:
with open(yaml_file_path) as ansigenome_fh:
return yaml.safe_load(ansigenome_fh)
except OSError:
return {}
def _get_owner_from_vcs_url(self, vcs_url):
"""
Return owner name from VCS URL.
Return `ypid` for `https://github.com/ypid/ansible-packages/`.
"""
_re = re.match('[^:]+://[^/]+/(?P<owner_name>[^/]+)/', vcs_url)
if _re:
owner_name = _re.group('owner_name')
logger.debug("Detected owner '{}' for URL: {}".format(
owner_name,
vcs_url,
))
return owner_name
else:
return None
def _get_vcs_info(self, dir_path):
"""
Read VCS metadata for the given directory path.
"""
g = git.Git(dir_path)
# %cd: committer date
last_committer_date = g.log('-1', '--format=%cd', '--date=iso8601')
# logger.debug('Got last committer date {} for: {}'.format(
# last_committer_date,
# dir_path,
# ))
# describe_version = g.describe()
# logger.debug(describe_version)
try:
version = g.describe('--abbrev=0', '--tags')
except Exception:
# Did not work on Travis test.
# except git.exc.GitCommandError:
version = '0.0.0'
if self._test_mode:
# Fake committer date in test mode
last_committer_date = '1970-01-01 00:00:00 +0000'
metadata = {
'vcs_last_committer_date': last_committer_date,
'version': re.sub(r'^v', '', version),
}
if not self._test_mode and version != '0.0.0':
try:
commits_since_last_release = len(
g.log('{}...HEAD'.format(version), '--oneline').split('\n')
)
except Exception:
commits_since_last_release = None
if commits_since_last_release is not None:
metadata.update({
'vcs_commits_since_last_release': (
commits_since_last_release),
})
return metadata
def _get_maintainers_from_line(self, line):
# Modeled with the natural language processing from AIML in mind.
# TODO: Remove redundancy. Duplicated into ansigenome source code.
# Origin: debops-api
_re = re.match(
r'^[^.]*?maintainers?[\W_]+(?:is|are)[\W_]+`?(?P<nicks>.+?)\.?$',
line,
re.IGNORECASE
)
if _re:
return [x.rstrip('_') for x in re.split(r'[\s,]+',
_re.group('nicks')) if x not in ['and', ',']]
else:
return None
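# Illustrative examples of the extraction above (made-up nicks):
#   'The maintainer is drybjed_.'              -> ['drybjed']
#   'Current maintainers are ypid_ and jdoe.'  -> ['ypid', 'jdoe']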
def _get_maintainers_from_changelog(self, changes_file):
# TODO: Remove redundancy. Duplicated into ansigenome source code.
# Origin: debops-api
"""
Extract the maintainers from a CHANGES.rst file and return the nicknames
of the maintainers.
"""
try:
with open(changes_file, 'r') as changes_fh:
for line in changes_fh:
nick = self._get_maintainers_from_line(line)
if nick is not None:
return nick
except FileNotFoundError:
return None
return None
def _get_role_metadata(self, role_path):
"""
Read metadata for the given role.
"""
role_metadata = {}
role_metadata['ansigenome'] = self._get_decoded_yaml(
os.path.join(role_path, 'meta', 'ansigenome.yml')
)['ansigenome_info']
role_metadata['meta'] = self._get_decoded_yaml(
os.path.join(role_path, 'meta', 'main.yml')
)
maintainer_nicks = self._get_maintainers_from_changelog(
os.path.join(role_path, 'CHANGES.rst')
)
if maintainer_nicks is not None:
role_metadata['maintainer_nicks'] = maintainer_nicks
role_metadata['role_format_version'] = '0.2.1'
return role_metadata
def _get_normalized_meta_ansigenome(self, meta_ansigenome):
"""
Returns normalized meta/ansigenome.yml data intended for inclusion in
self._metadata.
"""
metadata = {}
if 'authors' in meta_ansigenome:
metadata.setdefault('authors', [])
for author_item in meta_ansigenome['authors']:
metadata['authors'].append({
'name': author_item['name'],
'nick': author_item['github'],
'maintainer': False,
})
return metadata
def _get_normalized_role_name(self, role_name):
"""
Returns normalized role name as used in URLs
Example role name: `ansible`, returns: `role-ansible`.
"""
if role_name == 'ansible':
role_name = 'role-' + role_name
return role_name
def _get_normalized_meta_main(self, meta_main):
"""
Returns normalized meta/main.yml data intended for inclusion in
self._metadata.
"""
metadata = {}
if 'galaxy_info' not in meta_main:
return metadata
license_map = {
'GNU General Public License v3': 'GPL-3.0-only',
}
skip_keys = [
'company',
'author',
]
rename_keys = {
'galaxy_tags': 'tags',
}
for k, v in meta_main['galaxy_info'].items():
if k == 'license':
if v in license_map:
v = license_map[v]
if k in skip_keys:
continue
k = rename_keys.get(k, k)
metadata[k] = v
return metadata
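# Illustrative sketch of the mapping above (values are made up):
#   {'galaxy_info': {'license': 'GNU General Public License v3',
#                    'author': 'Jane Doe',
#                    'galaxy_tags': ['system']}}
# is normalized to
#   {'license': 'GPL-3.0-only', 'tags': ['system']}
# ('author' and 'company' are dropped, everything else is copied through).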
def gen_role_metadata(self):
"""
Generate metadata based on already present metadata.
"""
for role_full_name, metadata in self._metadata.items():
role_owner = metadata['role_owner']
role_name = metadata['role_name']
additional_metadata = {
'normalized_role_name': (
self._get_normalized_role_name(role_name)),
'ci_badge_url': (
'https://api.travis-ci.org/{}/ansible-{}.png'.format(
role_owner,
self._get_normalized_role_name(role_name),
)
),
'ci_url': 'https://travis-ci.org/{}/ansible-{}'.format(
role_owner,
self._get_normalized_role_name(role_name),
),
'test_suite_url': (
'https://github.com/debops/test-suite/'
'tree/master/ansible-{}'.format(
self._get_normalized_role_name(role_name),
)
),
'galaxy_url': 'https://galaxy.ansible.com/{}/{}'.format(
role_owner,
role_name,
),
}
if 'vcs_url' in metadata:
additional_metadata.update({
'clone_url': metadata['vcs_url'] + '.git',
'issue_url': metadata['vcs_url'] + '/issues',
'pr_url': metadata['vcs_url'] + '/pulls',
})
if StrictVersion('0.2.0') <= \
StrictVersion(metadata['role_format_version']):
if self._docs_url_pattern:
additional_metadata['docs_url'] = (
self._docs_url_pattern.format(
role_owner=role_owner,
role_name=role_name,
normalized_role_name=(
self._get_normalized_role_name(role_name)),
)
)
if self._changelog_url_pattern:
additional_metadata['changelog_url'] = (
self._changelog_url_pattern.format(
role_owner=role_owner,
role_name=role_name,
normalized_role_name=(
self._get_normalized_role_name(role_name)),
)
)
self._metadata[role_full_name].update(additional_metadata)
def read_role_metadata(self, role_path):
"""
Read metadata from each role available in role_path.
"""
for role_dir_name in os.listdir(role_path):
role_dir_info = self._interpret_role_dir_name(role_dir_name)
# Ignore roles with the old docs format for now since that would
# require getting the metadata from external servers (and to
# encourage conversion to the new docs format).
if role_dir_info:
if StrictVersion('0.2.0') <= StrictVersion(
role_dir_info['role_format_version']):
role_name = role_dir_info['role_name']
role_metadata = self._get_role_metadata(
os.path.join(role_path, role_dir_name))
role_vcs_url = role_metadata['ansigenome']['github_url']
role_owner = self._get_owner_from_vcs_url(role_vcs_url)
role_full_name = self._get_role_full_name(
role_owner, role_name)
metadata = {
'vcs_url': role_vcs_url,
'role_format_version':
role_metadata['role_format_version']
if 'role_format_version' in role_metadata
else role_dir_info['role_format_version'],
'role_owner': role_owner,
'role_name': role_name,
}
metadata.update(
self._get_vcs_info(os.path.join(
role_path, role_dir_name))
)
if 'meta' in role_metadata:
metadata.update(
self._get_normalized_meta_main(
role_metadata['meta'])
)
if 'ansigenome' in role_metadata:
metadata.update(
self._get_normalized_meta_ansigenome(
role_metadata['ansigenome'])
)
if 'maintainer_nicks' in role_metadata:
nicks = role_metadata['maintainer_nicks']
metadata.setdefault('authors', [])
author_present = set([])
for author_item in metadata['authors']:
if author_item['nick'] in nicks:
author_present.add(author_item['nick'])
author_item['maintainer'] = True
if not author_present:
if self._strict:
raise Exception(
"Nick(s) {nicks} are maintainers but"
" no other meta information for them"
" could be found in the repository."
" Affected role: {role_full_name}".format(
role_full_name=role_full_name,
nicks=set(nicks).difference(
author_present),
)
)
else:
# Legacy stuff.
role_name = role_dir_info['role_name']
role_owner = (role_dir_info['role_owner']
if role_dir_info['role_owner']
else self._role_owner)
if not role_owner:
raise Exception("Default role owner"
" not given but required.")
role_vcs_url = self._get_repo_url(
role_owner,
role_name,
)
role_full_name = self._get_role_full_name(
role_owner,
role_name,
)
metadata = {
'vcs_url': role_vcs_url,
'role_format_version': '0.1.0',
'role_owner': role_owner,
'role_name': role_name,
}
self._metadata.setdefault(role_full_name, {})
self._metadata[role_full_name].update(metadata)
def get_metadata(self, metadata=None):
"""
Return public metadata.
"""
if not metadata:
metadata = self._metadata
return metadata
if not isinstance(metadata, dict):
return metadata
public_metadata = {}
for k, v in metadata.items():
if not k.startswith('_'):
public_metadata[k] = self.get_metadata(v)
return public_metadata
def write_api_dir(self, api_dir):
"""
Write metadata to API directory.
The write is done to a temp directory which is later renamed, to achieve
an atomic operation and to ensure that the API returns consistent data.
"""
api_work_root_dir = api_dir
api_dir = os.path.join(api_dir, 'v0')
api_work_dir = api_dir + '_new'
try:
shutil.rmtree(api_work_dir)
except OSError:
pass
os.makedirs(os.path.join(api_work_dir, 'role'))
os.makedirs(os.path.join(api_work_dir, 'roles'))
# API: /
with open(os.path.join(api_work_dir, 'version'), 'w') as outfile:
outfile.write('{}\n'.format(__version__))
debops_api_base_dir = os.path.join(os.path.dirname(
os.path.realpath(__file__)), '..')
with open(os.path.join(api_work_dir, 'license'), 'w') as outfile:
with open(os.path.join(debops_api_base_dir,
'COPYRIGHT'), 'r') as copyright:
outfile.write(copyright.read())
outfile.write('\n\n\n')
with open(os.path.join(debops_api_base_dir,
'LICENSE'), 'r') as license:
outfile.write(license.read())
with open(os.path.join(debops_api_base_dir,
'README.rst'), 'r') as readme_fh:
readme_html_string = reSTify(
readme_fh.read()).decode('utf-8', 'strict')
with open(os.path.join(api_work_root_dir,
'README.html'), 'w') as outfile:
outfile.write(readme_html_string)
with open(os.path.join(api_work_dir,
'README.html'), 'w') as outfile:
outfile.write(readme_html_string)
# API: /role/
for role_full_name, metadata in self.get_metadata().items():
role_api_file = os.path.join(
api_work_dir,
'role',
role_full_name + '.json'
)
with open(role_api_file, 'w') as outfile:
json.dump(metadata, outfile, sort_keys=True)
outfile.write('\n')
# API: /roles/
with open(os.path.join(api_work_dir,
'roles', 'count'), 'w') as outfile:
outfile.write('{}\n'.format(len(self.get_metadata().keys())))
metadata_per_owner = {}
for role_full_name, metadata in self.get_metadata().items():
role_owner = metadata['role_owner']
metadata_per_owner.setdefault(role_owner, {})
metadata_per_owner[role_owner][role_full_name] = metadata
for role_owner, metadata in metadata_per_owner.items():
with open(os.path.join(api_work_dir, 'roles',
role_owner + '.list'),
'w') as outfile:
role_list = self.get_metadata(metadata).keys()
outfile.write('\n'.join(sorted(role_list)) + '\n')
with open(os.path.join(api_work_dir,
'roles', role_owner + '.json'),
'w') as outfile:
json.dump(self.get_metadata(metadata), outfile, sort_keys=True)
outfile.write('\n')
with open(os.path.join(api_work_dir,
'roles', 'count:' + role_owner),
'w') as outfile:
outfile.write('{}\n'.format(
len(self.get_metadata(metadata).keys())))
try:
shutil.rmtree(api_dir)
except OSError:
pass
os.rename(api_work_dir, api_dir)
if __name__ == '__main__':
from argparse import ArgumentParser
args_parser = ArgumentParser(
description=__doc__,
)
args_parser.add_argument(
'-V', '--version',
action='version',
version='%(prog)s {version}'.format(version=__version__)
)
args_parser.add_argument(
'-d', '--debug',
help="Print lots of debugging statements.",
action='store_const',
dest='loglevel',
const=logging.DEBUG,
default=logging.WARNING,
)
args_parser.add_argument(
'-v', '--verbose',
help="Be verbose.",
action='store_const',
dest='loglevel',
const=logging.INFO,
)
args_parser.add_argument(
'-n', '--no-strict',
help="Do not exit immediately when there is a inconsistency.",
dest='strict',
action='store_false',
default=True,
)
args_parser.add_argument(
'-i', '--github-api-response-file',
help="Responds file from a /users/:username/repos"
" or /orgs/:org/repos API query.",
)
args_parser.add_argument(
'-r', '--role-path',
help="Base directory where all roles are available.",
)
args_parser.add_argument(
'-o', '--role-owner',
help="Default role owner if not available from Ansible role metadata.",
default='debops',
)
args_parser.add_argument(
'-a', '--api-dir',
help="Write the static parts of api.debops.org"
" to the given directory."
" Note that all files in this directory are going to be"
" overwritten or deleted.",
)
args_parser.add_argument(
'-D', '--docs-url-pattern',
help="Documentation URL for each role.",
default='https://docs.debops.org/en/latest/'
'ansible/roles/ansible-{role_name}/docs/index.html',
)
args_parser.add_argument(
'-C', '--changelog-url-pattern',
help="Changelog URL for each role.",
default='https://docs.debops.org/en/latest/'
'ansible/roles/ansible-{role_name}/docs/changelog.html',
)
args_parser.add_argument(
'-t', '--test-mode',
help="Make the output reproducible by normalizing changing peaces like"
"timestamps.",
action='store_true',
default=False,
)
args = args_parser.parse_args()
if not args.github_api_response_file and not args.role_path:
args_parser.print_help()
sys.exit(1)
logger = logging.getLogger(__file__)
logging.basicConfig(
format='%(levelname)s: %(message)s',
level=args.loglevel,
)
debops_metadata = DebOpsAPI(
strict=args.strict,
docs_url_pattern=args.docs_url_pattern,
changelog_url_pattern=args.changelog_url_pattern,
role_owner=args.role_owner,
test_mode=args.test_mode,
)
if args.github_api_response_file:
debops_metadata.read_github_repos_api_file(
args.github_api_response_file,
)
if args.role_path:
debops_metadata.read_role_metadata(args.role_path)
debops_metadata.gen_role_metadata()
if args.api_dir:
debops_metadata.write_api_dir(args.api_dir)
logging.info("Metadata:\n{}".format(
pprint.pformat(debops_metadata.get_metadata()),
))
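# Example invocation (illustrative paths only; adjust to your checkout layout):
#   python api.py --role-path ~/src/debops-roles --api-dir /srv/www/api.debops.org -v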
| ganto/debops | lib/debops-api/debops/api.py | Python | gpl-3.0 | 26,388 | [
"Galaxy"
] | 18bb7acbaff9e5951b24903e83db08d515fbb7d4fb080cd4aa44a4a063632302 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
knmi_to_cru31_full_bias.py
Use OCW to download, evaluate and plot (contour map) a dataset
against a reference dataset and OCW standard metrics (bias).
In this example:
1. Download a netCDF files from a local site.
AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc
2. Load the local files into OCW dataset objects.
3. Interface with the Regional Climate Model Evaluation Database (https://rcmes.jpl.nasa.gov/)
to load the CRU3.1 Daily-Max Temp dataset (https://rcmes.jpl.nasa.gov/content/cru31).
4. Process each dataset to the same same shape.
5. Temporally rebin the datasets to a single timestep.
6. Spatially regrid the dataset objects to a 1/2 degree grid.
7. Build a bias metric to use for evaluation use the standard OCW metric set.
8. Create an evaluation object using the datasets and metric.
9. Plot the results of the evaluation (contour map).
OCW modules demonstrated:
1. datasource/local
2. datasource/rcmed
3. dataset
4. dataset_processor
5. evaluation
6. metrics
7. plotter
"""
from __future__ import print_function
import datetime
import ssl
import sys
from os import path
import numpy as np
import ocw.data_source.local as local
import ocw.data_source.rcmed as rcmed
import ocw.dataset_processor as dsp
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter
from ocw.dataset import Bounds as Bounds
if sys.version_info[0] >= 3:
from urllib.request import urlretrieve
else:
# Not Python 3 - today, it is most likely to be Python 2.
# Note that this might need an update if Python 4
# ever comes around one day.
from urllib import urlretrieve
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
# File URL leader
FILE_LEADER = "http://zipper.jpl.nasa.gov/dist/"
# This way we can easily adjust the time span of the retrievals
YEARS = 3
# Two Local Model Files
MODEL = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc"
# Filename for the output image/plot (without file extension)
OUTPUT_PLOT = "cru_31_tmax_knmi_africa_bias_full"
# Download necessary NetCDF file if not present
if not path.exists(MODEL):
urlretrieve(FILE_LEADER + MODEL, MODEL)
# Step 1: Load Local NetCDF File into OCW Dataset Objects.
print("Loading %s into an OCW Dataset Object" % (MODEL,))
knmi_dataset = local.load_file(MODEL, "tasmax")
print("KNMI_Dataset.values shape: (times, lats, lons) - %s \n" %
(knmi_dataset.values.shape,))
# Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module.
print("Working with the rcmed interface to get CRU3.1 Daily-Max Temp")
metadata = rcmed.get_parameters_metadata()
cru_31 = [m for m in metadata if m['parameter_id'] == "39"][0]
# The RCMED API uses the following function to query, subset and return the
# raw data from the database:
#
# rcmed.parameter_dataset(dataset_id, parameter_id, min_lat, max_lat, min_lon,
# max_lon, start_time, end_time)
#
# The first two required params are in the cru_31 variable we defined earlier
# Must cast to int since the rcmed api requires ints
dataset_id = int(cru_31['dataset_id'])
parameter_id = int(cru_31['parameter_id'])
print("We are going to use the Model to constrain the Spatial Domain")
# The spatial_boundaries() function returns the spatial extent of the dataset
print("The KNMI_Dataset spatial bounds (min_lat, max_lat, min_lon, max_lon) are: \n"
"%s\n" % (knmi_dataset.spatial_boundaries(), ))
print("The KNMI_Dataset spatial resolution (lat_resolution, lon_resolution) is: \n"
"%s\n\n" % (knmi_dataset.spatial_resolution(), ))
min_lat, max_lat, min_lon, max_lon = knmi_dataset.spatial_boundaries()
print("Calculating the Maximum Overlap in Time for the datasets")
cru_start = datetime.datetime.strptime(cru_31['start_date'], "%Y-%m-%d")
cru_end = datetime.datetime.strptime(cru_31['end_date'], "%Y-%m-%d")
knmi_start, knmi_end = knmi_dataset.temporal_boundaries()
# Grab the Max Start Time
start_time = max([cru_start, knmi_start])
# Grab the Min End Time
end_time = min([cru_end, knmi_end])
print("Overlap computed to be: %s to %s" % (start_time.strftime("%Y-%m-%d"),
end_time.strftime("%Y-%m-%d")))
print("We are going to grab the first %s year(s) of data" % YEARS)
end_time = datetime.datetime(
start_time.year + YEARS, start_time.month, start_time.day)
print("Final Overlap is: %s to %s" % (start_time.strftime("%Y-%m-%d"),
end_time.strftime("%Y-%m-%d")))
print("Fetching data from RCMED...")
cru31_dataset = rcmed.parameter_dataset(dataset_id,
parameter_id,
min_lat,
max_lat,
min_lon,
max_lon,
start_time,
end_time)
# Step 3: Resample Datasets so they are the same shape.
print("CRU31_Dataset.values shape: (times, lats, lons) - %s" %
(cru31_dataset.values.shape,))
print("KNMI_Dataset.values shape: (times, lats, lons) - %s" %
(knmi_dataset.values.shape,))
print("Our two datasets have a mis-match in time. We will subset on time to %s years\n" % YEARS)
# Create a Bounds object to use for subsetting
new_bounds = Bounds(lat_min=min_lat, lat_max=max_lat, lon_min=min_lon,
lon_max=max_lon, start=start_time, end=end_time)
knmi_dataset = dsp.subset(knmi_dataset, new_bounds)
print("CRU31_Dataset.values shape: (times, lats, lons) - %s" %
(cru31_dataset.values.shape,))
print("KNMI_Dataset.values shape: (times, lats, lons) - %s \n" %
(knmi_dataset.values.shape,))
print("Temporally Rebinning the Datasets to a Single Timestep")
# To run FULL temporal Rebinning
knmi_dataset = dsp.temporal_rebin(knmi_dataset, temporal_resolution='full')
cru31_dataset = dsp.temporal_rebin(cru31_dataset, temporal_resolution='full')
print("KNMI_Dataset.values shape: %s" % (knmi_dataset.values.shape,))
print("CRU31_Dataset.values shape: %s \n\n" % (cru31_dataset.values.shape,))
# Spatially Regrid the Dataset Objects to a 1/2 degree grid.
# Using the bounds we will create a new set of lats and lons on 0.5 degree step
new_lons = np.arange(min_lon, max_lon, 0.5)
new_lats = np.arange(min_lat, max_lat, 0.5)
# Spatially regrid datasets using the new_lats, new_lons numpy arrays
print("Spatially Regridding the KNMI_Dataset...")
knmi_dataset = dsp.spatial_regrid(knmi_dataset, new_lats, new_lons)
print("Spatially Regridding the CRU31_Dataset...")
cru31_dataset = dsp.spatial_regrid(cru31_dataset, new_lats, new_lons)
print("Final shape of the KNMI_Dataset:%s" % (knmi_dataset.values.shape, ))
print("Final shape of the CRU31_Dataset:%s" % (cru31_dataset.values.shape, ))
# Step 4: Build a Metric to use for Evaluation - Bias for this example.
# You can build your own metrics, but OCW also ships with some common metrics
print("Setting up a Bias metric to use for evaluation")
bias = metrics.Bias()
# Step 5: Create an Evaluation Object using Datasets and our Metric.
# The Evaluation Class Signature is:
# Evaluation(reference, targets, metrics, subregions=None)
# Evaluation can take in multiple targets and metrics, so we need to convert
# our examples into Python lists. Evaluation will iterate over the lists
print("Making the Evaluation definition")
bias_evaluation = evaluation.Evaluation(knmi_dataset, [cru31_dataset], [bias])
print("Executing the Evaluation using the object's run() method")
bias_evaluation.run()
# Step 6: Make a Plot from the Evaluation.results.
# The Evaluation.results are a set of nested lists to support many different
# possible Evaluation scenarios.
#
# The Evaluation results docs say:
# The shape of results is (num_metrics, num_target_datasets) if no subregion
# Accessing the actual results when we have used 1 metric and 1 dataset is
# done this way:
print("Accessing the Results of the Evaluation run")
results = bias_evaluation.results[0][0, :]
# From the bias output I want to make a Contour Map of the region
print("Generating a contour map using ocw.plotter.draw_contour_map()")
lats = new_lats
lons = new_lons
fname = OUTPUT_PLOT
# Using a 1 x 1 since we have a single Bias for the full time range
gridshape = (1, 1)
plot_title = "TASMAX Bias of KNMI Compared to CRU 3.1 (%s - %s)" % (
start_time.strftime("%Y/%d/%m"), end_time.strftime("%Y/%d/%m"))
sub_titles = ["Full Temporal Range"]
plotter.draw_contour_map(results, lats, lons, fname,
gridshape=gridshape, ptitle=plot_title,
subtitles=sub_titles)
| lewismc/climate | examples/knmi_to_cru31_full_bias.py | Python | apache-2.0 | 9,616 | [
"NetCDF"
] | d8d4af7e220268e088ebd3efb3baa872bc8b68680a51baec2036af90fbb27c25 |
#!/usr/bin/env python
"""
list replicas for files in the FileCatalog
"""
import os
import DIRAC
from COMDIRAC.Interfaces import critical, error
from COMDIRAC.Interfaces import DSession
from COMDIRAC.Interfaces import DCatalog
from COMDIRAC.Interfaces import pathFromArgument
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
if __name__ == "__main__":
import sys
from DIRAC.Core.Base import Script
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s lfn...' % Script.scriptName,
'Arguments:',
' lfn: logical file name', ] )
)
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
session = DSession()
catalog = DCatalog()
if len( args ) < 1:
error("No argument provided\n%s:" % Script.scriptName)
Script.showHelp()
DIRAC.exit( -1 )
Script.enableCS()
exitCode = 0
for arg in args:
# lfn
lfn = pathFromArgument( session, arg )
#fccli.do_replicas( lfn )
ret = returnSingleResult( catalog.catalog.getReplicas( lfn ) )
if ret['OK']:
replicas = ret['Value']
print lfn + ':'
for se, path in replicas.items():
print ' ', se, path
else:
error( lfn + ': ' + ret['Message'] )
exitCode = -2
DIRAC.exit( exitCode )
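# Example invocation (illustrative LFN; real paths depend on your VO layout):
#   dreplicas /vo.example.org/user/j/jdoe/test.txt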
| calancha/COMDIRAC | Interfaces/scripts/dreplicas.py | Python | gpl-3.0 | 1,474 | [
"DIRAC"
] | 7fb4eb5259123d070e1ec4115cfb42222b7bc06799badd529f8996c885c819b5 |
#! /usr/bin/env python3
'''
Convert a mesh file to another.
'''
from __future__ import print_function
import numpy
import vtk
import meshio
def _main():
# Parse command line arguments.
args = _parse_options()
# read mesh data
points, cells, point_data, cell_data, field_data = \
meshio.read(args.infile, file_format="vtu-binary")
print('Number of points: {}'.format(len(points)))
print('Elements:')
for tpe, elems in cells.items():
print(' Number of {}s: {}'.format(tpe, len(elems)))
if point_data:
print('Point data: {}'.format(', '.join(point_data.keys())))
cell_data_keys = set()
for cell_type in cell_data:
cell_data_keys = cell_data_keys.union(cell_data[cell_type].keys())
if cell_data_keys:
print('Cell data: {}'.format(', '.join(cell_data_keys)))
if args.prune:
cells.pop('vertex', None)
cells.pop('line', None)
if 'tetra' in cells:
# remove_lower_order_cells
cells.pop('triangle', None)
# remove_orphaned_nodes.
# find which nodes are not mentioned in the cells and remove them
flat_cells = cells['tetra'].flatten()
orphaned_nodes = numpy.setdiff1d(numpy.arange(len(points)), flat_cells)
points = numpy.delete(points, orphaned_nodes, axis=0)
# also adapt the point data
for key in point_data:
point_data[key] = numpy.delete(
point_data[key],
orphaned_nodes,
axis=0
)
# reset GLOBAL_ID
if 'GLOBAL_ID' in point_data:
point_data['GLOBAL_ID'] = numpy.arange(1, len(points)+1)
# We now need to adapt the cells too.
diff = numpy.zeros(len(flat_cells), dtype=flat_cells.dtype)
for orphan in orphaned_nodes:
diff[numpy.argwhere(flat_cells > orphan)] += 1
flat_cells -= diff
cells['tetra'] = flat_cells.reshape(cells['tetra'].shape)
# Some converters (like VTK) require `points` to be contiguous.
points = numpy.ascontiguousarray(points)
# write it out
meshio.write(
args.outfile,
points,
cells,
file_format="gmsh-ascii",
point_data=point_data,
cell_data=cell_data,
field_data=field_data
)
return
def _parse_options():
'''Parse input options.'''
import argparse
parser = argparse.ArgumentParser(
description=(
'Convert vtu to gmsh mesh formats.'
)
)
parser.add_argument(
'infile',
type=str,
help='mesh file to be read from'
)
parser.add_argument(
'outfile',
type=str,
help='mesh file to be written to'
)
parser.add_argument(
'--prune', '-p',
action='store_true',
help='remove lower order cells, remove orphaned nodes'
)
parser.add_argument(
'--version', '-v',
action='version',
version='%(prog)s ' + ('(version %s)' % meshio.__version__)
)
return parser.parse_args()
if __name__ == '__main__':
# python read_vtu.py --input-format vtu-binary --output-format gmsh-binary /home/ksansom/caseFiles/ultrasound/cases/DSI020CALb/vmtk/DSI020CALb_vmtk_decimate_trim_ext2_mesh.vtu /home/ksansom/caseFiles/ultrasound/cases/DSI020CALb/vmtk/DSI020CALb_vmtk_decimate_trim_ext2_mesh.msh
_main()
# NOTE: everything below is unfinished scratch work (a partial port of a C++
# VTK reader); 'fn' is assumed to hold the path to the input .vtu file.
reader = vtk.vtkXMLUnstructuredGridReader()
reader.SetFileName(fn)
reader.Update()
#want to keep track of cellIds
ids_filter = vtk.vtkIdFilter()
ids_filter.SetInputConnection(reader.GetOutputPort())
#ids_filter.PointIdsOn()
ids_filter.CellIdsOn()
ids_filter.FieldDataOn()
ids_filter.SetIdsArrayName("Ids")
ids_filter.Update()
vtkMesh = ids_filter.GetOutput()
numberOfCellArrays = vtkMesh.GetCellData().GetNumberOfArrays()
cell_entity_id = 0
cell_id_id = 0
arrayNames = []
for i in range(numberOfCellArrays):
arrayNames.append(vtkMesh.GetCellData().GetArrayName(i))
if (arrayNames[-1] == "CellEntityIds"):
cell_entity_id = i
if (arrayNames[-1] == "Ids"):
cell_id_id = i
entity_range = [0., 0.]
vtkMesh.GetCellData().GetArray(cell_entity_id).GetRange(entity_range)
#//vtkCellArray *cells;
#//cells = vtkMesh->GetCells();
begin = int(entity_range[0])
end = int(entity_range[1])
thresh = vtk.vtkThreshold()
thresh.SetInputData(vtkMesh)  # 'mesh' was undefined; the grid read above is 'vtkMesh'
thresh.SetInputArrayToProcess(1, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS, "CellEntityIds")
for j in range(begin, end+1):
thresh.ThresholdBetween(j, j)
thresh.Update()
#pointIdArray = thresh.GetOutput().GetPointData().GetArray(ids_name)
cellIdArray = thresh.GetOutput().GetCellData().GetArray(cell_id_id)
for i in range(cellIdArray.GetNumberOfTuples()):
# id of the cell (the original line here was left-over C++; translated to Python)
cell_id_filt = int(round(cellIdArray.GetComponent(i, 0)))
# vtkMesh->GetCellPoints(cell_id_filt, npts, ptIds);
# cell_type = vtkMesh->GetCellType(cell_id_filt);
# vtkCellNumPoints = MapVtkCellType(cell_type, nekpp_type);
#
# if (vtkCellNumPoints == -1)
# {
# std::cout << "nonsensical, empty cell" << std::endl;
# continue;
# }
#
# for (j = 0; j < npts - vtkCellNumPoints + 1; ++j)
# {
# // Create element tags
# vector<int> tags;
# tags.push_back(int(q)); // composite
# tags.push_back(nekpp_type); // element type
#
# // Read element node list
# vector<NodeSharedPtr> nodeList;
# for (k = j; k < j + vtkCellNumPoints; ++k)
# {
# nodeList.push_back(m_mesh->m_node[ptIds[k]]);
# }
#
# // Create element
# ElmtConfig conf(nekpp_type, 1, false, false);
# ElementSharedPtr E = GetElementFactory().CreateInstance(
# nekpp_type, conf, nodeList, tags);
#
# // Determine mesh expansion dimension
# if (E->GetDim() > m_mesh->m_expDim)
# {
# m_mesh->m_expDim = E->GetDim();
# }
# m_mesh->m_element[E->GetDim()].push_back(E);
# }
# }
# }
| kayarre/Tools | vtk/read_vtu.py | Python | bsd-2-clause | 6,164 | [
"VTK"
] | dcb357962ac169a3d2eadb40a1d6a327816aafb05c3c0bef905de7aeed61e820 |
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2012 Doug Blank
# Copyright (C) 2013 John Ralls <jralls@ceridwen.us>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Provides constants for other modules
"""
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
import os
import sys
import uuid
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .git_revision import get_git_revision
from .constfunc import get_env_var
from ..version import VERSION, VERSION_TUPLE, major_version
from .utils.resourcepath import ResourcePath
from .utils.grampslocale import GrampsLocale
#-------------------------------------------------------------------------
#
# Gramps program name
#
#-------------------------------------------------------------------------
PROGRAM_NAME = "Gramps"
#-------------------------------------------------------------------------
#
# Standard Gramps Websites
#
#-------------------------------------------------------------------------
URL_HOMEPAGE = "http://gramps-project.org/"
URL_MAILINGLIST = "http://sourceforge.net/mail/?group_id=25770"
URL_BUGHOME = "http://bugs.gramps-project.org"
URL_BUGTRACKER = "http://bugs.gramps-project.org/bug_report_page.php"
URL_WIKISTRING = "http://gramps-project.org/wiki/index.php?title="
URL_MANUAL_PAGE = "Gramps_%s_Wiki_Manual" % major_version
URL_MANUAL_DATA = '%s_-_Entering_and_editing_data:_detailed' % URL_MANUAL_PAGE
URL_MANUAL_SECT1 = '%s_-_part_1' % URL_MANUAL_DATA
URL_MANUAL_SECT2 = '%s_-_part_2' % URL_MANUAL_DATA
URL_MANUAL_SECT3 = '%s_-_part_3' % URL_MANUAL_DATA
WIKI_FAQ = "FAQ"
WIKI_KEYBINDINGS = "Gramps_%s_Wiki_Manual_-_Keybindings" % major_version
WIKI_EXTRAPLUGINS = "%s_Addons" % major_version
WIKI_EXTRAPLUGINS_RAWDATA = "Plugins%s&action=raw" % major_version
#-------------------------------------------------------------------------
#
# Mime Types
#
#-------------------------------------------------------------------------
APP_FAMTREE = 'x-directory/normal'
APP_GRAMPS = "application/x-gramps"
APP_GRAMPS_XML = "application/x-gramps-xml"
APP_GEDCOM = "application/x-gedcom"
APP_GRAMPS_PKG = "application/x-gramps-package"
APP_GENEWEB = "application/x-geneweb"
APP_VCARD = ["text/x-vcard", "text/x-vcalendar"]
#-------------------------------------------------------------------------
#
# Determine the home directory. According to Wikipedia, most UNIX like
# systems use HOME. I'm assuming that this would apply to OS X as well.
# Windows apparently uses USERPROFILE
#
#-------------------------------------------------------------------------
if 'GRAMPSHOME' in os.environ:
USER_HOME = get_env_var('GRAMPSHOME')
HOME_DIR = os.path.join(USER_HOME, 'gramps')
elif 'USERPROFILE' in os.environ:
USER_HOME = get_env_var('USERPROFILE')
if 'APPDATA' in os.environ:
HOME_DIR = os.path.join(get_env_var('APPDATA'), 'gramps')
else:
HOME_DIR = os.path.join(USER_HOME, 'gramps')
else:
USER_HOME = get_env_var('HOME')
HOME_DIR = os.path.join(USER_HOME, '.gramps')
VERSION_DIR = os.path.join(
HOME_DIR, "gramps%s%s" % (VERSION_TUPLE[0], VERSION_TUPLE[1]))
CUSTOM_FILTERS = os.path.join(VERSION_DIR, "custom_filters.xml")
REPORT_OPTIONS = os.path.join(HOME_DIR, "report_options.xml")
TOOL_OPTIONS = os.path.join(HOME_DIR, "tool_options.xml")
ENV_DIR = os.path.join(HOME_DIR, "env")
TEMP_DIR = os.path.join(HOME_DIR, "temp")
THUMB_DIR = os.path.join(HOME_DIR, "thumb")
THUMB_NORMAL = os.path.join(THUMB_DIR, "normal")
THUMB_LARGE = os.path.join(THUMB_DIR, "large")
USER_PLUGINS = os.path.join(VERSION_DIR, "plugins")
USER_CSS = os.path.join(HOME_DIR, "css")
# dirs checked/made for each Gramps session
USER_DIRLIST = (USER_HOME, HOME_DIR, VERSION_DIR, ENV_DIR, TEMP_DIR, THUMB_DIR,
THUMB_NORMAL, THUMB_LARGE, USER_PLUGINS, USER_CSS)
#-------------------------------------------------------------------------
#
# Paths to python modules - assumes that the root directory is one level
# above this one, and that the plugins directory is below the root directory.
#
#-------------------------------------------------------------------------
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.insert(0, ROOT_DIR)
git_revision = get_git_revision(ROOT_DIR).replace('\n', '')
if sys.platform == 'win32' and git_revision == "":
git_revision = get_git_revision(os.path.split(ROOT_DIR)[1])
VERSION += git_revision
#VERSION += "-1"
#
# Glade files
#
GLADE_DIR = os.path.join(ROOT_DIR, "gui", "glade")
GLADE_FILE = os.path.join(GLADE_DIR, "gramps.glade")
PERSON_GLADE = os.path.join(GLADE_DIR, "edit_person.glade")
PLUGINS_GLADE = os.path.join(GLADE_DIR, "plugins.glade")
MERGE_GLADE = os.path.join(GLADE_DIR, "mergedata.glade")
RULE_GLADE = os.path.join(GLADE_DIR, "rule.glade")
PLUGINS_DIR = os.path.join(ROOT_DIR, "plugins")
USE_TIPS = False
if sys.platform == 'win32':
USE_THUMBNAILER = False
else:
USE_THUMBNAILER = True
#-------------------------------------------------------------------------
#
# Paths to data files.
#
#-------------------------------------------------------------------------
_resources = ResourcePath()
DATA_DIR = _resources.data_dir
IMAGE_DIR = _resources.image_dir
TIP_DATA = os.path.join(DATA_DIR, "tips.xml")
PAPERSIZE = os.path.join(DATA_DIR, "papersize.xml")
ICON = os.path.join(IMAGE_DIR, "gramps.png")
LOGO = os.path.join(IMAGE_DIR, "logo.png")
SPLASH = os.path.join(IMAGE_DIR, "splash.jpg")
LICENSE_FILE = os.path.join(_resources.doc_dir, 'COPYING')
#-------------------------------------------------------------------------
#
# Gramps environment variables dictionary
#
#-------------------------------------------------------------------------
ENV = {
"USER_HOME": USER_HOME,
"HOME_DIR": HOME_DIR,
"VERSION": VERSION,
"major_version": major_version,
"VERSION_DIR": VERSION_DIR,
"ENV_DIR": ENV_DIR,
"TEMP_DIR": TEMP_DIR,
"THUMB_DIR": THUMB_DIR,
"THUMB_NORMAL": THUMB_NORMAL,
"THUMB_LARGE": THUMB_LARGE,
"USER_PLUGINS": USER_PLUGINS,
"ROOT_DIR": ROOT_DIR,
"GLADE_DIR": GLADE_DIR,
"PLUGINS_DIR": PLUGINS_DIR,
"DATA_DIR": DATA_DIR,
"IMAGE_DIR": IMAGE_DIR,
}
#-------------------------------------------------------------------------
#
# Init Localization
#
#-------------------------------------------------------------------------
GRAMPS_LOCALE = GrampsLocale(localedir=_resources.locale_dir)
_ = GRAMPS_LOCALE.translation.sgettext
GTK_GETTEXT_DOMAIN = 'gtk30'
#-------------------------------------------------------------------------
#
# About box information
#
#-------------------------------------------------------------------------
COPYRIGHT_MSG = "© 2001-2006 Donald N. Allingham\n" \
"© 2007-2017 The Gramps Developers"
COMMENTS = _("Gramps\n (Genealogical Research and Analysis "
"Management Programming System)\n"
"is a personal genealogy program.")
AUTHORS = [
"Alexander Roitman",
"Benny Malengier",
"Brian Matherly",
"Donald A. Peterson",
"Donald N. Allingham",
"David Hampton",
"Martin Hawlisch",
"Richard Taylor",
"Tim Waugh",
"John Ralls"
]
AUTHORS_FILE = os.path.join(DATA_DIR, "authors.xml")
DOCUMENTERS = [
'Alexander Roitman',
]
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
THUMBSCALE = 96.0
THUMBSCALE_LARGE = 180.0
XMLFILE = "data.gramps"
NO_SURNAME = "(%s)" % _("surname|none")
NO_GIVEN = "(%s)" % _("given-name|none")
ARABIC_COMMA = "،"
ARABIC_SEMICOLON = "؛"
DOCGEN_OPTIONS = 'Docgen Options'
COLON = _(':') # translators: needed for French, ignore otherwise
#-------------------------------------------------------------------------
#
# Options Constants
#
#-------------------------------------------------------------------------
LONGOPTS = [
"action=",
"class=",
"config=",
"debug=",
"display=",
"disable-sound",
"disable-crash-dialog",
"enable-sound",
"espeaker=",
"export=",
"force-unlock",
"format=",
"gdk-debug=",
"gdk-no-debug=",
"gtk-debug=",
"gtk-no-debug=",
"gtk-module=",
"g-fatal-warnings",
"help",
"import=",
"load-modules=",
"list"
"name=",
"oaf-activate-iid=",
"oaf-ior-fd=",
"oaf-private",
"open=",
"create=",
"options=",
"screen=",
"show",
"sm-client-id=",
"sm-config-prefix=",
"sm-disable",
"sync",
"remove=",
"usage",
"version",
"yes",
"quiet",
]
SHORTOPTS = "O:C:i:e:f:a:p:d:c:r:lLthuv?syq"
GRAMPS_UUID = uuid.UUID('516cd010-5a41-470f-99f8-eb22f1098ad6')
#-------------------------------------------------------------------------
#
# Fanchart Constants
#
#-------------------------------------------------------------------------
PIXELS_PER_GENERATION = 50 # size of radius for generation
BORDER_EDGE_WIDTH = 10 # empty white box size at edge to indicate parents
CHILDRING_WIDTH = 12 # width of the children ring inside the person
TRANSLATE_PX = 10 # size of the central circle, used to move the chart
PAD_PX = 4 # padding with edges
PAD_TEXT = 2 # padding for text in boxes
BACKGROUND_SCHEME1 = 0
BACKGROUND_SCHEME2 = 1
BACKGROUND_GENDER = 2
BACKGROUND_WHITE = 3
BACKGROUND_GRAD_GEN = 4
BACKGROUND_GRAD_AGE = 5
BACKGROUND_SINGLE_COLOR = 6
BACKGROUND_GRAD_PERIOD = 7
GENCOLOR = {
BACKGROUND_SCHEME1: ((255, 63, 0),
(255, 175, 15),
(255, 223, 87),
(255, 255, 111),
(159, 255, 159),
(111, 215, 255),
(79, 151, 255),
(231, 23, 255),
(231, 23, 121),
(210, 170, 124),
(189, 153, 112)),
BACKGROUND_SCHEME2: ((229, 191, 252),
(191, 191, 252),
(191, 222, 252),
(183, 219, 197),
(206, 246, 209)),
BACKGROUND_WHITE: ((255, 255, 255),
(255, 255, 255),),
}
MAX_AGE = 100
GRADIENTSCALE = 5
FORM_CIRCLE = 0
FORM_HALFCIRCLE = 1
FORM_QUADRANT = 2
COLLAPSED = 0
NORMAL = 1
EXPANDED = 2
TYPE_BOX_NORMAL = 0
TYPE_BOX_FAMILY = 1
| ennoborg/gramps | gramps/gen/const.py | Python | gpl-2.0 | 11,397 | [
"Brian"
] | 018602b1edfd2100505cb60dbaa583df92d16d4313a218f39ab3a6895f7a25ff |
"""Berendsen NVT dynamics class."""
import sys
import numpy as np
from ase.md.md import MolecularDynamics
# For parallel GPAW simulations, the random forces should be distributed.
if '_gpaw' in sys.modules:
# http://wiki.fysik.dtu.dk/gpaw
from gpaw.mpi import world as gpaw_world
else:
gpaw_world = None
class NVTBerendsen(MolecularDynamics):
"""Berendsen (constant N, V, T) molecular dynamics.
Usage: NVTBerendsen(atoms, timestep, temperature, taut, fixcm)
atoms
The list of atoms.
timestep
The time step.
temperature
The desired temperature, in Kelvin.
taut
Time constant for Berendsen temperature coupling.
fixcm
If True, the position and momentum of the center of mass is
kept unperturbed. Default: True.
"""
def __init__(self, atoms, timestep, temperature, taut, fixcm=True,
trajectory=None, logfile=None, loginterval=1,
communicator=gpaw_world):
MolecularDynamics.__init__(self, atoms, timestep, trajectory,
logfile, loginterval)
self.taut = taut
self.temperature = temperature
self.fixcm = fixcm # will the center of mass be held fixed?
self.communicator = communicator
def set_taut(self, taut):
self.taut = taut
def get_taut(self):
return self.taut
def set_temperature(self, temperature):
self.temperature = temperature
def get_temperature(self):
return self.temperature
def set_timestep(self, timestep):
self.dt = timestep
def get_timestep(self):
return self.dt
def scale_velocities(self):
""" Do the NVT Berendsen velocity scaling """
tautscl = self.dt / self.taut
old_temperature = self.atoms.get_temperature()
scl_temperature = np.sqrt(1.0+ (self.temperature/ old_temperature- 1.0)
*tautscl)
#limit the velocity scaling to reasonable values
if scl_temperature > 1.1:
scl_temperature = 1.1
if scl_temperature < 0.9:
scl_temperature = 0.9
atoms = self.atoms
p = self.atoms.get_momenta()
p = scl_temperature * p
self.atoms.set_momenta(p)
return
def step(self, f):
""" move one timestep forward using Berenden NVT molecular dynamics."""
self.scale_velocities()
#one step velocity verlet
atoms = self.atoms
p = self.atoms.get_momenta()
p += 0.5 * self.dt * f
if self.fixcm:
# calculate the center of mass
# momentum and subtract it
psum = p.sum(axis=0) / float(len(p))
p = p - psum
self.atoms.set_positions(self.atoms.get_positions() +
self.dt * p / self.atoms.get_masses()[:,np.newaxis])
# We need to store the momenta on the atoms before calculating
# the forces, as in a parallel Asap calculation atoms may
# migrate during force calculations, and the momenta need to
# migrate along with the atoms. For the same reason, we
# cannot use self.masses in the line above.
self.atoms.set_momenta(p)
f = self.atoms.get_forces()
atoms.set_momenta(self.atoms.get_momenta() + 0.5 * self.dt * f)
return f
| slabanja/ase | ase/md/nvtberendsen.py | Python | gpl-2.0 | 3,395 | [
"ASE",
"GPAW"
] | 0a39c94333889178a8c36a1e7c32e85e44325bd607485782285631564537f669 |
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
import os, sys
import logging
import warnings
import locale
from zeroinstall import localedir
if localedir:
# Tell GTK where to find the translations, if they're not in
# the default system location.
if hasattr(locale, 'bindtextdomain'):
locale.bindtextdomain('zero-install', localedir)
from optparse import OptionParser
from zeroinstall import _, SafeException
from zeroinstall.injector.config import load_config
from zeroinstall.support import tasks
_recalculate = tasks.Blocker('recalculate')
def recalculate():
"""Ask the mainloop to recalculate. If we're already recalculating, wait for that to finish
and then do it again."""
global _recalculate
_recalculate.trigger()
_recalculate = tasks.Blocker('recalculate')
def check_gui():
"""Returns True if the GUI works, or returns an exception if not."""
if sys.version_info[0] < 3:
try:
import pygtk; pygtk.require('2.0')
except ImportError as ex:
logging.info("No GUI available", exc_info = ex)
return ex
try:
if sys.version_info[0] > 2:
from zeroinstall.gtkui import pygtkcompat
pygtkcompat.enable()
pygtkcompat.enable_gtk(version = '3.0')
import gtk
except (ImportError, ValueError, RuntimeError) as ex:
logging.info("No GUI available", exc_info = ex)
return ex
if gtk.gdk.get_display() is None:
return SafeException("Failed to connect to display.")
return True
_gui_available = None
def gui_is_available(force_gui):
"""True if we have a usable GUI. False to fallback on console mode.
If force_gui is True, raise an exception if the GUI is missing."""
global _gui_available
if _gui_available is None:
with warnings.catch_warnings():
if not force_gui:
warnings.filterwarnings("ignore")
_gui_available = check_gui()
if _gui_available is True:
return True
if force_gui:
raise _gui_available
return False
class OCamlDriver:
def __init__(self, config):
self.config = config
self.watchers = []
def set_selections(self, ready, tree, sels):
self.ready = ready
self.tree = tree
self.sels = sels
for w in self.watchers: w()
def open_gui(args):
parser = OptionParser(usage=_("usage: %prog [options] interface"))
parser.add_option("", "--before", help=_("choose a version before this"), metavar='VERSION')
parser.add_option("", "--cpu", help=_("target CPU type"), metavar='CPU')
parser.add_option("", "--command", help=_("command to select"), metavar='COMMAND')
parser.add_option("-d", "--download-only", help=_("fetch but don't run"), action='store_true')
parser.add_option("-g", "--force-gui", help=_("display an error if there's no GUI"), action='store_true')
parser.add_option("", "--message", help=_("message to display when interacting with user"))
parser.add_option("", "--not-before", help=_("minimum version to choose"), metavar='VERSION')
parser.add_option("", "--os", help=_("target operation system type"), metavar='OS')
parser.add_option("-r", "--refresh", help=_("check for updates of all interfaces"), action='store_true')
parser.add_option("", "--select-only", help=_("only download the feeds"), action='store_true')
parser.add_option("-s", "--source", help=_("select source code"), action='store_true')
parser.add_option("", "--systray", help=_("download in the background"), action='store_true')
parser.add_option("-v", "--verbose", help=_("more verbose output"), action='count')
parser.add_option("-V", "--version", help=_("display version information"), action='store_true')
parser.add_option("", "--version-for", help=_("set version constraints for a specific interface"),
nargs=2, metavar='URI RANGE', action='append')
parser.add_option("", "--with-store", help=_("add an implementation cache"), action='append', metavar='DIR')
parser.disable_interspersed_args()
(options, args) = parser.parse_args(args)
if options.verbose:
logger = logging.getLogger()
if options.verbose == 1:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.DEBUG)
if options.version:
from zeroinstall.gui import gui
print("0launch-gui (zero-install) " + gui.version)
print("Copyright (C) 2010 Thomas Leonard")
print(_("This program comes with ABSOLUTELY NO WARRANTY,"
"\nto the extent permitted by law."
"\nYou may redistribute copies of this program"
"\nunder the terms of the GNU Lesser General Public License."
"\nFor more information about these matters, see the file named COPYING."))
sys.exit(0)
if not gui_is_available(options.force_gui):
sys.exit(100)
from zeroinstall.gui import gui
handler = gui.GUIHandler()
config = load_config(handler)
assert len(args) > 0
interface_uri = args[0]
if len(args) > 1:
parser.print_help()
sys.exit(1)
from zeroinstall.gui import mainwindow, dialog
widgets = dialog.Template('main')
root_iface = config.iface_cache.get_interface(interface_uri)
finished = tasks.Blocker("GUI finished")
def resolve(result):
finished.gui_result = result
finished.trigger()
driver = OCamlDriver(config)
window = mainwindow.MainWindow(driver, widgets, download_only = bool(options.download_only), resolve = resolve, select_only = bool(options.select_only))
handler.mainwindow = window
if options.message:
window.set_message(options.message)
window.window.connect('destroy', lambda w: handler.abort_all_downloads())
if options.systray:
window.use_systray_icon(root_iface)
logger = logging.getLogger()
def prepare_for_recalc(force_refresh):
window.refresh_button.set_sensitive(False)
window.browser.set_update_icons(force_refresh)
if not window.systray_icon:
window.show()
force_refresh = bool(options.refresh)
prepare_for_recalc(force_refresh)
# Called each time a complete solve_with_downloads is done.
@tasks.async
def run_gui(reply_holder):
window.refresh_button.set_sensitive(True)
window.browser.highlight_problems()
if window.systray_icon and window.systray_icon.get_visible() and \
window.systray_icon.is_embedded():
if driver.ready:
window.systray_icon.set_tooltip(_('Downloading updates for %s') % root_iface.get_name())
window.run_button.set_active(True)
else:
# Should already be reporting an error, but
# blink it again just in case
window.systray_icon.set_blinking(True)
refresh_clicked = dialog.ButtonClickedBlocker(window.refresh_button)
yield refresh_clicked, _recalculate, finished
if finished.happened:
reply_holder.append([finished.gui_result])
else:
reply_holder.append(["recalculate", refresh_clicked.happened])
prepare_for_recalc(refresh_clicked.happened)
return (run_gui, driver)
| afb/0install | zeroinstall/gui/main.py | Python | lgpl-2.1 | 6,705 | [
"VisIt"
] | 774c42108419fd9d3636b6a515f7e094a213266db5411f6120c07b6f2a68327e |
"""
============================================
client - Client Manager for the Pushover API
============================================
This module defines classes and functions necessary to act as a Client to the Pushover servers. For more details about
the Pushover API for clients visit `their site <https://pushover.net/api/client>`_
Creating a client:
------------------
For the first time, creating a client requires the following steps:
1. Create an object of class type ClientManager and pass in your app token
2. Have the user login to the Pushover service with their email and password
3. Register your client service as a new device
While doing these steps, you'll receive a 'secret' and 'device_id'. These are returned by the `ClientManager.login`
and `ClientManager.register_device` methods. They are also stored in the `secret` and `device_id` properties. This
secret and device id MUST be stored in a safe location if stored at all.
Here is an example:
>>> import pypushover as py_po
>>> cm = py_po.client.ClientManager('<app token>')
>>> secret = cm.login('user email', 'user pass')
>>> device_id = cm.register_device('device_name')
If you already have a secret and device id, then you can pass those into the ClientManager upon creation:
>>> import pypushover as py_po
>>> cm = py_po.client.ClientManager('<app token>', secret='<user secret>', device_id='<device id>')
Retrieving Messages:
--------------------
Messages are retrieved from the Pushover Server by using the `retrieve_message` method. Once called, all messages
stored on the Pushover servers are then stored into the `messages` property. These messages are a list of
dictionaries with items as [defined in the Pushover API](https://pushover.net/api/client#download).
>>> cm.retrieve_message()
>>> for msg in cm.messages:
... print(msg['message'])
Clearing Messages from Pushover Server:
---------------------------------------
Messages stored on the Pushover Server should be cleared after being presented to the user. This is done using the
`clear_server_messages` method. Note: This only clears out the messages on Pushover's servers and not the local
copy stored in the objects `messages` property.
>>> cm.clear_server_messages()
Acknowledge an Emergency Message:
---------------------------------
If an emergency priority message is received, the Pushover Server should be notified of the receipt per [their
API guidelines](https://pushover.net/api/client#p2). Once the user has acknowledged the message, call the
`acknowledge_message` method, passing in the emergency message's `receipt`.
>>> cm.retrieve_message()
>>> for msg in cm.messages:
... print(msg['message'])
... if msg['priority'] == py_po.PRIORITIES.EMERGENCY:
... cm.acknowledge_message(msg['receipt'])
Listening Servers:
------------------
You can call the `listen` or `listen_async` method to continually listen and respond to messages. Pass in a function
to these methods that accepts a single argument for the received message(s).
The `listen` method is blocking and will run continually until interrupted, either manually (Ctrl+C) or through an
unrecoverable loss of connection to the Pushover servers.
>>> def print_msg(messages):
... for msg in messages:
... print(msg['message'])
>>> cm.listen(print_msg)
The `listen_async` method is non-blocking and will run continually until stopped with the `stop_listening` method.
>>> import time
>>> cm.listen_async(print_msg)
>>> time.sleep(30)
>>> cm.stop_listening()
"""
__all__ = ('ClientManager', )
import websocket
import logging
from multiprocessing import Process, Pipe
from pypushover import BaseManager, send, base_url
logging.getLogger(__name__).addHandler(logging.NullHandler())
class ClientManager(BaseManager):
"""
Manages the interface between the Pushover Servers and user. This can be instantiated with or without the user
secret and device id. If no secret is provided, the user MUST login before interfacing with the Pushover servers.
If no device id is provided, the user MUST register this client as a device before interfacing with the Pushover
servers.
"""
_login_url = base_url + "users/login.json"
_register_device_url = base_url + "devices.json"
_message_url = base_url + "messages.json"
_del_message_url = base_url + "devices/{device_id}/update_highest_message.json"
_ack_message_url = base_url + "receipts/{receipt_id}/acknowledge.json"
_ws_connect_url = "wss://client.pushover.net/push"
_ws_login = "login:{device_id}:{secret}\n"
def __init__(self, app_token, secret=None, device_id=None):
"""
:param str app_token: application id from Pushover API
:param str secret: (Optional) user secret given after validation of login
:param str device_id: (Optional) device id of this client
:return:
"""
super(ClientManager, self).__init__(app_token)
self.__secret__ = secret
self.__device_id__ = device_id
self.messages = []
self._ws_app = websocket.WebSocketApp(
self._ws_connect_url,
on_open=self._on_ws_open,
on_message=self._on_ws_message,
on_error=self._on_ws_error,
on_close=self._on_ws_close
)
self.__on_msg_receipt__ = None
self.__p__ = None
@property
def secret(self):
return self.__secret__
@property
def device_id(self):
return self.__device_id__
def login(self, email, password):
"""
Logs into the Pushover server with the user's email and password. Retrieves a secret key, stores it, and then
returns it.
:param email: the user's email
:param password: the user's password
:return str: the user's secret
"""
params = {
'email': email,
'password': password
}
self.latest_response_dict = send(self._login_url, data_out=params)
self.__secret__ = self.latest_response_dict['secret']
return self.__secret__
def register_device(self, name):
"""
Registers the device (this client) under the name `name`. The device's id is then stored and returned.
:param str name: Name of the device to register
:return string: device_id of the device registered
"""
params = {
'secret': self.__secret__,
'name': name,
'os': 'O'
}
self.latest_response_dict = send(self._register_device_url, data_out=params)
self.__device_id__ = self.latest_response_dict['id']
return self.__device_id__
def retrieve_message(self):
"""
Retrieves messages stored on the Pushover servers and saves them into the `messages` property.
"""
params = {
'secret': self.__secret__,
'device_id': self.__device_id__
}
self.latest_response_dict = send(self._message_url, data_out=params, get_method=True)
self.messages = self.latest_response_dict['messages']
def clear_server_messages(self):
"""
Clears the messages stored on Pushover servers.
"""
if self.messages:
params = {
'secret': self.__secret__,
'message': max([i['id'] for i in self.messages])
}
self.latest_response_dict = send(self._del_message_url.format(device_id=self.__device_id__), params)
def acknowledge_message(self, receipt):
"""
Sends an acknowledgement to the server that the message was read.
:param receipt: receipt of the message to ack
"""
params = {
'secret': self.__secret__
}
self.latest_response_dict = send(self._ack_message_url.format(receipt_id=receipt), params)
def listen(self, on_msg_receipt):
"""
Listens for messages from the server. When a message is received, the on_msg_receipt function is called with a
single parameter representing the messages received.
:param on_msg_receipt: function to call when a message is received
"""
self.__on_msg_receipt__ = on_msg_receipt
self._ws_app.run_forever()
def listen_async(self, on_msg_receipt):
"""
Creates a Process that listens to the Pushover server for new messages. When a message is received, the
on_msg_receipt function is called with a single parameter representing the messages received.
:param on_msg_receipt: function to call when a message is received
"""
self.__p__ = Process(target=self.listen, args=(on_msg_receipt,))
self.__p__.start()
def stop_listening(self):
"""
Stops the listening process from accepting any more messages.
"""
if self.__p__:
self.__p__.terminate()
self.__p__ = None
def _on_ws_open(self, ws):
"""
Function used when the websocket is opened for the first time.
:param ws: the websocket
"""
logging.info("Opening connection to Pushover server...")
ws.send(self._ws_login.format(device_id=self.__device_id__, secret=self.__secret__))
logging.info("----Server Connection Established----")
def _on_ws_message(self, ws, message):
"""
Function used when the websocket receives a message. Per the Pushover API guidelines, one of four responses
will be sent:
1. `#` - Keep-alive packet, no response needed.
2. `!` - A new message has arrived; you should perform a sync.
3. `R` - Reload request; you should drop your connection and re-connect.
4. `E` - Error; a permanent problem occurred and you should not automatically re-connect.
Prompt the user to login again or re-enable the device.
:param ws: the websocket
:param message: message received from remote server
"""
message = message.decode("utf-8")
logging.debug("Message received: " + message)
if message == "#":
pass
elif message == "!":
self.retrieve_message()
if self.__on_msg_receipt__:
self.__on_msg_receipt__(self.messages)
elif message == "R":
logging.info("Reconnecting to server (requested from server)...")
ws.close()
self.listen(self.__on_msg_receipt__)
elif message == "E":
logging.error("Server connection failure!")
else: # message isn't of the type expected. Raise an error.
raise NotImplementedError #todo Implement an appropriate exception
def _on_ws_error(self, ws, error):
"""
Function used when the websocket encounters an error. The error is logged.
:param ws: the websocket
:param error: the error encountered
"""
logging.error('Error: %s', error)
def _on_ws_close(self, ws):
"""
Function used when the websocket closes the connection to the remote server.
:param ws: the websocket
"""
logging.info("----Server Connection Closed----")
self._ws_app = None
| KronosKoderS/pypushover | pypushover/client.py | Python | mit | 11,439 | [
"VisIt"
] | 8916cb38bef4c9c9aad185cbcc5cb6429275aca8b12874599fe31860489388a9 |
from setuptools import find_packages, setup
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
long_description = """
Pipelign
--------
An automated pipeline for multiple sequence alignment.
"""
setup(
name='pipelign',
version='0.1',
author='Mukarram Hossain',
author_email='asmmh2@cam.ac.uk',
packages=find_packages(exclude=[dir_path+'/docs']),
url='https://github.com/asmmhossain/pipelign',
license='MIT',
description='A pipeline for automated alignment',
long_description=long_description,
install_requires=[
'biopython',
'ete3'
],
scripts=[
dir_path+'/bin/pipelign',
dir_path+'/bin/gb2fas'
]
)
| asmmhossain/pipelign | setup.py | Python | mit | 700 | [
"Biopython"
] | 4c08bd95e67245e353f16fbb073f380fba1480a736d75c51a9328d3ff5dbb1af |
"""
This module contains functions needed to evaluate post-selection
p-values for non-polyhedral selection procedures through a variety of means.
These p-values appear for the group LASSO global null test as well as the nuclear norm
p-value test.
They are described in the `Kac Rice`_ paper.
.. _Kac Rice: http://arxiv.org/abs/1308.3020
.. _Spacings: http://arxiv.org/abs/1401.3889
.. _post selection LASSO: http://arxiv.org/abs/1311.6238
"""
import numpy as np
from scipy.stats import chi
from scipy.stats import norm as ndist, truncnorm
from scipy.integrate import quad
from mpmath import mp
mp.dps = 80
def norm_q(prob):
r"""
A multi-precision calculation of the
standard normal quantile function:
.. math::
\int_{-\infty}^{q(p)} \frac{e^{-z^2/2}}{\sqrt{2\pi}} \; dz = p
where $p$ is `prob`.
Parameters
----------
prob : float
Returns
-------
quantile : float
"""
return np.array(mp.erfinv(2*prob-1)*mp.sqrt(2))
def norm_pdf(observed):
r"""
A multi-precision calculation of the
standard normal density function:
.. math::
\frac{e^{-T^2/2}}{\sqrt{2\pi}}
where `T` is observed.
Parameters
----------
observed : float
Returns
-------
density : float
"""
return np.array(mp.npdf(observed))
def norm_interval(lower, upper):
r"""
A multiprecision evaluation of
.. math::
\Phi(U) - \Phi(L)
Parameters
----------
lower : float
The lower limit $L$
upper : float
The upper limit $U$
"""
if lower > 0 and upper > 0:
return mp.ncdf(-lower) - mp.ncdf(-upper)
else:
return mp.ncdf(upper) - mp.ncdf(lower)
def truncnorm_cdf(observed, lower, upper):
r"""
Compute the truncated normal
distribution function.
.. math::
\frac{\Phi(U) - \Phi(T)}{\Phi(U) - \Phi(L)}
where $T$ is `observed`, $L$ is `lower_bound` and $U$ is `upper_bound`.
Parameters
----------
observed : float
lower : float
upper : float
Returns
-------
P : float
"""
x, a, b = observed, lower, upper
x = max(x, a)
x = min(x, b)
if a > 0 and b > 0:
Fx, Fa, Fb = mp.ncdf(-x), mp.ncdf(-a), mp.ncdf(-b)
return float( ( Fa - Fx ) / ( Fa - Fb ) )
else:
Fx, Fa, Fb = mp.ncdf(x), mp.ncdf(a), mp.ncdf(b)
return float( ( Fx - Fa ) / ( Fb - Fa ) )
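# Illustrative usage of truncnorm_cdf (not in the original module); the values are
# arbitrary: the probability that a standard normal Z <= 1.0 given 0.5 <= Z <= 2.0.
# >>> truncnorm_cdf(1.0, 0.5, 2.0)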
def chi_pvalue(observed, lower_bound, upper_bound, sd, df, method='MC', nsim=1000):
r"""
Compute a truncated $\chi$ p-value based on the
conditional survival function.
Parameters
----------
observed : float
lower_bound : float
upper_bound : float
sd : float
Standard deviation.
df : float
Degrees of freedom.
method: string
One of ['MC', 'cdf', 'sf']
Returns
-------
pvalue : float
Notes
-----
Let $T$ be `observed`, $L$ be `lower_bound` and $U$ be `upper_bound`,
and $\sigma$ be `sd`.
The p-value, for $L \leq T \leq U$ is
.. math::
\frac{P(\chi^2_k / \sigma^2 \geq T^2) - P(\chi^2_k / \sigma^2 \geq U^2)}
{P(\chi^2_k / \sigma^2 \geq L^2) - P(\chi^2_k / \sigma^2 \geq U^2)}
It can be computed using `scipy.stats.chi` either its `cdf` (distribution
function) or `sf` (survival function) or evaluated
by Monte Carlo if method is `MC`.
"""
L, T, U = lower_bound, observed, upper_bound # shorthand
if method == 'cdf':
pval = ((chi.cdf(U / sd, df) - chi.cdf(T / sd, df)) /
(chi.cdf(U / sd, df) - chi.cdf(L / sd, df)))
elif method == 'sf':
pval = ((chi.sf(U / sd, df) - chi.sf(T / sd, df)) /
(chi.sf(U / sd, df) - chi.sf(L / sd, df)))
elif method == 'MC':
if df == 1:
H = []
else:
H = [0]*(df-1)
pval = general_pvalue(T / sd, L / sd, U / sd, H, nsim=nsim)
else:
raise ValueError('method should be one of ["cdf", "sf", "MC"]')
if pval == 1: # the distribution functions may have failed -- use MC
H = [] if df == 1 else [0]*(df-1) # H is otherwise undefined when method is 'cdf' or 'sf'
pval = general_pvalue(T / sd, L / sd, U / sd, H, nsim=50000)
if pval > 1:
pval = 1
return pval
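# Illustrative usage of chi_pvalue (not in the original module); the numbers are
# arbitrary, and method='cdf' simply delegates to scipy.stats.chi.
# >>> chi_pvalue(observed=2.0, lower_bound=1.0, upper_bound=5.0, sd=1.0, df=3, method='cdf')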
def gauss_poly(lower_bound, upper_bound, curvature, nsim=100):
r"""
Computes the integral of a polynomial times the
standard Gaussian density over an interval.
Introduced in `Kac Rice`_, display (33) of v2.
Parameters
----------
lower_bound : float
upper_bound : float
curvature : np.array
A diagonal matrix related to curvature.
It is assumed that `curvature + lower_bound I` is non-negative definite.
nsim : int
How many draws from $N(0,1)$ should we use?
Returns
-------
integral : float
Notes
-----
The return value is a Monte Carlo estimate of
.. math::
\int_{L}^{U} \det(\Lambda + z I)
\frac{e^{-z^2/2\sigma^2}}{\sqrt{2\pi\sigma^2}} \, dz
where $L$ is `lower_bound`, $U$ is `upper_bound` and $\Lambda$ is the
diagonal matrix `curvature`.
"""
T, H = lower_bound, curvature
Z = np.fabs(np.random.standard_normal(nsim))
keep = Z < upper_bound - T
proportion = keep.sum() * 1. / nsim
Z = Z[keep]
if H != []:
HT = H + T
exponent = np.log(np.fabs(np.add.outer(Z, HT))).sum(1) - T*Z - T**2/2.
else:
exponent = - T*Z - T**2/2.
C = exponent.max()
return np.exp(exponent - C).mean() * proportion, C
def general_pvalue(observed, lower_bound, upper_bound, curvature, nsim=100):
r"""
Computes the integral of a polynomial times the
standard Gaussian density over an interval.
Introduced in `Kac Rice`_, display (35) of v2.
Parameters
----------
observed : float
lower_bound : float
upper_bound : float
curvature : np.array
A diagonal matrix related to curvature.
It is assumed that `curvature + lower_bound I` is non-negative definite.
nsim : int
How many draws from $N(0,1)$ should we use?
Returns
-------
integral : float
Notes
-----
The return value is a Monte Carlo estimate of
.. math::
\frac{\int_{T}^{U} \det(\Lambda + z I)
\frac{e^{-z^2/2\sigma^2}}{\sqrt{2\pi\sigma^2}} \, dz}
{\int_{L}^{U} \det(\Lambda + z I)
\frac{e^{-z^2/2\sigma^2}}{\sqrt{2\pi\sigma^2}} \, dz}
where $T$ is `observed`, $L$ is `lower_bound`,
$U$ is `upper_bound` and $\Lambda$ is the
diagonal matrix `curvature`.
"""
exponent_1, C1 = gauss_poly(observed, upper_bound, curvature, nsim=nsim)
exponent_2, C2 = gauss_poly(lower_bound, upper_bound, curvature, nsim=nsim)
return np.exp(C1-C2) * exponent_1 / exponent_2
class SelectionInterval(object):
"""
Compute a selection interval for
a Gaussian truncated to an interval.
"""
def __init__(self, lower_bound, observed, upper_bound, sigma):
self.lower_bound, self.observed, self.upper_bound, self.sigma = lower_bound, observed, upper_bound, sigma
def pivot(self, exp):
L, obs, U, sd = self.lower_bound, self.observed, self.upper_bound, self.sigma
return truncnorm_cdf((obs-exp)/sd, (L-exp)/sd, (U-exp)/sd)
def conf_int(self, lb, ub, alpha=0.05):
F = lambda exp: self.pivot(exp)
L = _find_root(F, 1.0 - 0.5 * alpha, lb, ub)
U = _find_root(F, 0.5 * alpha, L, ub)
return L, U
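# Illustrative sketch for SelectionInterval (not in the original module): a pivot and a
# 95% confidence interval for a Gaussian truncated to [-1, 3]; all values are placeholders,
# and conf_int relies on the _find_root bisection helper defined below.
# >>> si = SelectionInterval(lower_bound=-1.0, observed=0.5, upper_bound=3.0, sigma=1.0)
# >>> si.pivot(0.0)
# >>> si.conf_int(lb=-5.0, ub=5.0, alpha=0.05)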
def _find_root(f, y, lb, ub, tol=1e-6):
"""
searches for solution to f(x) = y in (lb, ub), where
f is a monotone decreasing function
"""
# make sure solution is in range
a, b = lb, ub
fa, fb = f(a), f(b)
# assume a < b
if fa > y and fb > y:
while fb > y :
b, fb = b + (b-a), f(b + (b-a))
elif fa < y and fb < y:
while fa < y :
a, fa = a - (b-a), f(a - (b-a))
# determine the necessary number of iterations
max_iter = int( np.ceil( ( np.log(tol) - np.log(b-a) ) / np.log(0.5) ) )
# bisect (slow but sure) until solution is obtained
for _ in xrange(max_iter):
c, fc = (a+b)/2, f((a+b)/2)
if fc > y: a = c
elif fc < y: b = c
return c
| stefanv/selective-inference | selection/distributions/pvalue.py | Python | bsd-3-clause | 8,308 | [
"Gaussian"
] | d654d341faccb140f874f43619eaebf541bb637fe3564cdc6f11fe0a78f0b97a |
# -*- encoding: utf-8 -*-
"""
Gaussian optics.
The module implements:
Ray transfer matrices for geometrical and gaussian optics
See RayTransferMatrix, GeometricRay and BeamParameter
Conjugation relations for geometrical and gaussian optics
See geometric_conj*, gauss_conj and conjugate_gauss_beams
The conventions for the distances are as follows:
focal distance - positive for convergent lenses
object distance - positive for real objects
image distance - positive for real images
"""
from sympy import (atan2, Expr, I, im, Matrix, oo, pi, re, sqrt, sympify,
together)
from sympy.utilities.misc import filldedent
###
# A, B, C, D matrices
###
class RayTransferMatrix(Matrix):
"""
Base class for a Ray Transfer Matrix.
It should be used if there isn't already a more specific subclass mentioned
in See Also.
Parameters
==========
parameters A, B, C and D or 2x2 matrix (Matrix(2, 2, [A, B, C, D]))
Examples
=======
>>> from sympy.physics.gaussopt import RayTransferMatrix, ThinLens
>>> from sympy import Symbol, Matrix
>>> mat = RayTransferMatrix(1, 2, 3, 4)
>>> mat
[1, 2]
[3, 4]
>>> RayTransferMatrix(Matrix([[1, 2], [3, 4]]))
[1, 2]
[3, 4]
>>> mat.A
1
>>> f = Symbol('f')
>>> lens = ThinLens(f)
>>> lens
[ 1, 0]
[-1/f, 1]
>>> lens.C
-1/f
See Also
========
GeometricRay, BeamParameter,
FreeSpace, FlatRefraction, CurvedRefraction,
FlatMirror, CurvedMirror, ThinLens
References
==========
[1] http://en.wikipedia.org/wiki/Ray_transfer_matrix_analysis
"""
def __new__(cls, *args):
if len(args) == 4:
temp = ((args[0], args[1]), (args[2], args[3]))
elif len(args) == 1 \
and isinstance(args[0], Matrix) \
and args[0].shape == (2, 2):
temp = args[0]
else:
raise ValueError(filldedent('''
Expecting 2x2 Matrix or the 4 elements of
the Matrix but got %s''' % str(args)))
return Matrix.__new__(cls, temp)
def __mul__(self, other):
if isinstance(other, RayTransferMatrix):
return RayTransferMatrix(Matrix.__mul__(self, other))
elif isinstance(other, GeometricRay):
return GeometricRay(Matrix.__mul__(self, other))
elif isinstance(other, BeamParameter):
temp = self*Matrix(((other.q,), (1,)))
q = (temp[0]/temp[1]).expand(complex=True)
return BeamParameter(other.wavelen, \
together(re(q)), \
z_r = together(im(q)))
else:
return Matrix.__mul__(self, other)
@property
def A(self):
"""
The A parameter of the Matrix.
Examples
========
>>> from sympy.physics.gaussopt import RayTransferMatrix
>>> mat = RayTransferMatrix(1, 2, 3, 4)
>>> mat.A
1
"""
return self[0, 0]
@property
def B(self):
"""
The B parameter of the Matrix.
Examples
========
>>> from sympy.physics.gaussopt import RayTransferMatrix
>>> mat = RayTransferMatrix(1, 2, 3, 4)
>>> mat.B
2
"""
return self[0, 1]
@property
def C(self):
"""
The C parameter of the Matrix.
Examples
========
>>> from sympy.physics.gaussopt import RayTransferMatrix
>>> mat = RayTransferMatrix(1, 2, 3, 4)
>>> mat.C
3
"""
return self[1, 0]
@property
def D(self):
"""
The D parameter of the Matrix.
Examples
========
>>> from sympy.physics.gaussopt import RayTransferMatrix
>>> mat = RayTransferMatrix(1, 2, 3, 4)
>>> mat.D
4
"""
return self[1, 1]
class FreeSpace(RayTransferMatrix):
"""
Ray Transfer Matrix for free space.
Parameters
==========
distance
See Also
========
RayTransferMatrix
Examples
========
>>> from sympy.physics.gaussopt import FreeSpace
>>> from sympy import symbols
>>> d = symbols('d')
>>> FreeSpace(d)
[1, d]
[0, 1]
"""
def __new__(cls, d):
return RayTransferMatrix.__new__(cls, 1, d, 0, 1)
class FlatRefraction(RayTransferMatrix):
"""
Ray Transfer Matrix for refraction.
Parameters
==========
n1: refractive index of one medium
n2: refractive index of other medium
See Also
========
RayTransferMatrix
Examples
========
>>> from sympy.physics.gaussopt import FlatRefraction
>>> from sympy import symbols
>>> n1, n2 = symbols('n1 n2')
>>> FlatRefraction(n1, n2)
[1, 0]
[0, n1/n2]
"""
def __new__(cls, n1, n2):
n1, n2 = sympify((n1, n2))
return RayTransferMatrix.__new__(cls, 1, 0, 0, n1/n2)
class CurvedRefraction(RayTransferMatrix):
"""
Ray Transfer Matrix for refraction on curved interface.
Parameters
==========
R: radius of curvature (positive for concave),
n1: refractive index of one medium
n2: refractive index of other medium
See Also
========
RayTransferMatrix
Examples
========
>>> from sympy.physics.gaussopt import CurvedRefraction
>>> from sympy import symbols
>>> R, n1, n2 = symbols('R n1 n2')
>>> CurvedRefraction(R, n1, n2)
[ 1, 0]
[(n1 - n2)/(R*n2), n1/n2]
"""
def __new__(cls, R, n1, n2):
R, n1 , n2 = sympify((R, n1, n2))
return RayTransferMatrix.__new__(cls, 1, 0, (n1-n2)/R/n2, n1/n2)
class FlatMirror(RayTransferMatrix):
"""
Ray Transfer Matrix for reflection.
See Also: RayTransferMatrix
Examples
========
>>> from sympy.physics.gaussopt import FlatMirror
>>> FlatMirror()
[1, 0]
[0, 1]
"""
def __new__(cls):
return RayTransferMatrix.__new__(cls, 1, 0, 0, 1)
class CurvedMirror(RayTransferMatrix):
"""
Ray Transfer Matrix for reflection from curved surface.
Parameters
==========
radius of curvature (positive for concave)
See Also
========
RayTransferMatrix
Examples
========
>>> from sympy.physics.gaussopt import CurvedMirror
>>> from sympy import symbols
>>> R = symbols('R')
>>> CurvedMirror(R)
[ 1, 0]
[-2/R, 1]
"""
def __new__(cls, R):
R = sympify(R)
return RayTransferMatrix.__new__(cls, 1, 0, -2/R, 1)
class ThinLens(RayTransferMatrix):
"""
Ray Transfer Matrix for a thin lens.
Parameters
==========
the focal distance
See Also
========
RayTransferMatrix
Examples
========
>>> from sympy.physics.gaussopt import ThinLens
>>> from sympy import symbols
>>> f = symbols('f')
>>> ThinLens(f)
[ 1, 0]
[-1/f, 1]
"""
def __new__(cls, f):
f = sympify(f)
return RayTransferMatrix.__new__(cls, 1, 0, -1/f, 1)
###
# Representation for geometric ray
###
class GeometricRay(Matrix):
"""
Representation for a geometric ray in the Ray Transfer Matrix formalism.
Parameters
==========
height and angle or 2x1 matrix (Matrix(2, 1, [height, angle]))
Examples
=======
>>> from sympy.physics.gaussopt import GeometricRay, FreeSpace
>>> from sympy import symbols, Matrix
>>> d, h, angle = symbols('d, h, angle')
>>> GeometricRay(h, angle)
[ h]
[angle]
>>> FreeSpace(d)*GeometricRay(h, angle)
[angle*d + h]
[ angle]
>>> GeometricRay( Matrix( ((h,), (angle,)) ) )
[ h]
[angle]
See Also
========
RayTransferMatrix
"""
def __new__(cls, *args):
if len(args) == 1 and isinstance(args[0], Matrix) \
and args[0].shape == (2, 1):
temp = args[0]
elif len(args) == 2:
temp = ((args[0],), (args[1],))
else:
raise ValueError(filldedent('''
Expecting 2x1 Matrix or the 2 elements of
the Matrix but got %s''' % str(args)))
return Matrix.__new__(cls, temp)
@property
def height(self):
"""
The distance from the optical axis.
Examples
========
>>> from sympy.physics.gaussopt import GeometricRay
>>> from sympy import symbols
>>> h, angle = symbols('h, angle')
>>> gRay = GeometricRay(h, angle)
>>> gRay.height
h
"""
return self[0]
@property
def angle(self):
"""
The angle with the optical axis.
Examples
========
>>> from sympy.physics.gaussopt import GeometricRay
>>> from sympy import symbols
>>> h, angle = symbols('h, angle')
>>> gRay = GeometricRay(h, angle)
>>> gRay.angle
angle
"""
return self[1]
###
# Representation for gauss beam
###
class BeamParameter(Expr):
"""
Representation for a gaussian ray in the Ray Transfer Matrix formalism.
Parameters
==========
wavelength, distance to waist, and w (waist) or z_r (rayleigh range)
Examples
========
>>> from sympy.physics.gaussopt import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.q
1 + 1.88679245283019*I*pi
>>> p.q.n()
1.0 + 5.92753330865999*I
>>> p.w_0.n()
0.00100000000000000
>>> p.z_r.n()
5.92753330865999
>>> from sympy.physics.gaussopt import FreeSpace
>>> fs = FreeSpace(10)
>>> p1 = fs*p
>>> p.w.n()
0.00101413072159615
>>> p1.w.n()
0.00210803120913829
See Also
========
RayTransferMatrix
References
==========
[1] http://en.wikipedia.org/wiki/Complex_beam_parameter
"""
#TODO A class Complex may be implemented. The BeamParameter may
# subclass it. See:
# https://groups.google.com/d/topic/sympy/7XkU07NRBEs/discussion
__slots__ = ['z', 'z_r', 'wavelen']
def __new__(cls, wavelen, z, **kwargs):
wavelen, z = sympify((wavelen, z))
inst = Expr.__new__(cls, wavelen, z, **kwargs)
inst.wavelen = wavelen
inst.z = z
if len(kwargs) !=1:
raise ValueError('Constructor expects exactly one named argument.')
elif 'z_r' in kwargs:
inst.z_r = sympify(kwargs['z_r'])
elif 'w' in kwargs:
inst.z_r = waist2rayleigh(sympify(kwargs['w']), wavelen)
else:
raise ValueError('The constructor needs named argument w or z_r')
return inst
@property
def q(self):
"""
The complex parameter representing the beam.
Examples
========
>>> from sympy.physics.gaussopt import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.q
1 + 1.88679245283019*I*pi
"""
return self.z + I*self.z_r
@property
def radius(self):
"""
The radius of curvature of the phase front.
Examples
========
>>> from sympy.physics.gaussopt import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.radius
0.2809/pi**2 + 1
"""
return self.z*(1+(self.z/self.z_r)**2)
@property
def w(self):
"""
The beam radius at 1/e^2 intensity.
See Also
========
w_0: minimal radius of beam
Examples
========
>>> from sympy.physics.gaussopt import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.w
0.001*sqrt(0.2809/pi**2 + 1)
"""
return self.w_0*sqrt(1+(self.z/self.z_r)**2)
@property
def w_0(self):
"""
The beam waist (minimal radius).
See Also
========
w: beam radius at 1/e^2 intensity
Examples
========
>>> from sympy.physics.gaussopt import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.w_0
0.00100000000000000
"""
return sqrt(self.z_r/pi*self.wavelen)
@property
def divergence(self):
"""
Half of the total angular spread.
Examples
========
>>> from sympy.physics.gaussopt import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.divergence
0.00053/pi
"""
return self.wavelen/pi/self.w_0
@property
def gouy(self):
"""
The Gouy phase.
Examples
========
>>> from sympy.physics.gaussopt import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.gouy
atan(0.53/pi)
"""
return atan2(self.z, self.z_r)
@property
def waist_approximation_limit(self):
"""
The minimal waist for which the gauss beam approximation is valid.
The gauss beam is a solution to the paraxial equation. For curvatures
that are too great it is not a valid approximation.
Examples
========
>>> from sympy.physics.gaussopt import BeamParameter
>>> p = BeamParameter(530e-9, 1, w=1e-3)
>>> p.waist_approximation_limit
1.06e-6/pi
"""
return 2*self.wavelen/pi
###
# Utilities
###
def waist2rayleigh(w, wavelen):
"""
Calculate the rayleigh range from the waist of a gaussian beam.
See Also
========
rayleigh2waist, BeamParameter
Examples
========
>>> from sympy.physics.gaussopt import waist2rayleigh
>>> from sympy import symbols
>>> w, wavelen = symbols('w wavelen')
>>> waist2rayleigh(w, wavelen)
pi*w**2/wavelen
"""
w, wavelen = sympify((w, wavelen))
return w**2*pi/wavelen
def rayleigh2waist(z_r, wavelen):
"""Calculate the waist from the rayleigh range of a gaussian beam.
See Also
========
waist2rayleigh, BeamParameter
Examples
========
>>> from sympy.physics.gaussopt import rayleigh2waist
>>> from sympy import symbols
>>> z_r, wavelen = symbols('z_r wavelen')
>>> rayleigh2waist(z_r, wavelen)
sqrt(wavelen*z_r)/sqrt(pi)
"""
z_r, wavelen = sympify((z_r, wavelen))
return sqrt(z_r/pi*wavelen)
def geometric_conj_ab(a, b):
"""
Conjugation relation for geometrical beams under paraxial conditions.
Takes the distances to the optical element and returns the needed
focal distance.
See Also
========
geometric_conj_af, geometric_conj_bf
Examples
========
>>> from sympy.physics.gaussopt import geometric_conj_ab
>>> from sympy import symbols
>>> a, b = symbols('a b')
>>> geometric_conj_ab(a, b)
a*b/(a + b)
"""
a, b = sympify((a, b))
if abs(a) == oo or abs(b) == oo:
return a if abs(b) == oo else b
else:
return a*b/(a+b)
def geometric_conj_af(a, f):
"""
Conjugation relation for geometrical beams under paraxial conditions.
Takes the object distance (for geometric_conj_af) or the image distance
(for geometric_conj_bf) to the optical element and the focal distance.
Then it returns the other distance needed for conjugation.
See Also
========
geometric_conj_ab
Examples
========
>>> from sympy.physics.gaussopt import geometric_conj_af, geometric_conj_bf
>>> from sympy import symbols
>>> a, b, f = symbols('a b f')
>>> geometric_conj_af(a, f)
a*f/(a - f)
>>> geometric_conj_bf(b, f)
b*f/(b - f)
"""
a, f = sympify((a, f))
return -geometric_conj_ab(a, -f)
geometric_conj_bf = geometric_conj_af
def gaussian_conj(s_in, z_r_in, f):
"""
Conjugation relation for gaussian beams.
Parameters
==========
s_in: distance to optical element from the waist
z_r_in: the rayleigh range of the incident beam
f: the focal length of the optical element
Returns
=======
A tuple containing (s_out, z_r_out, m)
- s_out - distance between the new waist and the optical element
- z_r_out - rayleigh range of the emergent beam
- m - the ratio between the new and the old waists
Examples
========
>>> from sympy.physics.gaussopt import gaussian_conj
>>> from sympy import symbols
>>> s_in, z_r_in, f = symbols('s_in z_r_in f')
>>> gaussian_conj(s_in, z_r_in, f)[0]
1/(-1/(s_in + z_r_in**2/(-f + s_in)) + 1/f)
>>> gaussian_conj(s_in, z_r_in, f)[1]
z_r_in/(1 - s_in**2/f**2 + z_r_in**2/f**2)
>>> gaussian_conj(s_in, z_r_in, f)[2]
1/sqrt(1 - s_in**2/f**2 + z_r_in**2/f**2)
"""
s_in, z_r_in, f = sympify((s_in, z_r_in, f))
s_out = 1 / ( -1/(s_in + z_r_in**2/(s_in-f)) + 1/f )
m = 1/sqrt((1-(s_in/f)**2) + (z_r_in/f)**2)
z_r_out = z_r_in / ((1-(s_in/f)**2) + (z_r_in/f)**2)
return (s_out, z_r_out, m)
def conjugate_gauss_beams(wavelen, waist_in, waist_out, **kwargs):
"""
Find the optical setup conjugating the object/image waists.
Parameters
==========
wavelen: the wavelength of the beam
waist_in and waist_out: the waists to be conjugated
f: the focal distance of the element used in the conjugation
Returns
=======
A tuple containing (s_in, s_out, f)
- s_in - distance before the optical element
- s_out - distance after the optical element
- f - focal distance of the optical element
Examples
========
>>> from sympy.physics.gaussopt import conjugate_gauss_beams
>>> from sympy import symbols, factor
>>> l, w_i, w_o, f = symbols('l w_i w_o f')
>>> conjugate_gauss_beams(l, w_i, w_o, f=f)[0]
f*(-sqrt(w_i**2/w_o**2 - pi**2*w_i**4/(f**2*l**2)) + 1)
>>> factor(conjugate_gauss_beams(l, w_i, w_o, f=f)[1])
f*w_o**2*(w_i**2/w_o**2 - sqrt(w_i**2/w_o**2 - pi**2*w_i**4/(f**2*l**2)))/w_i**2
>>> conjugate_gauss_beams(l, w_i, w_o, f=f)[2]
f
"""
#TODO add the other possible arguments
wavelen, waist_in, waist_out = sympify((wavelen, waist_in, waist_out))
m = waist_out / waist_in
z = waist2rayleigh(waist_in, wavelen)
if len(kwargs) != 1:
raise ValueError("The function expects only one named argument")
elif 'dist' in kwargs:
raise NotImplementedError(filldedent('''
Currently only focal length is supported as a parameter'''))
elif 'f' in kwargs:
f = sympify(kwargs['f'])
s_in = f * (1 - sqrt(1/m**2 - z**2/f**2))
s_out = gaussian_conj(s_in, z, f)[0]
elif 's_in' in kwargs:
raise NotImplementedError(filldedent('''
Currently only focal length is supported as a parameter'''))
else:
raise ValueError(filldedent('''
The functions expects the focal length as a named argument'''))
return (s_in, s_out, f)
#TODO
#def plot_beam():
# """Plot the beam radius as it propagates in space."""
# pass
#TODO
#def plot_beam_conjugation():
# """
# Plot the intersection of two beams.
#
# Represents the conjugation relation.
#
# See Also
# ========
#
# conjugate_gauss_beams
# """
# pass
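# Illustrative sketch (not part of the original module): propagating a Gaussian beam
# through free space, a thin lens, and more free space by composing ray transfer
# matrices; the numeric values are arbitrary.
# >>> from sympy.physics.gaussopt import BeamParameter, FreeSpace, ThinLens
# >>> p = BeamParameter(530e-9, 1, w=1e-3)
# >>> system = FreeSpace(2) * ThinLens(1) * FreeSpace(1)
# >>> p_out = system * p
# >>> p_out.w.n()  # beam radius after the system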
| ichuang/sympy | sympy/physics/gaussopt.py | Python | bsd-3-clause | 19,249 | [
"Gaussian"
] | 9b8d191a1397a7c1f5b05ce8ba3b2e3289c960dcd5a59f7c426a6ee24325cb0a |
# The MIT License
#
# Copyright 2014, 2015 Piotr Dabkowski
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
from __future__ import unicode_literals
from .pyjsparserdata import *
from .std_nodes import *
from pprint import pprint
import sys
__all__ = ['PyJsParser', 'parse', 'ENABLE_JS2PY_ERRORS', 'ENABLE_PYIMPORT', 'JsSyntaxError']
REGEXP_SPECIAL_SINGLE = ('\\', '^', '$', '*', '+', '?', '.', '[', ']', '(', ')', '{', '{', '|', '-')
ENABLE_PYIMPORT = False
ENABLE_JS2PY_ERRORS = False
PY3 = sys.version_info >= (3,0)
if PY3:
basestring = str
long = int
xrange = range
unicode = str
ESPRIMA_VERSION = '2.2.0'
DEBUG = False
# Small naming convention changes
# len -> leng
# id -> d
# type -> typ
# str -> st
true = True
false = False
null = None
class PyJsParser:
""" Usage:
parser = PyJsParser()
parser.parse('var JavaScriptCode = 5.1')
"""
def __init__(self):
self.clean()
def test(self, code):
pprint(self.parse(code))
def clean(self):
self.strict = None
self.sourceType = None
self.index = 0
self.lineNumber = 1
self.lineStart = 0
self.hasLineTerminator = None
self.lastIndex = None
self.lastLineNumber = None
self.lastLineStart = None
self.startIndex = None
self.startLineNumber = None
self.startLineStart = None
self.scanning = None
self.lookahead = None
self.state = None
self.extra = None
self.isBindingElement = None
self.isAssignmentTarget = None
self.firstCoverInitializedNameError = None
# 7.4 Comments
def skipSingleLineComment(self, offset):
start = self.index - offset;
while self.index < self.length:
ch = self.source[self.index];
self.index += 1
if isLineTerminator(ch):
if (ord(ch) == 13 and ord(self.source[self.index]) == 10):
self.index += 1
self.lineNumber += 1
self.hasLineTerminator = True
self.lineStart = self.index
return {
'type': 'Line',
'value': self.source[start + offset:self.index-2],
'leading': True,
'trailing': False,
'loc': None,
}
def skipMultiLineComment(self):
start = self.index
while self.index < self.length:
ch = ord(self.source[self.index])
if isLineTerminator(ch):
if (ch == 0x0D and ord(self.source[self.index + 1]) == 0x0A):
self.index += 1
self.lineNumber += 1
self.index += 1
self.hasLineTerminator = True
self.lineStart = self.index
elif ch == 0x2A:
# Block comment ends with '*/'.
if ord(self.source[self.index + 1]) == 0x2F:
self.index += 2
return {
'type': 'Block',
'value': self.source[start:self.index-2],
'leading': True,
'trailing': False,
'loc': None,
}
self.index += 1
else:
self.index += 1
self.tolerateUnexpectedToken()
def skipComment(self):
self.hasLineTerminator = False
startIndex = self.index
start = (self.index == 0)
comments = []
while self.index < self.length:
ch = ord(self.source[self.index])
if isWhiteSpace(ch):
self.index += 1
elif isLineTerminator(ch):
self.hasLineTerminator = True
self.index += 1
if (ch == 0x0D and ord(self.source[self.index]) == 0x0A):
self.index += 1
self.lineNumber += 1
self.lineStart = self.index
start = True
elif (ch == 0x2F): # U+002F is '/'
ch = ord(self.source[self.index + 1])
if (ch == 0x2F):
self.index += 2
comments.append(self.skipSingleLineComment(2))
start = True
elif (ch == 0x2A): # U+002A is '*'
self.index += 2
comments.append(self.skipMultiLineComment())
else:
break
elif (start and ch == 0x2D): # U+002D is '-'
# U+003E is '>'
if (ord(self.source[self.index + 1]) == 0x2D) and (ord(self.source[self.index + 2]) == 0x3E):
# '-->' is a single-line comment
self.index += 3
self.skipSingleLineComment(3)
else:
break
elif (ch == 0x3C): # U+003C is '<'
if self.source[self.index + 1: self.index + 4] == '!--':
# <!--
self.index += 4
self.skipSingleLineComment(4)
else:
break
else:
break
return filter(None, comments)
def scanHexEscape(self, prefix):
code = 0
leng = 4 if (prefix == 'u') else 2
for i in xrange(leng):
if self.index < self.length and isHexDigit(self.source[self.index]):
ch = self.source[self.index]
self.index += 1
code = code * 16 + HEX_CONV[ch]
else:
return ''
return unichr(code)
def scanUnicodeCodePointEscape(self):
ch = self.source[self.index]
code = 0
# At least, one hex digit is required.
if ch == '}':
self.throwUnexpectedToken()
while (self.index < self.length):
ch = self.source[self.index]
self.index += 1
if not isHexDigit(ch):
break
code = code * 16 + HEX_CONV[ch]
if code > 0x10FFFF or ch != '}':
self.throwUnexpectedToken()
# UTF-16 Encoding
if (code <= 0xFFFF):
return unichr(code)
cu1 = ((code - 0x10000) >> 10) + 0xD800;
cu2 = ((code - 0x10000) & 1023) + 0xDC00;
return unichr(cu1) + unichr(cu2)
def ccode(self, offset=0):
return ord(self.source[self.index + offset])
def log_err_case(self):
if not DEBUG:
return
print('INDEX', self.index)
print(self.source[self.index - 10:self.index + 10])
print('')
def at(self, loc):
return None if loc >= self.length else self.source[loc]
def substr(self, le, offset=0):
return self.source[self.index + offset:self.index + offset + le]
def getEscapedIdentifier(self):
d = self.source[self.index]
ch = ord(d)
self.index += 1
# '\u' (U+005C, U+0075) denotes an escaped character.
if (ch == 0x5C):
if (ord(self.source[self.index]) != 0x75):
self.throwUnexpectedToken()
self.index += 1
ch = self.scanHexEscape('u')
if not ch or ch == '\\' or not isIdentifierStart(ch[0]):
self.throwUnexpectedToken()
d = ch
while (self.index < self.length):
ch = self.ccode()
if not isIdentifierPart(ch):
break
self.index += 1
d += unichr(ch)
# '\u' (U+005C, U+0075) denotes an escaped character.
if (ch == 0x5C):
d = d[0: len(d) - 1]
if (self.ccode() != 0x75):
self.throwUnexpectedToken()
self.index += 1
ch = self.scanHexEscape('u');
if (not ch or ch == '\\' or not isIdentifierPart(ch[0])):
self.throwUnexpectedToken()
d += ch
return d
def getIdentifier(self):
start = self.index
self.index += 1
while (self.index < self.length):
ch = self.ccode()
if (ch == 0x5C):
# Backslash (U+005C) marks a Unicode escape sequence.
self.index = start
return self.getEscapedIdentifier()
if (isIdentifierPart(ch)):
self.index += 1
else:
break
return self.source[start: self.index]
def scanIdentifier(self):
start = self.index
# Backslash (U+005C) starts an escaped character.
d = self.getEscapedIdentifier() if (self.ccode() == 0x5C) else self.getIdentifier()
# There is no keyword or literal with only one character.
# Thus, it must be an identifier.
if (len(d) == 1):
type = Token.Identifier
elif (isKeyword(d)):
type = Token.Keyword
elif (d == 'null'):
type = Token.NullLiteral
elif (d == 'true' or d == 'false'):
type = Token.BooleanLiteral
else:
type = Token.Identifier;
return {
'type': type,
'value': d,
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': start,
'end': self.index
}
# 7.7 Punctuators
def scanPunctuator(self):
token = {
'type': Token.Punctuator,
'value': '',
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': self.index,
'end': self.index
}
# Check for most common single-character punctuators.
st = self.source[self.index]
if st == '{':
self.state['curlyStack'].append('{')
self.index += 1
elif st == '}':
self.index += 1
self.state['curlyStack'].pop()
elif st in ('.', '(', ')', ';', ',', '[', ']', ':', '?', '~'):
self.index += 1
else:
# 4-character punctuator.
st = self.substr(4)
if (st == '>>>='):
self.index += 4
else:
# 3-character punctuators.
st = st[0:3]
if st in ('===', '!==', '>>>', '<<=', '>>='):
self.index += 3
else:
# 2-character punctuators.
st = st[0:2]
if st in ('&&', '||', '==', '!=', '+=', '-=', '*=', '/=', '++', '--', '<<', '>>', '&=', '|=', '^=',
'%=', '<=', '>=', '=>'):
self.index += 2
else:
# 1-character punctuators.
st = self.source[self.index]
if st in ('<', '>', '=', '!', '+', '-', '*', '%', '&', '|', '^', '/'):
self.index += 1
if self.index == token['start']:
self.throwUnexpectedToken()
token['end'] = self.index;
token['value'] = st
return token
# 7.8.3 Numeric Literals
def scanHexLiteral(self, start):
number = ''
while (self.index < self.length):
if (not isHexDigit(self.source[self.index])):
break
number += self.source[self.index]
self.index += 1
if not number:
self.throwUnexpectedToken()
if isIdentifierStart(self.ccode()):
self.throwUnexpectedToken()
return {
'type': Token.NumericLiteral,
'value': int(number, 16),
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': start,
'end': self.index}
def scanBinaryLiteral(self, start):
number = ''
while (self.index < self.length):
ch = self.source[self.index]
if (ch != '0' and ch != '1'):
break
number += self.source[self.index]
self.index += 1
if not number:
# only 0b or 0B
self.throwUnexpectedToken()
if (self.index < self.length):
ch = self.source[self.index]
# istanbul ignore else
if (isIdentifierStart(ch) or isDecimalDigit(ch)):
self.throwUnexpectedToken();
return {
'type': Token.NumericLiteral,
'value': int(number, 2),
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': start,
'end': self.index}
def scanOctalLiteral(self, prefix, start):
if isOctalDigit(prefix):
octal = True
number = '0' + self.source[self.index]
self.index += 1
else:
octal = False
self.index += 1
number = ''
while (self.index < self.length):
if (not isOctalDigit(self.source[self.index])):
break
number += self.source[self.index]
self.index += 1
if (not octal and not number):
# only 0o or 0O
self.throwUnexpectedToken()
if (isIdentifierStart(self.ccode()) or isDecimalDigit(self.ccode())):
self.throwUnexpectedToken()
return {
'type': Token.NumericLiteral,
'value': int(number, 8),
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': start,
'end': self.index}
def octalToDecimal(self, ch):
# \0 is not octal escape sequence
octal = (ch != '0')
code = int(ch, 8)
if (self.index < self.length and isOctalDigit(self.source[self.index])):
octal = True
code = code * 8 + int(self.source[self.index], 8)
self.index += 1
# 3 digits are only allowed when string starts
# with 0, 1, 2, 3
if (ch in '0123' and self.index < self.length and isOctalDigit(self.source[self.index])):
code = code * 8 + int((self.source[self.index]), 8)
self.index += 1
return {
'code': code,
'octal': octal}
def isImplicitOctalLiteral(self):
# Implicit octal, unless there is a non-octal digit.
# (Annex B.1.1 on Numeric Literals)
for i in xrange(self.index + 1, self.length):
ch = self.source[i];
if (ch == '8' or ch == '9'):
return False;
if (not isOctalDigit(ch)):
return True
return True
def scanNumericLiteral(self):
ch = self.source[self.index]
assert isDecimalDigit(ch) or (ch == '.'), 'Numeric literal must start with a decimal digit or a decimal point'
start = self.index
number = ''
if ch != '.':
number = self.source[self.index]
self.index += 1
ch = self.source[self.index]
# Hex number starts with '0x'.
# Octal number starts with '0'.
# Octal number in ES6 starts with '0o'.
# Binary number in ES6 starts with '0b'.
if (number == '0'):
if (ch == 'x' or ch == 'X'):
self.index += 1
return self.scanHexLiteral(start);
if (ch == 'b' or ch == 'B'):
self.index += 1
return self.scanBinaryLiteral(start)
if (ch == 'o' or ch == 'O'):
return self.scanOctalLiteral(ch, start)
if (isOctalDigit(ch)):
if (self.isImplicitOctalLiteral()):
return self.scanOctalLiteral(ch, start);
while (isDecimalDigit(self.ccode())):
number += self.source[self.index]
self.index += 1
ch = self.source[self.index];
if (ch == '.'):
number += self.source[self.index]
self.index += 1
while (isDecimalDigit(self.source[self.index])):
number += self.source[self.index]
self.index += 1
ch = self.source[self.index]
if (ch == 'e' or ch == 'E'):
number += self.source[self.index]
self.index += 1
ch = self.source[self.index]
if (ch == '+' or ch == '-'):
number += self.source[self.index]
self.index += 1
if (isDecimalDigit(self.source[self.index])):
while (isDecimalDigit(self.source[self.index])):
number += self.source[self.index]
self.index += 1
else:
self.throwUnexpectedToken()
if (isIdentifierStart(self.source[self.index])):
self.throwUnexpectedToken();
return {
'type': Token.NumericLiteral,
'value': float(number),
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': start,
'end': self.index}
# 7.8.4 String Literals
def _interpret_regexp(self, string, flags):
'''Perform string escape - for regexp literals'''
self.index = 0
self.length = len(string)
self.source = string
self.lineNumber = 0
self.lineStart = 0
octal = False
st = ''
inside_square = 0
while (self.index < self.length):
template = '[%s]' if not inside_square else '%s'
ch = self.source[self.index]
self.index += 1
if ch == '\\':
ch = self.source[self.index]
self.index += 1
if (not isLineTerminator(ch)):
if ch == 'u':
digs = self.source[self.index:self.index + 4]
if len(digs) == 4 and all(isHexDigit(d) for d in digs):
st += template % unichr(int(digs, 16))
self.index += 4
else:
st += 'u'
elif ch == 'x':
digs = self.source[self.index:self.index + 2]
if len(digs) == 2 and all(isHexDigit(d) for d in digs):
st += template % unichr(int(digs, 16))
self.index += 2
else:
st += 'x'
# special meaning - single char.
elif ch == '0':
st += '\\0'
elif ch == 'n':
st += '\\n'
elif ch == 'r':
st += '\\r'
elif ch == 't':
st += '\\t'
elif ch == 'f':
st += '\\f'
elif ch == 'v':
st += '\\v'
# keep the escape on special single characters like . so that they are interpreted literally
elif ch in REGEXP_SPECIAL_SINGLE:
st += '\\' + ch
# character groups
elif ch == 'b':
st += '\\b'
elif ch == 'B':
st += '\\B'
elif ch == 'w':
st += '\\w'
elif ch == 'W':
st += '\\W'
elif ch == 'd':
st += '\\d'
elif ch == 'D':
st += '\\D'
elif ch == 's':
st += template % u' \f\n\r\t\v\u00a0\u1680\u180e\u2000-\u200a\u2028\u2029\u202f\u205f\u3000\ufeff'
elif ch == 'S':
st += template % u'\u0000-\u0008\u000e-\u001f\u0021-\u009f\u00a1-\u167f\u1681-\u180d\u180f-\u1fff\u200b-\u2027\u202a-\u202e\u2030-\u205e\u2060-\u2fff\u3001-\ufefe\uff00-\uffff'
else:
if isDecimalDigit(ch):
num = ch
while self.index < self.length and isDecimalDigit(self.source[self.index]):
num += self.source[self.index]
self.index += 1
st += '\\' + num
else:
st += ch # DONT ESCAPE!!!
else:
self.lineNumber += 1
if (ch == '\r' and self.source[self.index] == '\n'):
self.index += 1
self.lineStart = self.index
else:
if ch == '[':
inside_square = True
elif ch == ']':
inside_square = False
st += ch
# print string, 'was transformed to', st
return st
def scanStringLiteral(self):
st = ''
octal = False
quote = self.source[self.index]
assert quote == '\'' or quote == '"', 'String literal must starts with a quote'
start = self.index;
self.index += 1
while (self.index < self.length):
ch = self.source[self.index]
self.index += 1
if (ch == quote):
quote = ''
break
elif (ch == '\\'):
ch = self.source[self.index]
self.index += 1
if (not isLineTerminator(ch)):
if ch in 'ux':
if (self.source[self.index] == '{'):
self.index += 1
st += self.scanUnicodeCodePointEscape()
else:
unescaped = self.scanHexEscape(ch)
if (not unescaped):
self.throwUnexpectedToken() # with throw I don't know whats the difference
st += unescaped
elif ch == 'n':
st += '\n';
elif ch == 'r':
st += '\r';
elif ch == 't':
st += '\t';
elif ch == 'b':
st += '\b';
elif ch == 'f':
st += '\f';
elif ch == 'v':
st += '\x0B'
# elif ch in '89':
# self.throwUnexpectedToken() # again with throw....
else:
if isOctalDigit(ch):
octToDec = self.octalToDecimal(ch)
octal = octToDec.get('octal') or octal
st += unichr(octToDec['code'])
else:
st += ch
else:
self.lineNumber += 1
if (ch == '\r' and self.source[self.index] == '\n'):
self.index += 1
self.lineStart = self.index
elif isLineTerminator(ch):
break
else:
st += ch;
if (quote != ''):
self.throwUnexpectedToken()
return {
'type': Token.StringLiteral,
'value': st,
'octal': octal,
'lineNumber': self.lineNumber,
'lineStart': self.startLineStart,
'start': start,
'end': self.index}
def scanTemplate(self):
cooked = ''
terminated = False
tail = False
start = self.index
head = (self.source[self.index] == '`')
rawOffset = 2
self.index += 1
while (self.index < self.length):
ch = self.source[self.index]
self.index += 1
if (ch == '`'):
rawOffset = 1;
tail = True
terminated = True
break
elif (ch == '$'):
if (self.source[self.index] == '{'):
self.state['curlyStack'].append('${')
self.index += 1
terminated = True
break;
cooked += ch
elif (ch == '\\'):
ch = self.source[self.index]
self.index += 1
if (not isLineTerminator(ch)):
if ch == 'n':
cooked += '\n'
elif ch == 'r':
cooked += '\r'
elif ch == 't':
cooked += '\t'
elif ch in 'ux':
if (self.source[self.index] == '{'):
self.index += 1
cooked += self.scanUnicodeCodePointEscape()
else:
restore = self.index
unescaped = self.scanHexEscape(ch)
if (unescaped):
cooked += unescaped
else:
self.index = restore
cooked += ch
elif ch == 'b':
cooked += '\b'
elif ch == 'f':
cooked += '\f'
elif ch == 'v':
cooked += '\v'
else:
if (ch == '0'):
if isDecimalDigit(self.ccode()):
# Illegal: \01 \02 and so on
self.throwError(Messages.TemplateOctalLiteral)
cooked += '\0'
elif (isOctalDigit(ch)):
# Illegal: \1 \2
self.throwError(Messages.TemplateOctalLiteral)
else:
cooked += ch
else:
self.lineNumber += 1
if (ch == '\r' and self.source[self.index] == '\n'):
self.index += 1
self.lineStart = self.index
elif (isLineTerminator(ch)):
self.lineNumber += 1
if (ch == '\r' and self.source[self.index] == '\n'):
self.index += 1
self.lineStart = self.index
cooked += '\n'
else:
cooked += ch;
if (not terminated):
self.throwUnexpectedToken()
if (not head):
self.state['curlyStack'].pop();
return {
'type': Token.Template,
'value': {
'cooked': cooked,
'raw': self.source[start + 1:self.index - rawOffset]},
'head': head,
'tail': tail,
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': start,
'end': self.index}
def testRegExp(self, pattern, flags):
# todo: you should return python regexp object
return (pattern, flags)
def scanRegExpBody(self):
ch = self.source[self.index]
assert ch == '/', 'Regular expression literal must start with a slash'
st = ch
self.index += 1
classMarker = False
terminated = False
while (self.index < self.length):
ch = self.source[self.index]
self.index += 1
st += ch
if (ch == '\\'):
ch = self.source[self.index]
self.index += 1
# ECMA-262 7.8.5
if (isLineTerminator(ch)):
self.throwUnexpectedToken(None, Messages.UnterminatedRegExp)
st += ch
elif (isLineTerminator(ch)):
self.throwUnexpectedToken(None, Messages.UnterminatedRegExp)
elif (classMarker):
if (ch == ']'):
classMarker = False
else:
if (ch == '/'):
terminated = True
break
elif (ch == '['):
classMarker = True;
if (not terminated):
self.throwUnexpectedToken(None, Messages.UnterminatedRegExp)
# Exclude leading and trailing slash.
body = st[1:-1]
return {
'value': body,
'literal': st}
def scanRegExpFlags(self):
st = ''
flags = ''
while (self.index < self.length):
ch = self.source[self.index]
if (not isIdentifierPart(ch)):
break
self.index += 1
if (ch == '\\' and self.index < self.length):
ch = self.source[self.index]
if (ch == 'u'):
self.index += 1
restore = self.index
ch = self.scanHexEscape('u')
if (ch):
flags += ch
st += '\\u'
while restore < self.index:
st += self.source[restore]
restore += 1
else:
self.index = restore
flags += 'u'
st += '\\u'
self.tolerateUnexpectedToken()
else:
st += '\\'
self.tolerateUnexpectedToken()
else:
flags += ch
st += ch
return {
'value': flags,
'literal': st}
def scanRegExp(self, comments):
self.scanning = True
self.lookahead = None
comments.extend(self.skipComment())
start = self.index
body = self.scanRegExpBody()
flags = self.scanRegExpFlags()
value = self.testRegExp(body['value'], flags['value'])
scanning = False
return {
'literal': body['literal'] + flags['literal'],
'value': value,
'regex': {
'pattern': body['value'],
'flags': flags['value']
},
'start': start,
'end': self.index,
'comments': comments}
def collectRegex(self):
return self.scanRegExp(self.skipComment())
def isIdentifierName(self, token):
return token['type'] in (1, 3, 4, 5)
# def advanceSlash(self): ???
def advanceWithComments(self, comments):
token = self.advance()
token['comments'] = comments
return token
def advance(self):
if (self.index >= self.length):
return {
'type': Token.EOF,
'lineNumber': self.lineNumber,
'lineStart': self.lineStart,
'start': self.index,
'end': self.index}
ch = self.ccode()
if isIdentifierStart(ch):
token = self.scanIdentifier()
if (self.strict and isStrictModeReservedWord(token['value'])):
token['type'] = Token.Keyword
return token
# Very common: ( and ) and ;
if (ch == 0x28 or ch == 0x29 or ch == 0x3B):
return self.scanPunctuator()
# String literal starts with single quote (U+0027) or double quote (U+0022).
if (ch == 0x27 or ch == 0x22):
return self.scanStringLiteral()
# Dot (.) U+002E can also start a floating-point number, hence the need
# to check the next character.
if (ch == 0x2E):
if (isDecimalDigit(self.ccode(1))):
return self.scanNumericLiteral()
return self.scanPunctuator();
if (isDecimalDigit(ch)):
return self.scanNumericLiteral()
# Slash (/) U+002F can also start a regex.
# if (extra.tokenize && ch == 0x2F):
# return advanceSlash();
# Template literals start with ` (U+0060) for template head
# or } (U+007D) for template middle or template tail.
if (ch == 0x60 or (ch == 0x7D and self.state['curlyStack'][len(self.state['curlyStack']) - 1] == '${')):
return self.scanTemplate()
return self.scanPunctuator();
# def collectToken(self):
# loc = {
# 'start': {
# 'line': self.lineNumber,
# 'column': self.index - self.lineStart}}
#
# token = self.advance()
#
# loc['end'] = {
# 'line': self.lineNumber,
# 'column': self.index - self.lineStart}
# if (token['type'] != Token.EOF):
# value = self.source[token['start']: token['end']]
# entry = {
# 'type': TokenName[token['type']],
# 'value': value,
# 'range': [token['start'], token['end']],
# 'loc': loc}
# if (token.get('regex')):
# entry['regex'] = {
# 'pattern': token['regex']['pattern'],
# 'flags': token['regex']['flags']}
# self.extra['tokens'].append(entry)
# return token;
def lex(self):
self.scanning = True
self.lastIndex = self.index
self.lastLineNumber = self.lineNumber
self.lastLineStart = self.lineStart
comments = self.skipComment()
token = self.lookahead
self.startIndex = self.index
self.startLineNumber = self.lineNumber
self.startLineStart = self.lineStart
self.lookahead = self.advanceWithComments(comments)
self.scanning = False
return token
def peek(self):
self.scanning = True
comments = self.skipComment()
self.lastIndex = self.index
self.lastLineNumber = self.lineNumber
self.lastLineStart = self.lineStart
self.startIndex = self.index
self.startLineNumber = self.lineNumber
self.startLineStart = self.lineStart
self.lookahead = self.advanceWithComments(comments)
self.scanning = False
def createError(self, line, pos, description):
global ENABLE_PYIMPORT
msg = 'Line ' + unicode(line) + ': ' + unicode(description)
if ENABLE_JS2PY_ERRORS:
return ENABLE_JS2PY_ERRORS(msg)
else:
return JsSyntaxError(msg)
# Throw an exception
def throwError(self, messageFormat, *args):
msg = messageFormat % tuple(unicode(e) for e in args)
raise self.createError(self.lastLineNumber, self.lastIndex, msg);
def tolerateError(self, messageFormat, *args):
return self.throwError(messageFormat, *args)
# Throw an exception because of the token.
def unexpectedTokenError(self, token={}, message=''):
msg = message or Messages.UnexpectedToken
if (token):
typ = token['type']
if (not message):
if typ == Token.EOF:
msg = Messages.UnexpectedEOS
elif (typ == Token.Identifier):
msg = Messages.UnexpectedIdentifier
elif (typ == Token.NumericLiteral):
msg = Messages.UnexpectedNumber
elif (typ == Token.StringLiteral):
msg = Messages.UnexpectedString
elif (typ == Token.Template):
msg = Messages.UnexpectedTemplate
else:
msg = Messages.UnexpectedToken;
if (typ == Token.Keyword):
if (isFutureReservedWord(token['value'])):
msg = Messages.UnexpectedReserved
elif (self.strict and isStrictModeReservedWord(token['value'])):
msg = Messages.StrictReservedWord
value = token['value']['raw'] if (typ == Token.Template) else token.get('value')
else:
value = 'ILLEGAL'
msg = msg.replace('%s', unicode(value))
return (self.createError(token['lineNumber'], token['start'], msg) if (token and token.get('lineNumber')) else
self.createError(self.lineNumber if self.scanning else self.lastLineNumber,
self.index if self.scanning else self.lastIndex, msg))
def throwUnexpectedToken(self, token={}, message=''):
raise self.unexpectedTokenError(token, message)
def tolerateUnexpectedToken(self, token={}, message=''):
self.throwUnexpectedToken(token, message)
# Expect the next token to match the specified punctuator.
# If not, an exception will be thrown.
def expect(self, value):
token = self.lex()
if (token['type'] != Token.Punctuator or token['value'] != value):
self.throwUnexpectedToken(token)
# /**
# * @name expectCommaSeparator
# * @description Quietly expect a comma when in tolerant mode, otherwise delegates
# * to <code>expect(value)</code>
# * @since 2.0
# */
def expectCommaSeparator(self):
self.expect(',')
# Expect the next token to match the specified keyword.
# If not, an exception will be thrown.
def expectKeyword(self, keyword):
token = self.lex();
if (token['type'] != Token.Keyword or token['value'] != keyword):
self.throwUnexpectedToken(token)
# Return true if the next token matches the specified punctuator.
def match(self, value):
return self.lookahead['type'] == Token.Punctuator and self.lookahead['value'] == value
# Return true if the next token matches the specified keyword
def matchKeyword(self, keyword):
return self.lookahead['type'] == Token.Keyword and self.lookahead['value'] == keyword
# Return true if the next token matches the specified contextual keyword
# (where an identifier is sometimes a keyword depending on the context)
def matchContextualKeyword(self, keyword):
return self.lookahead['type'] == Token.Identifier and self.lookahead['value'] == keyword
# Return true if the next token is an assignment operator
def matchAssign(self):
if (self.lookahead['type'] != Token.Punctuator):
return False;
op = self.lookahead['value']
return op in ('=', '*=', '/=', '%=', '+=', '-=', '<<=', '>>=', '>>>=', '&=', '^=', '|=')
def consumeSemicolon(self):
# Catch the very common case first: immediately a semicolon (U+003B).
if (self.at(self.startIndex) == ';' or self.match(';')):
self.lex()
return
if (self.hasLineTerminator):
return
# TODO: FIXME(ikarienator): this is seemingly an issue in the previous location info convention.
self.lastIndex = self.startIndex
self.lastLineNumber = self.startLineNumber
self.lastLineStart = self.startLineStart
if (self.lookahead['type'] != Token.EOF and not self.match('}')):
self.throwUnexpectedToken(self.lookahead)
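    # For illustration, this implements automatic semicolon insertion: an explicit
    # ';' is consumed, a statement ending before a line terminator, before '}' or
    # at EOF is accepted as-is, and anything else is reported as unexpected.
    # E.g. 'var a = 1\nvar b = 2' parses fine, while 'var a = 1 var b = 2' does not.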
# // Cover grammar support.
# //
# // When an assignment expression position starts with an left parenthesis, the determination of the type
# // of the syntax is to be deferred arbitrarily long until the end of the parentheses pair (plus a lookahead)
# // or the first comma. This situation also defers the determination of all the expressions nested in the pair.
# //
# // There are three productions that can be parsed in a parentheses pair that needs to be determined
# // after the outermost pair is closed. They are:
# //
# // 1. AssignmentExpression
# // 2. BindingElements
# // 3. AssignmentTargets
# //
# // In order to avoid exponential backtracking, we use two flags to denote if the production can be
# // binding element or assignment target.
# //
# // The three productions have the relationship:
# //
# // BindingElements <= AssignmentTargets <= AssignmentExpression
# //
# // with a single exception that CoverInitializedName when used directly in an Expression, generates
# // an early error. Therefore, we need the third state, firstCoverInitializedNameError, to track the
# // first usage of CoverInitializedName and report it when we reached the end of the parentheses pair.
# //
# // isolateCoverGrammar function runs the given parser function with a new cover grammar context, and it does not
# // effect the current flags. This means the production the parser parses is only used as an expression. Therefore
# // the CoverInitializedName check is conducted.
# //
# // inheritCoverGrammar function runs the given parse function with a new cover grammar context, and it propagates
# // the flags outside of the parser. This means the production the parser parses is used as a part of a potential
# // pattern. The CoverInitializedName check is deferred.
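    # A concrete example of the distinction described above: in '({a = 1} = obj)'
    # the object literal is reinterpreted as a binding pattern, so the shorthand
    # initializer 'a = 1' (a CoverInitializedName) is legal, whereas in the plain
    # expression '({a = 1})' it must be reported as an error once the closing
    # parenthesis is reached; firstCoverInitializedNameError remembers where it
    # was first seen so the error can be raised at that point.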
def isolateCoverGrammar(self, parser):
oldIsBindingElement = self.isBindingElement
oldIsAssignmentTarget = self.isAssignmentTarget
oldFirstCoverInitializedNameError = self.firstCoverInitializedNameError
self.isBindingElement = true
self.isAssignmentTarget = true
self.firstCoverInitializedNameError = null
result = parser()
if (self.firstCoverInitializedNameError != null):
self.throwUnexpectedToken(self.firstCoverInitializedNameError)
self.isBindingElement = oldIsBindingElement
self.isAssignmentTarget = oldIsAssignmentTarget
self.firstCoverInitializedNameError = oldFirstCoverInitializedNameError
return result
def inheritCoverGrammar(self, parser):
oldIsBindingElement = self.isBindingElement
oldIsAssignmentTarget = self.isAssignmentTarget
oldFirstCoverInitializedNameError = self.firstCoverInitializedNameError
self.isBindingElement = true
self.isAssignmentTarget = true
self.firstCoverInitializedNameError = null
result = parser()
self.isBindingElement = self.isBindingElement and oldIsBindingElement
self.isAssignmentTarget = self.isAssignmentTarget and oldIsAssignmentTarget
self.firstCoverInitializedNameError = oldFirstCoverInitializedNameError or self.firstCoverInitializedNameError
return result
def parseArrayPattern(self):
node = Node()
elements = []
self.expect('[');
while (not self.match(']')):
if (self.match(',')):
self.lex()
elements.append(null)
else:
if (self.match('...')):
restNode = Node()
self.lex()
rest = self.parseVariableIdentifier()
elements.append(restNode.finishRestElement(rest))
break
else:
elements.append(self.parsePatternWithDefault())
if (not self.match(']')):
self.expect(',')
self.expect(']')
return node.finishArrayPattern(elements)
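    # For illustration: the pattern '[a, , ...rest]' yields elements of roughly
    # the shape [Identifier a, null (the elided hole), RestElement rest]; the
    # rest element has to be last, which is why the loop breaks after parsing it.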
def parsePropertyPattern(self):
node = Node()
computed = self.match('[')
if (self.lookahead['type'] == Token.Identifier):
key = self.parseVariableIdentifier()
if (self.match('=')):
self.lex();
init = self.parseAssignmentExpression()
return node.finishProperty(
'init', key, false, WrappingNode(key).finishAssignmentPattern(key, init), false, false)
elif (not self.match(':')):
return node.finishProperty('init', key, false, key, false, true)
else:
key = self.parseObjectPropertyKey()
self.expect(':')
init = self.parsePatternWithDefault()
return node.finishProperty('init', key, computed, init, false, false)
def parseObjectPattern(self):
node = Node()
properties = []
self.expect('{')
while (not self.match('}')):
properties.append(self.parsePropertyPattern())
if (not self.match('}')):
self.expect(',')
self.lex()
return node.finishObjectPattern(properties)
def parsePattern(self):
if (self.lookahead['type'] == Token.Identifier):
return self.parseVariableIdentifier()
elif (self.match('[')):
return self.parseArrayPattern()
elif (self.match('{')):
return self.parseObjectPattern()
self.throwUnexpectedToken(self.lookahead)
def parsePatternWithDefault(self):
startToken = self.lookahead
pattern = self.parsePattern()
if (self.match('=')):
self.lex()
right = self.isolateCoverGrammar(self.parseAssignmentExpression)
pattern = WrappingNode(startToken).finishAssignmentPattern(pattern, right)
return pattern
# 11.1.4 Array Initialiser
def parseArrayInitialiser(self):
elements = []
node = Node()
self.expect('[')
while (not self.match(']')):
if (self.match(',')):
self.lex()
elements.append(null)
elif (self.match('...')):
restSpread = Node()
self.lex()
restSpread.finishSpreadElement(self.inheritCoverGrammar(self.parseAssignmentExpression))
if (not self.match(']')):
self.isAssignmentTarget = self.isBindingElement = false
self.expect(',')
elements.append(restSpread)
else:
elements.append(self.inheritCoverGrammar(self.parseAssignmentExpression))
if (not self.match(']')):
self.expect(',')
self.lex();
return node.finishArrayExpression(elements)
# 11.1.5 Object Initialiser
def parsePropertyFunction(self, node, paramInfo):
self.isAssignmentTarget = self.isBindingElement = false;
previousStrict = self.strict;
body = self.isolateCoverGrammar(self.parseFunctionSourceElements);
if (self.strict and paramInfo['firstRestricted']):
self.tolerateUnexpectedToken(paramInfo['firstRestricted'], paramInfo.get('message'))
if (self.strict and paramInfo.get('stricted')):
self.tolerateUnexpectedToken(paramInfo.get('stricted'), paramInfo.get('message'));
self.strict = previousStrict;
return node.finishFunctionExpression(null, paramInfo.get('params'), paramInfo.get('defaults'), body)
def parsePropertyMethodFunction(self):
node = Node();
params = self.parseParams(null);
method = self.parsePropertyFunction(node, params);
return method;
def parseObjectPropertyKey(self):
node = Node()
token = self.lex();
# // Note: This function is called only from parseObjectProperty(), where
# // EOF and Punctuator tokens are already filtered out.
typ = token['type']
if typ in [Token.StringLiteral, Token.NumericLiteral]:
if self.strict and token.get('octal'):
self.tolerateUnexpectedToken(token, Messages.StrictOctalLiteral);
return node.finishLiteral(token);
elif typ in (Token.Identifier, Token.BooleanLiteral, Token.NullLiteral, Token.Keyword):
return node.finishIdentifier(token['value']);
elif typ == Token.Punctuator:
if (token['value'] == '['):
expr = self.isolateCoverGrammar(self.parseAssignmentExpression)
self.expect(']')
return expr
self.throwUnexpectedToken(token)
def lookaheadPropertyName(self):
typ = self.lookahead['type']
if typ in (Token.Identifier, Token.StringLiteral, Token.BooleanLiteral, Token.NullLiteral, Token.NumericLiteral,
Token.Keyword):
return true
if typ == Token.Punctuator:
return self.lookahead['value'] == '['
return false
# // This function is to try to parse a MethodDefinition as defined in 14.3. But in the case of object literals,
# // it might be called at a position where there is in fact a short hand identifier pattern or a data property.
# // This can only be determined after we consumed up to the left parentheses.
# //
# // In order to avoid back tracking, it returns `null` if the position is not a MethodDefinition and the caller
# // is responsible to visit other options.
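    # For example, in '{ get x() { return 1; }, m() {}, get: 2 }' the first two
    # properties are method definitions handled below, while 'get: 2' is a plain
    # data property: tryParseMethodDefinition returns null for it and the caller
    # (parseObjectProperty) parses it as an ordinary key/value pair instead.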
def tryParseMethodDefinition(self, token, key, computed, node):
if (token['type'] == Token.Identifier):
# check for `get` and `set`;
if (token['value'] == 'get' and self.lookaheadPropertyName()):
computed = self.match('[');
key = self.parseObjectPropertyKey()
methodNode = Node()
self.expect('(')
self.expect(')')
value = self.parsePropertyFunction(methodNode, {
'params': [],
'defaults': [],
'stricted': null,
'firstRestricted': null,
'message': null
})
return node.finishProperty('get', key, computed, value, false, false)
elif (token['value'] == 'set' and self.lookaheadPropertyName()):
computed = self.match('[')
key = self.parseObjectPropertyKey()
methodNode = Node()
self.expect('(')
options = {
'params': [],
'defaultCount': 0,
'defaults': [],
'firstRestricted': null,
'paramSet': {}
}
if (self.match(')')):
self.tolerateUnexpectedToken(self.lookahead);
else:
self.parseParam(options);
if (options['defaultCount'] == 0):
options['defaults'] = []
self.expect(')')
value = self.parsePropertyFunction(methodNode, options);
return node.finishProperty('set', key, computed, value, false, false);
if (self.match('(')):
value = self.parsePropertyMethodFunction();
return node.finishProperty('init', key, computed, value, true, false)
return null;
def checkProto(self, key, computed, hasProto):
if (computed == false and (key['type'] == Syntax.Identifier and key['name'] == '__proto__' or
key['type'] == Syntax.Literal and key['value'] == '__proto__')):
if (hasProto['value']):
self.tolerateError(Messages.DuplicateProtoProperty);
else:
hasProto['value'] = true;
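    # For illustration: '{__proto__: a, __proto__: b}' triggers
    # Messages.DuplicateProtoProperty, while a computed key such as
    # "{['__proto__']: a, __proto__: b}" does not, because computed keys are
    # excluded by the check above.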
def parseObjectProperty(self, hasProto):
token = self.lookahead
node = Node()
node.comments = self.lookahead.get('comments', [])
computed = self.match('[');
key = self.parseObjectPropertyKey();
maybeMethod = self.tryParseMethodDefinition(token, key, computed, node)
if (maybeMethod):
self.checkProto(maybeMethod['key'], maybeMethod['computed'], hasProto);
return maybeMethod;
# // init property or short hand property.
self.checkProto(key, computed, hasProto);
if (self.match(':')):
self.lex();
value = self.inheritCoverGrammar(self.parseAssignmentExpression)
return node.finishProperty('init', key, computed, value, false, false)
if (token['type'] == Token.Identifier):
if (self.match('=')):
self.firstCoverInitializedNameError = self.lookahead;
self.lex();
value = self.isolateCoverGrammar(self.parseAssignmentExpression);
return node.finishProperty('init', key, computed,
WrappingNode(token).finishAssignmentPattern(key, value), false, true)
return node.finishProperty('init', key, computed, key, false, true)
self.throwUnexpectedToken(self.lookahead)
def parseObjectInitialiser(self):
properties = []
hasProto = {'value': false}
node = Node();
node.comments = self.lookahead.get('comments', [])
self.expect('{');
while (not self.match('}')):
properties.append(self.parseObjectProperty(hasProto));
if (not self.match('}')):
self.expectCommaSeparator()
self.expect('}');
return node.finishObjectExpression(properties)
def reinterpretExpressionAsPattern(self, expr):
typ = (expr['type'])
if typ in (Syntax.Identifier, Syntax.MemberExpression, Syntax.RestElement, Syntax.AssignmentPattern):
pass
elif typ == Syntax.SpreadElement:
expr['type'] = Syntax.RestElement
self.reinterpretExpressionAsPattern(expr.argument)
elif typ == Syntax.ArrayExpression:
expr['type'] = Syntax.ArrayPattern
for i in xrange(len(expr['elements'])):
if (expr['elements'][i] != null):
self.reinterpretExpressionAsPattern(expr['elements'][i])
elif typ == Syntax.ObjectExpression:
expr['type'] = Syntax.ObjectPattern
for i in xrange(len(expr['properties'])):
self.reinterpretExpressionAsPattern(expr['properties'][i]['value']);
        elif typ == Syntax.AssignmentExpression:
expr['type'] = Syntax.AssignmentPattern;
self.reinterpretExpressionAsPattern(expr['left'])
else:
# // Allow other node type for tolerant parsing.
return
def parseTemplateElement(self, option):
if (self.lookahead['type'] != Token.Template or (option['head'] and not self.lookahead['head'])):
self.throwUnexpectedToken()
node = Node();
token = self.lex();
return node.finishTemplateElement({'raw': token['value']['raw'], 'cooked': token['value']['cooked']},
token['tail'])
def parseTemplateLiteral(self):
node = Node()
quasi = self.parseTemplateElement({'head': true})
quasis = [quasi]
expressions = []
while (not quasi['tail']):
expressions.append(self.parseExpression());
quasi = self.parseTemplateElement({'head': false});
quasis.append(quasi)
return node.finishTemplateLiteral(quasis, expressions)
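    # For illustration: the template literal '`a${x}b${y}c`' is split into the
    # quasis ['a', 'b', 'c'] (the last one marked as the tail) and the
    # expressions [x, y]; the loop above alternates between the two until the
    # tail element is reached.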
# 11.1.6 The Grouping Operator
def parseGroupExpression(self):
self.expect('(');
if (self.match(')')):
self.lex();
if (not self.match('=>')):
self.expect('=>')
return {
'type': PlaceHolders.ArrowParameterPlaceHolder,
'params': []}
startToken = self.lookahead
if (self.match('...')):
expr = self.parseRestElement();
self.expect(')');
if (not self.match('=>')):
self.expect('=>')
return {
'type': PlaceHolders.ArrowParameterPlaceHolder,
'params': [expr]}
self.isBindingElement = true;
expr = self.inheritCoverGrammar(self.parseAssignmentExpression);
if (self.match(',')):
self.isAssignmentTarget = false;
expressions = [expr]
while (self.startIndex < self.length):
if (not self.match(',')):
break
self.lex();
if (self.match('...')):
if (not self.isBindingElement):
self.throwUnexpectedToken(self.lookahead)
expressions.append(self.parseRestElement())
self.expect(')');
if (not self.match('=>')):
self.expect('=>');
self.isBindingElement = false
for i in xrange(len(expressions)):
self.reinterpretExpressionAsPattern(expressions[i])
return {
'type': PlaceHolders.ArrowParameterPlaceHolder,
'params': expressions}
expressions.append(self.inheritCoverGrammar(self.parseAssignmentExpression))
expr = WrappingNode(startToken).finishSequenceExpression(expressions);
self.expect(')')
if (self.match('=>')):
if (not self.isBindingElement):
self.throwUnexpectedToken(self.lookahead);
if (expr['type'] == Syntax.SequenceExpression):
for i in xrange(len(expr.expressions)):
self.reinterpretExpressionAsPattern(expr['expressions'][i])
else:
self.reinterpretExpressionAsPattern(expr);
expr = {
'type': PlaceHolders.ArrowParameterPlaceHolder,
'params': expr['expressions'] if expr['type'] == Syntax.SequenceExpression else [expr]}
self.isBindingElement = false
return expr
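    # For illustration: '(a, b)' on its own is parsed as a SequenceExpression,
    # but when the same tokens are immediately followed by '=>' they are handed
    # back as an ArrowParameterPlaceHolder, which parseAssignmentExpression later
    # converts into formal parameters via reinterpretAsCoverFormalsList.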
# 11.1 Primary Expressions
def parsePrimaryExpression(self):
if (self.match('(')):
self.isBindingElement = false;
return self.inheritCoverGrammar(self.parseGroupExpression)
if (self.match('[')):
return self.inheritCoverGrammar(self.parseArrayInitialiser)
if (self.match('{')):
return self.inheritCoverGrammar(self.parseObjectInitialiser)
typ = self.lookahead['type']
node = Node();
node.comments = self.lookahead.get('comments', [])
if (typ == Token.Identifier):
expr = node.finishIdentifier(self.lex()['value']);
elif (typ == Token.StringLiteral or typ == Token.NumericLiteral):
self.isAssignmentTarget = self.isBindingElement = false
if (self.strict and self.lookahead.get('octal')):
self.tolerateUnexpectedToken(self.lookahead, Messages.StrictOctalLiteral)
expr = node.finishLiteral(self.lex())
elif (typ == Token.Keyword):
self.isAssignmentTarget = self.isBindingElement = false
if (self.matchKeyword('function')):
return self.parseFunctionExpression()
if (self.matchKeyword('this')):
self.lex()
return node.finishThisExpression()
if (self.matchKeyword('class')):
return self.parseClassExpression()
self.throwUnexpectedToken(self.lex())
elif (typ == Token.BooleanLiteral):
            self.isAssignmentTarget = self.isBindingElement = false
token = self.lex();
token['value'] = (token['value'] == 'true')
expr = node.finishLiteral(token)
elif (typ == Token.NullLiteral):
self.isAssignmentTarget = self.isBindingElement = false
token = self.lex()
token['value'] = null;
expr = node.finishLiteral(token)
elif (self.match('/') or self.match('/=')):
self.isAssignmentTarget = self.isBindingElement = false;
self.index = self.startIndex;
token = self.scanRegExp([]); # hehe, here you are!
self.lex();
expr = node.finishLiteral(token);
elif (typ == Token.Template):
expr = self.parseTemplateLiteral()
else:
self.throwUnexpectedToken(self.lex());
return expr;
# 11.2 Left-Hand-Side Expressions
def parseArguments(self):
args = [];
self.expect('(');
if (not self.match(')')):
while (self.startIndex < self.length):
args.append(self.isolateCoverGrammar(self.parseAssignmentExpression))
if (self.match(')')):
break
self.expectCommaSeparator()
self.expect(')')
return args;
def parseNonComputedProperty(self):
node = Node()
token = self.lex();
if (not self.isIdentifierName(token)):
self.throwUnexpectedToken(token)
return node.finishIdentifier(token['value'])
def parseNonComputedMember(self):
self.expect('.')
return self.parseNonComputedProperty();
def parseComputedMember(self):
self.expect('[')
expr = self.isolateCoverGrammar(self.parseExpression)
self.expect(']')
return expr
def parseNewExpression(self):
node = Node()
self.expectKeyword('new')
callee = self.isolateCoverGrammar(self.parseLeftHandSideExpression)
args = self.parseArguments() if self.match('(') else []
self.isAssignmentTarget = self.isBindingElement = false
return node.finishNewExpression(callee, args)
def parseLeftHandSideExpressionAllowCall(self):
previousAllowIn = self.state['allowIn']
startToken = self.lookahead;
self.state['allowIn'] = true;
if (self.matchKeyword('super') and self.state['inFunctionBody']):
expr = Node();
self.lex();
expr = expr.finishSuper()
if (not self.match('(') and not self.match('.') and not self.match('[')):
self.throwUnexpectedToken(self.lookahead);
else:
expr = self.inheritCoverGrammar(
self.parseNewExpression if self.matchKeyword('new') else self.parsePrimaryExpression)
while True:
if (self.match('.')):
self.isBindingElement = false;
self.isAssignmentTarget = true;
property = self.parseNonComputedMember();
expr = WrappingNode(startToken).finishMemberExpression('.', expr, property)
elif (self.match('(')):
self.isBindingElement = false;
self.isAssignmentTarget = false;
args = self.parseArguments();
expr = WrappingNode(startToken).finishCallExpression(expr, args)
elif (self.match('[')):
self.isBindingElement = false;
self.isAssignmentTarget = true;
property = self.parseComputedMember();
expr = WrappingNode(startToken).finishMemberExpression('[', expr, property)
elif (self.lookahead['type'] == Token.Template and self.lookahead['head']):
quasi = self.parseTemplateLiteral()
expr = WrappingNode(startToken).finishTaggedTemplateExpression(expr, quasi)
else:
break
self.state['allowIn'] = previousAllowIn
return expr
def parseLeftHandSideExpression(self):
assert self.state['allowIn'], 'callee of new expression always allow in keyword.'
startToken = self.lookahead
if (self.matchKeyword('super') and self.state['inFunctionBody']):
expr = Node();
self.lex();
expr = expr.finishSuper();
if (not self.match('[') and not self.match('.')):
self.throwUnexpectedToken(self.lookahead)
else:
expr = self.inheritCoverGrammar(
self.parseNewExpression if self.matchKeyword('new') else self.parsePrimaryExpression);
while True:
if (self.match('[')):
self.isBindingElement = false;
self.isAssignmentTarget = true;
property = self.parseComputedMember();
expr = WrappingNode(startToken).finishMemberExpression('[', expr, property)
elif (self.match('.')):
self.isBindingElement = false;
self.isAssignmentTarget = true;
property = self.parseNonComputedMember();
expr = WrappingNode(startToken).finishMemberExpression('.', expr, property);
elif (self.lookahead['type'] == Token.Template and self.lookahead['head']):
quasi = self.parseTemplateLiteral();
expr = WrappingNode(startToken).finishTaggedTemplateExpression(expr, quasi)
else:
break
return expr
# 11.3 Postfix Expressions
def parsePostfixExpression(self):
startToken = self.lookahead
expr = self.inheritCoverGrammar(self.parseLeftHandSideExpressionAllowCall)
if (not self.hasLineTerminator and self.lookahead['type'] == Token.Punctuator):
if (self.match('++') or self.match('--')):
# 11.3.1, 11.3.2
if (self.strict and expr.type == Syntax.Identifier and isRestrictedWord(expr.name)):
self.tolerateError(Messages.StrictLHSPostfix)
if (not self.isAssignmentTarget):
self.tolerateError(Messages.InvalidLHSInAssignment);
self.isAssignmentTarget = self.isBindingElement = false;
token = self.lex();
expr = WrappingNode(startToken).finishPostfixExpression(token['value'], expr);
return expr;
# 11.4 Unary Operators
def parseUnaryExpression(self):
if (self.lookahead['type'] != Token.Punctuator and self.lookahead['type'] != Token.Keyword):
expr = self.parsePostfixExpression();
elif (self.match('++') or self.match('--')):
startToken = self.lookahead;
token = self.lex();
expr = self.inheritCoverGrammar(self.parseUnaryExpression);
# 11.4.4, 11.4.5
if (self.strict and expr.type == Syntax.Identifier and isRestrictedWord(expr.name)):
self.tolerateError(Messages.StrictLHSPrefix)
if (not self.isAssignmentTarget):
self.tolerateError(Messages.InvalidLHSInAssignment)
expr = WrappingNode(startToken).finishUnaryExpression(token['value'], expr)
self.isAssignmentTarget = self.isBindingElement = false
elif (self.match('+') or self.match('-') or self.match('~') or self.match('!')):
startToken = self.lookahead;
token = self.lex();
expr = self.inheritCoverGrammar(self.parseUnaryExpression);
expr = WrappingNode(startToken).finishUnaryExpression(token['value'], expr)
self.isAssignmentTarget = self.isBindingElement = false;
elif (self.matchKeyword('delete') or self.matchKeyword('void') or self.matchKeyword('typeof')):
startToken = self.lookahead;
token = self.lex();
expr = self.inheritCoverGrammar(self.parseUnaryExpression);
expr = WrappingNode(startToken).finishUnaryExpression(token['value'], expr);
if (self.strict and expr.operator == 'delete' and expr.argument.type == Syntax.Identifier):
self.tolerateError(Messages.StrictDelete)
self.isAssignmentTarget = self.isBindingElement = false;
else:
expr = self.parsePostfixExpression()
return expr
def binaryPrecedence(self, token, allowIn):
prec = 0;
typ = token['type']
if (typ != Token.Punctuator and typ != Token.Keyword):
return 0;
val = token['value']
if val == 'in' and not allowIn:
return 0
return PRECEDENCE.get(val, 0)
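    # For illustration (assuming the esprima-style PRECEDENCE table defined
    # elsewhere in this module): '*' maps to a higher value than '+', '&&' to a
    # higher value than '||', and 'in' is treated as precedence 0 inside a
    # for-loop header (allowIn false) so that 'for (x in obj)' is not parsed as
    # a binary 'in' expression.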
# 11.5 Multiplicative Operators
# 11.6 Additive Operators
# 11.7 Bitwise Shift Operators
# 11.8 Relational Operators
# 11.9 Equality Operators
# 11.10 Binary Bitwise Operators
# 11.11 Binary Logical Operators
def parseBinaryExpression(self):
marker = self.lookahead;
left = self.inheritCoverGrammar(self.parseUnaryExpression);
token = self.lookahead;
prec = self.binaryPrecedence(token, self.state['allowIn']);
if (prec == 0):
return left
self.isAssignmentTarget = self.isBindingElement = false;
token['prec'] = prec
self.lex()
markers = [marker, self.lookahead];
right = self.isolateCoverGrammar(self.parseUnaryExpression);
stack = [left, token, right];
while True:
prec = self.binaryPrecedence(self.lookahead, self.state['allowIn'])
if not prec > 0:
break
# Reduce: make a binary expression from the three topmost entries.
while ((len(stack) > 2) and (prec <= stack[len(stack) - 2]['prec'])):
right = stack.pop();
operator = stack.pop()['value']
left = stack.pop()
markers.pop()
expr = WrappingNode(markers[len(markers) - 1]).finishBinaryExpression(operator, left, right)
stack.append(expr)
# Shift
token = self.lex();
token['prec'] = prec;
stack.append(token);
markers.append(self.lookahead);
expr = self.isolateCoverGrammar(self.parseUnaryExpression);
stack.append(expr);
# Final reduce to clean-up the stack.
i = len(stack) - 1;
expr = stack[i]
markers.pop()
while (i > 1):
expr = WrappingNode(markers.pop()).finishBinaryExpression(stack[i - 1]['value'], stack[i - 2], expr);
i -= 2
return expr
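    # A rough walk-through of the shift/reduce loop above on '1 + 2 * 3':
    # the stack starts as [1, '+', 2]; '*' binds tighter than '+', so 3 is
    # shifted, giving [1, '+', 2, '*', 3]; the final reduce then builds
    # (2 * 3) first and (1 + (2 * 3)) second, producing the expected tree.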
# 11.12 Conditional Operator
def parseConditionalExpression(self):
startToken = self.lookahead
expr = self.inheritCoverGrammar(self.parseBinaryExpression);
if (self.match('?')):
self.lex()
previousAllowIn = self.state['allowIn']
self.state['allowIn'] = true;
consequent = self.isolateCoverGrammar(self.parseAssignmentExpression);
self.state['allowIn'] = previousAllowIn;
self.expect(':');
alternate = self.isolateCoverGrammar(self.parseAssignmentExpression)
expr = WrappingNode(startToken).finishConditionalExpression(expr, consequent, alternate);
self.isAssignmentTarget = self.isBindingElement = false;
return expr
# [ES6] 14.2 Arrow Function
def parseConciseBody(self):
if (self.match('{')):
return self.parseFunctionSourceElements()
return self.isolateCoverGrammar(self.parseAssignmentExpression)
def checkPatternParam(self, options, param):
typ = param.type
if typ == Syntax.Identifier:
self.validateParam(options, param, param.name);
elif typ == Syntax.RestElement:
self.checkPatternParam(options, param.argument)
elif typ == Syntax.AssignmentPattern:
self.checkPatternParam(options, param.left)
elif typ == Syntax.ArrayPattern:
for i in xrange(len(param.elements)):
if (param.elements[i] != null):
self.checkPatternParam(options, param.elements[i]);
else:
assert typ == Syntax.ObjectPattern, 'Invalid type'
for i in xrange(len(param.properties)):
self.checkPatternParam(options, param.properties[i]['value']);
def reinterpretAsCoverFormalsList(self, expr):
defaults = [];
defaultCount = 0;
params = [expr];
typ = expr.type
if typ == Syntax.Identifier:
pass
elif typ == PlaceHolders.ArrowParameterPlaceHolder:
params = expr.params
else:
return null
options = {
'paramSet': {}}
le = len(params)
for i in xrange(le):
param = params[i]
if param.type == Syntax.AssignmentPattern:
params[i] = param.left;
defaults.append(param.right);
defaultCount += 1
self.checkPatternParam(options, param.left);
else:
self.checkPatternParam(options, param);
params[i] = param;
defaults.append(null);
if (options.get('message') == Messages.StrictParamDupe):
token = options.get('stricted') if self.strict else options['firstRestricted']
self.throwUnexpectedToken(token, options.get('message'));
if (defaultCount == 0):
defaults = []
return {
'params': params,
'defaults': defaults,
            'stricted': options.get('stricted'),
            'firstRestricted': options.get('firstRestricted'),
'message': options.get('message')}
def parseArrowFunctionExpression(self, options, node):
if (self.hasLineTerminator):
self.tolerateUnexpectedToken(self.lookahead)
self.expect('=>')
previousStrict = self.strict;
body = self.parseConciseBody();
if (self.strict and options['firstRestricted']):
self.throwUnexpectedToken(options['firstRestricted'], options.get('message'));
if (self.strict and options['stricted']):
self.tolerateUnexpectedToken(options['stricted'], options['message']);
self.strict = previousStrict
return node.finishArrowFunctionExpression(options['params'], options['defaults'], body,
body.type != Syntax.BlockStatement)
# 11.13 Assignment Operators
def parseAssignmentExpression(self):
startToken = self.lookahead;
token = self.lookahead;
expr = self.parseConditionalExpression();
if (expr.type == PlaceHolders.ArrowParameterPlaceHolder or self.match('=>')):
self.isAssignmentTarget = self.isBindingElement = false;
lis = self.reinterpretAsCoverFormalsList(expr)
if (lis):
self.firstCoverInitializedNameError = null;
return self.parseArrowFunctionExpression(lis, WrappingNode(startToken))
return expr
if (self.matchAssign()):
if (not self.isAssignmentTarget):
self.tolerateError(Messages.InvalidLHSInAssignment)
# 11.13.1
if (self.strict and expr.type == Syntax.Identifier and isRestrictedWord(expr.name)):
self.tolerateUnexpectedToken(token, Messages.StrictLHSAssignment);
if (not self.match('=')):
self.isAssignmentTarget = self.isBindingElement = false;
else:
self.reinterpretExpressionAsPattern(expr)
token = self.lex();
right = self.isolateCoverGrammar(self.parseAssignmentExpression)
expr = WrappingNode(startToken).finishAssignmentExpression(token['value'], expr, right);
self.firstCoverInitializedNameError = null
return expr
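    # For illustration: for '(a, b) => a + b' the conditional-expression parse
    # above yields an ArrowParameterPlaceHolder with params [a, b], which
    # reinterpretAsCoverFormalsList validates before parseArrowFunctionExpression
    # consumes '=>' and the concise body. For 'x = 1' the matchAssign branch is
    # taken instead, and the left side is reinterpreted as a pattern only when
    # the operator is a plain '='.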
# 11.14 Comma Operator
def parseExpression(self):
startToken = self.lookahead
expr = self.isolateCoverGrammar(self.parseAssignmentExpression)
if (self.match(',')):
expressions = [expr];
while (self.startIndex < self.length):
if (not self.match(',')):
break
self.lex();
expressions.append(self.isolateCoverGrammar(self.parseAssignmentExpression))
expr = WrappingNode(startToken).finishSequenceExpression(expressions);
return expr
# 12.1 Block
def parseStatementListItem(self):
if (self.lookahead['type'] == Token.Keyword):
val = (self.lookahead['value'])
if val == 'export':
if (self.sourceType != 'module'):
self.tolerateUnexpectedToken(self.lookahead, Messages.IllegalExportDeclaration)
return self.parseExportDeclaration();
elif val == 'import':
if (self.sourceType != 'module'):
self.tolerateUnexpectedToken(self.lookahead, Messages.IllegalImportDeclaration);
return self.parseImportDeclaration();
elif val == 'const' or val == 'let':
return self.parseLexicalDeclaration({'inFor': false});
elif val == 'function':
return self.parseFunctionDeclaration(Node());
elif val == 'class':
return self.parseClassDeclaration();
elif ENABLE_PYIMPORT and val == 'pyimport': # <<<<< MODIFIED HERE
return self.parsePyimportStatement()
return self.parseStatement();
def parsePyimportStatement(self):
print(ENABLE_PYIMPORT)
assert ENABLE_PYIMPORT
n = Node()
self.lex()
n.finishPyimport(self.parseVariableIdentifier())
self.consumeSemicolon()
return n
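    # Note: 'pyimport' is not part of ECMAScript; it is a js2py extension that is
    # only recognised when ENABLE_PYIMPORT is set. For example 'pyimport math;'
    # produces a node (via finishPyimport) wrapping the identifier 'math'.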
def parseStatementList(self):
list = [];
while (self.startIndex < self.length):
if (self.match('}')):
break
list.append(self.parseStatementListItem())
return list
def parseBlock(self):
node = Node();
self.expect('{');
block = self.parseStatementList()
self.expect('}');
return node.finishBlockStatement(block);
# 12.2 Variable Statement
def parseVariableIdentifier(self):
node = Node()
token = self.lex()
if (token['type'] != Token.Identifier):
if (self.strict and token['type'] == Token.Keyword and isStrictModeReservedWord(token['value'])):
self.tolerateUnexpectedToken(token, Messages.StrictReservedWord);
else:
self.throwUnexpectedToken(token)
return node.finishIdentifier(token['value'])
def parseVariableDeclaration(self):
init = null
node = Node();
d = self.parsePattern();
# 12.2.1
if (self.strict and isRestrictedWord(d.name)):
self.tolerateError(Messages.StrictVarName);
if (self.match('=')):
self.lex();
init = self.isolateCoverGrammar(self.parseAssignmentExpression);
elif (d.type != Syntax.Identifier):
self.expect('=')
return node.finishVariableDeclarator(d, init)
def parseVariableDeclarationList(self):
lis = []
while True:
lis.append(self.parseVariableDeclaration())
if (not self.match(',')):
break
self.lex();
if not (self.startIndex < self.length):
break
return lis;
def parseVariableStatement(self, node):
self.expectKeyword('var')
declarations = self.parseVariableDeclarationList()
self.consumeSemicolon()
return node.finishVariableDeclaration(declarations)
def parseLexicalBinding(self, kind, options):
init = null
node = Node()
d = self.parsePattern();
# 12.2.1
if (self.strict and d.type == Syntax.Identifier and isRestrictedWord(d.name)):
self.tolerateError(Messages.StrictVarName);
if (kind == 'const'):
if (not self.matchKeyword('in')):
self.expect('=')
init = self.isolateCoverGrammar(self.parseAssignmentExpression)
elif ((not options['inFor'] and d.type != Syntax.Identifier) or self.match('=')):
self.expect('=');
init = self.isolateCoverGrammar(self.parseAssignmentExpression);
return node.finishVariableDeclarator(d, init)
def parseBindingList(self, kind, options):
list = [];
while True:
list.append(self.parseLexicalBinding(kind, options));
if (not self.match(',')):
break
self.lex();
if not (self.startIndex < self.length):
break
return list;
def parseLexicalDeclaration(self, options):
node = Node();
kind = self.lex()['value']
assert kind == 'let' or kind == 'const', 'Lexical declaration must be either let or const'
declarations = self.parseBindingList(kind, options);
self.consumeSemicolon();
return node.finishLexicalDeclaration(declarations, kind);
def parseRestElement(self):
node = Node();
self.lex();
if (self.match('{')):
self.throwError(Messages.ObjectPatternAsRestParameter)
param = self.parseVariableIdentifier();
if (self.match('=')):
self.throwError(Messages.DefaultRestParameter);
if (not self.match(')')):
self.throwError(Messages.ParameterAfterRestParameter);
return node.finishRestElement(param);
# 12.3 Empty Statement
def parseEmptyStatement(self, node):
self.expect(';');
return node.finishEmptyStatement()
# 12.4 Expression Statement
def parseExpressionStatement(self, node):
expr = self.parseExpression();
self.consumeSemicolon();
return node.finishExpressionStatement(expr);
# 12.5 If statement
def parseIfStatement(self, node):
self.expectKeyword('if');
self.expect('(');
test = self.parseExpression();
self.expect(')');
consequent = self.parseStatement();
if (self.matchKeyword('else')):
self.lex();
alternate = self.parseStatement();
else:
alternate = null;
return node.finishIfStatement(test, consequent, alternate)
# 12.6 Iteration Statements
def parseDoWhileStatement(self, node):
self.expectKeyword('do')
oldInIteration = self.state['inIteration']
self.state['inIteration'] = true
body = self.parseStatement();
self.state['inIteration'] = oldInIteration;
self.expectKeyword('while');
self.expect('(');
test = self.parseExpression();
self.expect(')')
if (self.match(';')):
self.lex()
return node.finishDoWhileStatement(body, test)
def parseWhileStatement(self, node):
self.expectKeyword('while')
self.expect('(')
test = self.parseExpression()
self.expect(')')
oldInIteration = self.state['inIteration']
self.state['inIteration'] = true
body = self.parseStatement()
self.state['inIteration'] = oldInIteration
return node.finishWhileStatement(test, body)
def parseForStatement(self, node):
previousAllowIn = self.state['allowIn']
init = test = update = null
self.expectKeyword('for')
self.expect('(')
if (self.match(';')):
self.lex()
else:
if (self.matchKeyword('var')):
init = Node()
self.lex()
self.state['allowIn'] = false;
init = init.finishVariableDeclaration(self.parseVariableDeclarationList())
self.state['allowIn'] = previousAllowIn
if (len(init.declarations) == 1 and self.matchKeyword('in')):
self.lex()
left = init
right = self.parseExpression()
init = null
else:
self.expect(';')
elif (self.matchKeyword('const') or self.matchKeyword('let')):
init = Node()
kind = self.lex()['value']
self.state['allowIn'] = false
declarations = self.parseBindingList(kind, {'inFor': true})
self.state['allowIn'] = previousAllowIn
if (len(declarations) == 1 and declarations[0].init == null and self.matchKeyword('in')):
init = init.finishLexicalDeclaration(declarations, kind);
self.lex();
left = init;
right = self.parseExpression();
init = null;
else:
self.consumeSemicolon();
init = init.finishLexicalDeclaration(declarations, kind);
else:
initStartToken = self.lookahead
self.state['allowIn'] = false
init = self.inheritCoverGrammar(self.parseAssignmentExpression);
self.state['allowIn'] = previousAllowIn;
if (self.matchKeyword('in')):
if (not self.isAssignmentTarget):
self.tolerateError(Messages.InvalidLHSInForIn)
self.lex();
self.reinterpretExpressionAsPattern(init);
left = init;
right = self.parseExpression();
init = null;
else:
if (self.match(',')):
initSeq = [init];
while (self.match(',')):
self.lex();
initSeq.append(self.isolateCoverGrammar(self.parseAssignmentExpression))
init = WrappingNode(initStartToken).finishSequenceExpression(initSeq)
self.expect(';');
if ('left' not in locals()):
if (not self.match(';')):
test = self.parseExpression();
self.expect(';');
if (not self.match(')')):
update = self.parseExpression();
self.expect(')');
oldInIteration = self.state['inIteration']
self.state['inIteration'] = true;
body = self.isolateCoverGrammar(self.parseStatement)
self.state['inIteration'] = oldInIteration;
return node.finishForStatement(init, test, update, body) if (
'left' not in locals()) else node.finishForInStatement(left, right, body);
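    # For illustration: this one routine covers 'for (;;) {}',
    # 'for (var i = 0; i < n; i++) {}' and 'for (var k in obj) {}'; whether
    # 'left' ended up defined in locals() after parsing the header decides
    # between finishForStatement and finishForInStatement.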
# 12.7 The continue statement
def parseContinueStatement(self, node):
label = null
self.expectKeyword('continue');
# Optimize the most common form: 'continue;'.
if ord(self.source[self.startIndex]) == 0x3B:
self.lex();
if (not self.state['inIteration']):
self.throwError(Messages.IllegalContinue)
return node.finishContinueStatement(null)
if (self.hasLineTerminator):
if (not self.state['inIteration']):
self.throwError(Messages.IllegalContinue);
return node.finishContinueStatement(null);
if (self.lookahead['type'] == Token.Identifier):
label = self.parseVariableIdentifier();
key = '$' + label.name;
if not key in self.state['labelSet']: # todo make sure its correct!
self.throwError(Messages.UnknownLabel, label.name);
self.consumeSemicolon()
if (label == null and not self.state['inIteration']):
self.throwError(Messages.IllegalContinue)
return node.finishContinueStatement(label)
# 12.8 The break statement
def parseBreakStatement(self, node):
label = null
self.expectKeyword('break');
# Catch the very common case first: immediately a semicolon (U+003B).
if (ord(self.source[self.lastIndex]) == 0x3B):
self.lex();
if (not (self.state['inIteration'] or self.state['inSwitch'])):
self.throwError(Messages.IllegalBreak)
return node.finishBreakStatement(null)
if (self.hasLineTerminator):
if (not (self.state['inIteration'] or self.state['inSwitch'])):
self.throwError(Messages.IllegalBreak);
return node.finishBreakStatement(null);
if (self.lookahead['type'] == Token.Identifier):
label = self.parseVariableIdentifier();
key = '$' + label.name;
if not (key in self.state['labelSet']):
self.throwError(Messages.UnknownLabel, label.name);
self.consumeSemicolon();
if (label == null and not (self.state['inIteration'] or self.state['inSwitch'])):
self.throwError(Messages.IllegalBreak)
return node.finishBreakStatement(label);
# 12.9 The return statement
def parseReturnStatement(self, node):
argument = null;
self.expectKeyword('return');
if (not self.state['inFunctionBody']):
self.tolerateError(Messages.IllegalReturn);
# 'return' followed by a space and an identifier is very common.
if (ord(self.source[self.lastIndex]) == 0x20):
if (isIdentifierStart(self.source[self.lastIndex + 1])):
argument = self.parseExpression();
self.consumeSemicolon();
return node.finishReturnStatement(argument)
if (self.hasLineTerminator):
# HACK
return node.finishReturnStatement(null)
if (not self.match(';')):
if (not self.match('}') and self.lookahead['type'] != Token.EOF):
argument = self.parseExpression();
self.consumeSemicolon();
return node.finishReturnStatement(argument);
# 12.10 The with statement
def parseWithStatement(self, node):
if (self.strict):
self.tolerateError(Messages.StrictModeWith)
self.expectKeyword('with');
self.expect('(');
obj = self.parseExpression();
self.expect(')');
body = self.parseStatement();
return node.finishWithStatement(obj, body);
    # 12.11 The switch statement
def parseSwitchCase(self):
consequent = []
node = Node();
if (self.matchKeyword('default')):
self.lex();
test = null;
else:
self.expectKeyword('case');
test = self.parseExpression();
self.expect(':');
while (self.startIndex < self.length):
if (self.match('}') or self.matchKeyword('default') or self.matchKeyword('case')):
break
statement = self.parseStatementListItem()
consequent.append(statement)
return node.finishSwitchCase(test, consequent)
def parseSwitchStatement(self, node):
self.expectKeyword('switch');
self.expect('(');
discriminant = self.parseExpression();
self.expect(')');
self.expect('{');
cases = [];
if (self.match('}')):
self.lex();
return node.finishSwitchStatement(discriminant, cases);
oldInSwitch = self.state['inSwitch'];
self.state['inSwitch'] = true;
defaultFound = false;
while (self.startIndex < self.length):
if (self.match('}')):
break;
clause = self.parseSwitchCase();
if (clause.test == null):
if (defaultFound):
self.throwError(Messages.MultipleDefaultsInSwitch);
defaultFound = true;
cases.append(clause);
self.state['inSwitch'] = oldInSwitch;
self.expect('}');
return node.finishSwitchStatement(discriminant, cases);
# 12.13 The throw statement
def parseThrowStatement(self, node):
self.expectKeyword('throw');
if (self.hasLineTerminator):
self.throwError(Messages.NewlineAfterThrow);
argument = self.parseExpression();
self.consumeSemicolon();
return node.finishThrowStatement(argument);
# 12.14 The try statement
def parseCatchClause(self):
node = Node();
self.expectKeyword('catch');
self.expect('(');
if (self.match(')')):
self.throwUnexpectedToken(self.lookahead);
param = self.parsePattern();
# 12.14.1
if (self.strict and isRestrictedWord(param.name)):
self.tolerateError(Messages.StrictCatchVariable);
self.expect(')');
body = self.parseBlock();
return node.finishCatchClause(param, body);
def parseTryStatement(self, node):
handler = null
finalizer = null;
self.expectKeyword('try');
block = self.parseBlock();
if (self.matchKeyword('catch')):
handler = self.parseCatchClause()
if (self.matchKeyword('finally')):
self.lex();
finalizer = self.parseBlock();
if (not handler and not finalizer):
self.throwError(Messages.NoCatchOrFinally)
return node.finishTryStatement(block, handler, finalizer)
# 12.15 The debugger statement
def parseDebuggerStatement(self, node):
self.expectKeyword('debugger');
self.consumeSemicolon();
return node.finishDebuggerStatement();
# 12 Statements
def parseStatement(self):
typ = self.lookahead['type']
if (typ == Token.EOF):
self.throwUnexpectedToken(self.lookahead)
if (typ == Token.Punctuator and self.lookahead['value'] == '{'):
return self.parseBlock()
self.isAssignmentTarget = self.isBindingElement = true;
node = Node();
node.comments = self.lookahead.get('comments', [])
val = self.lookahead['value']
if (typ == Token.Punctuator):
if val == ';':
return self.parseEmptyStatement(node);
elif val == '(':
return self.parseExpressionStatement(node);
elif (typ == Token.Keyword):
if val == 'break':
return self.parseBreakStatement(node);
elif val == 'continue':
return self.parseContinueStatement(node);
elif val == 'debugger':
return self.parseDebuggerStatement(node);
elif val == 'do':
return self.parseDoWhileStatement(node);
elif val == 'for':
return self.parseForStatement(node);
elif val == 'function':
return self.parseFunctionDeclaration(node);
elif val == 'if':
return self.parseIfStatement(node);
elif val == 'return':
return self.parseReturnStatement(node);
elif val == 'switch':
return self.parseSwitchStatement(node);
elif val == 'throw':
return self.parseThrowStatement(node);
elif val == 'try':
return self.parseTryStatement(node);
elif val == 'var':
return self.parseVariableStatement(node);
elif val == 'while':
return self.parseWhileStatement(node);
elif val == 'with':
return self.parseWithStatement(node);
expr = self.parseExpression();
# 12.12 Labelled Statements
if ((expr.type == Syntax.Identifier) and self.match(':')):
self.lex();
key = '$' + expr.name
if key in self.state['labelSet']:
self.throwError(Messages.Redeclaration, 'Label', expr.name);
self.state['labelSet'][key] = true
labeledBody = self.parseStatement()
del self.state['labelSet'][key]
return node.finishLabeledStatement(expr, labeledBody)
self.consumeSemicolon();
return node.finishExpressionStatement(expr)
# 13 Function Definition
def parseFunctionSourceElements(self):
body = []
node = Node()
firstRestricted = None
self.expect('{')
while (self.startIndex < self.length):
if (self.lookahead['type'] != Token.StringLiteral):
break
token = self.lookahead;
statement = self.parseStatementListItem()
body.append(statement)
if (statement.expression.type != Syntax.Literal):
# this is not directive
break
directive = self.source[token['start'] + 1: token['end'] - 1]
if (directive == 'use strict'):
self.strict = true;
if (firstRestricted):
self.tolerateUnexpectedToken(firstRestricted, Messages.StrictOctalLiteral);
else:
if (not firstRestricted and token.get('octal')):
firstRestricted = token;
oldLabelSet = self.state['labelSet']
oldInIteration = self.state['inIteration']
oldInSwitch = self.state['inSwitch']
oldInFunctionBody = self.state['inFunctionBody']
oldParenthesisCount = self.state['parenthesizedCount']
self.state['labelSet'] = {}
self.state['inIteration'] = false
self.state['inSwitch'] = false
self.state['inFunctionBody'] = true
self.state['parenthesizedCount'] = 0
while (self.startIndex < self.length):
if (self.match('}')):
break
body.append(self.parseStatementListItem())
self.expect('}')
self.state['labelSet'] = oldLabelSet;
self.state['inIteration'] = oldInIteration;
self.state['inSwitch'] = oldInSwitch;
self.state['inFunctionBody'] = oldInFunctionBody;
self.state['parenthesizedCount'] = oldParenthesisCount;
return node.finishBlockStatement(body)
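    # For illustration: the directive prologue scanned above means that in
    # 'function f() { "use strict"; return 010; }' strict mode is already in
    # effect when the octal literal 010 is scanned, while a '"use strict"' string
    # statement that is not part of the leading run of string literals has no
    # such effect.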
def validateParam(self, options, param, name):
key = '$' + name
if (self.strict):
if (isRestrictedWord(name)):
options['stricted'] = param;
options['message'] = Messages.StrictParamName
if key in options['paramSet']:
options['stricted'] = param;
options['message'] = Messages.StrictParamDupe;
        elif (not options.get('firstRestricted')):
if (isRestrictedWord(name)):
options['firstRestricted'] = param;
options['message'] = Messages.StrictParamName;
elif (isStrictModeReservedWord(name)):
options['firstRestricted'] = param;
options['message'] = Messages.StrictReservedWord;
elif key in options['paramSet']:
options['firstRestricted'] = param
options['message'] = Messages.StrictParamDupe;
options['paramSet'][key] = true
def parseParam(self, options):
token = self.lookahead
de = None
if (token['value'] == '...'):
param = self.parseRestElement();
self.validateParam(options, param.argument, param.argument.name);
options['params'].append(param);
options['defaults'].append(null);
return false
param = self.parsePatternWithDefault();
self.validateParam(options, token, token['value']);
if (param.type == Syntax.AssignmentPattern):
de = param.right;
param = param.left;
options['defaultCount'] += 1
options['params'].append(param);
options['defaults'].append(de)
return not self.match(')')
def parseParams(self, firstRestricted):
options = {
'params': [],
'defaultCount': 0,
'defaults': [],
'firstRestricted': firstRestricted}
self.expect('(');
if (not self.match(')')):
options['paramSet'] = {};
while (self.startIndex < self.length):
if (not self.parseParam(options)):
break
self.expect(',');
self.expect(')');
if (options['defaultCount'] == 0):
options['defaults'] = [];
return {
'params': options['params'],
'defaults': options['defaults'],
'stricted': options.get('stricted'),
'firstRestricted': options.get('firstRestricted'),
'message': options.get('message')}
def parseFunctionDeclaration(self, node, identifierIsOptional=None):
node.comments = self.lookahead.get('comments', [])
d = null
params = []
defaults = []
message = None
firstRestricted = None
self.expectKeyword('function');
if (identifierIsOptional or not self.match('(')):
token = self.lookahead;
d = self.parseVariableIdentifier();
if (self.strict):
if (isRestrictedWord(token['value'])):
self.tolerateUnexpectedToken(token, Messages.StrictFunctionName);
else:
if (isRestrictedWord(token['value'])):
firstRestricted = token;
message = Messages.StrictFunctionName;
elif (isStrictModeReservedWord(token['value'])):
firstRestricted = token;
message = Messages.StrictReservedWord;
tmp = self.parseParams(firstRestricted);
params = tmp['params']
defaults = tmp['defaults']
stricted = tmp.get('stricted')
firstRestricted = tmp['firstRestricted']
if (tmp.get('message')):
message = tmp['message'];
previousStrict = self.strict;
body = self.parseFunctionSourceElements();
if (self.strict and firstRestricted):
self.throwUnexpectedToken(firstRestricted, message);
if (self.strict and stricted):
self.tolerateUnexpectedToken(stricted, message);
self.strict = previousStrict;
return node.finishFunctionDeclaration(d, params, defaults, body);
def parseFunctionExpression(self):
id = null
params = []
defaults = []
node = Node();
node.comments = self.lookahead.get('comments', [])
firstRestricted = None
message = None
self.expectKeyword('function');
if (not self.match('(')):
token = self.lookahead;
id = self.parseVariableIdentifier();
if (self.strict):
if (isRestrictedWord(token['value'])):
self.tolerateUnexpectedToken(token, Messages.StrictFunctionName);
else:
if (isRestrictedWord(token['value'])):
firstRestricted = token;
message = Messages.StrictFunctionName;
elif (isStrictModeReservedWord(token['value'])):
firstRestricted = token;
message = Messages.StrictReservedWord;
tmp = self.parseParams(firstRestricted);
params = tmp['params']
defaults = tmp['defaults']
stricted = tmp.get('stricted')
firstRestricted = tmp['firstRestricted']
if (tmp.get('message')):
message = tmp['message']
previousStrict = self.strict;
body = self.parseFunctionSourceElements();
if (self.strict and firstRestricted):
self.throwUnexpectedToken(firstRestricted, message);
if (self.strict and stricted):
self.tolerateUnexpectedToken(stricted, message);
self.strict = previousStrict;
return node.finishFunctionExpression(id, params, defaults, body);
# todo Translate parse class functions!
def parseClassExpression(self):
raise NotImplementedError()
def parseClassDeclaration(self):
raise NotImplementedError()
# 14 Program
def parseScriptBody(self):
body = []
firstRestricted = None
while (self.startIndex < self.length):
token = self.lookahead;
if (token['type'] != Token.StringLiteral):
break
statement = self.parseStatementListItem();
body.append(statement);
if (statement.expression.type != Syntax.Literal):
# this is not directive
break
directive = self.source[token['start'] + 1: token['end'] - 1]
if (directive == 'use strict'):
self.strict = true;
if (firstRestricted):
self.tolerateUnexpectedToken(firstRestricted, Messages.StrictOctalLiteral)
else:
if (not firstRestricted and token.get('octal')):
firstRestricted = token;
while (self.startIndex < self.length):
statement = self.parseStatementListItem();
# istanbul ignore if
if (statement is None):
break
body.append(statement);
return body;
def parseProgram(self):
self.peek()
node = Node()
body = self.parseScriptBody()
return node.finishProgram(body)
# DONE!!!
def parse(self, code, options={}):
if options:
raise NotImplementedError('Options not implemented! You can only use default settings.')
self.clean()
self.source = unicode(code) + ' \n ; //END' # I have to add it in order not to check for EOF every time
self.index = 0
self.lineNumber = 1 if len(self.source) > 0 else 0
self.lineStart = 0
self.startIndex = self.index
self.startLineNumber = self.lineNumber;
self.startLineStart = self.lineStart;
self.length = len(self.source)
self.lookahead = null;
self.state = {
'allowIn': true,
'labelSet': {},
'inFunctionBody': false,
'inIteration': false,
'inSwitch': false,
'lastCommentStart': -1,
'curlyStack': [],
'parenthesizedCount': None}
self.sourceType = 'script';
self.strict = false;
program = self.parseProgram();
return node_to_dict(program)
def parse(javascript_code):
"""Returns syntax tree of javascript_code.
Same as PyJsParser().parse For your convenience :) """
p = PyJsParser()
return p.parse(javascript_code)
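# For illustration (abridged, and assuming the usual esprima-style output of this
# module): parse('var x = 1') returns a plain dictionary roughly of the form
#     {'type': 'Program', 'body': [
#         {'type': 'VariableDeclaration', 'kind': 'var', 'declarations': [
#             {'type': 'VariableDeclarator',
#              'id': {'type': 'Identifier', 'name': 'x'},
#              'init': {'type': 'Literal', 'value': 1}}]}]}
# plus position information, since node_to_dict converts the Node tree to dicts.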
if __name__ == '__main__':
import time
test_path = None
if test_path:
f = open(test_path, 'rb')
x = f.read()
f.close()
else:
x = 'var $ = "Hello!"'
p = PyJsParser()
t = time.time()
res = p.parse(x)
dt = time.time() - t + 0.000000001
if test_path:
print(len(res))
else:
pprint(res)
print()
    print('Parsed everything in', round(dt, 5), 'seconds.')
    print("That's %d characters per second" % int(len(x) / dt))
| alfa-jor/addon | plugin.video.alfa/lib/pyjsparser/parser.py | Python | gpl-3.0 | 107,162 | [
"VisIt"
] | 571df1243611750985ecadba2cfda047ff5a43f176422ecb95f946b9ceb207be |