| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
#!/usr/bin/env vpython
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities for checking for disallowed usage of non-Blink declarations.
The scanner assumes that usage of non-Blink code is always namespace qualified.
Identifiers in the global namespace are always ignored. For convenience, the
script can be run in standalone mode to check for existing violations.
Example command:
$ git ls-files third_party/blink \
| python third_party/blink/tools/blinkpy/presubmit/audit_non_blink_usage.py
"""
from __future__ import print_function
import os
import re
import sys
_DISALLOW_NON_BLINK_MOJOM = (
    # network::mojom::Foo is allowed as a non-Blink mojom type.
'(|::)(?!network::)(\w+::)?mojom::(?!blink).+',
    'Avoid using non-Blink mojom types; consider using "::mojom::blink::Foo" '
    'instead of "::mojom::Foo" unless you have clear reasons not to do so.',
'Warning')
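# For example, 'device::mojom::SensorProvider' matches the pattern above and is
# flagged, while 'mojom::blink::Foo' and 'network::mojom::URLLoader' are both
# accepted. Each _CONFIG entry below maps a list of path prefixes to the
# identifier patterns that are allowed or disallowed for files under those
# paths; longer (more specific) path prefixes take precedence.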
_CONFIG = [
{
'paths': ['third_party/blink/renderer/'],
'allowed': [
# TODO(dcheng): Should these be in a more specific config?
'gfx::ColorSpace',
'gfx::CubicBezier',
'gfx::ICCProfile',
'gfx::RadToDeg',
# absl optional constructs.
'absl::optional',
'absl::in_place',
# //base constructs that are allowed everywhere
'base::AdoptRef',
'base::ApplyMetadataToPastSamples',
'base::AutoReset',
'base::Contains',
'base::CreateSequencedTaskRunner',
'base::Days',
'base::DefaultTickClock',
'base::ElapsedTimer',
'base::JobDelegate',
'base::JobHandle',
'base::PostJob',
'base::File',
'base::FileErrorOr',
'base::FilePath',
'base::GetUniqueIdForProcess',
'base::GUID',
'base::HexStringToUInt64',
'base::Hours',
"base::i18n::TextDirection",
"base::i18n::ToChar16Ptr",
"base::i18n::ToUCharPtr",
'base::Location',
'base::MakeRefCounted',
'base::Microseconds',
'base::Milliseconds',
'base::Minutes',
'base::Nanoseconds',
'base::OptionalFromPtr',
'base::OptionalOrNullptr',
'base::PlatformThread',
'base::PlatformThreadId',
'base::RefCountedData',
'base::RunLoop',
'base::ReadOnlySharedMemoryMapping',
'base::ReadOnlySharedMemoryRegion',
'base::RemoveChars',
'base::RepeatingTimer',
'base::Seconds',
'base::SequencedTaskRunner',
'base::SingleThreadTaskRunner',
'base::ScopedAllowBlocking',
'base::ScopedFD',
'base::ScopedClosureRunner',
'base::StringPiece',
'base::SupportsWeakPtr',
'base::SysInfo',
'base::ThreadChecker',
'base::ThreadTicks',
'base::TickClock',
'base::Time',
'base::TimeDelta',
'base::TimeTicks',
'base::trace_event::.*',
'base::Token',
'base::UnguessableToken',
'base::UnguessableTokenHash',
'base::UnsafeSharedMemoryRegion',
'base::WeakPtr',
'base::WeakPtrFactory',
'base::WrapRefCounted',
'base::WritableSharedMemoryMapping',
'base::as_bytes',
'base::in_place',
'absl::make_optional',
'base::make_span',
'absl::nullopt',
'absl::nullopt_t',
'base::ranges::.+',
'base::sequence_manager::TaskTimeObserver',
'base::size',
'base::span',
'logging::GetVlogLevel',
'logging::SetLogItems',
'base::PassKey',
# //base/allocator/partition_allocator/partition_alloc_constants.h
'base::kAlignment',
# //base/task/bind_post_task.h
'base::BindPostTask',
# //base/bits.h
'base::bits::.+',
# //base/observer_list.h.
'base::ObserverList',
'base::CheckedObserver',
# //base/callback_helpers.h.
'base::DoNothing',
'base::SplitOnceCallback',
# //base/callback.h is allowed, but you need to use WTF::Bind or
# WTF::BindRepeating to create callbacks in Blink.
'base::BarrierClosure',
'base::NullCallback',
'base::OnceCallback',
'base::OnceClosure',
'base::RepeatingCallback',
'base::RepeatingClosure',
# //base/cancelable_callback.h
'base::CancelableOnceCallback',
'base::CancelableOnceClosure',
'base::CancelableRepeatingCallback',
'base::CancelableRepeatingClosure',
# //base/mac/scoped_nsobject.h
'base::scoped_nsobject',
# //base/memory/scoped_policy.h
'base::scoped_policy::RETAIN',
# //base/memory/ptr_util.h.
'base::WrapUnique',
# //base/allocator/partition_allocator/oom_callback.h.
'base::SetPartitionAllocOomCallback',
# //base/containers/adapters.h
'base::Reversed',
# //base/metrics/histogram_functions.h
'base::UmaHistogram.+',
# //base/metrics/histogram.h
'base::Histogram',
'base::HistogramBase',
'base::LinearHistogram',
# //base/metrics/field_trial_params.h.
'base::GetFieldTrialParamValueByFeature',
'base::GetFieldTrialParamByFeatureAsBool',
'base::GetFieldTrialParamByFeatureAsDouble',
'base::GetFieldTrialParamByFeatureAsInt',
# //base/numerics/safe_conversions.h.
'base::as_signed',
'base::as_unsigned',
'base::checked_cast',
'base::saturated_cast',
'base::strict_cast',
'base::ClampCeil',
'base::ClampFloor',
'base::IsTypeInRangeForNumericType',
'base::IsValueInRangeForNumericType',
'base::IsValueNegative',
'base::MakeStrictNum',
'base::ClampRound',
'base::SafeUnsignedAbs',
'base::StrictNumeric',
# //base/strings/char_traits.h.
'base::CharTraits',
# //base/synchronization/waitable_event.h.
'base::WaitableEvent',
# //base/numerics/checked_math.h.
'base::CheckedNumeric',
'base::IsValidForType',
'base::ValueOrDieForType',
'base::ValueOrDefaultForType',
'base::MakeCheckedNum',
'base::CheckMax',
'base::CheckMin',
'base::CheckAdd',
'base::CheckSub',
'base::CheckMul',
'base::CheckDiv',
'base::CheckMod',
'base::CheckLsh',
'base::CheckRsh',
'base::CheckAnd',
'base::CheckOr',
'base::CheckXor',
# //base/numerics/clamped_math.h.
'base::ClampAdd',
'base::ClampedNumeric',
'base::ClampMax',
'base::ClampSub',
'base::MakeClampedNum',
# //base/cxx17_backports.h.
"base::clamp",
# //base/strings/strcat.h.
'base::StrCat',
# //base/template_util.h.
'base::void_t',
'base::remove_cvref_t',
# Debugging helpers from //base/debug are allowed everywhere.
'base::debug::.+',
# Base atomic utilities
'base::AtomicFlag',
'base::AtomicSequenceNumber',
# Task traits
'base::TaskTraits',
'base::MayBlock',
'base::TaskPriority',
'base::TaskShutdownBehavior',
'base::WithBaseSyncPrimitives',
'base::ThreadPolicy',
'base::ThreadPool',
# Byte order
'base::ByteSwap',
'base::ReadBigEndian',
'base::NetToHost(16|32|64)',
'base::HostToNet(16|32|64)',
# (Cryptographic) random number generation
'base::RandUint64',
'base::RandInt',
'base::RandGenerator',
'base::RandDouble',
'base::RandBytes',
'base::RandBytesAsString',
# Feature list checking.
'base::Feature.*',
'base::FEATURE_.+',
"base::GetFieldTrial.*",
'features::.+',
# PartitionAlloc
'base::PartitionFree',
'base::PartitionAllocZeroFill',
'base::PartitionAllocReturnNull',
# For TaskObserver.
'base::PendingTask',
# Time
'base::Clock',
'base::DefaultClock',
'base::DefaultTickClock',
'base::TestMockTimeTaskRunner',
'base::TickClock',
# cc painting types.
'cc::PaintCanvas',
'cc::PaintFlags',
'cc::PaintImage',
'cc::PaintImageBuilder',
'cc::PaintRecord',
'cc::RecordPaintCanvas',
'cc::PaintShader',
'cc::PaintWorkletInput',
'cc::NodeId',
'cc::NodeInfo',
'cc::UsePaintCache',
# Chromium geometry types.
'gfx::Insets',
'gfx::InsetsF',
'gfx::Point',
'gfx::PointF',
'gfx::Point3F',
'gfx::QuadF',
'gfx::Quaternion',
'gfx::Rect',
'gfx::RectF',
'gfx::RRectF',
'gfx::Size',
'gfx::SizeF',
'gfx::Transform',
'gfx::Vector2d',
'gfx::Vector2dF',
# Chromium geometry operations.
'cc::MathUtil',
'gfx::BoundingRect',
'gfx::ComputeApproximateMaxScale',
'gfx::Determinant',
'gfx::IntersectRects',
'gfx::MapRect',
'gfx::PointAtOffsetFromOrigin',
'gfx::PointFToSkPoint',
'gfx::PointToSkIPoint',
'gfx::MaximumCoveredRect',
'gfx::RectFToSkRect',
'gfx::RectToSkIRect',
'gfx::RectToSkRect',
'gfx::ScalePoint',
'gfx::ScaleToCeiledSize',
'gfx::ScaleToEnclosingRect',
'gfx::ScaleToFlooredSize',
'gfx::ScaleToRoundedRect',
'gfx::ScaleToRoundedSize',
'gfx::ScaleSize',
'gfx::ScaleToRoundedPoint',
'gfx::ScaleVector2d',
'gfx::SizeFToSkSize',
'gfx::SizeToSkISize',
'gfx::SkIPointToPoint',
'gfx::SkIRectToRect',
'gfx::SkISizeToSize',
'gfx::SkPointToPointF',
'gfx::SkRectToRectF',
'gfx::SkSizeToSizeF',
'gfx::SubtractRects',
'gfx::ToCeiledPoint',
'gfx::ToCeiledSize',
'gfx::ToCeiledVector2d',
'gfx::ToEnclosedRect',
'gfx::ToEnclosingRect',
'gfx::ToFlooredPoint',
'gfx::ToFlooredSize',
'gfx::ToFlooredVector2d',
'gfx::ToRoundedPoint',
'gfx::ToRoundedRect',
'gfx::ToRoundedSize',
'gfx::ToRoundedVector2d',
'gfx::TransposePoint',
'gfx::TransposeRect',
'gfx::TransposeSize',
'gfx::TryComputeTransform2dScaleComponents',
'gfx::UnionRects',
# Range type.
'gfx::Range',
# Wrapper of SkRegion used in Chromium.
'cc::Region',
            # A geometric set of TouchActions associated with areas; it only
            # depends on the geometry types above.
'cc::TouchActionRegion',
# Selection bounds.
'cc::LayerSelection',
'cc::LayerSelectionBound',
'gfx::SelectionBound',
# cc::Layers.
'cc::Layer',
'cc::LayerClient',
'cc::LayerTreeDebugState',
'cc::LayerTreeHost',
'cc::PictureLayer',
'cc::SurfaceLayer',
# cc::Layer helper data structs.
'cc::BrowserControlsParams',
'cc::ElementId',
'cc::LayerPositionConstraint',
'cc::OverscrollBehavior',
'cc::Scrollbar',
'cc::ScrollbarLayerBase',
'cc::ScrollbarOrientation',
'cc::ScrollbarPart',
'cc::StickyPositionConstraint',
'cc::StickyPositionNodeData',
'cc::ViewportLayers',
# cc::Layer helper enums.
'cc::HORIZONTAL',
'cc::VERTICAL',
'cc::THUMB',
'cc::TRACK_BUTTONS_TICKMARKS',
'cc::BrowserControlsState',
'cc::EventListenerClass',
'cc::EventListenerProperties',
# Animation
'cc::AnimationHost',
# UMA Enums
'cc::PaintHoldingCommitTrigger',
'cc::PaintHoldingReason',
# Scrolling
'cc::kManipulationInfoPinchZoom',
'cc::kManipulationInfoPrecisionTouchPad',
'cc::kManipulationInfoTouch',
'cc::kManipulationInfoWheel',
'cc::kManipulationInfoScrollbar',
'cc::kManipulationInfoNone',
'cc::kPixelsPerLineStep',
'cc::kMinFractionToStepWhenPaging',
'cc::kPercentDeltaForDirectionalScroll',
'cc::MainThreadScrollingReason',
'cc::ManipulationInfo',
'cc::ScrollSnapAlign',
'cc::ScrollSnapType',
'cc::ScrollOffsetAnimationCurve',
'cc::ScrollStateData',
'cc::ScrollUtils',
'cc::SnapAlignment',
'cc::SnapAreaData',
'cc::SnapAxis',
'cc::SnapContainerData',
'cc::SnapFlingClient',
'cc::SnapFlingController',
'cc::SnapSelectionStrategy',
'cc::SnapStrictness',
'cc::TargetSnapAreaElementIds',
'ui::ScrollGranularity',
# Document transitions
'cc::DocumentTransitionRequest',
'cc::SharedElementLayer',
'viz::SharedElementResourceId',
# base/types/strong_alias.h
'base::StrongAlias',
# Common display structs across display <-> Blink.
'display::ScreenInfo',
'display::ScreenInfos',
# Standalone utility libraries that only depend on //base
'skia::.+',
'url::.+',
# Power scheduling instrumentation, which only depends on //base
"power_scheduler::.+",
# Nested namespaces under the blink namespace
'bindings::.+',
'canvas_heuristic_parameters::.+',
'compositor_target_property::.+',
'cors::.+',
'css_parsing_utils::.+',
'cssvalue::.+',
'encoding::.+',
'encoding_enum::.+',
'event_handling_util::.+',
'event_util::.+',
'file_error::.+',
'geometry_util::.+',
'inspector_\\w+_event::.+',
'inspector_async_task::.+',
'inspector_set_layer_tree_id::.+',
'inspector_tracing_started_in_frame::.+',
'keywords::.+',
'layered_api::.+',
'layout_invalidation_reason::.+',
'media_constraints_impl::.+',
'media_element_parser_helpers::.+',
'mobile_metrics_test_helpers::.+',
'file_system_access_error::.+',
'network_utils::.+',
'origin_trials::.+',
'paint_filter_builder::.+',
'root_scroller_util::.+',
'scheduler::.+',
'scroll_customization::.+',
'scroll_timeline_util::.+',
'style_change_extra_data::.+',
'style_change_reason::.+',
'svg_path_parser::.+',
'touch_action_util::.+',
'trace_event::.+',
'unicode::.+',
'vector_math::.+',
'web_core_test_support::.+',
'worker_pool::.+',
'xpath::.+',
'[a-z_]+_names::.+',
# Third-party libraries that don't depend on non-Blink Chrome code
# are OK.
'icu::.+',
'perfetto::.+', # tracing
'testing::.+', # googlemock / googletest
'v8::.+',
'v8_inspector::.+',
'inspector_protocol_encoding::.+',
# Inspector instrumentation and protocol
'probe::.+',
'protocol::.+',
# Blink code shouldn't need to be qualified with the Blink namespace,
# but there are exceptions.
'blink::.+',
# Assume that identifiers where the first qualifier is internal are
# nested in the blink namespace.
'internal::.+',
# HTTP structured headers
'net::structured_headers::.+',
# CanonicalCookie and related headers
'net::CanonicalCookie',
'net::CookiePriority',
'net::CookieSameSite',
'net::CookieSourceScheme',
# HTTP status codes
'net::HTTP_.+',
# For ConnectionInfo enumeration
'net::HttpResponseInfo',
# Network service.
'network::.+',
# Used in network service types.
'net::SiteForCookies',
# Some test helpers live in the blink::test namespace.
'test::.+',
# Some test helpers that live in the blink::frame_test_helpers
# namespace.
'frame_test_helpers::.+',
# Blink uses Mojo, so it needs mojo::Receiver, mojo::Remote, et
# cetera, as well as generated Mojo bindings.
# Note that the Mojo callback helpers are explicitly forbidden:
# Blink already has a signal for contexts being destroyed, and
# other types of failures should be explicitly signalled.
'mojo::(?!WrapCallback).+',
'mojo_base::BigBuffer.*',
'(?:.+::)?mojom::.+',
'service_manager::InterfaceProvider',
# STL containers such as std::string and std::vector are discouraged
# but still needed for interop with WebKit/common. Note that other
# STL types such as std::unique_ptr are encouraged.
'std::.+',
# UI Cursor
'ui::Cursor',
# UI Pointer and Hover
'ui::PointerType',
'ui::POINTER_TYPE_.*',
'ui::HoverType',
'ui::HOVER_TYPE_.*',
# UI Keyconverter
'ui::DomCode',
'ui::DomKey',
'ui::KeycodeConverter',
# Accessibility base types and the non-Blink enums they
# depend on.
'ui::AXActionData',
'ui::AXEvent',
'ui::AXEventIntent',
'ui::AXMode',
'ui::AXNodeData',
'ui::AXTreeID',
'ui::kAXModeBasic',
'ui::kAXModeComplete',
'ax::mojom::BoolAttribute',
'ax::mojom::HasPopup',
'ax::mojom::State',
'ax::mojom::Restriction',
# Accessibility helper functions - mostly used in Blink for
# serialization. Please keep alphabetized.
'ui::CanHaveInlineTextBoxChildren',
'ui::IsCellOrTableHeader',
'ui::IsClickable',
'ui::IsComboBox',
'ui::IsContainerWithSelectableChildren',
'ui::IsDialog',
'ui::IsHeading',
'ui::IsLandmark',
'ui::IsPlatformDocument',
'ui::IsPresentational',
'ui::IsSelectRequiredOrImplicit',
'ui::IsStructure',
'ui::IsTableLike',
'ui::IsTableRow',
'ui::IsTableHeader',
# Blink uses UKM for logging e.g. always-on leak detection (crbug/757374)
'ukm::.+',
# Permit using crash keys inside Blink without jumping through
# hoops.
'crash_reporter::.*CrashKey.*',
# Useful for platform-specific code.
'base::mac::(CFToNSCast|NSToCFCast)',
'base::mac::Is(AtMost|AtLeast)?OS.+',
'base::(scoped_nsobject|ScopedCFTypeRef)',
# absl::variant and getters:
'absl::get',
'absl::get_if',
'absl::holds_alternative',
'absl::variant',
'absl::visit',
],
'disallowed': [
('base::Bind(|Once|Repeating)',
'Use WTF::Bind or WTF::BindRepeating.'),
('std::(deque|map|multimap|set|vector|unordered_set|unordered_map)',
'Use WTF containers like WTF::Deque, WTF::HashMap, WTF::HashSet or WTF::Vector instead of the banned std containers. '
'However, it is fine to use std containers at the boundary layer between Blink and Chromium. '
'If you are in this case, you can use --bypass-hooks option to avoid the presubmit check when uploading your CL.'
),
_DISALLOW_NON_BLINK_MOJOM,
],
},
{
'paths': ['third_party/blink/renderer/bindings/'],
'allowed': ['gin::.+'],
},
{
'paths':
['third_party/blink/renderer/bindings/core/v8/script_streamer.cc'],
'allowed': [
# For the script streaming to be able to block when reading from a
# mojo datapipe.
'base::ScopedAllowBaseSyncPrimitives',
'base::ScopedBlockingCall',
'base::BlockingType',
],
},
{
'paths': [
'third_party/blink/renderer/bindings/core/v8/v8_gc_for_context_dispose.cc'
],
'allowed': [
# For memory reduction histogram.
'base::ProcessMetrics',
],
},
{
'paths':
['third_party/blink/renderer/controller/oom_intervention_impl.cc'],
'allowed': [
'base::BindOnce',
],
},
{
'paths': [
'third_party/blink/renderer/controller/user_level_memory_pressure_signal_generator.cc'
],
'allowed': [
'base::MemoryPressureListener',
],
},
{
'paths': ['third_party/blink/renderer/core/animation'],
'allowed': [
'[a-z_]+_functions::.+',
],
},
{
'paths': [
'third_party/blink/renderer/core/animation_frame',
'third_party/blink/renderer/core/offscreencanvas',
'third_party/blink/renderer/core/html/canvas'
],
'allowed': [
'viz::BeginFrameArgs',
],
},
{
'paths': ['third_party/blink/renderer/core/offscreencanvas'],
'allowed': [
# Flags to be used to set up sharedImage
'gpu::SHARED_IMAGE_USAGE_DISPLAY',
'gpu::SHARED_IMAGE_USAGE_SCANOUT',
],
},
{
'paths': [
'third_party/blink/renderer/core/html/canvas/canvas_rendering_context_host.cc'
],
'allowed': [
'gpu::SHARED_IMAGE_USAGE_DISPLAY',
'gpu::SHARED_IMAGE_USAGE_SCANOUT',
'gpu::SHARED_IMAGE_USAGE_CONCURRENT_READ_WRITE',
],
},
{
'paths': [
'third_party/blink/renderer/core/exported',
'third_party/blink/renderer/core/frame',
],
'allowed': [
'cc::ActiveFrameSequenceTrackers',
'cc::ApplyViewportChangesArgs',
'cc::LayerTreeSettings',
'cc::PaintBenchmarkResult',
'cc::RenderFrameMetadata',
'cc::TaskGraphRunner',
'cc::ContentLayerClient',
'cc::DeadlinePolicy',
'cc::DisplayItemList',
'cc::DrawColorOp',
'cc::DrawImageOp',
'cc::SaveOp',
'cc::ScaleOp',
'cc::RestoreOp',
'cc::TranslateOp',
'gfx::DisplayColorSpaces',
'gfx::FontRenderParams',
'ui::ImeTextSpan',
'viz::FrameSinkId',
'viz::LocalSurfaceId',
'viz::SurfaceId',
],
},
{
'paths': ['third_party/blink/renderer/core/clipboard'],
'allowed': ['net::EscapeForHTML'],
},
{
'paths': ['third_party/blink/renderer/core/css'],
'allowed': [
# Internal implementation details for CSS.
'css_property_parser_helpers::.+',
'detail::.+',
],
},
{
'paths': ['third_party/blink/renderer/core/css/media_values.cc'],
'allowed': [
'color_space_utilities::GetColorSpaceGamut',
],
},
{
'paths': ['third_party/blink/renderer/core/editing/ime'],
'allowed': [
'ui::ImeTextSpan',
'ui::TextInputAction',
],
},
{
'paths': [
'third_party/blink/renderer/core/editing/commands/undo_stack.cc',
'third_party/blink/renderer/core/editing/commands/undo_stack.h'
],
'allowed': [
'base::MemoryPressureListener',
],
},
{
'paths': [
'third_party/blink/renderer/core/fetch/data_consumer_handle_test_util.cc'
],
'allowed': [
# The existing code already contains gin::IsolateHolder.
'gin::IsolateHolder',
],
},
{
'paths': ['third_party/blink/renderer/core/frame/visual_viewport.cc'],
'allowed': [
'cc::SolidColorScrollbarLayer',
],
},
{
'paths':
['third_party/blink/renderer/core/frame/web_frame_widget_impl.cc'],
'allowed': [
'cc::CompositorCommitData',
'cc::InputHandlerScrollResult',
'cc::SwapPromise',
'viz::CompositorFrameMetadata',
],
},
{
'paths':
['third_party/blink/renderer/core/frame/web_frame_widget_impl.h'],
'allowed': [
'cc::CompositorCommitData',
],
},
{
'paths':
['third_party/blink/renderer/core/frame/web_local_frame_impl.cc'],
'allowed': [
'ui::AXTreeID',
'ui::AXTreeIDUnknown',
],
},
{
'paths':
['third_party/blink/renderer/core/frame/web_local_frame_impl.h'],
'allowed': [
'ui::AXTreeID',
],
},
{
'paths': [
'third_party/blink/renderer/core/fileapi/file_reader_loader.cc',
'third_party/blink/renderer/modules/file_system_access/file_system_underlying_sink.cc'
],
'allowed': [
'net::ERR_.+',
'net::OK',
],
},
{
'paths': ['third_party/blink/renderer/core/html/forms'],
'allowed': [
'ui::TextInputType',
],
},
{
'paths': [
'third_party/blink/renderer/core/loader/alternate_signed_exchange_resource_info.cc'
],
'allowed': [
# Used by WebPackageRequestMatcher in //third_party/blink/common.
'net::HttpRequestHeaders',
],
},
{
'paths': ['third_party/blink/renderer/core/paint'],
'allowed': [
# cc painting types.
'cc::ContentLayerClient',
'cc::DisplayItemList',
'cc::DrawRecordOp',
# blink paint tree debugging namespace
'paint_property_tree_printer::UpdateDebugNames',
],
},
{
'paths': ['third_party/blink/renderer/core/fragment_directive'],
'allowed': [
'cc::ScrollbarLayerBase',
'shared_highlighting::.+',
],
},
{
'paths': ['third_party/blink/renderer/core/page'],
'allowed': [
'touch_adjustment::.+',
'viz::FrameSinkId',
],
},
{
'paths': ['third_party/blink/renderer/core/style/computed_style.h'],
'allowed': [
'css_longhand::.+',
],
},
{
'paths': [
'third_party/blink/renderer/core/inspector/inspector_memory_agent.cc'
],
'allowed': [
'base::ModuleCache',
'base::PoissonAllocationSampler',
'base::SamplingHeapProfiler',
],
},
{
'paths': [
'third_party/blink/renderer/core/inspector/inspector_overlay_agent.cc'
],
'allowed': [
# cc painting types.
'cc::ContentLayerClient',
'cc::DisplayItemList',
'cc::DrawRecordOp',
],
},
{
'paths': [
'third_party/blink/renderer/core/inspector/inspector_contrast.cc',
'third_party/blink/renderer/core/inspector/inspector_contrast.h'
],
'allowed': [
'color_utils::GetContrastRatio',
'cc::RTree',
],
},
{
'paths':
['third_party/blink/renderer/core/inspector/locale_controller.cc'],
'allowed': [
'base::i18n::SetICUDefaultLocale',
],
},
{
'paths': ['third_party/blink/renderer/core/inspector'],
'allowed': [
# Devtools binary protocol uses std::vector<uint8_t> for serialized
# objects.
'std::vector',
# [C]h[R]ome [D]ev[T]ools [P]rotocol implementation support library
# (see third_party/inspector_protocol/crdtp).
'crdtp::.+',
# DevTools manages certificates from the net stack.
'net::X509Certificate',
'net::x509_util::CryptoBufferAsSpan',
],
},
{
'paths': [
'third_party/blink/renderer/core/inspector',
'third_party/blink/renderer/controller/dev_tools_frontend_impl.h',
'third_party/blink/renderer/controller/dev_tools_frontend_impl.cc',
'third_party/blink/renderer/modules/filesystem/dev_tools_host_file_system.cc'
],
'allowed': [
# Commands from the DevTools window are parsed from a JSON string in
# the devtools renderer and sent on as base::Value.
'base::Value',
],
},
{
'paths':
['third_party/blink/renderer/core/inspector/dev_tools_host.cc'],
'allowed': [
# Commands from the DevTools window are parsed from a JSON string in
# the devtools renderer and sent on as base::Value.
'base::JSONReader',
],
},
{
'paths': [
'third_party/blink/renderer/core/inspector/inspector_performance_agent.cc'
],
'allowed': [
'base::subtle::TimeTicksNowIgnoringOverride',
],
},
{
'paths': [
'third_party/blink/renderer/core/inspector/inspector_network_agent.cc'
],
'allowed': [
'base::flat_set',
'base::HexEncode',
'net::ct::.+',
'net::IPAddress',
'net::SourceStream',
'net::SSL.+',
],
},
{
'paths': ['third_party/blink/renderer/core/workers/worker_thread.cc'],
'allowed': [
'base::ScopedAllowBaseSyncPrimitives',
],
},
{
'paths': [
'third_party/blink/renderer/bindings/core/v8/v8_code_cache.cc',
'third_party/blink/renderer/bindings/core/v8/v8_code_cache.h',
'third_party/blink/renderer/core/loader/document_loader.cc',
'third_party/blink/renderer/core/loader/document_loader.h',
'third_party/blink/renderer/core/workers/worklet_global_scope.h',
'third_party/blink/renderer/core/workers/worklet_global_scope.cc',
'third_party/blink/renderer/core/workers/worker_global_scope.cc',
'third_party/blink/renderer/core/workers/worker_global_scope.h',
'third_party/blink/renderer/core/workers/worker_or_worklet_global_scope.h',
'third_party/blink/renderer/core/execution_context/execution_context.h',
'third_party/blink/renderer/core/execution_context/execution_context.cc',
'third_party/blink/renderer/modules/service_worker/service_worker_script_cached_metadata_handler.h',
'third_party/blink/renderer/modules/service_worker/service_worker_script_cached_metadata_handler.cc',
'third_party/blink/renderer/bindings/core/v8/v8_wasm_response_extensions.cc',
],
'allowed': [
# TODO(mythria): Allow use of non-blink mojo interface. Once
# //content/renderer/loader is moved to Blink as a part of onion
# soup we can update all uses to blink::mojom::blink::CodeCacheHost.
'blink::mojom::CodeCacheHost',
],
},
{
'paths': ['third_party/blink/renderer/core/xml'],
'allowed': [
'xpathyy::.+',
],
},
{
'paths': [
'third_party/blink/renderer/modules/device_orientation/',
'third_party/blink/renderer/modules/gamepad/',
'third_party/blink/renderer/modules/sensor/',
'third_party/blink/renderer/modules/xr/',
],
'allowed': [
'base::subtle::Atomic32',
'device::.+',
],
},
{
'paths': [
'third_party/blink/renderer/core/html/media/',
'third_party/blink/renderer/modules/canvas/',
'third_party/blink/renderer/modules/vr/',
'third_party/blink/renderer/modules/webgl/',
'third_party/blink/renderer/modules/webgpu/',
'third_party/blink/renderer/modules/xr/',
],
# The modules listed above need access to the following GL drawing and
# display-related types.
'allowed': [
'base::LRUCache',
'gl::GpuPreference',
'gpu::SHARED_IMAGE_USAGE_.+',
'gpu::gles2::GLES2Interface',
'gpu::raster::RasterInterface',
'gpu::Mailbox',
'gpu::MailboxHolder',
'gpu::SyncToken',
'gpu::webgpu::ReservedTexture',
'display::Display',
'media::IsOpaque',
'media::kNoTransformation',
'media::PaintCanvasVideoRenderer',
'media::PIXEL_FORMAT_Y16',
'media::VideoFrame',
'viz::RasterContextProvider',
'viz::ReleaseCallback',
'viz::ResourceFormat',
'viz::ResourceFormatToClosestSkColorType',
'viz::TransferableResource',
],
},
{
'paths': [
'third_party/blink/renderer/modules/webgl/webgl_rendering_context_base.cc',
],
# This class needs access to a GPU driver bug workaround entry.
'allowed': [
'gpu::ENABLE_WEBGL_TIMER_QUERY_EXTENSIONS',
],
},
{
'paths': [
'third_party/blink/renderer/core/html/media/',
],
# This module needs access to the following for media's base::Feature
# list.
'allowed': [
'media::.+',
]
},
{
'paths': [
'third_party/blink/renderer/core/timing/background_tracing_helper.cc',
'third_party/blink/renderer/core/timing/background_tracing_helper.h',
],
'allowed': [
'base::MD5Digest',
'base::MD5Sum',
'base::StringPiece',
]
},
{
'paths': [
'third_party/blink/renderer/modules/breakout_box/',
],
'allowed': [
'media::.+',
# Some media APIs require std::vector.
"std::vector",
]
},
{
'paths': [
'third_party/blink/renderer/modules/mediasource/',
],
'allowed': [
'base::CommandLine',
'media::.+',
'switches::kLacrosEnablePlatformEncryptedHevc',
]
},
{
'paths': [
'third_party/blink/renderer/modules/encryptedmedia/',
'third_party/blink/renderer/modules/media/',
'third_party/blink/renderer/modules/media_capabilities/',
'third_party/blink/renderer/modules/video_rvfc/',
],
'allowed': [
'media::.+',
'media_capabilities_identifiability_metrics::.+',
]
},
{
'paths': [
'third_party/blink/renderer/modules/media/audio/',
],
'allowed': [
# TODO(https://crbug.com/787252): Remove most of the entries below,
# once the directory is fully Onion soup'ed.
'base::Bind.*',
'base::Unretained',
'base::NoDestructor',
'base::flat_map',
'base::AutoLock',
'base::Lock',
'base::EraseIf',
'base::ScopedPlatformFile',
'mojo::WrapCallbackWithDefaultInvokeIfNotRun',
# TODO(https://crrev.com/787252): Consider allowlisting fidl::*
# usage more broadly in Blink.
'fidl::InterfaceHandle',
]
},
{
'paths': [
'third_party/blink/renderer/modules/imagecapture/',
],
'allowed': [
'cc::SkiaPaintCanvas',
'libyuv::.+',
'media::.+',
]
},
{
'paths': [
'third_party/blink/renderer/modules/media_capabilities/media_capabilities_fuzzer.cc',
],
'allowed': [
'mc_fuzzer::.+',
'google::protobuf::RepeatedField',
]
},
{
'paths': [
'third_party/blink/renderer/modules/mediacapturefromelement/',
],
'allowed': [
'gpu::MailboxHolder',
'media::.+',
'libyuv::.+',
'viz::SkColorTypeToResourceFormat',
]
},
{
'paths': [
'third_party/blink/renderer/modules/mediarecorder/',
],
'allowed': [
'base::data',
# TODO(crbug.com/960665): Remove base::queue once it is replaced with a WTF equivalent.
'base::queue',
'base::SharedMemory',
'base::StringPiece',
'base::ThreadTaskRunnerHandle',
'media::.+',
'libopus::.+',
'libyuv::.+',
'video_track_recorder::.+',
]
},
{
'paths': [
'third_party/blink/renderer/modules/mediastream/',
],
'allowed': [
'media::.+',
'base::AutoLock',
'base::Hash',
'base::Lock',
'base::StringPrintf',
'base::TaskRunner',
# TODO(crbug.com/704136): Switch to using frame-based task runners.
'base::ThreadTaskRunnerHandle',
'base::subtle::Atomic32',
'base::subtle::Acquire_Load',
'base::subtle::NoBarrier_.+',
'base::subtle::Release_Store',
'cc::SkiaPaintCanvas',
'cc::UpdateSubmissionStateCB',
'cc::VideoFrameProvider',
'cc::VideoLayer',
'gpu::gles2::GLES2Interface',
'libyuv::.+',
'media_constraints::.+',
"rtc::RefCountedObject",
'rtc::TaskQueue',
'rtc::scoped_refptr',
'viz::.+',
'webrtc::Aec3ConfigFromJsonString',
'webrtc::AudioProcessingBuilder',
'webrtc::AudioProcessing',
'webrtc::AudioProcessorInterface',
'webrtc::AudioTrackInterface',
'webrtc::Config',
'webrtc::EchoCanceller3Config',
'webrtc::EchoCanceller3Factory',
'webrtc::ExperimentalAgc',
'webrtc::ExperimentalNs',
'webrtc::MediaStreamTrackInterface',
'webrtc::ObserverInterface',
'webrtc::StreamConfig',
'webrtc::TypingDetection',
'webrtc::VideoTrackInterface',
]
},
{
'paths': [
'third_party/blink/renderer/modules/indexeddb/',
],
'allowed': [
'indexed_db::.+',
],
},
{
'paths': [
'third_party/blink/renderer/modules/remote_objects/',
],
'allowed': [
'gin::.+',
# gin::NamedPropertyInterceptor uses std::vector.
'std::vector',
],
},
{
'paths': [
'third_party/blink/renderer/modules/webcodecs/',
],
'allowed': [
'base::PlatformThreadRef',
'base::WrapRefCounted',
'cc::kNumYUVPlanes',
'cc::YUVIndex',
'cc::YUVSubsampling',
'gpu::kNullSurfaceHandle',
'gpu::SHARED_IMAGE_.+',
'gpu::raster::RasterInterface',
'gpu::Mailbox',
'gpu::MailboxHolder',
'gpu::SharedImageInterface',
'gpu::SyncToken',
'viz::RasterContextProvider',
'viz::ReleaseCallback',
'viz::ResourceFormat',
'media::.+',
'libyuv::.+',
]
},
{
'paths': [
'third_party/blink/renderer/modules/webcodecs/video_decoder_fuzzer.cc',
'third_party/blink/renderer/modules/webcodecs/fuzzer_utils.cc',
'third_party/blink/renderer/modules/webcodecs/fuzzer_utils.h',
],
'allowed': [
'wc_fuzzer::.+',
]
},
{
'paths': [
'third_party/blink/renderer/modules/webgpu/',
],
# The WebGPU Blink module needs access to the WebGPU control
# command buffer interface.
'allowed': [
'gpu::webgpu::PowerPreference',
'gpu::webgpu::WebGPUInterface',
],
},
{
'paths': [
'third_party/blink/renderer/modules/webrtc/',
],
'allowed': [
'base::AutoLock',
'base::Erase',
'base::Lock',
'base::StringPrintf',
'media::.+',
'rtc::scoped_refptr',
'webrtc::AudioDeviceModule',
'webrtc::AudioSourceInterface',
'webrtc::AudioTransport',
'webrtc::kAdmMaxDeviceNameSize',
'webrtc::kAdmMaxGuidSize',
]
},
{
'paths': [
'third_party/blink/renderer/platform/',
],
# Suppress almost all checks on platform since code in this directory is
# meant to be a bridge between Blink and non-Blink code. However,
# base::RefCounted should still be explicitly blocked, since
# WTF::RefCounted should be used instead. base::RefCountedThreadSafe is
# still needed for cross_thread_copier.h though.
'allowed': ['base::RefCountedThreadSafe', '(?!base::RefCounted).+'],
'disallowed': [
# TODO(https://crbug.com/1267866): this warning is shown twice for
# renderer/platform/ violations.
_DISALLOW_NON_BLINK_MOJOM,
]
},
{
'paths': [
'third_party/blink/renderer/platform/scheduler/common/single_thread_idle_task_runner.h',
],
# base::RefCounted is prohibited in platform/ as defined above, but
# SingleThreadIdleTaskRunner needs to be constructed before WTF and
# PartitionAlloc are initialized, which forces us to use
# base::RefCountedThreadSafe for it.
'allowed': ['.+'],
},
{
'paths': [
'third_party/blink/renderer/core/exported/',
'third_party/blink/renderer/core/input/',
],
'allowed': [
'ui::LatencyInfo',
],
},
{
'paths': [
'third_party/blink/renderer/modules/animationworklet/',
],
'allowed': [
'cc::AnimationOptions',
'cc::AnimationEffectTimings',
],
},
{
'paths': [
'third_party/blink/renderer/modules/url_pattern/',
],
'allowed': [
# Required to provide a canonicalization functor to liburlpattern.
"absl::InvalidArgumentError",
"absl::StatusOr",
# Required by liburlpattern API in order to pass string data
# efficiently.
"absl::string_view",
# Needed to work with std::string values returned from
# liburlpattern API.
"base::IsStringASCII",
# Needed to use part of the StringUTF8Adaptor API.
"base::StringPiece",
# //third_party/liburlpattern
'liburlpattern::.+',
# The liburlpattern API requires using std::vector.
'std::vector',
# Internal namespace used by url_pattern module.
'url_pattern::.+',
],
},
{
'paths': [
'third_party/blink/renderer/modules/webaudio/',
],
'allowed': ['audio_utilities::.+'],
},
{
'paths': [
'third_party/blink/renderer/modules/webdatabase/',
],
'allowed': ['sql::.+'],
},
{
'paths': [
'third_party/blink/renderer/core/layout/layout_theme.cc',
'third_party/blink/renderer/core/layout/layout_theme_mac.mm',
'third_party/blink/renderer/core/paint/outline_painter.cc',
'third_party/blink/renderer/core/paint/theme_painter.cc',
'third_party/blink/renderer/core/paint/theme_painter_default.cc',
],
'allowed': ['ui::NativeTheme.*', 'ui::color_utils.*'],
},
{
'paths': [
'third_party/blink/renderer/core/scroll/mac_scrollbar_animator_impl.h',
'third_party/blink/renderer/core/scroll/mac_scrollbar_animator_impl.mm',
],
'allowed': [
'ui::ScrollbarAnimationTimerMac',
'ui::OverlayScrollbarAnimatorMac',
],
},
{
'paths': [
'third_party/blink/renderer/modules/crypto/',
],
'allowed': ['crypto::.+'],
},
{
'paths': [
'third_party/blink/renderer/modules/p2p',
],
'allowed': [
# TODO(crbug.com/787254): Remove GURL usage.
'GURL',
'cricket::.*',
'rtc::.+',
]
},
{
'paths': [
# TODO(crbug.com/787254): Separate the two paths below and their own
# allowlist.
'third_party/blink/renderer/modules/peerconnection/',
'third_party/blink/renderer/bindings/modules/v8/serialization/',
],
'allowed': [
'absl::.+',
'base::AutoLock',
'base::AutoUnlock',
# TODO(crbug.com/1266408): Temporarily added to enable splitting UMA stats based on tier.
'base::CPU',
'base::LazyInstance',
'base::Lock',
# TODO(crbug.com/787254): Remove base::BindOnce, base::Unretained,
# base::Passed, base::OnceClosure, base::RepeatingClosure,
# base::CurrentThread and base::RetainedRef.
'base::Bind.*',
'base::MD5.*',
'base::CurrentThread',
'base::.*Closure',
'base::Passed',
'base::PowerObserver',
'base::RetainedRef',
'base::StringPrintf',
'base::Value',
'base::Unretained',
# TODO(crbug.com/787254): Replace base::Thread with the appropriate Blink class.
'base::Thread',
'base::WrapRefCounted',
'cricket::.*',
'jingle_glue::JingleThreadWrapper',
# TODO(crbug.com/787254): Remove GURL usage.
'GURL',
'media::.+',
'net::NetworkTrafficAnnotationTag',
'net::DefineNetworkTrafficAnnotation',
# TODO(crbug.com/1266408): Temporarily added to enable splitting UMA stats based on tier.
're2::RE2',
'rtc::.+',
'webrtc::.+',
'quic::.+',
'quiche::.+',
]
},
{
'paths': [
'third_party/blink/renderer/modules/peerconnection/adapters/',
],
# The code in adapters/ wraps WebRTC APIs using STL/WebRTC types only.
# Thus, the restriction that objects should only be created and
# destroyed on the same thread can be relaxed since no Blink types (like
# AtomicString or HeapVector) are used cross thread. These Blink types
# are converted to the STL/WebRTC counterparts in the parent directory.
'allowed': [
'base::OnTaskRunnerDeleter',
'sigslot::.+',
],
},
{
'paths': [
'third_party/blink/renderer/modules/direct_sockets/',
],
'allowed': [
'net::DefineNetworkTrafficAnnotation',
'net::Error',
'net::MutableNetworkTrafficAnnotationTag',
'net::NetworkTrafficAnnotationTag',
]
},
{
'paths': ['third_party/blink/renderer/modules/manifest/'],
'allowed': [
'net::IsValidTopLevelMimeType',
'net::ParseMimeTypeWithoutParameter',
'net::registry_controlled_domains::.+',
],
},
{
'paths':
['third_party/blink/renderer/core/fetch/fetch_request_data.cc'],
'allowed': ['net::RequestPriority'],
},
{
'paths':
['third_party/blink/renderer/core/fetch/fetch_response_data.cc'],
'allowed': [
'storage::ComputeRandomResponsePadding',
'storage::ComputeStableResponsePadding',
'storage::ShouldPadResponseType'
],
},
{
'paths': ['third_party/blink/renderer/core/frame/local_frame_view.cc'],
'allowed': [
'base::LapTimer',
'cc::frame_viewer_instrumentation::IsTracingLayerTreeSnapshots',
],
},
{
'paths': [
'third_party/blink/renderer/modules/webaudio/offline_audio_worklet_thread.cc',
'third_party/blink/renderer/modules/webaudio/realtime_audio_worklet_thread.cc',
'third_party/blink/renderer/modules/webaudio/semi_realtime_audio_worklet_thread.cc',
],
'allowed': ['base::ThreadPriority'],
},
{
'paths': [
'third_party/blink/renderer/core/frame/local_frame_mojo_handler.cc',
'third_party/blink/renderer/core/frame/local_frame_mojo_handler.h'
],
'allowed': ['base::Value'],
},
{
'paths': ['third_party/blink/renderer/core/frame/local_dom_window.cc'],
'allowed': [
'net::registry_controlled_domains::.+',
],
},
{
'paths': [
'third_party/blink/renderer/platform/audio/',
'third_party/blink/renderer/modules/webaudio/',
],
'allowed': ['fdlibm::.+'],
},
{
'paths': [
'third_party/blink/renderer/core/html/canvas/html_canvas_element.cc',
'third_party/blink/renderer/core/html/canvas/html_canvas_element.h',
],
'allowed': ['viz::ResourceId'],
},
{
'paths': [
'third_party/blink/renderer/platform/graphics/document_transition_shared_element_id.h'
],
'allowed': ['cc::DocumentTransitionSharedElementId'],
},
{
'paths': [
'third_party/blink/renderer/core/',
],
'allowed': ['ui::k200Percent'],
},
{
'paths': [
'third_party/blink/renderer/core/loader/frame_client_hints_preferences_context.cc',
],
'allowed': [
'base::NoDestructor',
]
},
{
'paths': [
'third_party/blink/renderer/modules/webdatabase/dom_window_web_database.cc',
'third_party/blink/renderer/controller/blink_initializer.cc',
],
'allowed': [
'base::CommandLine',
]
},
{
'paths': [
'third_party/blink/renderer/controller/blink_shutdown.cc',
],
'allowed': [
'base::CommandLine',
'switches::kDumpRuntimeCallStats',
]
},
{
'paths':
['third_party/blink/renderer/bindings/core/v8/local_window_proxy.cc'],
'allowed': [
'base::SingleSampleMetric',
'base::SingleSampleMetricsFactory',
],
},
]
def _precompile_config():
"""Turns the raw config into a config of compiled regex."""
match_nothing_re = re.compile('.^')
def compile_regexp(match_list, is_list=True):
"""Turns a match list into a compiled regexp.
If match_list is None, a regexp that matches nothing is returned.
"""
if (match_list and is_list):
match_list = '(?:%s)$' % '|'.join(match_list)
if match_list:
return re.compile(match_list)
return match_nothing_re
def compile_disallowed(disallowed_list):
"""Transforms the disallowed list to one with the regexps compiled."""
if not disallowed_list:
return match_nothing_re, []
match_list = []
advice_list = []
for entry in disallowed_list:
if isinstance(entry, tuple):
warning = ''
if len(entry) == 2:
match, advice = entry
else:
match, advice, warning = entry
match_list.append(match)
advice_list.append((compile_regexp(match, False), advice,
warning == 'Warning'))
else:
# Just a string
match_list.append(entry)
return compile_regexp(match_list), advice_list
compiled_config = []
for raw_entry in _CONFIG:
disallowed, advice = compile_disallowed(raw_entry.get('disallowed'))
compiled_config.append({
'paths':
raw_entry['paths'],
'allowed':
compile_regexp(raw_entry.get('allowed')),
'disallowed':
disallowed,
'advice':
advice,
})
return compiled_config
_COMPILED_CONFIG = _precompile_config()
# Attempt to match identifiers qualified with a namespace. Since parsing C++ in
# Python is hard, this regex assumes that namespace names only contain lowercase
# letters, numbers, and underscores, matching the Google C++ style guide. This
# is intended to minimize the number of matches where :: is used to qualify a
# name with a class or enum name.
#
# As a bit of a minor hack, this regex also hardcodes a check for GURL, since
# GURL isn't namespace qualified and wouldn't match otherwise.
_IDENTIFIER_WITH_NAMESPACE_RE = re.compile(
r'\b(?:(?:[a-z_][a-z0-9_]*::)+[A-Za-z_][A-Za-z0-9_]*|GURL)\b')
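# For example, 'base::Value', 'net::HTTP_OK', and 'GURL' are all picked up by
# this pattern, while 'SomeClass::kConstant' is not, because its qualifier does
# not start with a lowercase letter or underscore.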
def _find_matching_entries(path):
"""Finds entries that should be used for path.
Returns:
A list of entries, sorted in order of relevance. Each entry is a
dictionary with keys:
allowed: A regexp for identifiers that should be allowed.
disallowed: A regexp for identifiers that should not be allowed.
            advice: (optional) A list of (regexp, advice string, is_warning)
                tuples for disallowed identifiers.
"""
entries = []
for entry in _COMPILED_CONFIG:
for entry_path in entry['paths']:
if path.startswith(entry_path):
entries.append({'sortkey': len(entry_path), 'entry': entry})
# The path length is used as the sort key: a longer path implies more
# relevant, since that config is a more exact match.
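    # For example, a file under third_party/blink/renderer/modules/webaudio/
    # matches both the webaudio-specific entry and the generic renderer/ entry;
    # the more specific entry sorts first and is consulted first.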
entries.sort(key=lambda x: x['sortkey'], reverse=True)
return [entry['entry'] for entry in entries]
def _check_entries_for_identifier(entries, identifier):
"""Check if an identifier is allowed"""
for entry in entries:
if entry['disallowed'].match(identifier):
return False
if entry['allowed'].match(identifier):
return True
# Disallow by default.
return False
def _find_advice_for_identifier(entries, identifier):
    advice_list = []
    # Default to a hard error; flip to a warning only when the advice entry
    # that matches the identifier is tagged as a warning.
    warning = False
    for entry in entries:
        for matcher, advice, advice_warning in entry.get('advice', []):
            if matcher.match(identifier):
                advice_list.append(advice)
                warning = advice_warning
    return advice_list, warning
class BadIdentifier(object):
"""Represents a single instance of a bad identifier."""
def __init__(self, identifier, line, advice=None, warning=False):
self.identifier = identifier
self.line = line
self.advice = advice
self.warning = warning
def check(path, contents):
"""Checks for disallowed usage of non-Blink classes, functions, et cetera.
Args:
path: The path of the file to check.
        contents: A list of (line number, line) tuples to check.
    Returns:
        A list of BadIdentifier objects describing the violations found.
"""
results = []
# Because Windows.
path = path.replace('\\', '/')
basename, ext = os.path.splitext(path)
# Only check code. Ignore tests and fuzzers.
if (ext not in ('.cc', '.cpp', '.h', '.mm') or path.find('/testing/') >= 0
or path.find('/core/web_test/') >= 0 or path.find('/tests/') >= 0
or basename.endswith('_test') or basename.endswith('_test_helpers')
or basename.endswith('_unittest') or basename.endswith('_fuzzer')
or basename.endswith('_perftest')):
return results
entries = _find_matching_entries(path)
if not entries:
        return results
for line_number, line in contents:
idx = line.find('//')
if idx >= 0:
line = line[:idx]
identifiers = _IDENTIFIER_WITH_NAMESPACE_RE.findall(line)
for identifier in identifiers:
if not _check_entries_for_identifier(entries, identifier):
advice, warning = _find_advice_for_identifier(
entries, identifier)
results.append(
BadIdentifier(identifier, line_number, advice, warning))
return results
def main():
for path in sys.stdin.read().splitlines():
try:
with open(path, 'r') as f:
contents = f.read()
disallowed_identifiers = check(
path,
[(i + 1, l) for i, l in enumerate(contents.splitlines())])
if disallowed_identifiers:
print('%s uses disallowed identifiers:' % path)
for i in disallowed_identifiers:
print(i.line, i.identifier, i.advice)
except IOError as e:
print('could not open %s: %s' % (path, e))
if __name__ == '__main__':
sys.exit(main())
| (file text above) | ric2b/Vivaldi-browser | chromium/third_party/blink/tools/blinkpy/presubmit/audit_non_blink_usage.py | Python | bsd-3-clause | 59,398 | ["VisIt"] | 2a1cf7c0873607262e346d020889eda5ee01ed59dacc0a4931e98b5302b493c8 |
# Copyright 2003 Iddo Friedberg. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""A parser for the NCBI blastpgp version 2.2.5 output format. Currently only supports
the '-m 9' option, (table w/ annotations).
Returns a BlastTableRec instance
"""
import sys
class BlastTableEntry(object):
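    # Each record is one whitespace-delimited line of blastpgp '-m 9' tabular
    # output with twelve columns: query id, subject id, % identity, alignment
    # length, mismatches, gap openings, query start, query end, subject start,
    # subject end, e-value, and bit score.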
def __init__(self, in_rec):
bt_fields = in_rec.split()
self.qid = bt_fields[0].split('|')
self.sid = bt_fields[1].split('|')
self.pid = float(bt_fields[2])
self.ali_len = int(bt_fields[3])
self.mis = int(bt_fields[4])
self.gaps = int(bt_fields[5])
self.q_bounds = (int(bt_fields[6]), int(bt_fields[7]))
self.s_bounds = (int(bt_fields[8]), int(bt_fields[9]))
self.e_value = float(bt_fields[10])
self.bit_score = float(bt_fields[11])
class BlastTableRec(object):
def __init__(self):
self.program = None
self.version = None
self.date = None
self.iteration = None
self.query = None
self.database = None
self.entries = []
def add_entry(self, entry):
self.entries.append(entry)
reader_keywords = {'BLASTP': 'version',
'Iteration': 'iteration',
'Query': 'query',
'Database': 'database',
'Fields': 'fields'}
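# The header of a '-m 9' table is a block of '#'-prefixed comment lines,
# roughly of the form:
#   # BLASTP 2.2.5 [Nov-16-2002]
#   # Iteration: 1
#   # Query: <query id>
#   # Database: <database name>
#   # Fields: Query id, Subject id, % identity, ...
# Each keyword above selects the _parse_* method used to consume that line.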
class BlastTableReader(object):
def __init__(self, handle):
self.handle = handle
inline = self.handle.readline()
# zip forward to start of record
while inline and 'BLASTP' not in inline:
inline = self.handle.readline()
self._lookahead = inline
self._n = 0
self._in_header = 1
def __next__(self):
self.table_record = BlastTableRec()
self._n += 1
inline = self._lookahead
if not inline:
return None
while inline:
if inline[0] == '#':
if self._in_header:
self._in_header = self._consume_header(inline)
else:
break
else:
self._consume_entry(inline)
self._in_header = 0
inline = self.handle.readline()
self._lookahead = inline
self._in_header = 1
return self.table_record
if sys.version_info[0] < 3:
def next(self):
"""Python 2 style alias for Python 3 style __next__ method."""
return self.__next__()
def _consume_entry(self, inline):
current_entry = BlastTableEntry(inline)
self.table_record.add_entry(current_entry)
    def _consume_header(self, inline):
        # Preserve the current header state if the line contains no known
        # keyword (e.g. a free-form comment line).
        in_header = self._in_header
        for keyword in reader_keywords:
            if keyword in inline:
                in_header = self._Parse('_parse_%s' % reader_keywords[keyword], inline)
                break
        return in_header
def _parse_version(self, inline):
program, version, date = inline.split()[1:]
self.table_record.program = program
self.table_record.version = version
self.table_record.date = date
return 1
def _parse_iteration(self, inline):
self.table_record.iteration = int(inline.split()[2])
return 1
def _parse_query(self, inline):
self.table_record.query = inline.split()[2:]
return 1
def _parse_database(self, inline):
self.table_record.database = inline.split()[2]
return 1
def _parse_fields(self, inline):
return 0
def _Parse(self, method_name, inline):
return getattr(self, method_name)(inline)
| (file text above) | updownlife/multipleK | dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/Blast/ParseBlastTable.py | Python | gpl-2.0 | 3,717 | ["Biopython"] | bb703ff99c68487b833c196e7a4d6edd656b297507d55ddd433bd149679241e4 |
"""
Tests for discussion pages
"""
import datetime
from pytz import UTC
from uuid import uuid4
from nose.plugins.attrib import attr
from .helpers import BaseDiscussionTestCase
from ..helpers import UniqueCourseTest
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.discussion import (
DiscussionTabSingleThreadPage,
InlineDiscussionPage,
InlineDiscussionThreadPage,
DiscussionUserProfilePage,
DiscussionTabHomePage,
DiscussionSortPreferencePage,
)
from ...pages.lms.learner_profile import LearnerProfilePage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ...fixtures.discussion import (
SingleThreadViewFixture,
UserProfileViewFixture,
SearchResultFixture,
Thread,
Response,
Comment,
SearchResult,
MultipleThreadFixture)
from .helpers import BaseDiscussionMixin
THREAD_CONTENT_WITH_LATEX = """Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
\n\n----------\n\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur. (b).\n\n
**(a)** $H_1(e^{j\\omega}) = \\sum_{n=-\\infty}^{\\infty}h_1[n]e^{-j\\omega n} =
\\sum_{n=-\\infty} ^{\\infty}h[n]e^{-j\\omega n}+\\delta_2e^{-j\\omega n_0}$
$= H(e^{j\\omega})+\\delta_2e^{-j\\omega n_0}=A_e (e^{j\\omega}) e^{-j\\omega n_0}
+\\delta_2e^{-j\\omega n_0}=e^{-j\\omega n_0} (A_e(e^{j\\omega})+\\delta_2)
$H_3(e^{j\\omega})=A_e(e^{j\\omega})+\\delta_2$. Dummy $A_e(e^{j\\omega})$ dummy post $.
$A_e(e^{j\\omega}) \\ge -\\delta_2$, it follows that $H_3(e^{j\\omega})$ is real and
$H_3(e^{j\\omega})\\ge 0$.\n\n**(b)** Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.\n\n
**Case 1:** If $re^{j\\theta}$ is a Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
\n\n**Case 3:** Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem $H_3(e^{j\\omega}) = P(cos\\omega)(cos\\omega - cos\\theta)^k$,
Lorem Lorem Lorem Lorem Lorem Lorem $P(cos\\omega)$ has no
$(cos\\omega - cos\\theta)$ factor.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
$P(cos\\theta) \\neq 0$. Since $P(cos\\omega)$ this is a dummy data post $\\omega$,
dummy $\\delta > 0$ such that for all $\\omega$ dummy $|\\omega - \\theta|
< \\delta$, $P(cos\\omega)$ Lorem ipsum dolor sit amet, consectetur adipiscing elit,
sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim
veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo
consequat. Duis aute irure dolor in reprehenderit in voluptate velit sse cillum dolore
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
"""
class DiscussionResponsePaginationTestMixin(BaseDiscussionMixin):
"""
A mixin containing tests for response pagination for use by both inline
discussion and the discussion tab
"""
def assert_response_display_correct(self, response_total, displayed_responses):
"""
Assert that various aspects of the display of responses are all correct:
* Text indicating total number of responses
* Presence of "Add a response" button
* Number of responses actually displayed
* Presence and text of indicator of how many responses are shown
* Presence and text of button to load more responses
"""
self.assertEqual(
self.thread_page.get_response_total_text(),
str(response_total) + " responses"
)
self.assertEqual(self.thread_page.has_add_response_button(), response_total != 0)
self.assertEqual(self.thread_page.get_num_displayed_responses(), displayed_responses)
self.assertEqual(
self.thread_page.get_shown_responses_text(),
(
None if response_total == 0 else
"Showing all responses" if response_total == displayed_responses else
"Showing first {} responses".format(displayed_responses)
)
)
self.assertEqual(
self.thread_page.get_load_responses_button_text(),
(
None if response_total == displayed_responses else
"Load all responses" if response_total - displayed_responses < 100 else
"Load next 100 responses"
)
)
def test_pagination_no_responses(self):
self.setup_thread(0)
self.assert_response_display_correct(0, 0)
def test_pagination_few_responses(self):
self.setup_thread(5)
self.assert_response_display_correct(5, 5)
def test_pagination_two_response_pages(self):
self.setup_thread(50)
self.assert_response_display_correct(50, 25)
self.thread_page.load_more_responses()
self.assert_response_display_correct(50, 50)
def test_pagination_exactly_two_response_pages(self):
self.setup_thread(125)
self.assert_response_display_correct(125, 25)
self.thread_page.load_more_responses()
self.assert_response_display_correct(125, 125)
def test_pagination_three_response_pages(self):
self.setup_thread(150)
self.assert_response_display_correct(150, 25)
self.thread_page.load_more_responses()
self.assert_response_display_correct(150, 125)
self.thread_page.load_more_responses()
self.assert_response_display_correct(150, 150)
def test_add_response_button(self):
self.setup_thread(5)
self.assertTrue(self.thread_page.has_add_response_button())
self.thread_page.click_add_response_button()
def test_add_response_button_closed_thread(self):
self.setup_thread(5, closed=True)
self.assertFalse(self.thread_page.has_add_response_button())
@attr('shard_2')
class DiscussionHomePageTest(UniqueCourseTest):
"""
Tests for the discussion home page.
"""
SEARCHED_USERNAME = "gizmo"
def setUp(self):
super(DiscussionHomePageTest, self).setUp()
CourseFixture(**self.course_info).install()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.page = DiscussionTabHomePage(self.browser, self.course_id)
self.page.visit()
def test_new_post_button(self):
"""
Scenario: I can create new posts from the Discussion home page.
Given that I am on the Discussion home page
When I click on the 'New Post' button
Then I should be shown the new post form
"""
self.assertIsNotNone(self.page.new_post_button)
self.page.click_new_post_button()
self.assertIsNotNone(self.page.new_post_form)
@attr('shard_2')
class DiscussionTabSingleThreadTest(BaseDiscussionTestCase, DiscussionResponsePaginationTestMixin):
"""
Tests for the discussion page displaying a single thread
"""
def setUp(self):
super(DiscussionTabSingleThreadTest, self).setUp()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def setup_thread_page(self, thread_id):
self.thread_page = self.create_single_thread_page(thread_id) # pylint: disable=attribute-defined-outside-init
self.thread_page.visit()
def test_mathjax_rendering(self):
thread_id = "test_thread_{}".format(uuid4().hex)
thread_fixture = SingleThreadViewFixture(
Thread(
id=thread_id,
body=THREAD_CONTENT_WITH_LATEX,
commentable_id=self.discussion_id,
thread_type="discussion"
)
)
thread_fixture.push()
self.setup_thread_page(thread_id)
self.assertTrue(self.thread_page.is_discussion_body_visible())
self.thread_page.verify_mathjax_preview_available()
self.thread_page.verify_mathjax_rendered()
def test_markdown_reference_link(self):
"""
        Check that the markdown editor renders a reference link correctly
        and that a colon (:) in the reference URL is not converted to %3a.
"""
sample_link = "http://example.com/colon:test"
thread_content = """[enter link description here][1]\n[1]: http://example.com/colon:test"""
thread_id = "test_thread_{}".format(uuid4().hex)
thread_fixture = SingleThreadViewFixture(
Thread(
id=thread_id,
body=thread_content,
commentable_id=self.discussion_id,
thread_type="discussion"
)
)
thread_fixture.push()
self.setup_thread_page(thread_id)
self.assertEqual(self.thread_page.get_link_href(), sample_link)
def test_marked_answer_comments(self):
thread_id = "test_thread_{}".format(uuid4().hex)
response_id = "test_response_{}".format(uuid4().hex)
comment_id = "test_comment_{}".format(uuid4().hex)
thread_fixture = SingleThreadViewFixture(
Thread(id=thread_id, commentable_id=self.discussion_id, thread_type="question")
)
thread_fixture.addResponse(
Response(id=response_id, endorsed=True),
[Comment(id=comment_id)]
)
thread_fixture.push()
self.setup_thread_page(thread_id)
self.assertFalse(self.thread_page.is_comment_visible(comment_id))
self.assertFalse(self.thread_page.is_add_comment_visible(response_id))
self.assertTrue(self.thread_page.is_show_comments_visible(response_id))
self.thread_page.show_comments(response_id)
self.assertTrue(self.thread_page.is_comment_visible(comment_id))
self.assertTrue(self.thread_page.is_add_comment_visible(response_id))
self.assertFalse(self.thread_page.is_show_comments_visible(response_id))
@attr('shard_2')
class DiscussionTabMultipleThreadTest(BaseDiscussionTestCase):
"""
Tests for the discussion page with multiple threads
"""
def setUp(self):
super(DiscussionTabMultipleThreadTest, self).setUp()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.thread_count = 2
self.thread_ids = []
self.setup_multiple_threads(thread_count=self.thread_count)
self.thread_page_1 = DiscussionTabSingleThreadPage(
self.browser,
self.course_id,
self.discussion_id,
self.thread_ids[0]
)
self.thread_page_2 = DiscussionTabSingleThreadPage(
self.browser,
self.course_id,
self.discussion_id,
self.thread_ids[1]
)
self.thread_page_1.visit()
def setup_multiple_threads(self, thread_count):
threads = []
for i in range(thread_count):
thread_id = "test_thread_{}_{}".format(i, uuid4().hex)
thread_body = "Dummy Long text body." * 50
threads.append(
Thread(id=thread_id, commentable_id=self.discussion_id, body=thread_body),
)
self.thread_ids.append(thread_id)
view = MultipleThreadFixture(threads)
view.push()
def test_page_scroll_on_thread_change_view(self):
"""
Check that switching between threads changes the page focus
"""
# verify threads are rendered on the page
self.assertTrue(
self.thread_page_1.check_threads_rendered_successfully(thread_count=self.thread_count)
)
# From the thread_page_1 open & verify next thread
self.thread_page_1.click_and_open_thread(thread_id=self.thread_ids[1])
self.assertTrue(self.thread_page_2.is_browser_on_page())
# Verify that the focus is changed
self.thread_page_2.check_focus_is_set(selector=".discussion-article")
@attr('shard_2')
class DiscussionOpenClosedThreadTest(BaseDiscussionTestCase):
"""
Tests for checking the display of attributes on open and closed threads
"""
def setUp(self):
super(DiscussionOpenClosedThreadTest, self).setUp()
self.thread_id = "test_thread_{}".format(uuid4().hex)
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self, **thread_kwargs):
thread_kwargs.update({'commentable_id': self.discussion_id})
view = SingleThreadViewFixture(
Thread(id=self.thread_id, **thread_kwargs)
)
view.addResponse(Response(id="response1"))
view.push()
def setup_openclosed_thread_page(self, closed=False):
self.setup_user(roles=['Moderator'])
if closed:
self.setup_view(closed=True)
else:
self.setup_view()
page = self.create_single_thread_page(self.thread_id)
page.visit()
page.close_open_thread()
return page
def test_originally_open_thread_vote_display(self):
page = self.setup_openclosed_thread_page()
self.assertFalse(page._is_element_visible('.forum-thread-main-wrapper .action-vote'))
self.assertTrue(page._is_element_visible('.forum-thread-main-wrapper .display-vote'))
self.assertFalse(page._is_element_visible('.response_response1 .action-vote'))
self.assertTrue(page._is_element_visible('.response_response1 .display-vote'))
def test_originally_closed_thread_vote_display(self):
page = self.setup_openclosed_thread_page(True)
self.assertTrue(page._is_element_visible('.forum-thread-main-wrapper .action-vote'))
self.assertFalse(page._is_element_visible('.forum-thread-main-wrapper .display-vote'))
self.assertTrue(page._is_element_visible('.response_response1 .action-vote'))
self.assertFalse(page._is_element_visible('.response_response1 .display-vote'))
@attr('shard_2')
class DiscussionCommentDeletionTest(BaseDiscussionTestCase):
"""
Tests for deleting comments displayed beneath responses in the single thread view.
"""
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self):
view = SingleThreadViewFixture(Thread(id="comment_deletion_test_thread", commentable_id=self.discussion_id))
view.addResponse(
Response(id="response1"), [
Comment(id="comment_other_author"),
Comment(id="comment_self_author", user_id=self.user_id, thread_id="comment_deletion_test_thread")
]
)
view.push()
def test_comment_deletion_as_student(self):
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("comment_deletion_test_thread")
page.visit()
self.assertTrue(page.is_comment_deletable("comment_self_author"))
self.assertTrue(page.is_comment_visible("comment_other_author"))
self.assertFalse(page.is_comment_deletable("comment_other_author"))
page.delete_comment("comment_self_author")
def test_comment_deletion_as_moderator(self):
self.setup_user(roles=['Moderator'])
self.setup_view()
page = self.create_single_thread_page("comment_deletion_test_thread")
page.visit()
self.assertTrue(page.is_comment_deletable("comment_self_author"))
self.assertTrue(page.is_comment_deletable("comment_other_author"))
page.delete_comment("comment_self_author")
page.delete_comment("comment_other_author")
@attr('shard_2')
class DiscussionResponseEditTest(BaseDiscussionTestCase):
"""
Tests for editing responses displayed beneath the thread in the single thread view.
"""
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self):
view = SingleThreadViewFixture(Thread(id="response_edit_test_thread", commentable_id=self.discussion_id))
view.addResponse(
Response(id="response_other_author", user_id="other", thread_id="response_edit_test_thread"),
)
view.addResponse(
Response(id="response_self_author", user_id=self.user_id, thread_id="response_edit_test_thread"),
)
view.push()
def edit_response(self, page, response_id):
self.assertTrue(page.is_response_editable(response_id))
page.start_response_edit(response_id)
new_response = "edited body"
page.set_response_editor_value(response_id, new_response)
page.submit_response_edit(response_id, new_response)
def test_edit_response_as_student(self):
"""
Scenario: Students should be able to edit the responses they created, but not the responses of other users
Given that I am on the discussion page with a student logged in
When I try to edit a response created by the student
Then the response should be edited and rendered successfully
And responses from other users should be shown there
And the student should not be able to edit the responses of other people
"""
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
self.assertTrue(page.is_response_visible("response_other_author"))
self.assertFalse(page.is_response_editable("response_other_author"))
self.edit_response(page, "response_self_author")
def test_edit_response_as_moderator(self):
"""
Scenario: A moderator should be able to edit the responses they created and the responses of other users
Given that I am on the discussion page with a moderator logged in
When I try to edit the response created by the moderator
Then the response should be edited and rendered successfully
And when I try to edit a response created by another user
Then the response should be edited and rendered successfully
"""
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
self.edit_response(page, "response_self_author")
self.edit_response(page, "response_other_author")
def test_vote_report_endorse_after_edit(self):
"""
Scenario: A moderator should be able to vote, report, or endorse a response after editing it.
Given that I am on the discussion page with a moderator logged in
When I try to edit the response created by the moderator
Then the response should be edited and rendered successfully
And when I try to edit a response created by another user
Then the response should be edited and rendered successfully
And when I try to vote on the response created by the moderator
Then the response should be voted on successfully
And when I try to vote on a response created by another user
Then the response should be voted on successfully
And when I try to report the response created by the moderator
Then the response should be reported successfully
And when I try to report a response created by another user
Then the response should be reported successfully
And when I try to endorse the response created by the moderator
Then the response should be endorsed successfully
And when I try to endorse a response created by another user
Then the response should be endorsed successfully
"""
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
self.edit_response(page, "response_self_author")
self.edit_response(page, "response_other_author")
page.vote_response('response_self_author')
page.vote_response('response_other_author')
page.report_response('response_self_author')
page.report_response('response_other_author')
page.endorse_response('response_self_author')
page.endorse_response('response_other_author')
@attr('shard_2')
class DiscussionCommentEditTest(BaseDiscussionTestCase):
"""
Tests for editing comments displayed beneath responses in the single thread view.
"""
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self):
view = SingleThreadViewFixture(Thread(id="comment_edit_test_thread", commentable_id=self.discussion_id))
view.addResponse(
Response(id="response1"),
[Comment(id="comment_other_author", user_id="other"), Comment(id="comment_self_author", user_id=self.user_id)])
view.push()
def edit_comment(self, page, comment_id):
page.start_comment_edit(comment_id)
new_comment = "edited body"
page.set_comment_editor_value(comment_id, new_comment)
page.submit_comment_edit(comment_id, new_comment)
def test_edit_comment_as_student(self):
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_visible("comment_other_author"))
self.assertFalse(page.is_comment_editable("comment_other_author"))
self.edit_comment(page, "comment_self_author")
def test_edit_comment_as_moderator(self):
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_editable("comment_other_author"))
self.edit_comment(page, "comment_self_author")
self.edit_comment(page, "comment_other_author")
def test_cancel_comment_edit(self):
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
page.set_comment_editor_value("comment_self_author", "edited body")
page.cancel_comment_edit("comment_self_author", original_body)
def test_editor_visibility(self):
"""Only one editor should be visible at a time within a single response"""
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_editable("comment_other_author"))
self.assertTrue(page.is_add_comment_visible("response1"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
self.assertFalse(page.is_add_comment_visible("response1"))
self.assertTrue(page.is_comment_editor_visible("comment_self_author"))
page.set_comment_editor_value("comment_self_author", "edited body")
page.start_comment_edit("comment_other_author")
self.assertFalse(page.is_comment_editor_visible("comment_self_author"))
self.assertTrue(page.is_comment_editor_visible("comment_other_author"))
self.assertEqual(page.get_comment_body("comment_self_author"), original_body)
page.start_response_edit("response1")
self.assertFalse(page.is_comment_editor_visible("comment_other_author"))
self.assertTrue(page.is_response_editor_visible("response1"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
self.assertFalse(page.is_response_editor_visible("response1"))
self.assertTrue(page.is_comment_editor_visible("comment_self_author"))
page.cancel_comment_edit("comment_self_author", original_body)
self.assertFalse(page.is_comment_editor_visible("comment_self_author"))
self.assertTrue(page.is_add_comment_visible("response1"))
@attr('shard_2')
class InlineDiscussionTest(UniqueCourseTest, DiscussionResponsePaginationTestMixin):
"""
Tests for inline discussions
"""
def setUp(self):
super(InlineDiscussionTest, self).setUp()
self.thread_ids = []
self.discussion_id = "test_discussion_{}".format(uuid4().hex)
self.additional_discussion_id = "test_discussion_{}".format(uuid4().hex)
self.course_fix = CourseFixture(**self.course_info).add_children(
XBlockFixtureDesc("chapter", "Test Section").add_children(
XBlockFixtureDesc("sequential", "Test Subsection").add_children(
XBlockFixtureDesc("vertical", "Test Unit").add_children(
XBlockFixtureDesc(
"discussion",
"Test Discussion",
metadata={"discussion_id": self.discussion_id}
),
XBlockFixtureDesc(
"discussion",
"Test Discussion 1",
metadata={"discussion_id": self.additional_discussion_id}
)
)
)
)
).install()
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id).visit().get_user_id()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.courseware_page.visit()
self.discussion_page = InlineDiscussionPage(self.browser, self.discussion_id)
self.additional_discussion_page = InlineDiscussionPage(self.browser, self.additional_discussion_id)
def setup_thread_page(self, thread_id):
self.discussion_page.expand_discussion()
self.assertEqual(self.discussion_page.get_num_displayed_threads(), 1)
self.thread_page = InlineDiscussionThreadPage(self.browser, thread_id) # pylint: disable=attribute-defined-outside-init
self.thread_page.expand()
def setup_multiple_inline_threads(self, thread_count):
"""
Set up multiple threads on the page by passing 'thread_count'.
"""
threads = []
for i in range(thread_count):
thread_id = "test_thread_{}_{}".format(i, uuid4().hex)
threads.append(
Thread(id=thread_id, commentable_id=self.discussion_id),
)
self.thread_ids.append(thread_id)
thread_fixture = MultipleThreadFixture(threads)
thread_fixture.add_response(
Response(id="response1"),
[Comment(id="comment1", user_id="other"), Comment(id="comment2", user_id=self.user_id)],
threads[0]
)
thread_fixture.push()
def test_page_while_expanding_inline_discussion(self):
"""
Tests for the Inline Discussion page with multiple threads. The page should not focus 'thread-wrapper'
after loading responses.
"""
self.setup_multiple_inline_threads(thread_count=3)
self.discussion_page.expand_discussion()
thread_page = InlineDiscussionThreadPage(self.browser, self.thread_ids[0])
thread_page.expand()
# Check if 'thread-wrapper' is focused after expanding thread
self.assertFalse(thread_page.check_if_selector_is_focused(selector='.thread-wrapper'))
def test_initial_render(self):
self.assertFalse(self.discussion_page.is_discussion_expanded())
def test_expand_discussion_empty(self):
self.discussion_page.expand_discussion()
self.assertEqual(self.discussion_page.get_num_displayed_threads(), 0)
def check_anonymous_to_peers(self, is_staff):
thread = Thread(id=uuid4().hex, anonymous_to_peers=True, commentable_id=self.discussion_id)
thread_fixture = SingleThreadViewFixture(thread)
thread_fixture.push()
self.setup_thread_page(thread.get("id"))
self.assertEqual(self.thread_page.is_thread_anonymous(), not is_staff)
def test_anonymous_to_peers_threads_as_staff(self):
AutoAuthPage(self.browser, course_id=self.course_id, roles="Administrator").visit()
self.courseware_page.visit()
self.check_anonymous_to_peers(True)
def test_anonymous_to_peers_threads_as_peer(self):
self.check_anonymous_to_peers(False)
def test_discussion_blackout_period(self):
now = datetime.datetime.now(UTC)
self.course_fix.add_advanced_settings(
{
u"discussion_blackouts": {
"value": [
[
(now - datetime.timedelta(days=14)).isoformat(),
(now + datetime.timedelta(days=2)).isoformat()
]
]
}
}
)
self.course_fix._add_advanced_settings()
self.browser.refresh()
thread = Thread(id=uuid4().hex, commentable_id=self.discussion_id)
thread_fixture = SingleThreadViewFixture(thread)
thread_fixture.addResponse(
Response(id="response1"),
[Comment(id="comment1", user_id="other"), Comment(id="comment2", user_id=self.user_id)])
thread_fixture.push()
self.setup_thread_page(thread.get("id"))
self.assertFalse(self.discussion_page.element_exists(".new-post-btn"))
self.assertFalse(self.thread_page.has_add_response_button())
self.assertFalse(self.thread_page.is_response_editable("response1"))
self.assertFalse(self.thread_page.is_add_comment_visible("response1"))
self.assertFalse(self.thread_page.is_comment_editable("comment1"))
self.assertFalse(self.thread_page.is_comment_editable("comment2"))
self.assertFalse(self.thread_page.is_comment_deletable("comment1"))
self.assertFalse(self.thread_page.is_comment_deletable("comment2"))
def test_dual_discussion_module(self):
"""
Scenario: Two discussion modules in one unit shouldn't override each other's actions
Given that I'm on a courseware page where there are two inline discussions
When I click on one discussion module's new post button
Then the new post form of that module should be added to the DOM
And I should be shown the new post form of that module
And I shouldn't be shown the second discussion module's new post form
And when I click on the second discussion module's new post button
Then the new post form of the second module should be added to the DOM
And I should be shown the second discussion module's new post form
And I shouldn't be shown the first discussion module's new post form
And there should be two new post forms in the DOM
When I click back on the first module's new post button
Then I should be shown the new post form of that module
And I shouldn't be shown the second discussion module's new post form
"""
self.discussion_page.wait_for_page()
self.additional_discussion_page.wait_for_page()
self.discussion_page.click_new_post_button()
with self.discussion_page.handle_alert():
self.discussion_page.click_cancel_new_post()
self.additional_discussion_page.click_new_post_button()
self.assertFalse(self.discussion_page._is_element_visible(".new-post-article"))
with self.additional_discussion_page.handle_alert():
self.additional_discussion_page.click_cancel_new_post()
self.discussion_page.click_new_post_button()
self.assertFalse(self.additional_discussion_page._is_element_visible(".new-post-article"))
@attr('shard_2')
class DiscussionUserProfileTest(UniqueCourseTest):
"""
Tests for the user profile page in the discussion tab.
"""
PAGE_SIZE = 20 # django_comment_client.forum.views.THREADS_PER_PAGE
PROFILED_USERNAME = "profiled-user"
def setUp(self):
super(DiscussionUserProfileTest, self).setUp()
CourseFixture(**self.course_info).install()
# The following line creates a user enrolled in our course, whose
# threads will be viewed, but not the one who will view the page.
# It isn't necessary to log them in, but using the AutoAuthPage
# saves a lot of code.
self.profiled_user_id = AutoAuthPage(
self.browser,
username=self.PROFILED_USERNAME,
course_id=self.course_id
).visit().get_user_id()
# now create a second user who will view the profile.
self.user_id = AutoAuthPage(
self.browser,
course_id=self.course_id
).visit().get_user_id()
def check_pages(self, num_threads):
# set up the stub server to return the desired number of thread results
threads = [Thread(id=uuid4().hex) for _ in range(num_threads)]
UserProfileViewFixture(threads).push()
# navigate to default view (page 1)
page = DiscussionUserProfilePage(
self.browser,
self.course_id,
self.profiled_user_id,
self.PROFILED_USERNAME
)
page.visit()
current_page = 1
total_pages = max(num_threads - 1, 1) / self.PAGE_SIZE + 1
all_pages = range(1, total_pages + 1)
return page
def _check_page():
# ensure the page being displayed as "current" is the expected one
self.assertEqual(page.get_current_page(), current_page)
# ensure the expected threads are being shown in the right order
threads_expected = threads[(current_page - 1) * self.PAGE_SIZE:current_page * self.PAGE_SIZE]
self.assertEqual(page.get_shown_thread_ids(), [t["id"] for t in threads_expected])
# ensure the clickable page numbers are the expected ones
self.assertEqual(page.get_clickable_pages(), [
p for p in all_pages
if p != current_page
and p - 2 <= current_page <= p + 2
or (current_page > 2 and p == 1)
or (current_page < total_pages and p == total_pages)
])
# ensure the previous button is shown, but only if it should be.
# when it is shown, make sure it works.
if current_page > 1:
self.assertTrue(page.is_prev_button_shown(current_page - 1))
page.click_prev_page()
self.assertEqual(page.get_current_page(), current_page - 1)
page.click_next_page()
self.assertEqual(page.get_current_page(), current_page)
else:
self.assertFalse(page.is_prev_button_shown())
# ensure the next button is shown, but only if it should be.
if current_page < total_pages:
self.assertTrue(page.is_next_button_shown(current_page + 1))
else:
self.assertFalse(page.is_next_button_shown())
# click all the way up through each page
for i in range(current_page, total_pages):
_check_page()
if current_page < total_pages:
page.click_on_page(current_page + 1)
current_page += 1
# click all the way back down
for i in range(current_page, 0, -1):
_check_page()
if current_page > 1:
page.click_on_page(current_page - 1)
current_page -= 1
def test_0_threads(self):
self.check_pages(0)
def test_1_thread(self):
self.check_pages(1)
def test_20_threads(self):
self.check_pages(20)
def test_21_threads(self):
self.check_pages(21)
def test_151_threads(self):
self.check_pages(151)
def test_pagination_window_reposition(self):
page = self.check_pages(50)
page.click_next_page()
page.wait_for_ajax()
self.assertTrue(page.is_window_on_top())
def test_redirects_to_learner_profile(self):
"""
Scenario: Verify that the learner-profile link is present on the forum discussions page and we can navigate to it.
Given that I am on a discussion forum user's profile page
And I can see a username in the left sidebar
When I click on my username
Then I will be navigated to the Learner Profile page
And I can see my username on the Learner Profile page
"""
learner_profile_page = LearnerProfilePage(self.browser, self.PROFILED_USERNAME)
page = self.check_pages(1)
page.click_on_sidebar_username()
learner_profile_page.wait_for_page()
self.assertTrue(learner_profile_page.field_is_visible('username'))
@attr('shard_2')
class DiscussionSearchAlertTest(UniqueCourseTest):
"""
Tests for spawning and dismissing alerts related to user search actions and their results.
"""
SEARCHED_USERNAME = "gizmo"
def setUp(self):
super(DiscussionSearchAlertTest, self).setUp()
CourseFixture(**self.course_info).install()
# first auto auth call sets up a user that we will search for in some tests
self.searched_user_id = AutoAuthPage(
self.browser,
username=self.SEARCHED_USERNAME,
course_id=self.course_id
).visit().get_user_id()
# this auto auth call creates the actual session user
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.page = DiscussionTabHomePage(self.browser, self.course_id)
self.page.visit()
def setup_corrected_text(self, text):
SearchResultFixture(SearchResult(corrected_text=text)).push()
def check_search_alert_messages(self, expected):
actual = self.page.get_search_alert_messages()
self.assertTrue(all(map(lambda msg, sub: msg.lower().find(sub.lower()) >= 0, actual, expected)))
def test_no_rewrite(self):
self.setup_corrected_text(None)
self.page.perform_search()
self.check_search_alert_messages(["no threads"])
def test_rewrite_dismiss(self):
self.setup_corrected_text("foo")
self.page.perform_search()
self.check_search_alert_messages(["foo"])
self.page.dismiss_alert_message("foo")
self.check_search_alert_messages([])
def test_new_search(self):
self.setup_corrected_text("foo")
self.page.perform_search()
self.check_search_alert_messages(["foo"])
self.setup_corrected_text("bar")
self.page.perform_search()
self.check_search_alert_messages(["bar"])
self.setup_corrected_text(None)
self.page.perform_search()
self.check_search_alert_messages(["no threads"])
def test_rewrite_and_user(self):
self.setup_corrected_text("foo")
self.page.perform_search(self.SEARCHED_USERNAME)
self.check_search_alert_messages(["foo", self.SEARCHED_USERNAME])
def test_user_only(self):
self.setup_corrected_text(None)
self.page.perform_search(self.SEARCHED_USERNAME)
self.check_search_alert_messages(["no threads", self.SEARCHED_USERNAME])
# make sure clicking the link leads to the user profile page
UserProfileViewFixture([]).push()
self.page.get_search_alert_links().first.click()
DiscussionUserProfilePage(
self.browser,
self.course_id,
self.searched_user_id,
self.SEARCHED_USERNAME
).wait_for_page()
@attr('shard_2')
class DiscussionSortPreferenceTest(UniqueCourseTest):
"""
Tests for the discussion thread sort preference.
"""
def setUp(self):
super(DiscussionSortPreferenceTest, self).setUp()
# Create a course to register for.
CourseFixture(**self.course_info).install()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.sort_page = DiscussionSortPreferencePage(self.browser, self.course_id)
self.sort_page.visit()
def test_default_sort_preference(self):
"""
Test the user's default sorting preference (default is "date").
"""
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, "date")
def test_change_sort_preference(self):
"""
Test that the user's sorting preference changes properly.
"""
selected_sort = ""
for sort_type in ["votes", "comments", "date"]:
self.assertNotEqual(selected_sort, sort_type)
self.sort_page.change_sort_preference(sort_type)
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, sort_type)
def test_last_preference_saved(self):
"""
Test that the user's last sorting preference is saved.
"""
selected_sort = ""
for sort_type in ["votes", "comments", "date"]:
self.assertNotEqual(selected_sort, sort_type)
self.sort_page.change_sort_preference(sort_type)
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, sort_type)
self.sort_page.refresh_page()
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, sort_type)
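# Illustrative sketch (not part of the original tests): the pagination window
# asserted inside DiscussionUserProfileTest.check_pages can be expressed as a
# small helper -- pages within two of the current page are clickable, plus the
# first page once we are past page 2 and the last page while we are before it.
def _sketch_clickable_pages(current_page, total_pages):
    return [
        p for p in range(1, total_pages + 1)
        if (p != current_page and abs(p - current_page) <= 2)
        or (current_page > 2 and p == 1)
        or (current_page < total_pages and p == total_pages)
    ]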
|
simbs/edx-platform
|
common/test/acceptance/tests/discussion/test_discussion.py
|
Python
|
agpl-3.0
| 46,784
|
[
"VisIt"
] |
5857c0951df397e5184ce10eeab465725c585de77b2ad7e41510895a1c702c98
|
from charm.toolbox.enum import *
OpType = Enum('OR', 'AND', 'ATTR', 'THRESHOLD', 'CONDITIONAL', 'NONE')
class BinaryNode:
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
self.right = right
def getLeft(self):
return self.left
def getRight(self):
return self.right
def addSubNode(self, left, right):
# set subNodes appropriately
self.left = left if left is not None else None
self.right = right if right is not None else None
def __str__(self):
if self.left is None or self.right is None:
return "%s" % str(self.value)
else:
return "%s (%s, %s)" % (str(self.value), str(self.left), str(self.right))
def __eq__(self, other):
return isinstance(other, BinaryNode) \
and self.value == other.value \
and self.left == other.left \
and self.right == other.right
def __hash__(self):
return hash((self.value, self.left, self.right))
class BinNode(BinaryNode):
def __init__(self, value, left=None, right=None):
super(BinNode, self).__init__(value, left, right)
self.negated = False
self.index = None
if isinstance(value, str):
if value[0] == '!':
value = value[1:] # remove but set flag
self.negated = True
if value.find('_') != -1:
val = value.split('_')
self.index = int(val[1]) # index
value = val[0]
self.type = OpType.ATTR
self.attribute = value.upper()
elif OpType.OR <= value < OpType.NONE:
self.type = value
if self.type == OpType.OR:
self.threshold = 1
elif self.type == OpType.AND:
self.threshold = 2
# elif self.type == OpType.THRESHOLD:
self.attribute = ''
else:
self.type = None
self.attribute = ''
def __repr__(self):
return str(self)
def __str__(self):
if self.type == OpType.ATTR:
if self.negated:
prefix = '!'
else:
prefix = ''
if self.index is not None:
postfix = '_' + str(self.index)
else:
postfix = ''
return prefix + self.attribute + postfix
else:
left = str(self.left)
right = str(self.right)
if self.type == OpType.OR:
return '(' + left + ' or ' + right + ')'
elif self.type == OpType.AND:
return '(' + left + ' and ' + right + ')'
return None
def getAttribute(self):
if self.type == OpType.ATTR:
if self.negated:
prefix = '!'
else:
prefix = ''
return prefix + self.attribute
return
def getAttributeAndIndex(self):
if self.type == OpType.ATTR:
if self.negated:
prefix = '!'
else:
prefix = ''
if self.index is not None:
postfix = '_' + str(self.index)
else:
postfix = ''
return prefix + self.attribute + postfix
return
def __iter__(self):
return self
def __eq__(self, other):
# print("checking...:", self, str(other))
if other is None:
return False
if type(self) == type(other):
return self.getAttribute() == other.getAttribute()
elif type(other) in [str, bytes]:
return other in self.getAttributeAndIndex()
elif type(self) in [str, bytes]:
return self in other.getAttributeAndIndex()
else:
raise ValueError('BinNode - invalid comparison.')
def getNodeType(self):
return self.type
# applies the function to this node and then recursively to its children
def traverse(self, function):
# visit node then traverse left and right
function(self.type, self)
if self.left is None:
return None
self.left.traverse(function)
if self.right is None:
return None
self.right.traverse(function)
return None
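# Minimal usage sketch (illustrative, not part of the original module): build
# the policy tree for "(a and b) or c" from BinNode objects and collect its
# leaf attributes with traverse().
if __name__ == "__main__":
    policy = BinNode(OpType.OR,
                     BinNode(OpType.AND, BinNode('a'), BinNode('b')),
                     BinNode('c'))
    leaf_attrs = []
    policy.traverse(
        lambda node_type, node: leaf_attrs.append(node.getAttribute())
        if node_type == OpType.ATTR else None
    )
    print(policy)      # ((A and B) or C) -- attributes are upper-cased
    print(leaf_attrs)  # ['A', 'B', 'C']
    negated = BinNode('!d_1')
    print(negated)     # !D_1 -- '!' marks negation, '_1' is the index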
|
denniss17/charm
|
charm/toolbox/node.py
|
Python
|
lgpl-3.0
| 4,318
|
[
"VisIt"
] |
2c99ddf43dbdbc6e8b33afdf32eb6e195e0804b9f04fd0ec8038800034dc1982
|
# encoding: utf-8
"""
A payload based version of page.
Authors:
* Brian Granger
* Fernando Perez
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Third-party
try:
from docutils.core import publish_string
except ImportError:
# html paging won't be available, but we don't raise any errors. It's a
# purely optional feature.
pass
# Our own
from IPython.core.interactiveshell import InteractiveShell
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def page(strng, start=0, screen_lines=0, pager_cmd=None,
html=None, auto_html=False):
"""Print a string, piping through a pager.
This version ignores the screen_lines and pager_cmd arguments and uses
IPython's payload system instead.
Parameters
----------
strng : str
Text to page.
start : int
Starting line at which to place the display.
html : str, optional
If given, an html string to send as well.
auto_html : bool, optional
If true, the input string is assumed to be valid reStructuredText and is
converted to HTML with docutils. Note that if docutils is not found,
this option is silently ignored.
Note
----
Only one of the ``html`` and ``auto_html`` options can be given, not
both.
"""
# Some routines may auto-compute start offsets incorrectly and pass a
# negative value. Offset to 0 for robustness.
start = max(0, start)
shell = InteractiveShell.instance()
if auto_html:
try:
# These defaults ensure user configuration variables for docutils
# are not loaded, only our config is used here.
defaults = {'file_insertion_enabled': 0,
'raw_enabled': 0,
'_disable_config': 1}
html = publish_string(strng, writer_name='html',
settings_overrides=defaults)
except:
pass
payload = dict(
source='IPython.zmq.page.page',
text=strng,
html=html,
start_line_number=start
)
shell.payload_manager.write_payload(payload)
def install_payload_page():
"""Install this version of page as IPython.core.page.page."""
from IPython.core import page as corepage
corepage.page = page
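# Illustrative usage sketch (not part of the original module). This is normally
# wired up by the zmq kernel itself; calling it directly simply registers the
# pager replacement and queues the text as a payload on the current shell.
if __name__ == '__main__':
    install_payload_page()                    # replace IPython.core.page.page
    page("some long output\n" * 50, start=0)  # queue the text as a payload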
|
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/IPython/core/payloadpage.py
|
Python
|
lgpl-3.0
| 2,912
|
[
"Brian"
] |
2036d2b3eb82010b2f4955d70e28f5d7d3747d8eeb51c74e89b0bd2e5061a419
|
# -*- coding: utf-8 -*-
#
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Joan Massich <mailsik@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
# Oleh Kozynets <ok7mailbox@gmail.com>
#
# License: Simplified BSD
import os
import os.path as op
import sys
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from mne import (read_source_estimate, read_evokeds, read_cov,
read_forward_solution, pick_types_forward,
SourceEstimate, MixedSourceEstimate, write_surface,
VolSourceEstimate, vertex_to_mni)
from mne.minimum_norm import apply_inverse, make_inverse_operator
from mne.source_space import (read_source_spaces,
setup_volume_source_space)
from mne.datasets import testing
from mne.fixes import _cell_data
from mne.io import read_info
from mne.utils import check_version
from mne.label import read_label
from mne.viz._brain import Brain, _LinkViewer, _BrainScraper, _LayeredMesh
from mne.viz._brain.colormap import calculate_lut
from matplotlib import cm, image
from matplotlib.lines import Line2D
data_path = testing.data_path(download=False)
subject_id = 'sample'
subjects_dir = op.join(data_path, 'subjects')
sample_dir = op.join(data_path, 'MEG', 'sample')
fname_raw_testing = op.join(sample_dir, 'sample_audvis_trunc_raw.fif')
fname_trans = op.join(sample_dir, 'sample_audvis_trunc-trans.fif')
fname_stc = op.join(sample_dir, 'sample_audvis_trunc-meg')
fname_label = op.join(sample_dir, 'labels', 'Vis-lh.label')
fname_cov = op.join(sample_dir, 'sample_audvis_trunc-cov.fif')
fname_evoked = op.join(sample_dir, 'sample_audvis_trunc-ave.fif')
fname_fwd = op.join(sample_dir, 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
src_fname = op.join(subjects_dir, subject_id, 'bem', 'sample-oct-6-src.fif')
class _Collection(object):
def __init__(self, actors):
self._actors = actors
def GetNumberOfItems(self):
return len(self._actors)
def GetItemAsObject(self, ii):
return self._actors[ii]
class TstVTKPicker(object):
"""Class to test cell picking."""
def __init__(self, mesh, cell_id, hemi, brain):
self.mesh = mesh
self.cell_id = cell_id
self.point_id = None
self.hemi = hemi
self.brain = brain
self._actors = ()
def GetCellId(self):
"""Return the picked cell."""
return self.cell_id
def GetDataSet(self):
"""Return the picked mesh."""
return self.mesh
def GetPickPosition(self):
"""Return the picked position."""
if self.hemi == 'vol':
self.point_id = self.cell_id
return self.brain._data['vol']['grid_coords'][self.cell_id]
else:
vtk_cell = self.mesh.GetCell(self.cell_id)
cell = [vtk_cell.GetPointId(point_id) for point_id
in range(vtk_cell.GetNumberOfPoints())]
self.point_id = cell[0]
return self.mesh.points[self.point_id]
def GetProp3Ds(self):
"""Return all picked Prop3Ds."""
return _Collection(self._actors)
def GetRenderer(self):
"""Return the "renderer"."""
return self # set this to also be the renderer and active camera
GetActiveCamera = GetRenderer
def GetPosition(self):
"""Return the position."""
return np.array(self.GetPickPosition()) - (0, 0, 100)
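# Illustrative sketch (not part of the original tests): TstVTKPicker stands in
# for VTK's cell picker, so a test can simulate clicking a cell of a hemisphere
# mesh and feed it straight into Brain's pick handler, e.g.::
#
#     mesh = brain._layered_meshes['lh']._polydata
#     picker = TstVTKPicker(mesh, 0, 'lh', brain)
#     brain._on_pick(picker, None)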
def test_layered_mesh(renderer_interactive_pyvistaqt):
"""Test management of scalars/colormap overlay."""
mesh = _LayeredMesh(
renderer=renderer_interactive_pyvistaqt._get_renderer(size=(300, 300)),
vertices=np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]]),
triangles=np.array([[0, 1, 2], [1, 2, 3]]),
normals=np.array([[0, 0, 1]] * 4),
)
assert not mesh._is_mapped
mesh.map()
assert mesh._is_mapped
assert mesh._current_colors is None
assert mesh._cached_colors is None
mesh.update()
assert len(mesh._overlays) == 0
mesh.add_overlay(
scalars=np.array([0, 1, 1, 0]),
colormap=np.array([(1, 1, 1, 1), (0, 0, 0, 0)]),
rng=[0, 1],
opacity=None,
name='test1',
)
assert mesh._current_colors is not None
assert mesh._cached_colors is None
assert len(mesh._overlays) == 1
assert 'test1' in mesh._overlays
mesh.add_overlay(
scalars=np.array([1, 0, 0, 1]),
colormap=np.array([(1, 1, 1, 1), (0, 0, 0, 0)]),
rng=[0, 1],
opacity=None,
name='test2',
)
assert mesh._current_colors is not None
assert mesh._cached_colors is not None
assert len(mesh._overlays) == 2
assert 'test2' in mesh._overlays
mesh.remove_overlay('test2')
assert 'test2' not in mesh._overlays
mesh.update()
assert len(mesh._overlays) == 1
mesh._clean()
@testing.requires_testing_data
def test_brain_gc(renderer_pyvistaqt, brain_gc):
"""Test that a minimal version of Brain gets GC'ed."""
brain = Brain('fsaverage', 'both', 'inflated', subjects_dir=subjects_dir)
brain.close()
@testing.requires_testing_data
def test_brain_routines(renderer, brain_gc):
"""Test backend agnostic Brain routines."""
brain_klass = renderer.get_brain_class()
from mne.viz._brain import Brain
assert brain_klass == Brain
@testing.requires_testing_data
def test_brain_init(renderer_pyvistaqt, tmp_path, pixel_ratio, brain_gc):
"""Test initialization of the Brain instance."""
from mne.source_estimate import _BaseSourceEstimate
class FakeSTC(_BaseSourceEstimate):
def __init__(self):
pass
hemi = 'lh'
surf = 'inflated'
cortex = 'low_contrast'
title = 'test'
size = (300, 300)
kwargs = dict(subject_id=subject_id, subjects_dir=subjects_dir)
with pytest.raises(ValueError, match='"size" parameter must be'):
Brain(hemi=hemi, surf=surf, size=[1, 2, 3], **kwargs)
with pytest.raises(ValueError, match='.*hemi.*Allowed values.*'):
Brain(hemi='foo', surf=surf, **kwargs)
with pytest.raises(ValueError, match='.*view.*Allowed values.*'):
Brain(hemi='lh', surf=surf, views='foo', **kwargs)
with pytest.raises(TypeError, match='figure'):
Brain(hemi=hemi, surf=surf, figure='foo', **kwargs)
with pytest.raises(TypeError, match='interaction'):
Brain(hemi=hemi, surf=surf, interaction=0, **kwargs)
with pytest.raises(ValueError, match='interaction'):
Brain(hemi=hemi, surf=surf, interaction='foo', **kwargs)
with pytest.raises(FileNotFoundError, match=r'lh\.whatever'):
Brain(subject_id, 'lh', 'whatever')
with pytest.raises(ValueError, match='`surf` cannot be seghead'):
Brain(hemi='lh', surf='seghead', **kwargs)
with pytest.raises(ValueError, match='RGB argument'):
Brain('sample', cortex='badcolor')
Brain(subject_id, hemi=None, surf=None) # test no surfaces
renderer_pyvistaqt.backend._close_all()
brain = Brain(hemi=hemi, surf=surf, size=size, title=title,
cortex=cortex, units='m',
silhouette=dict(decimate=0.95), **kwargs)
assert 'data' not in brain._actors
with pytest.raises(TypeError, match='not supported'):
brain._check_stc(hemi='lh', array=FakeSTC(), vertices=None)
with pytest.raises(ValueError, match='add_data'):
brain.setup_time_viewer(time_viewer=True)
brain._hemi = 'foo' # for testing: hemis
with pytest.raises(ValueError, match='not be None'):
brain._check_hemi(hemi=None)
with pytest.raises(ValueError, match='Invalid.*hemi.*Allowed'):
brain._check_hemi(hemi='foo')
brain._hemi = hemi # end testing: hemis
with pytest.raises(ValueError, match='bool or positive'):
brain._to_borders(None, None, 'foo')
assert brain.interaction == 'trackball'
# add_data
stc = read_source_estimate(fname_stc)
fmin = stc.data.min()
fmax = stc.data.max()
for h in brain._hemis:
if h == 'lh':
hi = 0
else:
hi = 1
hemi_data = stc.data[:len(stc.vertices[hi]), 10]
hemi_vertices = stc.vertices[hi]
with pytest.raises(TypeError, match='scale_factor'):
brain.add_data(hemi_data, hemi=h, scale_factor='foo')
with pytest.raises(TypeError, match='vector_alpha'):
brain.add_data(hemi_data, hemi=h, vector_alpha='foo')
with pytest.raises(ValueError, match='thresh'):
brain.add_data(hemi_data, hemi=h, thresh=-1)
with pytest.raises(ValueError, match='remove_existing'):
brain.add_data(hemi_data, hemi=h, remove_existing=-1)
with pytest.raises(ValueError, match='time_label_size'):
brain.add_data(hemi_data, hemi=h, time_label_size=-1,
vertices=hemi_vertices)
with pytest.raises(ValueError, match='is positive'):
brain.add_data(hemi_data, hemi=h, smoothing_steps=-1,
vertices=hemi_vertices)
with pytest.raises(TypeError, match='int or NoneType'):
brain.add_data(hemi_data, hemi=h, smoothing_steps='foo')
with pytest.raises(ValueError, match='dimension mismatch'):
brain.add_data(array=np.array([0, 1, 2]), hemi=h,
vertices=hemi_vertices)
with pytest.raises(ValueError, match='vertices parameter must not be'):
brain.add_data(hemi_data, fmin=fmin, hemi=hemi,
fmax=fmax, vertices=None)
with pytest.raises(ValueError, match='has shape'):
brain.add_data(hemi_data[:, np.newaxis], fmin=fmin, hemi=hemi,
fmax=fmax, vertices=None, time=[0, 1])
brain.add_data(hemi_data, fmin=fmin, hemi=h, fmax=fmax,
colormap='hot', vertices=hemi_vertices,
smoothing_steps='nearest', colorbar=(0, 0), time=None)
with pytest.raises(ValueError, match='brain has no defined times'):
brain.set_time(0.)
assert brain.data['lh']['array'] is hemi_data
assert brain.views == ['lateral']
assert brain.hemis == ('lh',)
brain.add_data(hemi_data[:, np.newaxis], fmin=fmin, hemi=h, fmax=fmax,
colormap='hot', vertices=hemi_vertices,
smoothing_steps=1, initial_time=0., colorbar=False,
time=[0])
with pytest.raises(ValueError, match='the range of available times'):
brain.set_time(7.)
brain.set_time(0.)
brain.set_time_point(0) # should hit _safe_interp1d
with pytest.raises(ValueError, match='consistent with'):
brain.add_data(hemi_data[:, np.newaxis], fmin=fmin, hemi=h,
fmax=fmax, colormap='hot', vertices=hemi_vertices,
smoothing_steps='nearest', colorbar=False,
time=[1])
with pytest.raises(ValueError, match='different from'):
brain.add_data(hemi_data[:, np.newaxis][:, [0, 0]],
fmin=fmin, hemi=h, fmax=fmax, colormap='hot',
vertices=hemi_vertices)
with pytest.raises(ValueError, match='need shape'):
brain.add_data(hemi_data[:, np.newaxis], time=[0, 1],
fmin=fmin, hemi=h, fmax=fmax, colormap='hot',
vertices=hemi_vertices)
with pytest.raises(ValueError, match='If array has 3'):
brain.add_data(hemi_data[:, np.newaxis, np.newaxis],
fmin=fmin, hemi=h, fmax=fmax, colormap='hot',
vertices=hemi_vertices)
assert len(brain._actors['data']) == 4
brain.remove_data()
assert 'data' not in brain._actors
# add label
label = read_label(fname_label)
with pytest.raises(ValueError, match="not a filename"):
brain.add_label(0)
with pytest.raises(ValueError, match="does not exist"):
brain.add_label('foo', subdir='bar')
label.name = None # test unnamed label
brain.add_label(label, scalar_thresh=0., color="green")
assert isinstance(brain.labels[label.hemi], list)
overlays = brain._layered_meshes[label.hemi]._overlays
assert 'unnamed0' in overlays
assert np.allclose(overlays['unnamed0']._colormap[0],
[0, 0, 0, 0]) # first component is transparent
assert np.allclose(overlays['unnamed0']._colormap[1],
[0, 128, 0, 255]) # second is green
brain.remove_labels()
assert 'unnamed0' not in overlays
brain.add_label(fname_label)
brain.add_label('V1', borders=True)
brain.remove_labels()
brain.remove_labels()
# add foci
brain.add_foci([0], coords_as_verts=True,
hemi=hemi, color='blue')
# add head and skull
brain.add_head(color='red', alpha=0.1)
brain.remove_head()
brain.add_skull(outer=True, color='green', alpha=0.1)
brain.remove_skull()
# add volume labels
brain.add_volume_labels(
aseg='aseg', labels=('Brain-Stem', 'Left-Hippocampus',
'Left-Amygdala'))
brain.remove_volume_labels()
# add sensors
info = read_info(fname_raw_testing)
brain.add_sensors(info, trans=fname_trans)
for kind in ('meg', 'eeg', 'fnirs', 'ecog', 'seeg', 'dbs', 'helmet'):
brain.remove_sensors(kind)
brain.add_sensors(info, trans=fname_trans)
brain.remove_sensors()
info['chs'][0]['coord_frame'] = 99
with pytest.raises(RuntimeError, match='must be "meg", "head" or "mri"'):
brain.add_sensors(info, trans=fname_trans)
# add text
brain.add_text(x=0, y=0, text='foo')
with pytest.raises(ValueError, match='already exists'):
brain.add_text(x=0, y=0, text='foo')
brain.remove_text('foo')
brain.add_text(x=0, y=0, text='foo')
brain.remove_text()
brain.close()
# add annotation
annots = ['aparc', op.join(subjects_dir, 'fsaverage', 'label',
'lh.PALS_B12_Lobes.annot')]
borders = [True, 2]
alphas = [1, 0.5]
colors = [None, 'r']
brain = Brain(subject_id='fsaverage', hemi='both', size=size,
surf='inflated', subjects_dir=subjects_dir)
with pytest.raises(RuntimeError, match="both hemispheres"):
brain.add_annotation(annots[-1])
with pytest.raises(ValueError, match="does not exist"):
brain.add_annotation('foo')
brain.close()
brain = Brain(subject_id='fsaverage', hemi=hemi, size=size,
surf='inflated', subjects_dir=subjects_dir)
for a, b, p, color in zip(annots, borders, alphas, colors):
brain.add_annotation(a, b, p, color=color)
view_args = dict(roll=1, distance=500, focalpoint=(1e-5, 1e-5, 1e-5))
cam = brain._renderer.figure.plotter.camera
previous_roll = cam.GetRoll()
brain.show_view(**view_args)
assert_allclose(cam.GetFocalPoint(), view_args["focalpoint"])
assert_allclose(cam.GetDistance(), view_args["distance"])
assert_allclose(cam.GetRoll(), previous_roll + view_args["roll"])
del view_args
# image and screenshot
fname = op.join(str(tmp_path), 'test.png')
assert not op.isfile(fname)
brain.save_image(fname)
assert op.isfile(fname)
fp = np.array(
brain._renderer.figure.plotter.renderer.ComputeVisiblePropBounds())
fp = (fp[1::2] + fp[::2]) * 0.5
azimuth, elevation = 180., 90.
for view_args in (dict(azimuth=azimuth, elevation=elevation,
focalpoint='auto'),
dict(view='lateral', hemi='lh')):
brain.show_view(**view_args)
assert_allclose(brain._renderer.figure._azimuth, azimuth)
assert_allclose(brain._renderer.figure._elevation, elevation)
assert_allclose(cam.GetFocalPoint(), fp)
del view_args
img = brain.screenshot(mode='rgba')
want_size = np.array([size[0] * pixel_ratio, size[1] * pixel_ratio, 4])
assert_allclose(img.shape, want_size)
brain.close()
@testing.requires_testing_data
@pytest.mark.skipif(os.getenv('CI_OS_NAME', '') == 'osx',
reason='Unreliable/segfault on macOS CI')
@pytest.mark.parametrize('hemi', ('lh', 'rh'))
def test_single_hemi(hemi, renderer_interactive_pyvistaqt, brain_gc):
"""Test single hemi support."""
stc = read_source_estimate(fname_stc)
idx, order = (0, 1) if hemi == 'lh' else (1, -1)
stc = SourceEstimate(
getattr(stc, f'{hemi}_data'), [stc.vertices[idx], []][::order],
0, 1, 'sample')
brain = stc.plot(
subjects_dir=subjects_dir, hemi='both', size=300,
cortex='0.5') # single cortex string arg
brain.close()
# test skipping when len(vertices) == 0
stc.vertices[1 - idx] = np.array([])
brain = stc.plot(
subjects_dir=subjects_dir, hemi=hemi, size=300)
brain.close()
@testing.requires_testing_data
@pytest.mark.slowtest
def test_brain_save_movie(tmp_path, renderer, brain_gc):
"""Test saving a movie of a Brain instance."""
from imageio_ffmpeg import count_frames_and_secs
brain = _create_testing_brain(hemi='lh', time_viewer=False,
cortex=['r', 'b']) # custom binarized
filename = str(op.join(tmp_path, "brain_test.mov"))
for interactive_state in (False, True):
# for coverage, we set interactivity
if interactive_state:
brain._renderer.plotter.enable()
else:
brain._renderer.plotter.disable()
with pytest.raises(TypeError, match='unexpected keyword argument'):
brain.save_movie(filename, time_dilation=1, tmin=1, tmax=1.1,
bad_name='blah')
assert not op.isfile(filename)
tmin = 1
tmax = 5
duration = np.floor(tmax - tmin)
brain.save_movie(filename, time_dilation=1., tmin=tmin,
tmax=tmax, interpolation='nearest')
assert op.isfile(filename)
_, nsecs = count_frames_and_secs(filename)
assert_allclose(duration, nsecs, atol=0.2)
os.remove(filename)
brain.close()
_TINY_SIZE = (350, 300)
def tiny(tmp_path):
"""Create a tiny fake brain."""
# This is a minimal version of what we need for our viz-with-timeviewer
# support currently
subject = 'test'
(tmp_path / subject).mkdir()
subject_dir = tmp_path / subject
(subject_dir / 'surf').mkdir()
surf_dir = subject_dir / 'surf'
rng = np.random.RandomState(0)
rr = rng.randn(4, 3)
tris = np.array([[0, 1, 2], [2, 1, 3]])
curv = rng.randn(len(rr))
with open(surf_dir / 'lh.curv', 'wb') as fid:
fid.write(np.array([255, 255, 255], dtype=np.uint8))
fid.write(np.array([len(rr), 0, 1], dtype='>i4'))
fid.write(curv.astype('>f4'))
write_surface(surf_dir / 'lh.white', rr, tris)
write_surface(surf_dir / 'rh.white', rr, tris) # needed for vertex tc
vertices = [np.arange(len(rr)), []]
data = rng.randn(len(rr), 10)
stc = SourceEstimate(data, vertices, 0, 1, subject)
brain = stc.plot(subjects_dir=tmp_path, hemi='lh', surface='white',
size=_TINY_SIZE)
# in principle this should be sufficient:
#
# ratio = brain.mpl_canvas.canvas.window().devicePixelRatio()
#
# but in practice VTK can mess up sizes, so let's just calculate it.
sz = brain.plotter.size()
sz = (sz.width(), sz.height())
sz_ren = brain.plotter.renderer.GetSize()
ratio = np.median(np.array(sz_ren) / np.array(sz))
return brain, ratio
@pytest.mark.filterwarnings('ignore:.*constrained_layout not applied.*:')
def test_brain_screenshot(renderer_interactive_pyvistaqt, tmp_path, brain_gc):
"""Test time viewer screenshot."""
# XXX disable for sprint because it's too unreliable
if sys.platform == 'darwin' and os.getenv('GITHUB_ACTIONS', '') == 'true':
pytest.skip('Test is unreliable on GitHub Actions macOS')
tiny_brain, ratio = tiny(tmp_path)
img_nv = tiny_brain.screenshot(time_viewer=False)
want = (_TINY_SIZE[1] * ratio, _TINY_SIZE[0] * ratio, 3)
assert img_nv.shape == want
img_v = tiny_brain.screenshot(time_viewer=True)
assert img_v.shape[1:] == want[1:]
assert_allclose(img_v.shape[0], want[0] * 4 / 3, atol=3) # some slop
tiny_brain.close()
def _assert_brain_range(brain, rng):
__tracebackhide__ = True
assert brain._cmap_range == rng, 'brain._cmap_range == rng'
for hemi, layerer in brain._layered_meshes.items():
for key, mesh in layerer._overlays.items():
if key == 'curv':
continue
assert mesh._rng == rng, \
f'_layered_meshes[{repr(hemi)}][{repr(key)}]._rng != {rng}'
@testing.requires_testing_data
@pytest.mark.slowtest
def test_brain_time_viewer(renderer_interactive_pyvistaqt, pixel_ratio,
brain_gc):
"""Test time viewer primitives."""
with pytest.raises(ValueError, match="between 0 and 1"):
_create_testing_brain(hemi='lh', show_traces=-1.0)
with pytest.raises(ValueError, match="got unknown keys"):
_create_testing_brain(hemi='lh', surf='white', src='volume',
volume_options={'foo': 'bar'})
brain = _create_testing_brain(
hemi='both', show_traces=False,
brain_kwargs=dict(silhouette=dict(decimate=0.95))
)
# test sub routines when show_traces=False
brain._on_pick(None, None)
brain._configure_vertex_time_course()
brain._configure_label_time_course()
brain.setup_time_viewer() # for coverage
brain.callbacks["time"](value=0)
assert "renderer" not in brain.callbacks
brain.callbacks["orientation"](
value='lat',
update_widget=True
)
brain.callbacks["orientation"](
value='medial',
update_widget=True
)
brain.callbacks["time"](
value=0.0,
time_as_index=False,
)
# Need to process events for old Qt
brain.callbacks["smoothing"](value=1)
_assert_brain_range(brain, [0.1, 0.3])
from mne.utils import use_log_level
print('\nCallback fmin\n')
with use_log_level('debug'):
brain.callbacks["fmin"](value=12.0)
assert brain._data["fmin"] == 12.0
brain.callbacks["fmax"](value=4.0)
_assert_brain_range(brain, [4.0, 4.0])
brain.callbacks["fmid"](value=6.0)
_assert_brain_range(brain, [4.0, 6.0])
brain.callbacks["fmid"](value=4.0)
brain.callbacks["fplus"]()
brain.callbacks["fminus"]()
brain.callbacks["fmin"](value=12.0)
brain.callbacks["fmid"](value=4.0)
_assert_brain_range(brain, [4.0, 12.0])
brain._shift_time(op=lambda x, y: x + y)
brain._shift_time(op=lambda x, y: x - y)
brain._rotate_azimuth(15)
brain._rotate_elevation(15)
brain.toggle_interface()
brain.toggle_interface(value=False)
brain.callbacks["playback_speed"](value=0.1)
brain.toggle_playback()
brain.toggle_playback(value=False)
brain.apply_auto_scaling()
brain.restore_user_scaling()
brain.reset()
assert brain.help_canvas is not None
assert not brain.help_canvas.canvas.isVisible()
brain.help()
assert brain.help_canvas.canvas.isVisible()
# screenshot
# Need to turn the interface back on otherwise the window is too wide
# (it keeps the window size and expands the 3D area when the interface
# is toggled off)
brain.toggle_interface(value=True)
brain.show_view(azimuth=180., elevation=90.)
img = brain.screenshot(mode='rgb')
want_shape = np.array([300 * pixel_ratio, 300 * pixel_ratio, 3])
assert_allclose(img.shape, want_shape)
brain.close()
@testing.requires_testing_data
@pytest.mark.parametrize('hemi', [
'lh',
pytest.param('rh', marks=pytest.mark.slowtest),
pytest.param('split', marks=pytest.mark.slowtest),
pytest.param('both', marks=pytest.mark.slowtest),
])
@pytest.mark.parametrize('src', [
'surface',
pytest.param('vector', marks=pytest.mark.slowtest),
pytest.param('volume', marks=pytest.mark.slowtest),
pytest.param('mixed', marks=pytest.mark.slowtest),
])
@pytest.mark.slowtest
def test_brain_traces(renderer_interactive_pyvistaqt, hemi, src, tmp_path,
brain_gc):
"""Test brain traces."""
hemi_str = list()
if src in ('surface', 'vector', 'mixed'):
hemi_str.extend([hemi] if hemi in ('lh', 'rh') else ['lh', 'rh'])
if src in ('mixed', 'volume'):
hemi_str.extend(['vol'])
# label traces
brain = _create_testing_brain(
hemi=hemi, surf='white', src=src, show_traces='label',
volume_options=None, # for speed, don't upsample
n_time=5, initial_time=0,
)
if src == 'surface':
brain._data['src'] = None # test src=None
if src in ('surface', 'vector', 'mixed'):
assert brain.show_traces
assert brain.traces_mode == 'label'
brain.widgets["extract_mode"].set_value('max')
# test picking a cell at random
rng = np.random.RandomState(0)
for idx, current_hemi in enumerate(hemi_str):
if current_hemi == 'vol':
continue
current_mesh = brain._layered_meshes[current_hemi]._polydata
cell_id = rng.randint(0, current_mesh.n_cells)
test_picker = TstVTKPicker(
current_mesh, cell_id, current_hemi, brain)
assert len(brain.picked_patches[current_hemi]) == 0
brain._on_pick(test_picker, None)
assert len(brain.picked_patches[current_hemi]) == 1
for label_id in list(brain.picked_patches[current_hemi]):
label = brain._annotation_labels[current_hemi][label_id]
assert isinstance(label._line, Line2D)
brain.widgets["extract_mode"].set_value('mean')
brain.clear_glyphs()
assert len(brain.picked_patches[current_hemi]) == 0
brain._on_pick(test_picker, None) # picked and added
assert len(brain.picked_patches[current_hemi]) == 1
brain._on_pick(test_picker, None) # picked again so removed
assert len(brain.picked_patches[current_hemi]) == 0
# test switching from 'label' to 'vertex'
brain.widgets["annotation"].set_value('None')
brain.widgets["extract_mode"].set_value('max')
else: # volume
assert "annotation" not in brain.widgets
assert "extract_mode" not in brain.widgets
brain.close()
# test colormap
if src != 'vector':
brain = _create_testing_brain(
hemi=hemi, surf='white', src=src, show_traces=0.5,
initial_time=0,
volume_options=None, # for speed, don't upsample
n_time=1 if src == 'mixed' else 5, diverging=True,
add_data_kwargs=dict(colorbar_kwargs=dict(n_labels=3)),
)
# mne_analyze should be chosen
ctab = brain._data['ctable']
assert_array_equal(ctab[0], [0, 255, 255, 255]) # opaque cyan
assert_array_equal(ctab[-1], [255, 255, 0, 255]) # opaque yellow
assert_allclose(ctab[len(ctab) // 2], [128, 128, 128, 0], atol=3)
brain.close()
# vertex traces
brain = _create_testing_brain(
hemi=hemi, surf='white', src=src, show_traces=0.5,
initial_time=0,
volume_options=None, # for speed, don't upsample
n_time=1 if src == 'mixed' else 5,
add_data_kwargs=dict(colorbar_kwargs=dict(n_labels=3)),
)
assert brain.show_traces
assert brain.traces_mode == 'vertex'
assert hasattr(brain, "picked_points")
assert hasattr(brain, "_spheres")
assert brain._scalar_bar.GetNumberOfLabels() == 3
# add foci should work for volumes
if src == 'surface':
hemi = 'lh'
else:
hemi = 'vol'
brain.add_foci([[0, 0, 0]], hemi=hemi)
assert_array_equal(brain._data[hemi]['foci'], [[0, 0, 0]])
# test points picked by default
picked_points = brain.get_picked_points()
spheres = brain._spheres
for current_hemi in hemi_str:
assert len(picked_points[current_hemi]) == 1
n_spheres = len(hemi_str)
n_actors = n_spheres
if hemi == 'split' and src in ('mixed', 'volume'):
n_spheres += 1
assert len(spheres) == n_spheres
# test that there are actually enough actors
assert len(brain._actors['data']) == n_actors
# test switching from 'vertex' to 'label'
if src == 'surface':
brain.widgets["annotation"].set_value('aparc')
brain.widgets["annotation"].set_value('None')
# test removing points
brain.clear_glyphs()
assert len(spheres) == 0
for key in ('lh', 'rh', 'vol'):
assert len(picked_points[key]) == 0
# test picking a cell at random
rng = np.random.RandomState(0)
for idx, current_hemi in enumerate(hemi_str):
assert len(spheres) == 0
if current_hemi == 'vol':
current_mesh = brain._data['vol']['grid']
vertices = brain._data['vol']['vertices']
values = _cell_data(current_mesh)['values'][vertices]
cell_id = vertices[np.argmax(np.abs(values))]
else:
current_mesh = brain._layered_meshes[current_hemi]._polydata
cell_id = rng.randint(0, current_mesh.n_cells)
test_picker = TstVTKPicker(None, None, current_hemi, brain)
assert brain._on_pick(test_picker, None) is None
test_picker = TstVTKPicker(
current_mesh, cell_id, current_hemi, brain)
assert cell_id == test_picker.cell_id
assert test_picker.point_id is None
brain._on_pick(test_picker, None)
brain._on_pick(test_picker, None)
assert test_picker.point_id is not None
assert len(picked_points[current_hemi]) == 1
assert picked_points[current_hemi][0] == test_picker.point_id
assert len(spheres) > 0
sphere = spheres[-1]
vertex_id = sphere._vertex_id
assert vertex_id == test_picker.point_id
line = sphere._line
hemi_prefix = current_hemi[0].upper()
if current_hemi == 'vol':
assert hemi_prefix + ':' in line.get_label()
assert 'MNI' in line.get_label()
continue # the MNI conversion is more complex
hemi_int = 0 if current_hemi == 'lh' else 1
mni = vertex_to_mni(
vertices=vertex_id,
hemis=hemi_int,
subject=brain._subject_id,
subjects_dir=brain._subjects_dir
)
label = "{}:{} MNI: {}".format(
hemi_prefix, str(vertex_id).ljust(6),
', '.join('%5.1f' % m for m in mni))
assert line.get_label() == label
# remove the sphere by clicking in its vicinity
old_len = len(spheres)
test_picker._actors = sum((s._actors for s in spheres), [])
brain._on_pick(test_picker, None)
assert len(spheres) < old_len
screenshot = brain.screenshot()
screenshot_all = brain.screenshot(time_viewer=True)
assert screenshot.shape[0] < screenshot_all.shape[0]
# and the scraper for it (will close the instance)
# only test one condition to save time
if not (hemi == 'rh' and src == 'surface' and
check_version('sphinx_gallery')):
brain.close()
return
fnames = [str(tmp_path / f'temp_{ii}.png') for ii in range(2)]
block_vars = dict(image_path_iterator=iter(fnames),
example_globals=dict(brain=brain))
block = ('code', """
something
# brain.save_movie(time_dilation=1, framerate=1,
# interpolation='linear', time_viewer=True)
#
""", 1)
gallery_conf = dict(src_dir=str(tmp_path), compress_images=[])
scraper = _BrainScraper()
rst = scraper(block, block_vars, gallery_conf)
assert brain.plotter is None # closed
gif_0 = fnames[0][:-3] + 'gif'
for fname in (gif_0, fnames[1]):
assert op.basename(fname) in rst
assert op.isfile(fname)
img = image.imread(fname)
assert img.shape[1] == screenshot.shape[1] # same width
assert img.shape[0] > screenshot.shape[0] # larger height
assert img.shape[:2] == screenshot_all.shape[:2]
@testing.requires_testing_data
@pytest.mark.slowtest
def test_brain_linkviewer(renderer_interactive_pyvistaqt, brain_gc):
"""Test _LinkViewer primitives."""
brain1 = _create_testing_brain(hemi='lh', show_traces=False)
brain2 = _create_testing_brain(hemi='lh', show_traces='separate')
brain1._times = brain1._times * 2
with pytest.warns(RuntimeWarning, match='linking time'):
link_viewer = _LinkViewer(
[brain1, brain2],
time=True,
camera=False,
colorbar=False,
picking=False,
)
brain1.close()
brain_data = _create_testing_brain(hemi='split', show_traces='vertex')
link_viewer = _LinkViewer(
[brain2, brain_data],
time=True,
camera=True,
colorbar=True,
picking=True,
)
link_viewer.leader.set_time_point(0)
link_viewer.leader.mpl_canvas.time_func(0)
link_viewer.leader.callbacks["fmin"](0)
link_viewer.leader.callbacks["fmid"](0.5)
link_viewer.leader.callbacks["fmax"](1)
link_viewer.leader.set_playback_speed(0.1)
link_viewer.leader.toggle_playback()
brain2.close()
brain_data.close()
def test_calculate_lut():
"""Test brain's colormap functions."""
colormap = "coolwarm"
alpha = 1.0
fmin = 0.0
fmid = 0.5
fmax = 1.0
center = None
calculate_lut(colormap, alpha=alpha, fmin=fmin,
fmid=fmid, fmax=fmax, center=center)
center = 0.0
colormap = cm.get_cmap(colormap)
calculate_lut(colormap, alpha=alpha, fmin=fmin,
fmid=fmid, fmax=fmax, center=center)
cmap = cm.get_cmap(colormap)
zero_alpha = np.array([1., 1., 1., 0])
half_alpha = np.array([1., 1., 1., 0.5])
atol = 1.5 / 256.
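    # zero_alpha and half_alpha scale only the alpha channel, so the expected
    # colors keep their RGB values but become fully or half transparent.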
# fmin < fmid < fmax
lut = calculate_lut(colormap, alpha, 1, 2, 3)
assert lut.shape == (256, 4)
assert_allclose(lut[0], cmap(0) * zero_alpha, atol=atol)
assert_allclose(lut[127], cmap(0.5), atol=atol)
assert_allclose(lut[-1], cmap(1.), atol=atol)
# divergent
lut = calculate_lut(colormap, alpha, 0, 1, 2, 0)
assert lut.shape == (256, 4)
assert_allclose(lut[0], cmap(0), atol=atol)
assert_allclose(lut[63], cmap(0.25), atol=atol)
assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
assert_allclose(lut[192], cmap(0.75), atol=atol)
assert_allclose(lut[-1], cmap(1.), atol=atol)
# fmin == fmid == fmax
lut = calculate_lut(colormap, alpha, 1, 1, 1)
zero_alpha = np.array([1., 1., 1., 0])
assert lut.shape == (256, 4)
assert_allclose(lut[0], cmap(0) * zero_alpha, atol=atol)
assert_allclose(lut[1], cmap(0.5), atol=atol)
assert_allclose(lut[-1], cmap(1.), atol=atol)
# divergent
lut = calculate_lut(colormap, alpha, 0, 0, 0, 0)
assert lut.shape == (256, 4)
assert_allclose(lut[0], cmap(0), atol=atol)
assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
assert_allclose(lut[-1], cmap(1.), atol=atol)
# fmin == fmid < fmax
lut = calculate_lut(colormap, alpha, 1, 1, 2)
assert lut.shape == (256, 4)
assert_allclose(lut[0], cmap(0.) * zero_alpha, atol=atol)
assert_allclose(lut[1], cmap(0.5), atol=atol)
assert_allclose(lut[-1], cmap(1.), atol=atol)
# divergent
lut = calculate_lut(colormap, alpha, 1, 1, 2, 0)
assert lut.shape == (256, 4)
assert_allclose(lut[0], cmap(0), atol=atol)
assert_allclose(lut[62], cmap(0.245), atol=atol)
assert_allclose(lut[64], cmap(0.5) * zero_alpha, atol=atol)
assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
assert_allclose(lut[191], cmap(0.5) * zero_alpha, atol=atol)
assert_allclose(lut[193], cmap(0.755), atol=atol)
assert_allclose(lut[-1], cmap(1.), atol=atol)
lut = calculate_lut(colormap, alpha, 0, 0, 1, 0)
assert lut.shape == (256, 4)
assert_allclose(lut[0], cmap(0), atol=atol)
assert_allclose(lut[126], cmap(0.25), atol=atol)
assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
assert_allclose(lut[129], cmap(0.75), atol=atol)
assert_allclose(lut[-1], cmap(1.), atol=atol)
# fmin < fmid == fmax
lut = calculate_lut(colormap, alpha, 1, 2, 2)
assert lut.shape == (256, 4)
assert_allclose(lut[0], cmap(0) * zero_alpha, atol=atol)
assert_allclose(lut[-2], cmap(0.5), atol=atol)
assert_allclose(lut[-1], cmap(1.), atol=atol)
# divergent
lut = calculate_lut(colormap, alpha, 1, 2, 2, 0)
assert lut.shape == (256, 4)
assert_allclose(lut[0], cmap(0), atol=atol)
assert_allclose(lut[1], cmap(0.25), atol=2 * atol)
assert_allclose(lut[32], cmap(0.375) * half_alpha, atol=atol)
assert_allclose(lut[64], cmap(0.5) * zero_alpha, atol=atol)
assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
assert_allclose(lut[191], cmap(0.5) * zero_alpha, atol=atol)
assert_allclose(lut[223], cmap(0.625) * half_alpha, atol=atol)
assert_allclose(lut[-2], cmap(0.7475), atol=2 * atol)
assert_allclose(lut[-1], cmap(1.), atol=2 * atol)
lut = calculate_lut(colormap, alpha, 0, 1, 1, 0)
assert lut.shape == (256, 4)
assert_allclose(lut[0], cmap(0), atol=atol)
assert_allclose(lut[1], cmap(0.25), atol=2 * atol)
assert_allclose(lut[64], cmap(0.375) * half_alpha, atol=atol)
assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
assert_allclose(lut[191], cmap(0.625) * half_alpha, atol=atol)
assert_allclose(lut[-2], cmap(0.75), atol=2 * atol)
assert_allclose(lut[-1], cmap(1.), atol=atol)
with pytest.raises(ValueError, match=r'.*fmin \(1\) <= fmid \(0\) <= fma'):
calculate_lut(colormap, alpha, 1, 0, 2)
def _create_testing_brain(hemi, surf='inflated', src='surface',
size=300, n_time=5, diverging=False, **kwargs):
assert src in ('surface', 'vector', 'mixed', 'volume')
meth = 'plot'
if src in ('surface', 'mixed'):
sample_src = read_source_spaces(src_fname)
klass = MixedSourceEstimate if src == 'mixed' else SourceEstimate
if src == 'vector':
fwd = read_forward_solution(fname_fwd)
fwd = pick_types_forward(fwd, meg=True, eeg=False)
evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0]
noise_cov = read_cov(fname_cov)
free = make_inverse_operator(
evoked.info, fwd, noise_cov, loose=1.)
stc = apply_inverse(evoked, free, pick_ori='vector')
return stc.plot(
subject=subject_id, hemi=hemi, size=size,
subjects_dir=subjects_dir, colormap='auto',
**kwargs)
if src in ('volume', 'mixed'):
vol_src = setup_volume_source_space(
subject_id, 7., mri='aseg.mgz',
volume_label='Left-Cerebellum-Cortex',
subjects_dir=subjects_dir, add_interpolator=False)
assert len(vol_src) == 1
assert vol_src[0]['nuse'] == 150
if src == 'mixed':
sample_src = sample_src + vol_src
else:
sample_src = vol_src
klass = VolSourceEstimate
meth = 'plot_3d'
assert sample_src.kind == src
# dense version
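    # Fill roughly 5% of the (n_verts * n_time) samples with random values so
    # the estimate is sparse but non-trivial; duplicated random indices are
    # harmless here.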
rng = np.random.RandomState(0)
vertices = [s['vertno'] for s in sample_src]
n_verts = sum(len(v) for v in vertices)
stc_data = np.zeros((n_verts * n_time))
stc_size = stc_data.size
stc_data[(rng.rand(stc_size // 20) * stc_size).astype(int)] = \
rng.rand(stc_data.size // 20)
stc_data.shape = (n_verts, n_time)
if diverging:
stc_data -= 0.5
stc = klass(stc_data, vertices, 1, 1)
clim = dict(kind='value', lims=[0.1, 0.2, 0.3])
if diverging:
clim['pos_lims'] = clim.pop('lims')
brain_data = getattr(stc, meth)(
subject=subject_id, hemi=hemi, surface=surf, size=size,
subjects_dir=subjects_dir, colormap='auto',
clim=clim, src=sample_src,
**kwargs)
return brain_data
def test_foci_mapping(tmp_path, renderer_interactive_pyvistaqt):
"""Test mapping foci to the surface."""
tiny_brain, _ = tiny(tmp_path)
foci_coords = tiny_brain.geo['lh'].coords[:2] + 0.01
tiny_brain.add_foci(foci_coords, map_surface='white')
assert_array_equal(tiny_brain._data['lh']['foci'],
tiny_brain.geo['lh'].coords[:2])
|
wmvanvliet/mne-python
|
mne/viz/_brain/tests/test_brain.py
|
Python
|
bsd-3-clause
| 40,665
|
[
"VTK"
] |
43a91c4e27a00dc894ef39d21e696c5a9494e1296479bbfb84922419b398b22a
|
#!/usr/bin/env python3
########################################################################
# Solves problem 53 from projectEuler.net.
# Determines how many, not necessarily distinct, values of C(n, r)
# (combinations of n items taken r at a time) for 1 <= n <= 100 are
# greater than one million.
# Copyright (C) 2010 Santiago Alessandri
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# You can contact me at san.lt.ss@gmail.com or salessandri@nasel.com
# Visit my wiki at http://san-ss.wikidot.com
########################################################################
from CommonFunctions import factorial
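# A minimal sketch of the same count using the standard library (assumes
# Python 3.8+ for math.comb; the loop below keeps the original
# CommonFunctions.factorial formulation):
#   import math
#   sum(1 for n in range(1, 101) for r in range(1, n + 1)
#       if math.comb(n, r) > 10**6)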
if __name__ == '__main__':
result = sum(1 for n in range(1, 101) for r in range(1, n+1) if
factorial(n) // factorial(r) // factorial(n-r) > 1000000)
print("The result is:", result)
|
sanSS/programming-contests
|
project-euler/problem053.py
|
Python
|
gpl-3.0
| 1,427
|
[
"VisIt"
] |
7c4679c6538e3396dc6d22a05e02d90244751bafcd03c589a367a4c0a3305d3e
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from flask import Blueprint
from flask_appbuilder import BaseView as AppBuilderBaseView, expose
from airflow.executors.base_executor import BaseExecutor
# Importing base classes that we need to derive
from airflow.hooks.base_hook import BaseHook
from airflow.models.baseoperator import BaseOperator
# This is the class you derive to create a plugin
from airflow.plugins_manager import AirflowPlugin
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from tests.test_utils.mock_operators import (
AirflowLink, AirflowLink2, CustomBaseIndexOpLink, CustomOpLink, GithubLink, GoogleLink,
)
# Will show up under airflow.hooks.test_plugin.PluginHook
class PluginHook(BaseHook):
pass
# Will show up under airflow.operators.test_plugin.PluginOperator
class PluginOperator(BaseOperator):
pass
# Will show up under airflow.sensors.test_plugin.PluginSensorOperator
class PluginSensorOperator(BaseSensorOperator):
pass
# Will show up under airflow.executors.test_plugin.PluginExecutor
class PluginExecutor(BaseExecutor):
pass
# Will show up under airflow.macros.test_plugin.plugin_macro
def plugin_macro():
pass
# Creating a flask appbuilder BaseView
class PluginTestAppBuilderBaseView(AppBuilderBaseView):
default_view = "test"
@expose("/")
def test(self):
return self.render_template("test_plugin/test.html", content="Hello galaxy!")
v_appbuilder_view = PluginTestAppBuilderBaseView()
v_appbuilder_package = {"name": "Test View",
"category": "Test Plugin",
"view": v_appbuilder_view}
# Creating a flask appbuilder Menu Item
appbuilder_mitem = {"name": "Google",
"category": "Search",
"category_icon": "fa-th",
"href": "https://www.google.com"}
# Creating a flask blueprint to integrate the templates and static folder
bp = Blueprint(
"test_plugin", __name__,
template_folder='templates', # registers airflow/plugins/templates as a Jinja template folder
static_folder='static',
static_url_path='/static/test_plugin')
# Defining the plugin class
class AirflowTestPlugin(AirflowPlugin):
name = "test_plugin"
operators = [PluginOperator]
sensors = [PluginSensorOperator]
hooks = [PluginHook]
executors = [PluginExecutor]
macros = [plugin_macro]
flask_blueprints = [bp]
appbuilder_views = [v_appbuilder_package]
appbuilder_menu_items = [appbuilder_mitem]
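    # Links listed here are intended to show up on every operator; the
    # operator_extra_links below are attached only to the operators that each
    # link class targets.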
global_operator_extra_links = [
AirflowLink(),
GithubLink(),
]
operator_extra_links = [
GoogleLink(), AirflowLink2(), CustomOpLink(), CustomBaseIndexOpLink(1)
]
class MockPluginA(AirflowPlugin):
name = 'plugin-a'
class MockPluginB(AirflowPlugin):
name = 'plugin-b'
class MockPluginC(AirflowPlugin):
name = 'plugin-c'
|
spektom/incubator-airflow
|
tests/plugins/test_plugin.py
|
Python
|
apache-2.0
| 3,646
|
[
"Galaxy"
] |
d78ae5da109a2892c4567349f26f2fc7a41ac8dbef0df5c9033b5969acb47f28
|
#
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import espressomd._system as es
import espressomd
from espressomd import thermostat
from espressomd import code_info
from espressomd import integrate
from espressomd import visualization
import numpy
from matplotlib import pyplot
from threading import Thread
print("""
=======================================================
= lj_liquid.py =
=======================================================
Program Information:""")
print(code_info.features())
dev = "cpu"
# System parameters
#############################################################
# 10 000 Particles
box_l = 10.7437
density = 0.7
# Interaction parameters (repulsive Lennard Jones)
#############################################################
lj_eps = 1.0
lj_sig = 1.0
lj_cut = 1.12246
lj_cap = 20
# Integration parameters
#############################################################
system = espressomd.System()
system.time_step = 0.001
system.skin = 0.4
#es._espressoHandle.Tcl_Eval('thermostat langevin 1.0 1.0')
system.thermostat.set_langevin(kT=1.0, gamma=1.0)
# warmup integration (with capped LJ potential)
warm_steps = 100
warm_n_times = 30
# do the warmup until the particles have at least the distance min_dist
min_dist = 0.9
# integration
int_steps = 10
int_n_times = 50000
#############################################################
# Setup System #
#############################################################
# Interaction setup
#############################################################
system.box_l = [box_l, box_l, box_l]
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig,
cutoff=lj_cut, shift="auto")
system.non_bonded_inter.set_force_cap(lj_cap)
print("LJ-parameters:")
print(system.non_bonded_inter[0, 0].lennard_jones.get_params())
# Particle setup
#############################################################
volume = box_l * box_l * box_l
n_part = int(volume * density)
for i in range(n_part):
system.part.add(id=i, pos=numpy.random.random(3) * system.box_l)
system.analysis.distto(0)
print("Simulate {} particles in a cubic simulation box {} at density {}."
.format(n_part, box_l, density).strip())
print("Interactions:\n")
act_min_dist = system.analysis.mindist()
print("Start with minimal distance {}".format(act_min_dist))
system.max_num_cells = 2744
# Switch between OpenGL/Mayavi
visualizer = visualization.mayaviLive(system)
#visualizer = visualization.openGLLive(system)
#############################################################
# Warmup Integration #
#############################################################
# open Observable file
obs_file = open("pylj_liquid.obs", "w")
obs_file.write("# Time\tE_tot\tE_kin\tE_pot\n")
print("""
Start warmup integration:
At maximum {} times {} steps
Stop if minimal distance is larger than {}
""".strip().format(warm_n_times, warm_steps, min_dist))
# set LJ cap
lj_cap = 20
system.non_bonded_inter.set_force_cap(lj_cap)
print(system.non_bonded_inter[0, 0].lennard_jones)
# Warmup Integration Loop
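# Particles were placed at random, so some pairs may start nearly on top of
# each other. The capped LJ force keeps those overlaps from producing huge
# forces, and the cap is raised each iteration until min_dist is reached.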
i = 0
while (i < warm_n_times and act_min_dist < min_dist):
integrate.integrate(warm_steps)
# Warmup criterion
act_min_dist = system.analysis.mindist()
# print("\rrun %d at time=%f (LJ cap=%f) min dist = %f\r" % (i,system.time,lj_cap,act_min_dist), end=' ')
i += 1
# Increase LJ cap
lj_cap = lj_cap + 10
system.non_bonded_inter.set_force_cap(lj_cap)
visualizer.update()
# Just to see what else we may get from the c code
print("""
ro variables:
cell_grid {0.cell_grid}
cell_size {0.cell_size}
local_box_l {0.local_box_l}
max_cut {0.max_cut}
max_part {0.max_part}
max_range {0.max_range}
max_skin {0.max_skin}
n_nodes {0.n_nodes}
n_part {0.n_part}
n_part_types {0.n_part_types}
periodicity {0.periodicity}
transfer_rate {0.transfer_rate}
verlet_reuse {0.verlet_reuse}
""".format(system))
# write parameter file
set_file = open("pylj_liquid.set", "w")
set_file.write("box_l %s\ntime_step %s\nskin %s\n" %
(box_l, system.time_step, system.skin))
#############################################################
# Integration #
#############################################################
print("\nStart integration: run %d times %d steps" % (int_n_times, int_steps))
# remove force capping
lj_cap = 0
system.non_bonded_inter.set_force_cap(lj_cap)
print(system.non_bonded_inter[0, 0].lennard_jones)
# print initial energies
energies = system.analysis.energy()
print(energies)
plot, = pyplot.plot([0],[energies['total']], label="total")
pyplot.xlabel("Time")
pyplot.ylabel("Energy")
pyplot.legend()
pyplot.show(block=False)
j = 0
def main_loop():
global energies
print("run %d at time=%f " % (i, system.time))
integrate.integrate(int_steps)
visualizer.update()
energies = system.analysis.energy()
print(energies)
plot.set_xdata(numpy.append(plot.get_xdata(), system.time))
plot.set_ydata(numpy.append(plot.get_ydata(), energies['total']))
obs_file.write('{ time %s } %s\n' % (system.time, energies))
linear_momentum = system.analysis.analyze_linear_momentum()
print(linear_momentum)
def main_thread():
for i in range(0, int_n_times):
main_loop()
last_plotted = 0
def update_plot():
global last_plotted
current_time = plot.get_xdata()[-1]
if last_plotted == current_time:
return
last_plotted = current_time
pyplot.xlim(0, plot.get_xdata()[-1])
pyplot.ylim(plot.get_ydata().min(), plot.get_ydata().max())
pyplot.draw()
t = Thread(target=main_thread)
t.daemon = True
t.start()
visualizer.registerCallback(update_plot, interval=2000)
visualizer.start()
# write end configuration
end_file = open("pylj_liquid.end", "w")
end_file.write("{ time %f } \n { box_l %f }\n" % (system.time, box_l))
end_file.write("{ particles {id pos type} }")
for i in range(n_part):
end_file.write("%s\n" % system.part[i].pos)
# id & type not working yet
obs_file.close()
set_file.close()
end_file.close()
# terminate program
print("\nFinished.")
|
lahnerml/espresso
|
samples/python/visualization.py
|
Python
|
gpl-3.0
| 6,984
|
[
"ESPResSo",
"Mayavi"
] |
f93843bf4f65f959b71ed7cfe1071e5203b3e095bf6f99cc9c3a573d84bf27c0
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Run writer unit tests for cclib."""
import sys
import unittest
sys.path.append('io')
from testccio import *
from testfilewriter import *
from testxyzwriter import *
from testcjsonreader import *
from testcjsonwriter import *
if __name__ == "__main__":
unittest.main()
|
Schamnad/cclib
|
test/test_io.py
|
Python
|
bsd-3-clause
| 478
|
[
"cclib"
] |
1370dc9608be094da3e35da30f85cfefc652bb91303e4263d8124cd0ee4a10f3
|
# DIALS_ENABLE_COMMAND_LINE_COMPLETION
"""
This program is used to integrate the reflections on the diffraction images. It
is called with an experiment list outputted from dials.index or dials.refine and
a corresponding set of strong spots from which a profile model is calculated.
The program will output a set of integrated reflections and an experiment list
with additional profile model data. The data can be reintegrated using the same
profile model by inputting this integrated.expt file back into
dials.integrate.
Examples::
dials.integrate models.expt refined.refl
dials.integrate models.expt refined.refl output.reflections=integrated.refl
dials.integrate models.expt refined.refl profile.fitting=False
dials.integrate models.expt refined.refl background.algorithm=glm
"""
from __future__ import annotations
import logging
import math
import sys
from orderedset import OrderedSet
from dxtbx.model.experiment_list import Experiment, ExperimentList
from libtbx.phil import parse
import dials.util.log
from dials.algorithms.integration.integrator import create_integrator
from dials.algorithms.profile_model.factory import ProfileModelFactory
from dials.array_family import flex
from dials.util import show_mail_handle_errors
from dials.util.command_line import heading
from dials.util.options import ArgumentParser, reflections_and_experiments_from_files
from dials.util.slice import slice_crystal
from dials.util.version import dials_version
logger = logging.getLogger("dials.command_line.integrate")
# Create the phil scope
phil_scope = parse(
"""
output {
experiments = 'integrated.expt'
.type = str
.help = "The experiments output filename"
output_unintegrated_reflections = False
.type = bool
.expert_level = 2
.help = "Include unintegrated reflections in output file"
reflections = 'integrated.refl'
.type = str
.help = "The integrated output filename"
phil = 'dials.integrate.phil'
.type = str
.help = "The output phil file"
log = 'dials.integrate.log'
.type = str
.help = "The log filename"
report = None
.type = str
.help = "The integration report filename (*.xml or *.json)"
include_bad_reference = False
.type = bool
.help = "Include bad reference data including unindexed spots,"
"and reflections whose predictions are messed up in the"
"reflection table output. Reflections will have the"
"'bad_reference' flag set."
}
scan_range = None
.type = ints(size=2)
.help = "Explicitly specify the images to be processed. Only applicable"
"when experiment list contains a single imageset."
.multiple = True
create_profile_model = True
.type = bool
.help = "Create the profile model"
sampling
.expert_level = 1
{
reflections_per_degree = 50
.help = "The number of predicted reflections per degree of the sequence "
"to integrate."
.type = float(value_min=0.)
minimum_sample_size = 1000
.help = "cutoff that determines whether subsetting of the input "
"prediction list is done"
.type = int
maximum_sample_size = None
.help = "The maximum number of predictions to integrate."
"Overrides reflections_per_degree if that produces a"
"larger sample size."
.type = int(value_min=1)
integrate_all_reflections = True
.help = "Override reflections_per_degree and integrate all predicted"
"reflections."
.type = bool
random_seed = 0
.help = "Random seed for sampling"
.type = int
}
exclude_images = None
.type = ints
.help = "Exclude images from integration (e.g. 1,2,3,4,5 etc)"
include scope dials.algorithms.integration.integrator.phil_scope
include scope dials.algorithms.profile_model.factory.phil_scope
include scope dials.algorithms.spot_prediction.reflection_predictor.phil_scope
include scope dials.algorithms.integration.stills_significance_filter.phil_scope
include scope dials.algorithms.integration.kapton_correction.absorption_phil_scope
""",
process_includes=True,
)
# Local overrides for dials.integrate
phil_overrides = parse(
"""
integration {
mp {
nproc = Auto
}
}
"""
)
working_phil = phil_scope.fetch(sources=[phil_overrides])
def process_reference(reference):
"""
Remove bad reflections from the reference.
Remove unindexed, bad_for_refinement, bad miller index.
Args:
reference: A reflection table.
Returns:
(tuple): tuple containing:
reference: A reduction of the input reference reflection table.
rubbish: A reflection table containing the reflections filtered out of
the input table.
Raises:
ValueError: If no indexed spots, bad id, unmatched panel.
"""
if reference is None:
return None, None
assert "miller_index" in reference
assert "id" in reference
logger.info(
"Processing reference reflections\n read %d strong spots", reference.size()
)
mask = reference.get_flags(reference.flags.indexed)
rubbish = reference.select(~mask)
n_unindexed = mask.count(False)
if n_unindexed > 0:
reference.del_selected(~mask)
logger.info(" removing %d unindexed reflections", n_unindexed)
if reference.size() == 0:
raise ValueError(
"Invalid input for reference reflections. No indexed spots found."
)
mask = reference.get_flags(reference.flags.bad_for_refinement, all=False)
n_masked = mask.count(True)
if n_masked:
rubbish.extend(reference.select(mask))
reference.del_selected(mask)
logger.info(" removing %d reflections marked as bad for refinement", n_masked)
mask = reference["miller_index"] == (0, 0, 0)
n_masked = mask.count(True)
if n_masked > 0:
rubbish.extend(reference.select(mask))
reference.del_selected(mask)
logger.info(" removing %d reflections with hkl (0,0,0)", n_masked)
mask = reference["id"] < 0
n_masked = mask.count(True)
if n_masked > 0:
raise ValueError(
"""
Invalid input for reference reflections.
%d reference spots have an invalid experiment id
"""
% n_masked
)
if (reference["panel"] == reference["shoebox"].panels()).count(False) > 0:
raise ValueError(
'reflection table "panel" column does not match "shoebox" panel'
)
logger.info(" using %d indexed reflections", reference.size())
logger.info(" found %d junk reflections", rubbish.size())
return reference, rubbish
def filter_reference_pixels(reference, experiments):
"""
Set any pixel closer to other reflections to background.
Args:
reference: A reflection table
experiments: The experiment list
Returns:
The input reflection table with modified shoeboxes.
"""
modified_count = 0
for experiment, indices in reference.iterate_experiments_and_indices(experiments):
subset = reference.select(indices)
modified = subset["shoebox"].mask_neighbouring(
subset["miller_index"],
experiment.beam,
experiment.detector,
experiment.goniometer,
experiment.scan,
experiment.crystal,
)
modified_count += modified.count(True)
reference.set_selected(indices, subset)
logger.info(" masked neighbouring pixels in %d shoeboxes", modified_count)
return reference
def sample_predictions(experiments, predicted, params):
"""
Select a random sample of the predicted reflections to integrate.
Args:
experiments: The experiment list
predicted: A reflection table of predicted reflections
params: The integration phil parameters
Returns:
A subset of the original predicted table.
"""
if params.sampling.random_seed:
flex.set_random_seed(params.sampling.random_seed)
nref_per_degree = params.sampling.reflections_per_degree
min_sample_size = params.sampling.minimum_sample_size
max_sample_size = params.sampling.maximum_sample_size
# this code is very similar to David's code in algorithms/refinement/reflection_manager.py!
working_isel = flex.size_t()
for iexp, exp in enumerate(experiments):
sel = predicted["id"] == iexp
isel = sel.iselection()
nrefs = sample_size = len(isel)
# set sample size according to nref_per_degree (per experiment)
if exp.scan and nref_per_degree:
sequence_range_rad = exp.scan.get_oscillation_range(deg=False)
width = math.degrees(abs(sequence_range_rad[1] - sequence_range_rad[0]))
sample_size = int(nref_per_degree * width)
else:
sequence_range_rad = None
# adjust sample size if below the chosen limit
sample_size = max(sample_size, min_sample_size)
# set maximum sample size if requested
if max_sample_size:
sample_size = min(sample_size, max_sample_size)
# determine subset and collect indices
if sample_size < nrefs:
isel = isel.select(flex.random_selection(nrefs, sample_size))
working_isel.extend(isel)
# create subset
return predicted.select(working_isel)
def split_for_scan_range(experiments, reference, scan_range):
"""Update experiments when scan range is set.
Args:
experiments: An experiment list
reference: A reflection table of reference reflections
scan_range (tuple): Range of scan images to be processed
Returns:
experiments: A new experiment list with the requested scan ranges
reference: A reflection table with data from the scan ranges
Raises:
ValueError: If bad input for scan range.
"""
    # Only do anything if the scan range is set
if scan_range is not None and len(scan_range) > 0:
# Ensure that all experiments have the same imageset and scan
iset = [e.imageset for e in experiments]
scan = [e.scan for e in experiments]
assert all(x == iset[0] for x in iset)
assert all(x == scan[0] for x in scan)
# Get the imageset and scan
iset = experiments[0].imageset
scan = experiments[0].scan
# Get the array range
if scan is not None:
frames_start, frames_end = scan.get_array_range()
assert scan.get_num_images() == len(iset)
else:
frames_start, frames_end = (0, len(iset))
# Create the new lists
new_experiments = ExperimentList()
new_reference_all = reference.split_by_experiment_id()
new_reference = flex.reflection_table()
for i in range(len(new_reference_all) - len(experiments)):
new_reference_all.append(flex.reflection_table())
assert len(new_reference_all) == len(experiments)
# Loop through all the scan ranges and create a new experiment list with
# the requested scan ranges.
for scan_start, scan_end in scan_range:
# Validate the requested scan range
if scan_end == scan_start:
raise ValueError(
"Scan range end must be higher than start; pass {},{} for single image".format(
scan_start, scan_start + 1
)
)
if scan_end < scan_start:
raise ValueError("Scan range must be in ascending order")
elif scan_start < frames_start or scan_end > frames_end:
raise ValueError(
"Scan range must be within image range {}..{}".format(
frames_start, frames_end
)
)
assert scan_end > scan_start
assert scan_start >= frames_start
assert scan_end <= frames_end
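            # scan_start/scan_end are given in the scan's image numbering;
            # subtracting frames_start converts them to 0-based indices into
            # the imageset.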
index_start = scan_start - frames_start
index_end = index_start + (scan_end - scan_start)
assert index_start < index_end
assert index_start >= 0
assert index_end <= len(iset)
new_iset = iset[index_start:index_end]
if scan is None:
new_scan = None
else:
new_scan = scan[index_start:index_end]
for i, e1 in enumerate(experiments):
e2 = Experiment()
e2.beam = e1.beam
e2.detector = e1.detector
e2.goniometer = e1.goniometer
e2.crystal = slice_crystal(e1.crystal, (index_start, index_end))
e2.profile = e1.profile
e2.imageset = new_iset
e2.scan = new_scan
new_reference_all[i]["id"] = flex.int(
len(new_reference_all[i]), len(new_experiments)
)
new_reference.extend(new_reference_all[i])
new_experiments.append(e2)
experiments = new_experiments
reference = new_reference
# Print some information
logger.info("Modified experiment list to integrate over requested scan range")
for scan_start, scan_end in scan_range:
logger.info(" scan_range = %d -> %d", scan_start, scan_end)
# Return the experiments
return experiments, reference
def run_integration(params, experiments, reference=None):
"""Perform the integration.
Returns:
experiments: The integrated experiments
reflections: The integrated reflections
report(optional): An integration report.
Raises:
ValueError: For a number of bad inputs
RuntimeError: If the profile model creation fails
"""
predicted = None
rubbish = None
for abs_params in params.absorption_correction:
if abs_params.apply:
if not (
params.integration.debug.output
and not params.integration.debug.separate_files
):
raise ValueError(
"Shoeboxes must be saved to integration intermediates to apply an absorption correction. "
+ "Set integration.debug.output=True, integration.debug.separate_files=False and "
+ "integration.debug.delete_shoeboxes=True to temporarily store shoeboxes."
)
# Print if we're using a mask
for i, exp in enumerate(experiments):
mask = exp.imageset.external_lookup.mask
if mask.filename is not None:
if mask.data:
logger.info("Using external mask: %s", mask.filename)
for tile in mask.data:
logger.info(" Mask has %d pixels masked", tile.data().count(False))
# Print the experimental models
for i, exp in enumerate(experiments):
summary = "\n".join(
(
"",
"=" * 80,
"",
"Experiments",
"",
"Models for experiment %d" % i,
"",
str(exp.beam),
str(exp.detector),
)
)
if exp.goniometer:
summary += str(exp.goniometer) + "\n"
if exp.scan:
summary += str(exp.scan) + "\n"
summary += str(exp.crystal)
logger.info(summary)
logger.info("\n".join(("", "=" * 80, "")))
logger.info(heading("Initialising"))
# Load the data
if reference:
reference, rubbish = process_reference(reference)
# Check pixels don't belong to neighbours
if exp.goniometer is not None and exp.scan is not None:
reference = filter_reference_pixels(reference, experiments)
# Modify experiment list if scan range is set.
experiments, reference = split_for_scan_range(
experiments, reference, params.scan_range
)
# Modify experiment list if exclude images is set
if params.exclude_images:
for experiment in experiments:
for index in params.exclude_images:
experiment.imageset.mark_for_rejection(index, True)
# Predict the reflections
logger.info("\n".join(("", "=" * 80, "")))
logger.info(heading("Predicting reflections"))
predicted = flex.reflection_table.from_predictions_multi(
experiments,
dmin=params.prediction.d_min,
dmax=params.prediction.d_max,
margin=params.prediction.margin,
force_static=params.prediction.force_static,
padding=params.prediction.padding,
)
isets = OrderedSet(e.imageset for e in experiments)
predicted["imageset_id"] = flex.int(predicted.size(), 0)
if len(isets) > 1:
for e in experiments:
iset_id = isets.index(e.imageset)
for id_ in predicted.experiment_identifiers().keys():
identifier = predicted.experiment_identifiers()[id_]
if identifier == e.identifier:
sel = predicted["id"] == id_
predicted["imageset_id"].set_selected(sel, iset_id)
break
# Match reference with predicted
if reference:
matched, reference, unmatched = predicted.match_with_reference(reference)
assert len(matched) == len(predicted)
assert matched.count(True) <= len(reference)
if matched.count(True) == 0:
raise ValueError(
"""
Invalid input for reference reflections.
Zero reference spots were matched to predictions
"""
)
elif unmatched:
msg = (
"Warning: %d reference spots were not matched to predictions"
% unmatched.size()
)
border = "\n".join(("", "*" * 80, ""))
logger.info("".join((border, msg, border)))
rubbish.extend(unmatched)
if len(experiments) > 1:
# filter out any experiments without matched reference reflections
# f_: filtered
f_reference = flex.reflection_table()
f_predicted = flex.reflection_table()
f_rubbish = flex.reflection_table()
f_experiments = ExperimentList()
good_expt_count = 0
def refl_extend(src, dest, eid):
old_id = eid
new_id = good_expt_count
tmp = src.select(src["id"] == old_id)
tmp["id"] = flex.int(len(tmp), good_expt_count)
if old_id in tmp.experiment_identifiers():
identifier = tmp.experiment_identifiers()[old_id]
del tmp.experiment_identifiers()[old_id]
tmp.experiment_identifiers()[new_id] = identifier
dest.extend(tmp)
for expt_id, experiment in enumerate(experiments):
if len(reference.select(reference["id"] == expt_id)) != 0:
refl_extend(reference, f_reference, expt_id)
refl_extend(predicted, f_predicted, expt_id)
refl_extend(rubbish, f_rubbish, expt_id)
f_experiments.append(experiment)
good_expt_count += 1
else:
logger.info(
"Removing experiment %d: no reference reflections matched to predictions",
expt_id,
)
reference = f_reference
predicted = f_predicted
experiments = f_experiments
rubbish = f_rubbish
# Select a random sample of the predicted reflections
if not params.sampling.integrate_all_reflections:
predicted = sample_predictions(experiments, predicted, params)
# Compute the profile model - either load existing or compute
# can raise RuntimeError
experiments = ProfileModelFactory.create(params, experiments, reference)
for expr in experiments:
if expr.profile is None:
raise ValueError("No profile information in experiment list")
del reference
# Compute the bounding box
predicted.compute_bbox(experiments)
# Create the integrator
integrator = create_integrator(params, experiments, predicted)
# Integrate the reflections
reflections = integrator.integrate()
# Remove unintegrated reflections
if not params.output.output_unintegrated_reflections:
keep = reflections.get_flags(reflections.flags.integrated, all=False)
logger.info(
"Removing %d unintegrated reflections of %d total",
keep.count(False),
keep.size(),
)
reflections = reflections.select(keep)
# Append rubbish data onto the end
if rubbish is not None and params.output.include_bad_reference:
mask = flex.bool(len(rubbish), True)
rubbish.unset_flags(mask, rubbish.flags.integrated_sum)
rubbish.unset_flags(mask, rubbish.flags.integrated_prf)
rubbish.set_flags(mask, rubbish.flags.bad_reference)
reflections.extend(rubbish)
# Correct integrated intensities for absorption correction, if necessary
for abs_params in params.absorption_correction:
if abs_params.apply and abs_params.algorithm == "fuller_kapton":
from dials.algorithms.integration.kapton_correction import (
multi_kapton_correction,
)
experiments, reflections = multi_kapton_correction(
experiments, reflections, abs_params.fuller_kapton, logger=logger
)()
if params.significance_filter.enable:
from dials.algorithms.integration.stills_significance_filter import (
SignificanceFilter,
)
sig_filter = SignificanceFilter(params)
filtered_refls = sig_filter(experiments, reflections)
accepted_expts = ExperimentList()
accepted_refls = flex.reflection_table()
logger.info(
"Removed %d reflections out of %d when applying significance filter",
(reflections.size() - filtered_refls.size()),
reflections.size(),
)
for expt_id, expt in enumerate(experiments):
refls = filtered_refls.select(filtered_refls["id"] == expt_id)
if refls:
accepted_expts.append(expt)
current_id = expt_id
new_id = len(accepted_expts) - 1
refls["id"] = flex.int(len(refls), new_id)
if expt.identifier:
del refls.experiment_identifiers()[current_id]
refls.experiment_identifiers()[new_id] = expt.identifier
accepted_refls.extend(refls)
else:
logger.info(
"Removed experiment %d which has no reflections left after applying significance filter",
expt_id,
)
if not accepted_refls:
raise ValueError("No reflections left after applying significance filter")
experiments = accepted_expts
reflections = accepted_refls
# Write a report if requested
report = None
if params.output.report is not None:
report = integrator.report()
return experiments, reflections, report
@show_mail_handle_errors()
def run(args=None, phil=working_phil):
"""Run the integration command line script."""
usage = "usage: dials.integrate [options] models.expt"
# Create the parser
parser = ArgumentParser(
usage=usage,
phil=phil,
epilog=__doc__,
read_experiments=True,
read_reflections=True,
)
params, options = parser.parse_args(args=args, show_diff_phil=False)
# Configure the logging
dials.util.log.config(verbosity=options.verbose, logfile=params.output.log)
logger.info(dials_version())
    # Log the diff phil
diff_phil = parser.diff_phil.as_str()
if diff_phil:
logger.info("The following parameters have been modified:\n%s", diff_phil)
# Save phil parameters
if params.output.phil is not None:
with open(params.output.phil, "w") as outfile:
outfile.write(parser.diff_phil.as_str())
reference, experiments = reflections_and_experiments_from_files(
params.input.reflections, params.input.experiments
)
if not reference and not experiments:
parser.print_help()
return
if not experiments:
sys.exit("No experiment list was specified")
if not reference:
reference = None
elif len(reference) != 1:
sys.exit("More than 1 reflection file was given")
else:
reference = reference[0]
if reference and "shoebox" not in reference:
sys.exit("Error: shoebox data missing from reflection table")
try:
experiments, reflections, report = run_integration(
params, experiments, reference
)
except (ValueError, RuntimeError) as e:
sys.exit(e)
else:
# Delete the shoeboxes used for intermediate calculations, if requested
if params.integration.debug.delete_shoeboxes and "shoebox" in reflections:
del reflections["shoebox"]
logger.info(
"Saving %d reflections to %s", reflections.size(), params.output.reflections
)
reflections.as_file(params.output.reflections)
logger.info("Saving the experiments to %s", params.output.experiments)
experiments.as_file(params.output.experiments)
if report:
report.as_file(params.output.report)
if __name__ == "__main__":
run()
|
dials/dials
|
command_line/integrate.py
|
Python
|
bsd-3-clause
| 25,755
|
[
"CRYSTAL"
] |
beb953dc1295b50d3ad8e9cab86afd55af45b7f19d9ac82e70162e91798015f2
|
#!/usr/bin/env python
"""
Make plots of the chi-squared distribution for different degrees of freedom
"""
import matplotlib.pyplot as pl
from scipy.stats import norm
from scipy.stats import chi2
import numpy as np
mu = 0. # the mean, mu
nus = [1., 3, 5., 10., 15.]  # degrees of freedom, nu
markers = ['b-', 'r-', 'm-', 'c-', 'g-']
x = np.linspace(0, 25, 1000) # x
# set plot to render labels using latex
pl.rc('text', usetex=True)
pl.rc('font', family='serif')
pl.rc('font', size=14)
fig = pl.figure(figsize=(6,5), dpi=100)
# plot pdfs
for i, nu in enumerate(nus):
pl.plot(x, chi2.pdf(x, nu), markers[i], label='$\\nu=%d$'%nu)
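# For large nu the chi-squared distribution approaches a normal distribution
# with mean nu and variance 2*nu, so N(nu, sqrt(2*nu)) is overlaid for the
# largest nu as a visual check of that approximation.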
# plot a Gaussian for comparison
pl.plot(x, norm.pdf(x, nus[-1], np.sqrt(2.*nus[-1])), 'k--',
label='$N(%d,%.1f)$' % (nus[-1], np.sqrt(2.*nus[-1])))
ax = pl.gca()
ax.set_xlabel(r'$\chi^2$', fontsize=14)
ax.set_ylabel(r'$p(\chi^2)$', fontsize=14)
ax.set_ylim(0., 1.)
ax.legend(loc='best', frameon=False)
fig.subplots_adjust(bottom=0.15)
pl.savefig('../chisquared.pdf')
pl.show()
|
mattpitkin/GraWIToNStatisticsLectures
|
figures/scripts/chisquared.py
|
Python
|
mit
| 1,029
|
[
"Gaussian"
] |
6d584cec4aaeca9c780dae55e936c9227fb3b1376d6163c22ec2362f3f2af027
|
import os
import mdtraj as md
import glob
stride = 1
min_num_frames = 400
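# Trajectories shorter than min_num_frames are skipped; stride controls how
# many frames are kept when re-saving (stride=1 keeps every frame).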
filenames = glob.glob("/home/kyleb/dat/FAH/munged/protein/10470/*.h5")
for k, filename in enumerate(filenames):
print(filename)
trj = md.formats.HDF5TrajectoryFile(filename, mode='r')
n_frames = len(trj)
trj.close()
if n_frames >= min_num_frames:
out_filename = os.path.join("./Trajectories/", os.path.basename(filename))
md.load(filename, stride=stride).save(out_filename)
|
kyleabeauchamp/PMTStuff
|
code/subsample_trajectories.py
|
Python
|
gpl-2.0
| 495
|
[
"MDTraj"
] |
92da619df74a153c36266244d8a5e325d79996376c85f06a08ef498003801066
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import warnings
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_grad # pylint: disable=unused-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import image_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_ops # pylint: disable=unused-import
from tensorflow.python.ops import logging_ops # pylint: disable=unused-import
from tensorflow.python.ops import math_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import spectral_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import tf_logging as logging
# Warn the user if we convert a sparse representation to dense with at
# least this number of elements.
_LARGE_SPARSE_NUM_ELEMENTS = 100000000
def _IndexedSlicesToTensor(value, dtype=None, name=None, as_ref=False):
"""Converts an IndexedSlices object `value` to a Tensor.
NOTE(mrry): This function is potentially expensive.
Args:
value: An ops.IndexedSlices object.
dtype: The dtype of the Tensor to be returned.
name: Optional name to use for the returned Tensor.
as_ref: True if a ref is requested.
Returns:
A dense Tensor representing the values in the given IndexedSlices.
Raises:
ValueError: If the IndexedSlices does not have the same dtype.
"""
_ = as_ref
if dtype and not dtype.is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for IndexedSlices with dtype %s" %
(dtype.name, value.dtype.name))
if value.dense_shape is None:
raise ValueError(
"Tensor conversion requested for IndexedSlices without dense_shape: %s"
% str(value))
# TODO(mrry): Consider adding static shape information to
# IndexedSlices, to avoid using numpy here.
dense_shape_value = tensor_util.constant_value(value.dense_shape)
if dense_shape_value is not None:
num_elements = np.prod(dense_shape_value)
if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor with %d elements. "
"This may consume a large amount of memory." % num_elements)
else:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
"This may consume a large amount of memory.")
return math_ops.unsorted_segment_sum(
value.values, value.indices, value.dense_shape[0], name=name)
ops.register_tensor_conversion_function(ops.IndexedSlices,
_IndexedSlicesToTensor)
def _MarkReachedOps(from_ops, reached_ops):
"""Mark all ops reached from "from_ops".
Args:
from_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
"""
queue = collections.deque()
queue.extend(from_ops)
while queue:
op = queue.popleft()
if not reached_ops[op._id]:
reached_ops[op._id] = True
for output in op.outputs:
queue.extend(output.consumers())
def _GatherInputs(to_ops, reached_ops):
"""List all inputs of to_ops that are in reached_ops.
Args:
to_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
Returns:
The list of all inputs of to_ops that are in reached_ops.
That list includes all elements of to_ops.
"""
inputs = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
inputs.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
return inputs
def _PendingCount(graph, to_ops, from_ops, colocate_gradients_with_ops):
"""Initialize the pending count for ops between two lists of Operations.
'pending_count[op._id]' indicates the number of backprop inputs
to this operation.
Args:
graph: a Graph.
to_ops: list of Operations.
from_ops: list of Operations.
colocate_gradients_with_ops: Python bool. See docstring of gradients().
Returns:
A tuple containing: (1) a list of integers indexed by operation id,
indicating the number of backprop inputs to this operation, and (2)
a ControlFlowState object which is not None if the ops between from_ops
and to_ops contain control flow loops.
"""
# Mark reachable ops from from_ops.
reached_ops = [False] * (graph._last_id + 1)
for op in to_ops:
reached_ops[op._id] = True
_MarkReachedOps(from_ops, reached_ops)
# Mark between ops.
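  # A second traversal from to_ops back through op inputs keeps the ops that
  # are also forward-reachable from from_ops, i.e. the ops lying between the
  # two sets.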
between_ops = [False] * (graph._last_id + 1)
between_op_list = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
between_ops[op._id] = True
between_op_list.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
# 'loop_state' is None if there are no while loops.
loop_state = control_flow_ops.MaybeCreateControlFlowState(
between_op_list, between_ops, colocate_gradients_with_ops)
# Initialize pending count for between ops.
pending_count = [0] * (graph._last_id + 1)
for op in between_op_list:
for x in op.inputs:
if between_ops[x.op._id]:
pending_count[x.op._id] += 1
return pending_count, loop_state
def _AsList(x):
return x if isinstance(x, (list, tuple)) else [x]
def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):
"""Fill in default values for grad_ys.
Args:
grad_ys: List of gradients, can contain None.
ys: List of tensors.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
Returns:
A list of gradients to use, without None.
Raises:
ValueError: If sizes of gradients and inputs don't match
TypeError: If type of any gradient is not valid for its input.
"""
if len(grad_ys) != len(ys):
raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
new_grad_ys = []
for i in xrange(len(grad_ys)):
grad_y = grad_ys[i]
y = ys[i]
with _maybe_colocate_with(y.op, colocate_gradients_with_ops):
if grad_y is None:
if y.dtype.is_complex:
raise TypeError(
"Gradients of complex tensors must set grad_ys (y.dtype = %r)" %
y.dtype)
new_grad_ys.append(array_ops.fill(
array_ops.shape(y), constant_op.constant(
1, dtype=y.dtype, name="grad_ys_%d" % i)))
continue
if y.dtype.is_floating or y.dtype.is_integer:
if not grad_y.dtype.is_floating and not grad_y.dtype.is_integer:
raise TypeError("Gradient type %s generated for real or "
"integer-valued tensor %s with type %s must be "
"real or integer" %
(dtypes.as_dtype(grad_y.dtype).name, y,
dtypes.as_dtype(y.dtype).name))
elif y.dtype.is_complex:
if not grad_y.dtype.is_complex:
raise TypeError("Gradient type %s generated for complex-valued "
"tensor %s with type %s must be real" %
(dtypes.as_dtype(grad_y.dtype).name, y,
dtypes.as_dtype(y.dtype).name))
else:
raise TypeError("Tensor %s with type %s must be numeric "
"to obtain a default gradient" %
(y, dtypes.as_dtype(y.dtype).name))
# Create a grad_y tensor in the name scope of the gradient.
# Required for TensorArrays to identify which gradient call a
# grad_y value is coming from.
if isinstance(grad_y, ops.IndexedSlices):
new_grad_ys.append(
ops.IndexedSlices(
indices=(array_ops.identity(
grad_y.indices, name="grad_ys_%d_indices" % i)
if isinstance(grad_y.indices, ops.Tensor) else
grad_y.indices),
values=(array_ops.identity(
grad_y.values, name="grad_ys_%d_values" % i) if isinstance(
grad_y.values, ops.Tensor) else grad_y.values),
dense_shape=(array_ops.identity(
grad_y.dense_shape, name="grad_ys_%d_shape" % i)
if isinstance(grad_y.dense_shape, ops.Tensor) else
grad_y.dense_shape)))
else:
new_grad_ys.append(array_ops.identity(grad_y, name="grad_ys_%d" % i))
return new_grad_ys
def _IsTrainable(tensor):
dtype = dtypes.as_dtype(tensor.dtype)
return dtype.base_dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128)
def _VerifyGeneratedGradients(grads, op):
"""Verify that gradients are valid in number and type.
Args:
grads: List of generated gradients.
op: Operation for which the gradients where generated.
Raises:
ValueError: if sizes of gradients and inputs don't match.
TypeError: if type of any gradient is not valid for its input.
"""
if len(grads) != len(op.inputs):
raise ValueError("Num gradients %d generated for op %s do not match num "
"inputs %d" % (len(grads), op.node_def, len(op.inputs)))
def _StopOps(from_ops, stop_gradient_ops, pending_count):
"""The set of ops that terminate the gradient computation.
This computes the frontier of the forward graph *before* which backprop
should stop. Operations in the returned set will not be differentiated.
This set is defined as the subset of `from_ops` containing ops that have
no predecessor in `from_ops`. `pending_count` is the result of
`_PendingCount(g, xs, from_ops)`. An 'op' has predecessors in `from_ops`
iff pending_count[op._id] > 0.
In addition, none of `stop_gradient_ops` will be differentiated.
Args:
from_ops: list of Operations.
stop_gradient_ops: list of Operations never to backprop through.
pending_count: List of integers, indexed by operation id.
Returns:
The set of operations.
"""
stop_ops = set()
for op in from_ops:
is_stop_op = True
for inp in op.inputs:
if pending_count[inp.op._id] > 0:
is_stop_op = False
break
if is_stop_op:
stop_ops.add(op._id)
stop_ops.update(op._id for op in stop_gradient_ops) # pylint: disable=protected-access
return stop_ops
@contextlib.contextmanager
def _maybe_colocate_with(op, colocate_gradients_with_ops):
"""Context to colocate with `op` if `colocate_gradients_with_ops`."""
if colocate_gradients_with_ops:
with ops.colocate_with(op):
yield
else:
yield
def _SymGrad(op, out_grads):
"""Backprop through a function call node op given its outputs' gradients."""
f_in = [x for x in op.inputs] + out_grads
f_types = [x.dtype for x in op.inputs]
f = attr_value_pb2.NameAttrList()
f.name = op.type
for k in op.node_def.attr:
f.attr[k].CopyFrom(op.node_def.attr[k])
# pylint: disable=protected-access
in_grads = functional_ops._symbolic_gradient(input=f_in, Tout=f_types, f=f)
# pylint: enable=protected-access
return in_grads
def _MaybeCompile(scope, op, func, grad_fn):
"""Compile the calculation in grad_fn if op was marked as compiled."""
scope = scope.rstrip("/").replace("/", "_")
if func is not None:
xla_compile = func.definition.attr["_XlaCompile"].b
xla_separate_compiled_gradients = func.definition.attr[
"_XlaSeparateCompiledGradients"].b
xla_scope = func.definition.attr["_XlaScope"].s.decode()
else:
try:
xla_compile = op.get_attr("_XlaCompile")
xla_separate_compiled_gradients = op.get_attr(
"_XlaSeparateCompiledGradients")
xla_scope = op.get_attr("_XlaScope").decode()
except ValueError:
return grad_fn() # Exit early
if not xla_compile:
return grad_fn() # Exit early
# If the gradients are supposed to be compiled separately, we give them a
# _XlaScope name that is based on the name_scope of the gradients. Otherwise
# they just inherit the existing _XlaScope name, which lets them be merged
# together with the non-gradient computation.
if xla_separate_compiled_gradients:
xla_grad_scope = "%s_grad_%s" % (xla_scope, scope)
else:
xla_grad_scope = xla_scope
attrs = {
"_XlaCompile": attr_value_pb2.AttrValue(b=xla_compile),
"_XlaScope": attr_value_pb2.AttrValue(s=xla_grad_scope.encode())
}
with ops.get_default_graph()._attr_scope(attrs): # pylint: disable=protected-access
return grad_fn()
def gradients(ys,
xs,
grad_ys=None,
name="gradients",
colocate_gradients_with_ops=False,
gate_gradients=False,
aggregation_method=None,
stop_gradients=None):
"""Constructs symbolic derivatives of sum of `ys` w.r.t. x in `xs`.
`ys` and `xs` are each a `Tensor` or a list of tensors. `grad_ys`
is a list of `Tensor`, holding the gradients received by the
`ys`. The list must be the same length as `ys`.
`gradients()` adds ops to the graph to output the derivatives of `ys` with
respect to `xs`. It returns a list of `Tensor` of length `len(xs)` where
each tensor is the `sum(dy/dx)` for y in `ys`.
`grad_ys` is a list of tensors of the same length as `ys` that holds
the initial gradients for each y in `ys`. When `grad_ys` is None,
we fill in a tensor of '1's of the shape of y for each y in `ys`. A
user can provide their own initial `grad_ys` to compute the
derivatives using a different initial gradient for each y (e.g., if
one wanted to weight the gradient differently for each value in
each y).
`stop_gradients` is a `Tensor` or a list of tensors to be considered constant
with respect to all `xs`. These tensors will not be backpropagated through,
as though they had been explicitly disconnected using `stop_gradient`. Among
other things, this allows computation of partial derivatives as opposed to
total derivatives. For example:
```python
a = tf.constant(0.)
b = 2 * a
g = tf.gradients(a + b, [a, b], stop_gradients=[a, b])
```
Here the partial derivatives `g` evaluate to `[1.0, 1.0]`, compared to the
total derivatives `tf.gradients(a + b, [a, b])`, which take into account the
influence of `a` on `b` and evaluate to `[3.0, 1.0]`. Note that the above is
equivalent to:
```python
a = tf.stop_gradient(tf.constant(0.))
b = tf.stop_gradient(2 * a)
g = tf.gradients(a + b, [a, b])
```
`stop_gradients` provides a way of stopping gradient after the graph has
already been constructed, as compared to `tf.stop_gradient` which is used
during graph construction. When the two approaches are combined,
backpropagation stops at both `tf.stop_gradient` nodes and nodes in
`stop_gradients`, whichever is encountered first.
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
grad_ys: Optional. A `Tensor` or list of tensors the same size as
`ys` and holding the gradients computed for each y in `ys`.
name: Optional name to use for grouping all the gradient ops together.
defaults to 'gradients'.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
gate_gradients: If True, add a tuple around the gradients returned
      for an operation. This avoids some race conditions.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
stop_gradients: Optional. A `Tensor` or list of tensors not to differentiate
through.
Returns:
A list of `sum(dy/dx)` for each x in `xs`.
Raises:
LookupError: if one of the operations between `x` and `y` does not
have a registered gradient function.
ValueError: if the arguments are invalid.
RuntimeError: if called in Eager mode.
"""
if context.in_eager_mode():
raise RuntimeError("tf.gradients not supported in EAGER mode. Use "
"functions in tf.contrib.eager.backprop instead.")
ys = _AsList(ys)
xs = _AsList(xs)
stop_gradients = [] if stop_gradients is None else _AsList(stop_gradients)
if grad_ys is None:
grad_ys = [None] * len(ys)
else:
grad_ys = _AsList(grad_ys)
with ops.name_scope(
name, "gradients",
list(ys) + list(xs) + list(stop_gradients) + list(grad_ys)) as grad_scope:
ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
xs = [x.handle if isinstance(x, resource_variable_ops.ResourceVariable)
else x
for x in xs]
xs = ops.internal_convert_n_to_tensor_or_indexed_slices(xs, name="x",
as_ref=True)
grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops)
# The approach we take here is as follows: Create a list of all ops in the
# subgraph between the ys and xs. Visit these ops in reverse order of ids
# to ensure that when we visit an op the gradients w.r.t its outputs have
# been collected. Then aggregate these gradients if needed, call the op's
# gradient function, and add the generated gradients to the gradients for
# its input.
# Initialize the pending count for ops in the connected subgraph from ys
# to the xs.
if len(ys) > 1:
ys = [array_ops.identity(y) if y.consumers() else y for y in ys]
to_ops = [t.op for t in ys]
from_ops = [t.op for t in xs]
stop_gradient_ops = [t.op for t in stop_gradients]
pending_count, loop_state = _PendingCount(ops.get_default_graph(), to_ops,
from_ops,
colocate_gradients_with_ops)
# Iterate over the collected ops.
#
# grads: op => list of gradients received on each output endpoint of the
# op. The gradients for each endpoint are initially collected as a list.
# When it is time to call the op's gradient function, for each endpoint we
# aggregate the list of received gradients into a Add() Operation if there
# is more than one.
grads = {}
# Add the initial gradients for the ys.
for y, grad_y in zip(ys, grad_ys):
_SetGrad(grads, y, grad_y)
# Initialize queue with to_ops.
queue = collections.deque()
# Add the ops in 'to_ops' into the queue.
to_ops_set = set()
for op in to_ops:
# 'ready' handles the case where one output gradient relies on
# another output's gradient.
# pylint: disable=protected-access
ready = (pending_count[op._id] == 0)
if ready and op._id not in to_ops_set:
to_ops_set.add(op._id)
queue.append(op)
# pylint: enable=protected-access
if loop_state:
loop_exits = loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set)
for y in loop_exits:
if _IsTrainable(y):
_SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
queue.append(y.op)
stop_ops = _StopOps(from_ops, stop_gradient_ops, pending_count)
while queue:
# generate gradient subgraph for op.
op = queue.popleft()
with _maybe_colocate_with(op, colocate_gradients_with_ops):
if loop_state:
loop_state.EnterGradWhileContext(op, before=True)
out_grads = _AggregatedGrads(grads, op, loop_state, aggregation_method)
if loop_state:
loop_state.ExitGradWhileContext(op, before=True)
grad_fn = None
# pylint: disable=protected-access
func_call = None
is_func_call = ops.get_default_graph()._is_function(op.type)
has_out_grads = any(isinstance(g, ops.Tensor) or g for g in out_grads)
if has_out_grads and (op._id not in stop_ops):
if is_func_call:
func_call = ops.get_default_graph()._get_function(op.type)
grad_fn = func_call.python_grad_func
# pylint: enable=protected-access
else:
# A grad_fn must be defined, either as a function or as None
# for ops that do not have gradients.
try:
grad_fn = ops.get_gradient_function(op)
except LookupError:
raise LookupError(
"No gradient defined for operation '%s' (op type: %s)" %
(op.name, op.type))
if loop_state:
loop_state.EnterGradWhileContext(op, before=False)
if (grad_fn or is_func_call) and has_out_grads:
# NOTE: If _AggregatedGrads didn't compute a value for the i'th
# output, it means that the cost does not depend on output[i],
# therefore dC/doutput[i] is 0.
for i, out_grad in enumerate(out_grads):
if (not isinstance(out_grad, ops.Tensor) and
not out_grad) and ((not grad_fn and is_func_call) or
_IsTrainable(op.outputs[i])):
# Only trainable outputs or outputs for a function call that
# will use SymbolicGradient get a zero gradient. Gradient
# functions should ignore the gradient for other outputs.
# TODO(apassos) gradients of resource handles might be an
# issue here because of zeros.
if loop_state:
out_grads[i] = loop_state.ZerosLike(op, i)
else:
out_grads[i] = control_flow_ops.ZerosLikeOutsideLoop(op, i)
with ops.name_scope(op.name + "_grad"):
# pylint: disable=protected-access
with ops.get_default_graph()._original_op(op):
# pylint: enable=protected-access
if grad_fn:
# If grad_fn was found, do not use SymbolicGradient even for
# functions.
in_grads = _MaybeCompile(
grad_scope, op, func_call, lambda: grad_fn(op, *out_grads))
else:
# For function call ops, we add a 'SymbolicGradient'
# node to the graph to compute gradients.
in_grads = _MaybeCompile(
grad_scope, op, func_call, lambda: _SymGrad(op, out_grads))
in_grads = _AsList(in_grads)
_VerifyGeneratedGradients(in_grads, op)
if gate_gradients and len(
[x for x in in_grads if x is not None]) > 1:
with ops.device(None):
with ops.colocate_with(None, ignore_existing=True):
in_grads = control_flow_ops.tuple(in_grads)
_LogOpGradients(op, out_grads, in_grads)
else:
# If no grad_fn is defined or none of out_grads is available,
# just propagate a list of None backwards.
in_grads = [None] * len(op.inputs)
for i, (t_in, in_grad) in enumerate(zip(op.inputs, in_grads)):
if in_grad is not None:
if (isinstance(in_grad, ops.Tensor) and
t_in.dtype != dtypes.resource):
try:
in_grad.set_shape(t_in.get_shape())
except ValueError:
raise ValueError(
"Incompatible shapes between op input and calculated "
"input gradient. Forward operation: %s. Input index: %d. "
"Original input shape: %s. "
"Calculated input gradient shape: %s"
% (op.name, i, t_in.shape, in_grad.shape))
_SetGrad(grads, t_in, in_grad)
if loop_state:
loop_state.ExitGradWhileContext(op, before=False)
# Update pending count for the inputs of op and enqueue ready ops.
_UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state)
if loop_state:
loop_state.PostProcessing()
return [_GetGrad(grads, x) for x in xs]
def _HasAnyNotNoneGrads(grads, op):
"""Return true iff op has real gradient."""
out_grads = _GetGrads(grads, op)
for out_grad in out_grads:
if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
return True
if out_grad and isinstance(out_grad, collections.Sequence):
if any([g is not None for g in out_grad]):
return True
return False
def _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state):
"""Update pending count for the inputs of op and enqueue ready ops."""
for x in op.inputs:
# pylint: disable=protected-access
pending_count[x.op._id] -= 1
ready = (pending_count[x.op._id] == 0)
if loop_state and not ready:
ready = (pending_count[x.op._id] > 0 and
control_flow_util.IsLoopSwitch(x.op))
# pylint: enable=protected-access
if ready:
if control_flow_util.IsLoopExit(x.op):
        # If x is an exit without a real gradient, defer processing it.
grad_state = loop_state.GetGradState(x.op, before=False)
grad_state.deferred_exits.append(x)
grad_state.pending_exits_count -= 1
if grad_state.pending_exits_count == 0:
# We now have all the exits so process them.
has_not_none_grad = False
for y in grad_state.deferred_exits:
if _HasAnyNotNoneGrads(grads, y.op):
has_not_none_grad = True
queue.append(y.op)
else:
grad_state.unused_exits.append(y)
if has_not_none_grad:
# For an unused exit, if it has trainable outputs, backprop
# a zero gradient. Otherwise, just ignore it.
for y in grad_state.unused_exits:
if _IsTrainable(y):
_SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
queue.append(y.op)
else:
# All exits are "unused" so use None as gradient.
for y in grad_state.unused_exits:
queue.append(y.op)
else:
queue.append(x.op)
def _SetGrad(grads, t, grad):
"""Sets gradient "grad" in "grads" for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
op_grads = [[] for _ in xrange(len(op.outputs))]
grads[op] = op_grads
t_grads = op_grads[t.value_index]
if isinstance(t_grads, list):
t_grads.append(grad)
else:
assert control_flow_util.IsLoopSwitch(op)
op_grads[t.value_index] = grad
def _GetGrad(grads, t):
"""Gets gradient for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
return None
t_grad = op_grads[t.value_index]
assert not isinstance(t_grad, list), (
"gradients list should have been aggregated by now.")
return t_grad
def _GetGrads(grads, op):
"""Gets all gradients for op."""
if op in grads:
return grads[op]
else:
return [[] for _ in xrange(len(op.outputs))]
def _HandleNestedIndexedSlices(grad):
assert isinstance(grad, ops.IndexedSlices)
if isinstance(grad.values, ops.Tensor):
return grad
else:
assert isinstance(grad.values, ops.IndexedSlices)
g = _HandleNestedIndexedSlices(grad.values)
return ops.IndexedSlices(g.values,
array_ops.gather(grad.indices, g.indices),
g.dense_shape)
def _AccumulatorShape(inputs):
shape = tensor_shape.unknown_shape()
for i in inputs:
if isinstance(i, ops.Tensor):
shape = shape.merge_with(i.get_shape())
return shape
def _LogOpGradients(op, out_grads, in_grads):
"""Log the in and out grads of an op."""
logging.vlog(1, "Gradient for '" + op.name + "'")
def _FilterGrad(x):
if x is None:
return False
if isinstance(x, (list, tuple)):
return bool(x)
else:
return True
logging.vlog(1, " in --> %s",
", ".join([x.name for x in out_grads if _FilterGrad(x)]))
logging.vlog(1, " out --> %s",
", ".join([x.name for x in in_grads if _FilterGrad(x)]))
def _MultiDeviceAddN(tensor_list):
"""Adds tensors from potentially multiple devices."""
# Basic function structure comes from control_flow_ops.group().
# Sort tensors according to their devices.
tensors_on_device = collections.defaultdict(lambda: [])
for tensor in tensor_list:
tensors_on_device[tensor.device].append(tensor)
# For each device, add the tensors on that device first.
# Then gather the partial sums from multiple devices.
# TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.
# E.g., aggregate per GPU, then per task, and so on.
summands = []
def DeviceKey(dev):
return "" if dev is None else dev
for dev in sorted(six.iterkeys(tensors_on_device), key=DeviceKey):
tensors = tensors_on_device[dev]
with ops.colocate_with(tensors[0].op, ignore_existing=True):
summands.append(math_ops.add_n(tensors))
return math_ops.add_n(summands)
class AggregationMethod(object):
"""A class listing aggregation methods used to combine gradients.
Computing partial derivatives can require aggregating gradient
contributions. This class lists the various methods that can
be used to combine gradients in the graph:
* `ADD_N`: All of the gradient terms are summed as part of one
operation using the "AddN" op. It has the property that all
gradients must be ready before any aggregation is performed.
* `DEFAULT`: The system-chosen default aggregation method.
"""
ADD_N = 0
DEFAULT = ADD_N
# The following are experimental and may not be supported in future releases.
EXPERIMENTAL_TREE = 1
EXPERIMENTAL_ACCUMULATE_N = 2
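# Hedged sketch (not part of the original module): a small helper showing how a
# caller would request one of the experimental aggregation strategies listed
# above; `ys` and `xs` are caller-supplied tensors and nothing runs on import.
def _ExampleTreeAggregatedGradients(ys, xs):
  """Sketch: aggregate gradient terms with pairwise (tree) summation."""
  return gradients(
      ys, xs, aggregation_method=AggregationMethod.EXPERIMENTAL_TREE)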
def _AggregatedGrads(grads, op, loop_state, aggregation_method=None):
"""Get the aggregated gradients for op.
Args:
grads: The map of memoized gradients.
op: The op to get gradients for.
loop_state: An object for maintaining the state of the while loops in the
graph. It is of type ControlFlowState. None if the graph
contains no while loops.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
Returns:
    A list of gradients, one for each output of `op`. If the gradients
    for a particular output are given as a list, this function aggregates them
    before returning.
Raises:
TypeError: if the incoming grads are not Tensors or IndexedSlices.
ValueError: if the arguments are invalid.
"""
if aggregation_method is None:
aggregation_method = AggregationMethod.DEFAULT
if aggregation_method not in [
AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
]:
raise ValueError("Invalid aggregation_method specified %s." %
aggregation_method)
out_grads = _GetGrads(grads, op)
for i, out_grad in enumerate(out_grads):
if loop_state:
if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
assert control_flow_util.IsLoopSwitch(op)
continue
# Grads have to be Tensors or IndexedSlices
if (isinstance(out_grad, collections.Sequence) and not all([
isinstance(g, (ops.Tensor, ops.IndexedSlices)) for g in out_grad
if g is not None
])):
raise TypeError("gradients have to be either all Tensors "
"or all IndexedSlices")
# Aggregate multiple gradients, and convert [] to None.
if out_grad:
if len(out_grad) < 2:
used = "nop"
out_grads[i] = out_grad[0]
elif all([isinstance(g, ops.Tensor) for g in out_grad if g is not None]):
tensor_shape = _AccumulatorShape(out_grad)
if (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
and len(out_grad) > 2 and tensor_shape.is_fully_defined()):
# The benefit of using AccumulateN is that its inputs can be combined
# in any order and this can allow the expression to be evaluated with
# a smaller memory footprint. When used with gpu_allocator_retry,
# it is possible to compute a sum of terms which are much larger than
# total GPU memory.
# AccumulateN can currently only be used if we know the shape for
# an accumulator variable. If this is not known, or if we only have
# 2 grads then we fall through to the "tree" case below.
used = "accumulate_n"
out_grads[i] = math_ops.accumulate_n(out_grad)
elif aggregation_method in [
AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
]:
# Aggregate all gradients by doing pairwise sums: this may
# reduce performance, but it can improve memory because the
# gradients can be released earlier.
#
# TODO(vrv): Consider replacing this with a version of
# tf.AddN() that eagerly frees its inputs as soon as they are
# ready, so the order of this tree does not become a problem.
used = "tree"
with ops.name_scope(op.name + "_gradient_sum"):
running_sum = out_grad[0]
for grad in out_grad[1:]:
running_sum = math_ops.add_n([running_sum, grad])
out_grads[i] = running_sum
else:
used = "add_n"
out_grads[i] = _MultiDeviceAddN(out_grad)
logging.vlog(2, " _AggregatedGrads %d x %s using %s",
len(out_grad), tensor_shape, used)
else:
out_grad = math_ops._as_indexed_slices_list(
[g for g in out_grad if g is not None])
out_grad = [_HandleNestedIndexedSlices(x) for x in out_grad]
# Form IndexedSlices out of the concatenated values and
# indices.
out_grads[i] = ops.IndexedSlices(
array_ops.concat([x.values for x in out_grad], 0),
array_ops.concat([x.indices for x in out_grad], 0),
out_grad[0].dense_shape)
else: # not out_grad
# out_grads[i] is [], thus its aggregation is simply None.
out_grads[i] = None
return out_grads
# TODO(vrv): Make this available when we want to make it public.
def _hessian_vector_product(ys, xs, v):
"""Multiply the Hessian of `ys` wrt `xs` by `v`.
This is an efficient construction that uses a backprop-like approach
to compute the product between the Hessian and another vector. The
Hessian is usually too large to be explicitly computed or even
represented, but this method allows us to at least multiply by it
for the same big-O cost as backprop.
Implicit Hessian-vector products are the main practical, scalable way
of using second derivatives with neural networks. They allow us to
do things like construct Krylov subspaces and approximate conjugate
gradient descent.
Example: if `y` = 1/2 `x`^T A `x`, then `hessian_vector_product(y,
x, v)` will return an expression that evaluates to the same values
  as (A + A.T) `v` / 2 (i.e. A `v` when A is symmetric).
Args:
ys: A scalar value, or a tensor or list of tensors to be summed to
yield a scalar.
xs: A list of tensors that we should construct the Hessian over.
v: A list of tensors, with the same shapes as xs, that we want to
multiply by the Hessian.
Returns:
A list of tensors (or if the list would be length 1, a single tensor)
containing the product between the Hessian and `v`.
Raises:
ValueError: `xs` and `v` have different length.
"""
# Validate the input
length = len(xs)
if len(v) != length:
raise ValueError("xs and v must have the same length.")
# First backprop
grads = gradients(ys, xs)
assert len(grads) == length
elemwise_products = [
math_ops.multiply(grad_elem, array_ops.stop_gradient(v_elem))
for grad_elem, v_elem in zip(grads, v) if grad_elem is not None
]
# Second backprop
return gradients(elemwise_products, xs)
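# Hedged illustration (not part of the original module): wiring
# _hessian_vector_product for a simple sum-of-squares loss. `x` and `v` are
# caller-supplied rank-1 tensors of equal shape; the single returned tensor
# should evaluate to 2 * v.
def _ExampleHessianVectorProduct(x, v):
  """Sketch: Hessian-vector product of a sum-of-squares loss."""
  y = math_ops.reduce_sum(x * x)
  return _hessian_vector_product(y, [x], [v])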
def hessians(ys, xs, name="hessians", colocate_gradients_with_ops=False,
gate_gradients=False, aggregation_method=None):
"""Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.
`hessians()` adds ops to the graph to output the Hessian matrix of `ys`
with respect to `xs`. It returns a list of `Tensor` of length `len(xs)`
where each tensor is the Hessian of `sum(ys)`. This function currently
only supports evaluating the Hessian with respect to (a list of) one-
dimensional tensors.
The Hessian is a matrix of second-order partial derivatives of a scalar
tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details).
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
name: Optional name to use for grouping all the gradient ops together.
defaults to 'hessians'.
colocate_gradients_with_ops: See `gradients()` documentation for details.
gate_gradients: See `gradients()` documentation for details.
aggregation_method: See `gradients()` documentation for details.
Returns:
A list of Hessian matrices of `sum(ys)` for each `x` in `xs`.
Raises:
LookupError: if one of the operations between `xs` and `ys` does not
have a registered gradient function.
"""
xs = _AsList(xs)
kwargs = {
'colocate_gradients_with_ops': colocate_gradients_with_ops,
'gate_gradients': gate_gradients,
'aggregation_method': aggregation_method
}
# Compute first-order derivatives and iterate for each x in xs.
hessians = []
_gradients = gradients(ys, xs, **kwargs)
for i, _gradient, x in zip(range(len(xs)), _gradients, xs):
# Ensure that x is a vector.
check_rank = check_ops.assert_rank(
x, 1, message='Cannot compute Hessian because element %d of `xs` does '
'not have rank one.' % i
)
with ops.control_dependencies([check_rank]):
# Declare an iterator and tensor array loop variables for the gradients.
n = array_ops.size(x)
loop_vars = [
array_ops.constant(0, dtypes.int32),
tensor_array_ops.TensorArray(x.dtype, n)
]
# Iterate over all elements of the gradient and compute second order
# derivatives.
_, hessian = control_flow_ops.while_loop(
lambda j, _: j < n,
lambda j, result: (j + 1,
result.write(j, gradients(_gradient[j], x)[0])),
loop_vars
)
hessians.append(hessian.stack())
return hessians
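# Hedged usage sketch (not part of the original module): building the Hessian
# of a simple quadratic in graph mode. The placeholder name is hypothetical;
# per the docstring above, `hessians()` expects rank-1 inputs.
def _ExampleHessianOfQuadratic():
  """Sketch: Hessian of sum(x * x) for a rank-1 placeholder, i.e. 2 * I."""
  x = array_ops.placeholder(dtypes.float32, shape=[3], name="hessian_example_x")
  y = math_ops.reduce_sum(x * x)
  return hessians(y, x)[0]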
|
Kongsea/tensorflow
|
tensorflow/python/ops/gradients_impl.py
|
Python
|
apache-2.0
| 40,444
|
[
"VisIt"
] |
049d681d5c579e71fa7887c4223ad150052f3b14fcbe17aa41c1dc646931a3ef
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Execute computations asynchronously using threads or processes."""
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
from concurrent.futures._base import (FIRST_COMPLETED,
FIRST_EXCEPTION,
ALL_COMPLETED,
CancelledError,
TimeoutError,
Future,
Executor,
wait,
as_completed)
from concurrent.futures.process import ProcessPoolExecutor
from concurrent.futures.thread import ThreadPoolExecutor
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/concurrent/concurrent/futures/__init__.py
|
Python
|
bsd-3-clause
| 800
|
[
"Brian"
] |
e8aaaf62976a6f64efb7b1e338cb775abab16ee46e3d7ac7f2a67efbde2abded
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Break spin symmetry for UHF/UKS by initial guess.
See also examples/dft/32-broken_symmetry_dft.py
'''
import numpy
from pyscf import gto
from pyscf import scf
mol = gto.Mole()
mol.verbose = 4
mol.atom = [
["H", (0., 0., 2.5)],
["H", (0., 0., -2.5)],]
mol.basis = 'cc-pvdz'
mol.build()
mf = scf.UHF(mol)
#
# We can modify the initial guess DM to break spin symmetry.
# For UHF/UKS calculation, the initial guess DM can be a two-item list
# (alpha,beta). Assigning alpha-DM and beta-DM to different value can break
# the spin symmetry.
#
# In the following example, the function get_init_guess returns the
# superposition of atomic density matrices in which the alpha and beta
# components are degenerate. The degeneracy is broken by zeroing out the
# beta 1s,2s components.
#
dm_alpha, dm_beta = mf.get_init_guess()
dm_beta[:2,:2] = 0
dm = (dm_alpha,dm_beta)
mf.kernel(dm)
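#
# Hedged follow-up (not part of the original example): one way to check that
# the spin symmetry is actually broken is to look at <S^2> of the converged
# UHF solution. spin_square() uses the converged MO coefficients by default.
#
ss, mult = mf.spin_square()
print('<S^2> = %.6f  2S+1 = %.6f' % (ss, mult))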
|
gkc1000/pyscf
|
examples/scf/32-break_spin_symm.py
|
Python
|
apache-2.0
| 969
|
[
"PySCF"
] |
86a01e065cb6b9d4824ce27738517b85f24e46ddbfb10bddc0bba7909c099ec8
|
#!/usr/bin/env python
import argparse
parser = argparse.ArgumentParser(prog='smooth_traj.py',
formatter_class=argparse.RawDescriptionHelpFormatter,
description='''version1''')
parser.add_argument("top", help="A prmtop file", type=str)
parser.add_argument("traj", help="""A NetCDF MD trajectory""", nargs='+')
parser.add_argument("-n", "--name", help="The name of the output smoothed trajectoryaj", type=str,
default="traj_superposed.nc", required=False)
parser.add_argument("-r", "--ref", help="A reference structure to superpose to", type=str,
default=None, required=False)
if __name__ == '__main__':
import mdtraj
import os
args = parser.parse_args()
print(args)
if len(args.traj) == 1:
traj_name = os.path.basename(args.traj[0])[:-3] # drop the .nc ending
print('Loading traj...')
traj = mdtraj.load(args.traj[0], top=args.top)
print('Superposing...')
# alpha carbons of the actin filament
atoms = traj.topology.select('resid 1 to 6016 and name CA')
if args.ref is None:
traj.superpose(traj, 0, atom_indices=atoms)
else:
ref = mdtraj.load(args.ref)
traj.superpose(ref, 0, atom_indices=atoms)
traj.save_netcdf(''.join([traj_name, '_superposed.nc']))
else:
print('Loading {} trajs as one...'.format(len(args.traj)))
traj = mdtraj.load(args.traj, top=args.top)
print('Superposing...')
# alpha carbons of the actin filament
atoms = traj.topology.select('resid 1 to 6016 and name CA')
if args.ref is None:
traj.superpose(traj, 0, atom_indices=atoms)
else:
ref = mdtraj.load(args.ref)
traj.superpose(ref, 0, atom_indices=atoms)
traj.save_netcdf(args.name)
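# Hedged usage note (not part of the original script); the file names below are
# hypothetical and mirror the argparse options defined above (the program name
# is the one declared via prog='smooth_traj.py'):
#   python smooth_traj.py system.prmtop run1.nc run2.nc -n combined_superposed.nc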
|
jeiros/Scripts
|
AnalysisMDTraj/superpose_ctf.py
|
Python
|
mit
| 1,895
|
[
"MDTraj",
"NetCDF"
] |
731c902d05290db242097fb030c654293e47eec798cbac9db5d425959db28684
|
"""Go through a BAM file made from alignments on a FASTQ and do the following:
1. BADBAM: Create a bam file containing only the misaligned reads. The read (CHROM, POS, CIGAR) are set to be the correct alignment
and the original alignment (CHROM, POS, CIGAR) is encoded in extended tags (see below).
The reason for setting the read CHROM, POS and CIGAR attributes to the correct values is to be able to merge several of
these BAM files. Having the reads in the correct (and therefore fixed) order enables us to merge sort them
2. PERBAM: Create a BAM file containing all reads with the correct (CHROM, POS, CIGAR) set and the aligned (CHROM,POS,CIGAR)
stored using extended tags (same as above, details below).
If we are asked for just a read analysis then this file omits read data (such as sequence and quality scores) so that
the resultant BAM is smaller and more manageable.
If we are asked for a perfect BAM then we also write the full read data into this file and can use this as a perfect
input for variant callers and so on."""
import os
import time
import io
import pysam
import click
from mitty.version import __version__
import mitty.benchmarking.creed as creed
import mitty.lib.mio as mio # For the bam sort and index function
from mitty.lib import DNA_complement
from string import translate
import logging
logger = logging.getLogger(__name__)
__extended_bam_tags_info__ = """
Extended tags
TAG TYPE VALUE
Zc i 0 - read comes from chrom copy 0, 1 - read comes from chrom copy 1
ZE i Read stop (Read start is in POS)
Ze i Mate stop (Mate start is available from other BAM info)
Xf i 0 - incorrectly mapped, 1 - correctly mapped, 2 - unmapped
YR i 0 - chrom was wrong, 1 - chrom was correct
YP i 0 - pos was wrong, 1 - pos was correct
YC i 0 - CIGAR was wrong, 1 - CIGAR was correct
XR i Aligned chromosome
XP i Aligned pos
XC Z Aligned CIGAR
"""
def print_tags(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo(__extended_bam_tags_info__)
ctx.exit()
def process_file(bam_in_fp, bad_bam_fp=None, per_bam_fp=None, full_perfect_bam=False, window=0, extended=False,
flag_cigar_errors_as_misalignments=False,
progress_bar_update_interval=100):
"""Main processing function that goes through the bam file, analyzing read alignment and writing out
:param bam_in_fp: Pointer to original BAM
:param bad_bam_fp: Pointer to BADBAM being created
:param per_bam_fp: Pointer to PERBAM being created
:param full_perfect_bam: If True, write the read seq and qual scores too, to make a complete perfect bam
:param window: Tolerance window for deciding if read is correctly aligned
:param extended: If True write out new style CIGARs (With '=' and 'X')
:param flag_cigar_errors_as_misalignments: Set to True if we want CIGAR errors to count as misalignments
:param progress_bar_update_interval: how many reads to process before yielding (to update progress bar as needed)
:return: number of reads processed
"""
n0 = progress_bar_update_interval
analyze_read = creed.analyze_read
mis_read_cnt = 0
for tot_read_cnt, read in enumerate(bam_in_fp):
read_serial, chrom, cpy, ro, pos, rl, cigar, ro_m, pos_m, rl_m, cigar_m, chrom_c, pos_c, cigar_c, read_is_unmapped \
= analyze_read(read, window, extended)
if read_serial is None: continue # Something wrong with this read.
read_is_misaligned = not (chrom_c and pos_c and (cigar_c or (not flag_cigar_errors_as_misalignments)))
if read_is_misaligned or full_perfect_bam: # Need all the read info, incl seq and quality
new_read = read
else: # Need only qname, some tags and pos, chrom info
new_read = pysam.AlignedSegment()
new_read.qname = read.qname
# File size note
#For a file with 202999 reads (per_bam, condensed - perfectbam -v -v -p reads.bam):
#With 'A' -> 2571157 Oct 5 14:23 reads_per.bam
#with 'i' -> 2576795 Oct 5 14:25 reads_per.bam
# -> 0.2 % increase in size. This is negligible (probably due to BAM compression)
# Hence we use 'i' instead of 'A' even for data that can fit in a byte. The extra hassle of conversion is not
# worth the tiny savings
# Needs to be consistent with __extended_bam_tags_info__
new_read.set_tags([('Zc', cpy, 'i'),
('ZE', pos + rl, 'i'),
('Ze', pos_m + rl_m, 'i'),
('Xf', 2 if read_is_unmapped else (chrom_c and pos_c), 'i'),
('YR', chrom_c, 'i'),
('YP', pos_c, 'i'),
('YC', cigar_c, 'i'),
('XR', read.reference_id, 'i'),
('XP', read.pos, 'i'),
('XC', read.cigarstring or '', 'Z')])
if read_is_misaligned: # We need to check if the read was reverse complemented when it should have been and vv
if new_read.is_reverse != ro: # The complement is not consistent
qual = new_read.qual[::-1]
new_read.seq = translate(new_read.seq, DNA_complement)[::-1]
new_read.qual = qual
new_read.is_reverse = ro
new_read.mate_is_reverse = 1 - ro
new_read.is_unmapped = False
new_read.mate_is_unmapped = False # Gotta check this - what if mate is deep in an insert?
new_read.pnext = pos_m
new_read.reference_id = chrom - 1
new_read.pos = pos
new_read.cigarstring = cigar # What if this is deep in an insert?
if read_is_misaligned or read_is_unmapped:
bad_bam_fp.write(new_read)
mis_read_cnt += 1
per_bam_fp.write(new_read)
n0 -= 1
if n0 == 0:
yield tot_read_cnt, mis_read_cnt
n0 = progress_bar_update_interval
yield tot_read_cnt + 1, mis_read_cnt # tot_read_cnt starts from 0 actually ...
def sort_and_index_bams(bad_bam_fname, per_bam_fname):
t0 = time.time()
mio.sort_and_index_bam(bad_bam_fname)
t1 = time.time()
logger.debug('Sort and indexed bad BAM in {:2.2f}s'.format(t1 - t0))
t0 = time.time()
mio.sort_and_index_bam(per_bam_fname)
t1 = time.time()
logger.debug('Sort and indexed perfect BAM in {:2.2f}s'.format(t1 - t0))
@click.command()
@click.version_option()
@click.argument('inbam', type=click.Path(exists=True))
@click.option('--bad-bam', type=click.Path(), help='BADBAM name (if omitted, file will be written to same directory as input file)')
@click.option('--per-bam', type=click.Path(), help='PERBAM name (if omitted, file will be written to same directory as input file)')
@click.option('--tags', is_flag=True, callback=print_tags, expose_value=False, is_eager=True, help='Print documentation for extended BAM tags')
@click.option('--cigar-errors', is_flag=True, help='CIGAR errors result in reads being classified as misaligned')
@click.option('--perfect-bam', is_flag=True, help='Perfect BAM has full read information')
@click.option('--window', help='Size of tolerance window', default=0, type=int)
@click.option('-x', is_flag=True, help='Use extended CIGAR ("X"s and "="s) rather than traditional CIGAR (just "M"s)')
@click.option('--no-index', is_flag=True, help="Mostly for the platform: Don't sort and index the output files")
@click.option('-v', count=True, help='Verbosity level')
@click.option('-p', is_flag=True, help='Show progress bar')
def cli(inbam, bad_bam, per_bam, cigar_errors, perfect_bam, window, x, no_index, v, p):
"""Analyse BAMs produced from Mitty generated FASTQs for alignment accuracy.
Produces two BAM files with reads having correct POS, CIGAR values. The original
alignment information is written in the extended tags (use --tags for documentation)
\b
bad.bam - contains just the misaligned reads.
The CHROM, POS and CIGAR are the correct values
Alignment errors are stored as extended tags.
per.bam - contains all reads.
The CHROM, POS and CIGAR are the correct values
Alignment errors are stored as extended tags.
If the --perfect-bam flag is set, full read information
is written, otherwise only the POS and CIGAR values are filled out
The <INBAM>_bad.bam file can be used for analyzing misalignments whereas the <INBAM>_per.bam file is
important for analyzing true positive alignment rates.
"""
level = logging.DEBUG if v > 0 else logging.WARNING
logging.basicConfig(level=level)
bad_bam_fname = bad_bam or (os.path.splitext(inbam)[0] + '_bad.bam')
per_bam_fname = per_bam or (os.path.splitext(inbam)[0] + '_per.bam')
#process_bams(inbam, bad_bam_fname, per_bam_fname, cigar_errors, perfect_bam, window, x, p)
bam_in_fp = pysam.AlignmentFile(inbam, 'rb')
def true2str(v):
return 'true' if v else 'false'
new_header = bam_in_fp.header
new_header['PG'].append({
'CL': 'perfectbam ....',
'ID': 'mitty-perfectbam',
'PN': 'perfectbam',
'VN': __version__,
'PP': new_header['PG'][-1]['ID'],
'DS': 'window={:d}, cigar errors result in misalignments={:s}, extended_cigar={:s}'.
format(window, true2str(cigar_errors), true2str(x))
})
bad_bam_fp = pysam.AlignmentFile(bad_bam_fname, 'wb', header=new_header)
per_bam_fp = pysam.AlignmentFile(per_bam_fname, 'wb', header=new_header)
cnt, mis = 0, 0
t0 = time.time()
total_read_count = bam_in_fp.mapped + bam_in_fp.unmapped # Sadly, this is only approximate
progress_bar_update_interval = int(0.01 * total_read_count)
with click.progressbar(length=total_read_count, label='Processing BAM',
file=None if p else io.BytesIO()) as bar:
for cnt, mis in process_file(bam_in_fp=bam_in_fp, bad_bam_fp=bad_bam_fp, per_bam_fp=per_bam_fp,
full_perfect_bam=perfect_bam, window=window,
flag_cigar_errors_as_misalignments=cigar_errors, extended=x,
progress_bar_update_interval=progress_bar_update_interval):
bar.update(progress_bar_update_interval)
t1 = time.time()
logger.debug('Analyzed {:d} reads in {:2.2f}s. Found {:d} ({:2.2f}%) mis-aligned reads'.format(cnt, t1 - t0, mis, (100.0 * mis) / cnt))
if not no_index: sort_and_index_bams(bad_bam_fname, per_bam_fname)
if __name__ == "__main__":
cli()
|
latticelabs/Mitty
|
mitty/benchmarking/perfectbam.py
|
Python
|
gpl-2.0
| 10,257
|
[
"pysam"
] |
25a5fdb1fcdba5d73eb82ab4ac68cb6c7ad1e26aac009697896158865cd16cd9
|
# Copyright (C) 2014-2015 Kate Cook, 2016-2017 Kevin Ha
#
# This file is part of rnascan.
#
# rnascan is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# rnascan is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with rnascan. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import sys
import time
import glob
import fileinput
import os
import os.path
import warnings
import argparse
import ast
import re
from collections import defaultdict
import multiprocessing
import pandas as pd
import numpy as np
from itertools import repeat
from .BioAddons.Alphabet import ContextualSecondaryStructure
from .BioAddons.motifs import matrix
from Bio import motifs, SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import RNAAlphabet, IUPAC
from .version import __version__
def getoptions():
desc = "Scan sequence for motif binding sites. Results sent to STDOUT."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('fastafiles', metavar='FASTA', nargs='*',
help="Input sequence and structure FASTA files")
pfm_grp = parser.add_argument_group("PFM options")
pfm_grp.add_argument('-p', '--pfm_seq', dest="pfm_seq", type=str,
help="Sequence PFM")
pfm_grp.add_argument('-q', '--pfm_struct', dest="pfm_struct", type=str,
help="Structure PFM")
parser.add_argument('-C', '--pseudocount', type=float,
dest="pseudocount", default=0,
help="Pseudocount for normalizing PFM. [%(default)s]")
parser.add_argument('-m', '--minscore', type=float, dest='minscore',
default=6,
help="Minimum score for motif hits. [%(default)s]")
parser.add_argument('-t', '--testseq', dest='testseq', default=None,
help=("Supply a test sequence to scan. FASTA files "
"will be ignored. Can supply sequence and "
"structure as single string separated by "
" comma."))
parser.add_argument('-c', '--cores', type=int, default=8, dest="cores",
help="Number of processing cores [%(default)s]")
bg_grp = parser.add_argument_group('Background frequency options')
bg_grp.add_argument('-u', '--uniformbg', action="store_true",
default=False, dest="uniform_background",
help=("Use uniform background for calculating "
"log-odds [%(default)s]. Default is to compute "
"background from input sequences. This option "
"is mutually exclusive with -B."))
bg_grp.add_argument('-g', '--bgonly', action="store_true", default=False,
dest="bgonly",
help=("Compute background probabilities from input "
"sequences (STDOUT) and exit. Useful for "
"getting background probabilities from a "
"superset of sequences. Then, these values can "
"be subsequently supplied using -b. "
"[%(default)s]"))
bg_grp.add_argument('-b', '--bg_seq', default=None, dest="bg_seq",
help=("Load file of pre-computed background "
"probabilities for nucleotide sequences"))
bg_grp.add_argument('-B', '--bg_struct', default=None, dest="bg_struct",
help=("Load file of pre-computed background "
"probabilities for nucleotide sequences"))
parser.add_argument('-v', '--version', action='version',
version='%(prog)s ' + __version__)
parser.add_argument('-x', '--debug', action="store_true", default=False,
dest="debug",
help=("Turn on debug mode "
"(aka disable parallelization) [%(default)s]"))
args = parser.parse_args()
if not (args.pfm_seq or args.pfm_struct):
parser.error("Must specify PFMs with -p and/or -q")
if args.uniform_background and (args.bg_seq or args.bg_struct):
parser.error("You cannot set uniform and custom background options "
"at the same time\n")
return args
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
###############################################################################
# Sequence functions
###############################################################################
def _guess_seq_type(args):
"""Given arguments, determine the sequence analysis type: RNA, SS, or RNASS
"""
nfiles = len(args.fastafiles)
if nfiles == 2:
if not (args.pfm_seq or args.pfm_struct):
eprint("Missing PFMs")
sys.exit(1)
seq_type = "RNASS"
else: # nfiles == 1
if args.pfm_seq and args.pfm_struct and not args.testseq:
eprint("Can't specify two PFMs with one input file")
sys.exit(1)
elif args.pfm_seq and args.pfm_struct and args.testseq:
seq_type = "RNASS"
elif args.pfm_seq:
seq_type = "RNA"
elif args.pfm_struct:
seq_type = "SS"
else:
eprint("Must specify PFMs with -p and/or -q")
sys.exit(1)
return seq_type
def batch_iterator(iterator, batch_size):
"""Returns lists of length batch_size.
This can be used on any iterator, for example to batch up
SeqRecord objects from Bio.SeqIO.parse(...), or to batch
Alignment objects from Bio.AlignIO.parse(...), or simply
lines from a file handle.
This is a generator function, and it returns lists of the
entries from the supplied iterator. Each list will have
batch_size entries, although the final list may be shorter.
Source: http://biopython.org/wiki/Split_large_file
"""
entry = True # Make sure we loop once
while entry:
batch = []
while len(batch) < batch_size:
try:
entry = next(iterator)
except StopIteration:
entry = None
if entry is None:
# End of file
break
batch.append(entry)
if batch:
yield batch
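# Hedged illustration (not part of the original module): batching a FASTA
# iterator into groups of 2000 records, mirroring how scan_main() uses
# batch_iterator(); the generator only reports batch sizes.
def _example_batch_sizes(fasta_file):
    for batch in batch_iterator(parse_sequences(fasta_file), 2000):
        yield len(batch)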
def parse_sequences(fasta_file):
"""Load FASTA sequence and return SeqRecord iterator
"""
fin = fileinput.input(fasta_file, openhook=fileinput.hook_compressed)
return SeqIO.parse(fin, 'fasta')
def preprocess_seq(seqrec, alphabet):
"""Pre-process the SeqRecord by setting the alphabet and performing
transcription if necessary.
Return Seq object
"""
if not isinstance(seqrec, SeqRecord):
raise TypeError("SeqRecord object must be supplied")
if isinstance(alphabet, IUPAC.IUPACAmbiguousRNA) and \
not isinstance(seqrec.seq.alphabet, RNAAlphabet):
# If RNA alphabet is specified and input sequences are in DNA, we need
# to transcribe them to RNA
try:
seq = seqrec.seq.transcribe()
seq.alphabet = alphabet
seq = seq.upper()
except:
raise
else:
seq = seqrec.seq
## If strand is specified, reverse-complement the sequence
#strand_match = re.search(r'strand=([+-])', seqrec.description)
#if strand_match and strand_match.group(1) == "-":
#seq = seq.reverse_complement()
return seq
###############################################################################
# PFM functions
###############################################################################
def load_motif(pfm_file, *args):
""" Load PFM
"""
motifs_set = {}
eprint("Loading PFM %s" % pfm_file, end="")
tic = time.time()
try:
motif_id = os.path.splitext(os.path.basename(pfm_file))[0]
motifs_set[motif_id] = pfm2pssm(pfm_file, *args)
except ValueError:
eprint("\nFailed to load motif %s" % pfm_file)
except KeyError:
eprint("\nFailed to load motif %s" % pfm_file)
eprint("Check that you are using the correct --type")
raise
except:
eprint("Unexpected error: %s" % sys.exc_info()[0])
raise
eprint("\b.", end="")
sys.stderr.flush()
toc = time.time()
eprint("done in %0.2f seconds!" % (float(toc - tic)))
eprint("Found %d motifs" % len(motifs_set))
if len(motifs_set) == 0:
raise ValueError("No motifs found.")
return motifs_set
def pfm2pssm(pfm_file, pseudocount, alphabet, background=None):
"""
    Load a PFM and convert it to a PSSM (take the log odds)
"""
pfm = pd.read_table(pfm_file)
pfm = pfm.drop(pfm.columns[0], 1).to_dict(orient='list')
pfm = motifs.Motif(alphabet=alphabet, counts=pfm)
pfm = pfm.counts.normalize(pseudocount)
pssm = pfm.log_odds(background=background)
pssm = matrix.ExtendedPositionSpecificScoringMatrix(pssm.alphabet, pssm)
return pssm
###############################################################################
# Motif scan functions
###############################################################################
def scan(pssm, seq, alphabet, minscore):
""" Core scanning function
"""
results = []
(motif_id, pm) = list(pssm.items())[0]
for position, score in pm.search(seq, threshold=minscore, both=False):
end_position = position + len(pm.consensus)
fragment = seq[position:end_position]
#if isinstance(seq.alphabet, IUPAC.IUPACUnambiguousRNA):
#fragment = fragment.transcribe()
values = [motif_id,
position + 1, end_position,
str(fragment),
round(score, 3)]
results.append(values)
return results
def scan_all(seqrecord, pssm, alphabet, *args):
""" Scan seq for all motifs in pssms
"""
seq = preprocess_seq(seqrecord, alphabet)
results = scan(pssm, seq, alphabet, *args)
columns=['Motif_ID', 'Start', 'End', 'Sequence', 'LogOdds']
final = pd.DataFrame(results, columns=columns)
return final.sort_values(['Start', 'Motif_ID'])
def _scan_all(a_b):
return scan_all(*a_b)
def scan_averaged_structure(struct_file, pssm, minscore):
"""Scan PSSM on an averaged secondary structure model
"""
struct = pd.read_table(struct_file)
del struct['PO']
(motif_id, pm) = list(pssm.items())[0]
motif_scores = []
pm = pd.DataFrame(pm) # Convert dict back to data frame
N = len(pm.index)
for i in range(0, len(struct.index) - N + 1):
score = 0
for j in range(0, N):
# Multiply by SSM
score += np.nan_to_num(np.dot(struct.iloc[i + j, :],
pm.iloc[j, :]))
# Sum log odds
if score > minscore:
motif_scores.append(pd.Series([motif_id, i + 1, i + N, '.', score],
index=['Motif_ID', 'Start', 'End',
'Sequence',
'LogOdds']))
return pd.DataFrame(motif_scores)
def _scan_averaged_structure(a_b):
return scan_averaged_structure(*a_b)
def _add_sequence_id(df, seq_id, description):
""" Helper function to add Sequence_ID and Description (df is a reference)
"""
df['Sequence_ID'] = seq_id
df['Description'] = description
def _add_match_id(df):
"""Insert a unique identifier between 1...n
"""
df['Match_ID'] = list(range(1, df.shape[0] + 1))
def scan_main(fasta_file, pssm, alphabet, bg, args):
""" Main function for handling scanning of PSSM and a sequence/structure
"""
final = pd.DataFrame()
count = 0
if isinstance(fasta_file, SeqRecord):
final = scan_all(fasta_file, pssm, alphabet, args.minscore)
_add_sequence_id(final, 'testseq', '')
count += 1
else:
results = []
if os.path.isdir(fasta_file):
eprint("Scanning averaged secondary structures ")
structures = glob.glob(fasta_file + "/structure.*.txt")
if len(structures) == 0:
raise IOError("No averaged structure files found")
if args.debug:
for struct_file in structures:
hits = scan_averaged_structure(struct_file, pssm,
args.minscore)
_add_sequence_id(hits, struct_file, '')
results.append(hits)
count += 1
else:
p = multiprocessing.Pool(args.cores)
batch_results = p.map(_scan_averaged_structure,
zip(structures, repeat(pssm),
repeat(args.minscore)))
for j, hits in enumerate(batch_results):
if hits is None:
continue
match = re.search(r'^structure\.(.*)\.txt$',
os.path.basename(structures[j]))
_add_sequence_id(hits, match.group(1), '')
count += 1
results.append(hits)
p.close()
else:
eprint("Scanning sequences ")
seq_iter = parse_sequences(fasta_file)
if args.debug:
for seqrecord in seq_iter:
hits = scan_all(seqrecord, pssm, alphabet, args.minscore)
_add_sequence_id(hits, seqrecord.id, seqrecord.description)
results.append(hits)
count += 1
else:
p = multiprocessing.Pool(args.cores)
for i, batch in enumerate(batch_iterator(seq_iter, 2000)):
batch_results = p.map(_scan_all, zip(batch,
repeat(pssm),
repeat(alphabet),
repeat(args.minscore)
)
)
# Process each result
for j, hits in enumerate(batch_results):
if hits is None:
continue
_add_sequence_id(hits, batch[j].id,
batch[j].description)
count += 1
results.append(hits)
p.close()
if len(results) != 0:
final = pd.concat(results)
eprint("Processed %d sequences" % count)
cols = final.columns.tolist()
cols = cols[-2:] + cols[:-2]
return final[cols]
def combine(seq_results, struct_results):
""" If scoring sequence and structure together, add up the log odds at
every position
"""
# Keys to match by : Sequence_ID, Start, End
# NB: Motif_ID may not match
result = pd.merge(seq_results, struct_results,
on=['Sequence_ID', 'Start', 'End'])
result.rename(columns={'Description_x': 'Description.Seq',
'Description_y': 'Description.Struct',
'Sequence_x': 'Sequence.Seq',
'Sequence_y': 'Sequence.Struct',
'Motif_ID_x': 'Motif_ID.Seq',
'Motif_ID_y': 'Motif_ID.Struct',
'LogOdds_x': 'LogOdds.Seq',
'LogOdds_y': 'LogOdds.Struct'}, inplace=True)
result['LogOdds.SeqStruct'] = result['LogOdds.Seq'] + \
result['LogOdds.Struct']
return result
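# Hedged illustration (not part of the original module): combine() matches rows
# on (Sequence_ID, Start, End) and sums the sequence and structure log odds.
# The two single-row frames below are purely hypothetical.
def _example_combine():
    seq = pd.DataFrame([{'Sequence_ID': 's1', 'Start': 1, 'End': 7,
                         'Motif_ID': 'M_seq', 'Sequence': 'AUGGCAU',
                         'Description': '', 'LogOdds': 6.5}])
    struct = pd.DataFrame([{'Sequence_ID': 's1', 'Start': 1, 'End': 7,
                            'Motif_ID': 'M_struct', 'Sequence': '.((...)',
                            'Description': '', 'LogOdds': 4.0}])
    return combine(seq, struct)  # LogOdds.SeqStruct == 10.5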
###############################################################################
# Background functions
###############################################################################
def compute_background(fastas, alphabet, verbose=True):
""" Compute background probabiilities from all input sequences
"""
eprint("Calculating background probabilities...")
content = defaultdict(int)
    total = len(alphabet.letters)  # add a pseudocount for each letter
seq_iter = parse_sequences(fastas)
for seqrecord in seq_iter:
seqobj = preprocess_seq(seqrecord, alphabet)
for letter in alphabet.letters:
amount = seqobj.count(letter)
content[letter] += amount
total += amount
pct_sum = 0
for letter, count in content.items():
content[letter] = (float(count) + 1) / total # add pseudocount
if content[letter] <= 0.05:
warnings.warn("Letter %s has low content: %0.2f"
% (letter, content[letter]), Warning)
pct_sum += content[letter]
if verbose: eprint(dict(content))
assert abs(1.0 - pct_sum) < 0.0001, "Background sums to %f" % pct_sum
return content
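# Hedged illustration (not part of the original module): the background file
# consumed by load_background() below is just a Python dict literal (the same
# thing printed by --bgonly); the output path here is hypothetical.
def _example_write_background(bg, path='background_seq.txt'):
    with open(path, 'w') as fout:
        fout.write(str(dict(bg)))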
def load_background(bg_file, uniform, *args):
""" Load background probabilities if available, otherwise compute from
input files or use uniform
"""
if bg_file:
eprint("Reading custom background probabilities from %s" % bg_file)
# load custom background
# http://stackoverflow.com/a/11027069
with open(bg_file, 'r') as fin:
bg = fin.read()
bg = ast.literal_eval(bg)
eprint(dict(bg))
elif not uniform:
bg = compute_background(*args)
else:
bg = None
return bg
###############################################################################
# Main
###############################################################################
def main():
tic = time.time()
args = getoptions()
seq_type = _guess_seq_type(args)
bg = None
if args.testseq:
testseq_stack = args.testseq.split(',')[::-1] # make a stack
## Sequence
if seq_type in ['RNA', 'RNASS']:
if args.testseq:
seq_file = SeqRecord(Seq(testseq_stack.pop()))
else:
seq_file = args.fastafiles[0]
if not args.testseq:
bg = load_background(args.bg_seq,
args.uniform_background,
seq_file,
IUPAC.IUPACUnambiguousRNA(),
not args.bgonly)
if not args.bgonly:
pssm = load_motif(args.pfm_seq,
args.pseudocount,
IUPAC.IUPACUnambiguousRNA(),
bg)
seq_results = scan_main(seq_file,
pssm,
IUPAC.IUPACUnambiguousRNA(),
bg, args)
else:
print(dict(bg))
sys.exit()
## Structure
if seq_type in ['SS', 'RNASS']:
if args.testseq:
struct_file = SeqRecord(Seq(testseq_stack.pop()))
elif seq_type == 'SS':
struct_file = args.fastafiles[0]
else:
struct_file = args.fastafiles[1]
if not args.testseq:
bg = load_background(args.bg_struct,
args.uniform_background,
struct_file,
ContextualSecondaryStructure(),
not args.bgonly)
if not args.bgonly:
pssm = load_motif(args.pfm_struct,
args.pseudocount,
ContextualSecondaryStructure(),
bg)
struct_results = scan_main(struct_file,
pssm,
ContextualSecondaryStructure(),
bg, args)
else:
print(dict(bg))
sys.exit()
if seq_type == 'RNASS':
combined_results = combine(seq_results, struct_results)
combined_results.reset_index(drop=True)
_add_match_id(combined_results)
combined_results.to_csv(sys.stdout, sep="\t", index=False)
elif seq_type == 'RNA':
seq_results.reset_index(drop=True)
_add_match_id(seq_results)
seq_results.to_csv(sys.stdout, sep="\t", index=False)
else:
struct_results.reset_index(drop=True)
_add_match_id(struct_results)
struct_results.to_csv(sys.stdout, sep="\t", index=False)
toc = time.time()
runtime = float(toc - tic)
if runtime > 60:
eprint("Done in %0.4f minutes!" % (runtime / 60))
else:
eprint("Done in %0.4f seconds!" % (runtime))
if __name__ == '__main__':
main()
|
morrislab/rnascan
|
rnascan/rnascan.py
|
Python
|
agpl-3.0
| 21,409
|
[
"Biopython"
] |
d83968d3f979e0bfc66c98c4b143d3cbd076209c2679bf148a65b380d2cdbea6
|
# -*- coding: utf-8 -*-
import datetime
import json
import os
import shutil
from django.conf import settings
from django.core.files.storage import default_storage as storage
import mock
from nose.tools import eq_, ok_
from pyquery import PyQuery as pq
import amo
import amo.tests
from addons.models import (Addon, AddonCategory, AddonDeviceType, AddonUser,
Category)
from amo.tests import formset, initial
from amo.tests.test_helpers import get_image_path
from amo.urlresolvers import reverse
from apps.users.models import UserNotification
from apps.users.notifications import app_surveys
from constants.applications import DEVICE_TYPES
from files.tests.test_models import UploadTest as BaseUploadTest
from translations.models import Translation
from users.models import UserProfile
import mkt
from mkt.site.fixtures import fixture
from mkt.submit.decorators import read_dev_agreement_required
from mkt.submit.forms import AppFeaturesForm, NewWebappVersionForm
from mkt.submit.models import AppSubmissionChecklist
from mkt.webapps.models import AddonExcludedRegion as AER, AppFeatures, Webapp
class TestSubmit(amo.tests.TestCase):
fixtures = fixture('user_999')
def setUp(self):
self.fi_mock = mock.patch(
'mkt.developers.tasks.fetch_icon').__enter__()
self.user = self.get_user()
assert self.client.login(username=self.user.email, password='password')
def tearDown(self):
self.fi_mock.__exit__()
def get_user(self):
return UserProfile.objects.get(username='regularuser')
def get_url(self, url):
return reverse('submit.app.%s' % url, args=[self.webapp.app_slug])
def _test_anonymous(self):
self.client.logout()
r = self.client.get(self.url, follow=True)
self.assertLoginRedirects(r, self.url)
def _test_progress_display(self, completed, current):
"""Test that the correct steps are highlighted."""
r = self.client.get(self.url)
progress = pq(r.content)('#submission-progress')
# Check the completed steps.
completed_found = progress.find('.completed')
for idx, step in enumerate(completed):
li = completed_found.eq(idx)
eq_(li.text(), unicode(mkt.APP_STEPS_TITLE[step]))
# Check that we link back to the Developer Agreement.
terms_link = progress.find('.terms a')
if 'terms' in completed:
eq_(terms_link.attr('href'),
reverse('mkt.developers.docs', args=['policies', 'agreement']))
else:
eq_(terms_link.length, 0)
# Check the current step.
eq_(progress.find('.current').text(),
unicode(mkt.APP_STEPS_TITLE[current]))
class TestProceed(TestSubmit):
def setUp(self):
super(TestProceed, self).setUp()
self.user.update(read_dev_agreement=None)
self.url = reverse('submit.app')
def test_is_authenticated(self):
# Redirect user to Terms.
r = self.client.get(self.url)
self.assert3xx(r, reverse('submit.app.terms'))
def test_is_anonymous(self):
# Show user to Terms page but with the login prompt.
self.client.logout()
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(r.context['proceed'], True)
class TestTerms(TestSubmit):
def setUp(self):
super(TestTerms, self).setUp()
self.user.update(read_dev_agreement=None)
self.url = reverse('submit.app.terms')
def test_anonymous(self):
self.client.logout()
r = self.client.get(self.url, follow=True)
self.assertLoginRedirects(r, self.url)
def test_jump_to_step(self):
r = self.client.get(reverse('submit.app'), follow=True)
self.assert3xx(r, self.url)
def test_page(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)('#submit-terms')
eq_(doc.length, 1)
eq_(doc.find('input[name=newsletter]').siblings('label').length, 1,
'Missing its <label>!')
def test_progress_display(self):
self._test_progress_display([], 'terms')
@mock.patch('basket.subscribe')
def test_agree(self, subscribe_mock):
self.client.post(self.url, {'read_dev_agreement': True})
dt = self.get_user().read_dev_agreement
self.assertCloseToNow(dt)
eq_(UserNotification.objects.count(), 0)
assert not subscribe_mock.called
@mock.patch('basket.subscribe')
def test_agree_and_sign_me_up(self, subscribe_mock):
self.client.post(self.url, {'read_dev_agreement':
datetime.datetime.now(),
'newsletter': True})
dt = self.get_user().read_dev_agreement
self.assertCloseToNow(dt)
eq_(UserNotification.objects.count(), 1)
notes = UserNotification.objects.filter(user=self.user, enabled=True,
notification_id=app_surveys.id)
eq_(notes.count(), 1, 'Expected to be subscribed to newsletter')
assert subscribe_mock.called
def test_disagree(self):
r = self.client.post(self.url)
eq_(r.status_code, 200)
eq_(self.user.read_dev_agreement, None)
eq_(UserNotification.objects.count(), 0)
def test_read_dev_agreement_required(self):
f = mock.Mock()
f.__name__ = 'function'
request = mock.Mock()
request.amo_user.read_dev_agreement = None
request.get_full_path.return_value = self.url
func = read_dev_agreement_required(f)
res = func(request)
assert not f.called
eq_(res.status_code, 302)
eq_(res['Location'], reverse('submit.app'))
class TestManifest(TestSubmit):
def setUp(self):
super(TestManifest, self).setUp()
self.user.update(read_dev_agreement=None)
self.url = reverse('submit.app')
def _step(self):
self.user.update(read_dev_agreement=datetime.datetime.now())
def test_anonymous(self):
r = self.client.get(self.url, follow=True)
eq_(r.context['step'], 'terms')
def test_cannot_skip_prior_step(self):
r = self.client.get(self.url, follow=True)
# And we start back at one...
self.assert3xx(r, reverse('submit.app.terms'))
def test_jump_to_step(self):
# I already read the Terms.
self._step()
# So jump me to the Manifest step.
r = self.client.get(reverse('submit.app'), follow=True)
eq_(r.context['step'], 'manifest')
def test_legacy_redirects(self):
def check():
for before, status in redirects:
r = self.client.get(before, follow=True)
self.assert3xx(r, dest, status)
# I haven't read the dev agreement.
redirects = (
('/developers/submit/', 302),
('/developers/submit/app', 302),
('/developers/submit/app/terms', 302),
('/developers/submit/app/manifest', 302),
)
dest = '/developers/submit/terms'
check()
# I have read the dev agreement.
self._step()
redirects = (
('/developers/submit/app', 302),
('/developers/submit/app/terms', 302),
('/developers/submit/app/manifest', 302),
('/developers/submit/manifest', 301),
)
dest = '/developers/submit/'
check()
def test_page(self):
self._step()
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(pq(r.content)('#upload-file').length, 1)
def test_progress_display(self):
self._step()
self._test_progress_display(['terms'], 'manifest')
class UploadAddon(object):
def post(self, expect_errors=False, data=None):
if data is None:
data = {'free_platforms': ['free-desktop']}
data.update(upload=self.upload.pk)
r = self.client.post(self.url, data, follow=True)
eq_(r.status_code, 200)
if not expect_errors:
# Show any unexpected form errors.
if r.context and 'form' in r.context:
eq_(r.context['form'].errors, {})
return r
class BaseWebAppTest(BaseUploadTest, UploadAddon, amo.tests.TestCase):
fixtures = fixture('app_firefox', 'platform_all', 'user_999', 'user_10482')
def setUp(self):
super(BaseWebAppTest, self).setUp()
self.manifest = self.manifest_path('mozball.webapp')
self.manifest_url = 'http://allizom.org/mozball.webapp'
self.upload = self.get_upload(abspath=self.manifest)
self.upload.update(name=self.manifest_url, is_webapp=True)
self.url = reverse('submit.app')
assert self.client.login(username='regular@mozilla.com',
password='password')
def post_addon(self, data=None):
eq_(Addon.objects.count(), 0)
self.post(data=data)
return Addon.objects.get()
class TestCreateWebApp(BaseWebAppTest):
@mock.patch('mkt.developers.tasks.fetch_icon')
def test_post_app_redirect(self, fi_mock):
r = self.post()
webapp = Webapp.objects.get()
self.assert3xx(r,
reverse('submit.app.details', args=[webapp.app_slug]))
assert fi_mock.delay.called, (
'The fetch_icon task was expected to be called')
def test_no_hint(self):
self.post_addon()
self.upload = self.get_upload(abspath=self.manifest)
r = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=self.manifest_url), follow=True)
eq_(r.status_code, 200)
assert 'already submitted' not in r.content, (
'Unexpected helpful error (trap_duplicate)')
assert 'already exists' not in r.content, (
'Unexpected validation error (verify_app_domain)')
def test_no_upload(self):
data = {'free_platforms': ['free-desktop']}
res = self.client.post(self.url, data, follow=True)
eq_(res.context['form'].errors,
{'upload': NewWebappVersionForm.upload_error})
@mock.patch('mkt.developers.tasks.fetch_icon')
def test_bad_upload(self, fi_mock):
data = {'free_platforms': ['free-desktop'], 'upload': 'foo'}
res = self.client.post(self.url, data, follow=True)
eq_(res.context['form'].errors,
{'upload': NewWebappVersionForm.upload_error})
assert not fi_mock.delay.called, (
'The fetch_icon task was not expected to be called')
def test_hint_for_same_manifest(self):
self.create_switch(name='webapps-unique-by-domain')
self.post_addon()
self.upload = self.get_upload(abspath=self.manifest)
r = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=self.manifest_url))
data = json.loads(r.content)
assert 'Oops' in data['validation']['messages'][0]['message'], (
'Expected oops')
def test_no_hint_for_same_manifest_different_author(self):
self.create_switch(name='webapps-unique-by-domain')
self.post_addon()
# Submit same manifest as different user.
assert self.client.login(username='clouserw@gmail.com',
password='password')
self.upload = self.get_upload(abspath=self.manifest)
r = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=self.manifest_url))
data = json.loads(r.content)
eq_(data['validation']['messages'][0]['message'],
'An app already exists on this domain; only one app per domain is '
'allowed.')
def test_app_from_uploaded_manifest(self):
addon = self.post_addon()
eq_(addon.type, amo.ADDON_WEBAPP)
eq_(addon.is_packaged, False)
assert addon.guid is not None, (
'Expected app to have a UUID assigned to guid')
eq_(unicode(addon.name), u'MozillaBall ょ')
eq_(addon.slug, 'app-%s' % addon.id)
eq_(addon.app_slug, u'mozillaball-ょ')
eq_(addon.description, u'Exciting Open Web development action!')
eq_(addon.manifest_url, u'http://allizom.org/mozball.webapp')
eq_(addon.app_domain, u'http://allizom.org')
eq_(Translation.objects.get(id=addon.description.id, locale='it'),
u'Azione aperta emozionante di sviluppo di fotoricettore!')
eq_(addon.current_version.developer_name, 'Mozilla Labs')
eq_(addon.current_version.manifest,
json.loads(open(self.manifest).read()))
def test_manifest_with_any_extension(self):
self.manifest = os.path.join(settings.ROOT, 'mkt', 'developers',
'tests', 'addons', 'mozball.owa')
self.upload = self.get_upload(abspath=self.manifest, is_webapp=True)
addon = self.post_addon()
eq_(addon.type, amo.ADDON_WEBAPP)
def test_version_from_uploaded_manifest(self):
addon = self.post_addon()
eq_(addon.current_version.version, '1.0')
def test_file_from_uploaded_manifest(self):
addon = self.post_addon()
files = addon.current_version.files.all()
eq_(len(files), 1)
eq_(files[0].status, amo.STATUS_PENDING)
def test_set_platform(self):
app = self.post_addon(
{'free_platforms': ['free-android-tablet', 'free-desktop']})
self.assertSetEqual(app.device_types,
[amo.DEVICE_TABLET, amo.DEVICE_DESKTOP])
def test_free(self):
app = self.post_addon({'free_platforms': ['free-firefoxos']})
self.assertSetEqual(app.device_types, [amo.DEVICE_GAIA])
eq_(app.premium_type, amo.ADDON_FREE)
def test_premium(self):
self.create_flag('allow-b2g-paid-submission')
app = self.post_addon({'paid_platforms': ['paid-firefoxos']})
self.assertSetEqual(app.device_types, [amo.DEVICE_GAIA])
eq_(app.premium_type, amo.ADDON_PREMIUM)
def test_supported_locales(self):
addon = self.post_addon()
eq_(addon.default_locale, 'en-US')
eq_(addon.versions.latest().supported_locales, 'es,it')
def test_short_locale(self):
# This manifest has a locale code of "pt" which is in the
# SHORTER_LANGUAGES setting and should get converted to "pt-PT".
self.manifest = self.manifest_path('short-locale.webapp')
self.upload = self.get_upload(abspath=self.manifest)
addon = self.post_addon()
eq_(addon.default_locale, 'pt-PT')
eq_(addon.versions.latest().supported_locales, 'es')
def test_unsupported_detail_locale(self):
# This manifest has a locale code of "en-GB" which is unsupported, so
# we default to "en-US".
self.manifest = self.manifest_path('unsupported-default-locale.webapp')
self.upload = self.get_upload(abspath=self.manifest)
addon = self.post_addon()
eq_(addon.default_locale, 'en-US')
eq_(addon.versions.latest().supported_locales, 'es,it')
def test_appfeatures_creation(self):
addon = self.post_addon(data={
'free_platforms': ['free-desktop'],
'has_contacts': 'on'
})
features = addon.current_version.features
ok_(isinstance(features, AppFeatures))
field_names = [f.name for f in AppFeaturesForm().all_fields()]
for field in field_names:
expected = field == 'has_contacts'
eq_(getattr(features, field), expected)
class TestCreateWebAppFromManifest(BaseWebAppTest):
def setUp(self):
super(TestCreateWebAppFromManifest, self).setUp()
Webapp.objects.create(app_slug='xxx',
app_domain='http://existing-app.com')
def upload_webapp(self, manifest_url, **post_kw):
self.upload.update(name=manifest_url) # Simulate JS upload.
return self.post(**post_kw)
def post_manifest(self, manifest_url):
rs = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=manifest_url))
if 'json' in rs['content-type']:
rs = json.loads(rs.content)
return rs
def test_duplicate_domain(self):
self.create_switch(name='webapps-unique-by-domain')
rs = self.upload_webapp('http://existing-app.com/my.webapp',
expect_errors=True)
eq_(rs.context['form'].errors,
{'upload':
['An app already exists on this domain; only one '
'app per domain is allowed.']})
def test_allow_duplicate_domains(self):
self.upload_webapp('http://existing-app.com/my.webapp') # No errors.
def test_duplicate_domain_from_js(self):
self.create_switch(name='webapps-unique-by-domain')
data = self.post_manifest('http://existing-app.com/my.webapp')
eq_(data['validation']['errors'], 1)
eq_(data['validation']['messages'][0]['message'],
'An app already exists on this domain; '
'only one app per domain is allowed.')
def test_allow_duplicate_domains_from_js(self):
rs = self.post_manifest('http://existing-app.com/my.webapp')
eq_(rs.status_code, 302)
class BasePackagedAppTest(BaseUploadTest, UploadAddon, amo.tests.TestCase):
fixtures = fixture('webapp_337141', 'user_999')
def setUp(self):
super(BasePackagedAppTest, self).setUp()
self.app = Webapp.objects.get(pk=337141)
self.app.update(is_packaged=True)
self.version = self.app.current_version
self.file = self.version.all_files[0]
self.file.update(filename='mozball.zip')
self.package = self.packaged_app_path('mozball.zip')
self.upload = self.get_upload(abspath=self.package)
self.upload.update(name='mozball.zip', is_webapp=True)
self.url = reverse('submit.app')
assert self.client.login(username='regular@mozilla.com',
password='password')
def post_addon(self, data=None):
eq_(Addon.objects.count(), 1)
self.post(data=data)
return Addon.objects.order_by('-id')[0]
def setup_files(self, filename='mozball.zip'):
# Make sure the source file is there.
# Original packaged file.
if not storage.exists(self.file.file_path):
try:
# We don't care if these dirs exist.
os.makedirs(os.path.dirname(self.file.file_path))
except OSError:
pass
shutil.copyfile(self.packaged_app_path(filename),
self.file.file_path)
# Signed packaged file.
if not storage.exists(self.file.signed_file_path):
try:
# We don't care if these dirs exist.
os.makedirs(os.path.dirname(self.file.signed_file_path))
except OSError:
pass
shutil.copyfile(self.packaged_app_path(filename),
self.file.signed_file_path)
class TestCreatePackagedApp(BasePackagedAppTest):
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest')
def test_post_app_redirect(self, _mock):
res = self.post()
webapp = Webapp.objects.order_by('-created')[0]
self.assert3xx(res,
reverse('submit.app.details', args=[webapp.app_slug]))
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest')
@mock.patch('mkt.submit.forms.verify_app_domain')
def test_app_from_uploaded_package(self, _verify, _mock):
addon = self.post_addon(
data={'packaged': True, 'free_platforms': ['free-firefoxos']})
eq_(addon.type, amo.ADDON_WEBAPP)
eq_(addon.current_version.version, '1.0')
eq_(addon.is_packaged, True)
assert addon.guid is not None, (
'Expected app to have a UUID assigned to guid')
eq_(unicode(addon.name), u'Packaged MozillaBall ょ')
eq_(addon.slug, 'app-%s' % addon.id)
eq_(addon.app_slug, u'packaged-mozillaball-ょ')
eq_(addon.description, u'Exciting Open Web development action!')
eq_(addon.manifest_url, None)
eq_(addon.app_domain, 'app://hy.fr')
eq_(Translation.objects.get(id=addon.description.id, locale='it'),
u'Azione aperta emozionante di sviluppo di fotoricettore!')
eq_(addon.current_version.developer_name, 'Mozilla Labs')
assert _verify.called, (
'`verify_app_domain` should be called for packaged apps with '
'origins.')
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest')
def test_packaged_app_not_unique(self, _mock):
Webapp.objects.create(is_packaged=True, app_domain='app://hy.fr')
res = self.post(
data={'packaged': True, 'free_platforms': ['free-firefoxos']},
expect_errors=True)
eq_(res.context['form'].errors, {
'upload': ['An app already exists on this domain; only one app '
'per domain is allowed.']})
class TestDetails(TestSubmit):
fixtures = fixture('webapp_337141', 'user_999', 'user_10482')
def setUp(self):
super(TestDetails, self).setUp()
self.webapp = self.get_webapp()
self.webapp.update(status=amo.STATUS_NULL)
self.url = reverse('submit.app.details', args=[self.webapp.app_slug])
def get_webapp(self):
return Webapp.objects.get(id=337141)
def upload_preview(self, image_file=None):
if not image_file:
image_file = get_image_path('preview.jpg')
return self._upload_image(self.webapp.get_dev_url('upload_preview'),
image_file=image_file)
def upload_icon(self, image_file=None):
if not image_file:
image_file = get_image_path('mozilla-sq.png')
return self._upload_image(self.webapp.get_dev_url('upload_icon'),
image_file=image_file)
def _upload_image(self, url, image_file):
with open(image_file, 'rb') as data:
rp = self.client.post(url, {'upload_image': data})
eq_(rp.status_code, 200)
hash_ = json.loads(rp.content)['upload_hash']
assert hash_, 'No hash: %s' % rp.content
return hash_
def _step(self):
self.user.update(read_dev_agreement=datetime.datetime.now())
self.cl = AppSubmissionChecklist.objects.create(addon=self.webapp,
terms=True, manifest=True)
# Associate app with user.
AddonUser.objects.create(addon=self.webapp, user=self.user)
# Associate device type with app.
self.dtype = DEVICE_TYPES.values()[0]
AddonDeviceType.objects.create(addon=self.webapp,
device_type=self.dtype.id)
self.device_types = [self.dtype]
# Associate category with app.
self.cat1 = Category.objects.create(type=amo.ADDON_WEBAPP, name='Fun')
AddonCategory.objects.create(addon=self.webapp, category=self.cat1)
def test_anonymous(self):
self._test_anonymous()
def test_resume_later(self):
self._step()
self.webapp.appsubmissionchecklist.update(details=True)
r = self.client.get(reverse('submit.app.resume',
args=[self.webapp.app_slug]))
self.assert3xx(r, self.webapp.get_dev_url('edit'))
def test_not_owner(self):
self._step()
assert self.client.login(username='clouserw@gmail.com',
password='password')
eq_(self.client.get(self.url).status_code, 403)
def test_page(self):
self._step()
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(pq(r.content)('#submit-details').length, 1)
def test_progress_display(self):
self._step()
self._test_progress_display(['terms', 'manifest'], 'details')
def new_preview_formset(self, *args, **kw):
ctx = self.client.get(self.url).context
blank = initial(ctx['form_previews'].forms[-1])
blank.update(**kw)
return blank
def preview_formset(self, *args, **kw):
kw.setdefault('initial_count', 0)
kw.setdefault('prefix', 'files')
fs = formset(*[a for a in args] + [self.new_preview_formset()], **kw)
return dict([(k, '' if v is None else v) for k, v in fs.items()])
def get_dict(self, **kw):
data = {
'app_slug': 'testname',
'description': 'desc',
'privacy_policy': 'XXX <script>alert("xss")</script>',
'homepage': 'http://www.goodreads.com/user/show/7595895-krupa',
'support_url': 'http://www.goodreads.com/user_challenges/351558',
'support_email': 'krupa+to+the+rescue@goodreads.com',
'categories': [self.cat1.id],
'flash': '1',
'publish': '1'
}
# Add the required screenshot.
data.update(self.preview_formset({
'upload_hash': '<hash>',
'position': 0
}))
data.update(**kw)
# Remove fields without values.
data = dict((k, v) for k, v in data.iteritems() if v is not None)
return data
def check_dict(self, data=None, expected=None):
if data is None:
data = self.get_dict()
addon = self.get_webapp()
# Build a dictionary of expected results.
expected_data = {
'app_slug': 'testname',
'description': 'desc',
'privacy_policy': 'XXX <script>alert("xss")</script>',
'uses_flash': True,
'make_public': amo.PUBLIC_IMMEDIATELY
}
if expected:
expected_data.update(expected)
for field, expected in expected_data.iteritems():
got = unicode(getattr(addon, field))
expected = unicode(expected)
eq_(got, expected,
'Expected %r for %r. Got %r.' % (expected, field, got))
self.assertSetEqual(addon.device_types, self.device_types)
@mock.patch('mkt.submit.views.record_action')
def test_success(self, record_action):
self._step()
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
eq_(self.webapp.status, amo.STATUS_PENDING)
assert record_action.called
@mock.patch('mkt.submit.views.record_action')
def test_success_iarc(self, record_action):
"""TODO: delete the above test when cleaning up waffle."""
self.create_switch('iarc')
self._step()
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
eq_(self.webapp.status, amo.STATUS_NULL)
assert record_action.called
def test_success_paid(self):
self._step()
self.webapp = self.get_webapp()
self.make_premium(self.webapp)
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
eq_(self.webapp.status, amo.STATUS_NULL)
eq_(self.webapp.highest_status, amo.STATUS_PENDING)
def test_success_prefill_device_types_if_empty(self):
"""
The new submission flow asks for device types at step one.
This ensures that existing incomplete apps still have device
compatibility.
"""
self._step()
AddonDeviceType.objects.all().delete()
self.device_types = amo.DEVICE_TYPES.values()
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
def test_success_for_public_waiting(self):
self._step()
data = self.get_dict()
del data['publish']
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data, expected={'make_public': amo.PUBLIC_WAIT})
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
def test_media_types(self):
self._step()
res = self.client.get(self.url)
doc = pq(res.content)
eq_(doc('.screenshot_upload').attr('data-allowed-types'),
'image/jpeg|image/png|video/webm')
eq_(doc('#id_icon_upload').attr('data-allowed-types'),
'image/jpeg|image/png')
def test_screenshot(self):
self._step()
im_hash = self.upload_preview()
data = self.get_dict()
data.update(self.preview_formset({
'upload_hash': im_hash,
'position': 0
}))
rp = self.client.post(self.url, data)
eq_(rp.status_code, 302)
ad = Addon.objects.get(pk=self.webapp.pk)
eq_(ad.previews.all().count(), 1)
def test_icon(self):
self._step()
im_hash = self.upload_icon()
data = self.get_dict()
data['icon_upload_hash'] = im_hash
data['icon_type'] = 'image/png'
rp = self.client.post(self.url, data)
eq_(rp.status_code, 302)
ad = self.get_webapp()
eq_(ad.icon_type, 'image/png')
for size in amo.ADDON_ICON_SIZES:
fn = '%s-%s.png' % (ad.id, size)
assert os.path.exists(os.path.join(ad.get_icon_dir(), fn)), (
'Expected %s in %s' % (fn, os.listdir(ad.get_icon_dir())))
def test_screenshot_or_video_required(self):
self._step()
data = self.get_dict()
for k in data:
if k.startswith('files') and k.endswith('upload_hash'):
data[k] = ''
rp = self.client.post(self.url, data)
eq_(rp.context['form_previews'].non_form_errors(),
['You must upload at least one screenshot or video.'])
def test_unsaved_screenshot(self):
self._step()
# If there are form errors we should still pass the previews URIs.
preview_type = 'video/webm'
preview_uri = 'moz-filedata:p00p'
data = self.preview_formset({
'position': 1,
'upload_hash': '<hash_one>',
'unsaved_image_type': preview_type,
'unsaved_image_data': preview_uri
})
r = self.client.post(self.url, data)
eq_(r.status_code, 200)
form = pq(r.content)('form')
eq_(form.find('input[name=files-0-unsaved_image_type]').val(),
preview_type)
eq_(form.find('input[name=files-0-unsaved_image_data]').val(),
preview_uri)
def test_unique_allowed(self):
self._step()
r = self.client.post(self.url, self.get_dict(name=self.webapp.name))
self.assertNoFormErrors(r)
app = Webapp.objects.exclude(app_slug=self.webapp.app_slug)[0]
self.assert3xx(r, reverse('submit.app.done', args=[app.app_slug]))
eq_(self.get_webapp().status, amo.STATUS_PENDING)
def test_unique_allowed_iarc(self):
"""TODO: delete the above test when cleaning up waffle."""
self.create_switch('iarc')
self._step()
r = self.client.post(self.url, self.get_dict(name=self.webapp.name))
self.assertNoFormErrors(r)
app = Webapp.objects.exclude(app_slug=self.webapp.app_slug)[0]
self.assert3xx(r, reverse('submit.app.done', args=[app.app_slug]))
eq_(self.get_webapp().status, amo.STATUS_NULL)
def test_slug_invalid(self):
self._step()
# Submit an invalid slug.
d = self.get_dict(app_slug='slug!!! aksl23%%')
r = self.client.post(self.url, d)
eq_(r.status_code, 200)
self.assertFormError(r, 'form_basic', 'app_slug',
"Enter a valid 'slug' consisting of letters, numbers, underscores "
"or hyphens.")
def test_slug_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(app_slug=''))
eq_(r.status_code, 200)
self.assertFormError(r, 'form_basic', 'app_slug',
'This field is required.')
def test_description_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(description=''))
eq_(r.status_code, 200)
self.assertFormError(r, 'form_basic', 'description',
'This field is required.')
def test_privacy_policy_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(privacy_policy=None))
self.assertFormError(r, 'form_basic', 'privacy_policy',
'This field is required.')
def test_clashing_locale(self):
self.webapp.default_locale = 'de'
self.webapp.save()
self._step()
self.client.cookies['current_locale'] = 'en-us'
data = self.get_dict(name=None, name_de='Test name',
privacy_policy=None,
**{'privacy_policy_en-us': 'XXX'})
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
def test_homepage_url_optional(self):
self._step()
r = self.client.post(self.url, self.get_dict(homepage=None))
self.assertNoFormErrors(r)
def test_homepage_url_invalid(self):
self._step()
r = self.client.post(self.url, self.get_dict(homepage='xxx'))
self.assertFormError(r, 'form_basic', 'homepage', 'Enter a valid URL.')
def test_support_url_optional(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_url=None))
self.assertNoFormErrors(r)
def test_support_url_invalid(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_url='xxx'))
self.assertFormError(r, 'form_basic', 'support_url',
'Enter a valid URL.')
def test_support_email_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_email=None))
self.assertFormError(r, 'form_basic', 'support_email',
'This field is required.')
def test_support_email_invalid(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_email='xxx'))
self.assertFormError(r, 'form_basic', 'support_email',
'Enter a valid e-mail address.')
def test_categories_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(categories=[]))
eq_(r.context['form_cats'].errors['categories'],
['This field is required.'])
def test_categories_max(self):
self._step()
eq_(amo.MAX_CATEGORIES, 2)
cat2 = Category.objects.create(type=amo.ADDON_WEBAPP, name='bling')
cat3 = Category.objects.create(type=amo.ADDON_WEBAPP, name='blang')
cats = [self.cat1.id, cat2.id, cat3.id]
r = self.client.post(self.url, self.get_dict(categories=cats))
eq_(r.context['form_cats'].errors['categories'],
['You can have only 2 categories.'])
def _post_cats(self, cats):
self.client.post(self.url, self.get_dict(categories=cats))
eq_(sorted(self.get_webapp().categories.values_list('id', flat=True)),
sorted(cats))
def test_categories_add(self):
self._step()
cat2 = Category.objects.create(type=amo.ADDON_WEBAPP, name='bling')
self._post_cats([self.cat1.id, cat2.id])
def test_categories_add_and_remove(self):
self._step()
cat2 = Category.objects.create(type=amo.ADDON_WEBAPP, name='bling')
self._post_cats([cat2.id])
def test_categories_remove(self):
# Add another category here so it gets added to the initial formset.
cat2 = Category.objects.create(type=amo.ADDON_WEBAPP, name='bling')
AddonCategory.objects.create(addon=self.webapp, category=cat2)
self._step()
# `cat2` should get removed.
self._post_cats([self.cat1.id])
class TestDone(TestSubmit):
fixtures = fixture('base_users', 'user_999', 'webapp_337141')
def setUp(self):
super(TestDone, self).setUp()
self.webapp = self.get_webapp()
self.url = reverse('submit.app.done', args=[self.webapp.app_slug])
def get_webapp(self):
return Webapp.objects.get(id=337141)
def _step(self, **kw):
data = dict(addon=self.webapp, terms=True, manifest=True,
details=True)
data.update(kw)
self.cl = AppSubmissionChecklist.objects.create(**data)
AddonUser.objects.create(addon=self.webapp, user=self.user)
def test_anonymous(self):
self._test_anonymous()
def test_progress_display(self):
self._step()
self._test_progress_display(['terms', 'manifest', 'details'], 'done')
def test_done(self):
self._step()
res = self.client.get(self.url)
eq_(res.status_code, 200)
class TestNextSteps(amo.tests.TestCase):
# TODO: Delete this test suite once we deploy IARC.
fixtures = fixture('user_999', 'webapp_337141')
def setUp(self):
self.create_switch('iarc')
self.user = UserProfile.objects.get(username='regularuser')
assert self.client.login(username=self.user.email, password='password')
self.webapp = Webapp.objects.get(id=337141)
self.webapp.update(status=amo.STATUS_PENDING)
self.url = reverse('submit.app.done', args=[self.webapp.app_slug])
def test_200(self, **kw):
data = dict(addon=self.webapp, terms=True, manifest=True,
details=True)
data.update(kw)
self.cl = AppSubmissionChecklist.objects.create(**data)
AddonUser.objects.create(addon=self.webapp, user=self.user)
res = self.client.get(self.url)
eq_(res.status_code, 200)
|
robhudson/zamboni
|
mkt/submit/tests/test_views.py
|
Python
|
bsd-3-clause
| 38,483
|
[
"exciting"
] |
c9120086dea06fed63ef16b032493327c60c0cdd5caf1354522b7cc021a808f1
|
import os
from setuptools import setup, find_packages
__version__ = (0, 0, 1)
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="netcdf-pycli",
version='.'.join(str(d) for d in __version__),
author="Basil Veerman",
author_email="bveerman@uvic.ca",
description="A collection of command line netCDF tools",
url="http://www.pacificclimate.org/",
packages=find_packages('.'),
scripts=['scripts/ncvarsubset.py'],
install_requires=['netCDF4'],
long_description=read('README.md')
)
|
basilveerman/netcdf-pycli
|
setup.py
|
Python
|
gpl-3.0
| 588
|
[
"NetCDF"
] |
058654104f74f4674e4e9effdb568212f28834fc18e98c178dc41b36893e678f
|
import sys
import os
import traceback
import unittest
from antlr4 import * # type: ignore
from c2p.grammar.antlr.SmallCLexer import SmallCLexer
from c2p.grammar.antlr.SmallCParser import SmallCParser
from c2p.grammar.ast.visitor import ASTVisitor
from c2p.grammar.ast.visualize import Visualizer
from .environment import Environment
from .error import SemanticError
filepath = os.path.dirname(__file__) + "/../../test/wrong/"
def _test(filename, expectedString):
parser = SmallCParser(CommonTokenStream(SmallCLexer(FileStream(filepath + filename))))
tree = parser.program()
try:
ast = ASTVisitor().visit(tree)
code = ast.to_code(Environment()).code
return False
except SemanticError as e:
if expectedString == str(e):
return True
else:
print('Unexpected SemanticError "{}" for file "{}"'.format(str(e), filename))
return False
except Exception as e:
print('Unexpected {} "{}" for file "{}".'.format(e.__class__.__name__, str(e), filename))
return False
class TestSemanticErrors(unittest.TestCase):
def test_no_main(self):
self.assertTrue(_test('no_main.c', 'No \'main\' function found.'))
def test_func_as_var(self):
self.assertTrue(_test('func_as_var.c', 'Attempted to use symbol "func" as a variable when it is a function.'))
def test_var_as_func(self):
self.assertTrue(_test('var_as_func.c', 'Attempted to use symbol "x" as a function when it is a variable.'))
def test_repeat_func(self):
self.assertTrue(_test('repeat_func.c', 'Redefinition of "main"!'))
def test_repeat_var(self):
self.assertTrue(_test('repeat_var.c', 'Repeated declaration of symbol "i"'))
def test_undefined_func(self):
self.assertTrue(_test('undefined_func.c', 'Use of undefined function "x"'))
def test_undefined_var(self):
self.assertTrue(_test('undefined_var.c', 'Use of undefined variable "x"'))
def test_missing_args(self):
self.assertTrue(_test('missing_args.c', 'Invalid call to "func": Expected 1 argument, got 0.'))
def test_too_many_args(self):
self.assertTrue(_test('too_many_args.c', 'Invalid call to "func": Expected 0 arguments, got 1.'))
def test_wrong_args(self):
self.assertTrue(_test('wrong_args.c', 'Invalid call to "func": Expected expression of type int or less, got int *.'))
def test_arrlength_const(self):
self.assertTrue(_test('arrlength_const.c', 'Array length is not a compile-time constant'))
def test_arrlength_type(self):
self.assertTrue(_test('arrlength_type.c', 'Array length is not of type int'))
def test_break(self):
self.assertTrue(_test('break.c', 'Attempted to "break" outside a loop.'))
def test_continue(self):
self.assertTrue(_test('continue.c', 'Attempted to "continue" outside a loop.'))
if __name__ == '__main__':
unittest.main()
|
Sibert-Aerts/c2p
|
src/c2p/codegen/test_semantic_errors.py
|
Python
|
mit
| 2,939
|
[
"VisIt"
] |
b071e3c77bb8a0d59d217192ab973804bc57988a8d1a1bd1f096c685f96d9781
|
#!/usr/bin/env python
"""
dirac-rss-set-status
Script that facilitates the modification of an element through the command line.
However, the usage of this script will set the element token to the command
issuer with a duration of 1 day.
"""
__RCSID__ = '$Id$'
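# Illustrative invocation only (the element name and status values below are
# placeholders, not taken from this file); the switch names come from
# registerSwitches() further down:
#   dirac-rss-set-status --element=Resource --name=SomeStorageElement \
#     --statusType=ReadAccess --status=Banned --reason="scheduled downtime"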
from datetime import datetime, timedelta
from DIRAC import gLogger, exit as DIRACExit, S_OK, version
from DIRAC.Core.Base import Script
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ResourceStatusSystem.Client import ResourceStatusClient
from DIRAC.ResourceStatusSystem.PolicySystem import StateMachine
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
subLogger = None
switchDict = {}
def registerSwitches():
'''
Registers all switches that can be used while calling the script from the
command line interface.
'''
switches = (
('element=', 'Element family on which the status change applies ( Site, Resource or Node )'),
('name=', 'Name (or comma-separated list of names) of the element where the change applies'),
('statusType=', 'StatusType (or comma-separated list of names), if none applies to all possible statusTypes'),
('status=', 'Status to be changed'),
('reason=', 'Reason to set the Status'),
)
for switch in switches:
Script.registerSwitch('', switch[0], switch[1])
def registerUsageMessage():
'''
Takes the script __doc__ and adds the DIRAC version to it
'''
hLine = ' ' + '=' * 78 + '\n'
usageMessage = hLine
usageMessage += ' DIRAC %s\n' % version
usageMessage += __doc__
usageMessage += '\n' + hLine
Script.setUsageMessage(usageMessage)
def parseSwitches():
'''
Parses the arguments passed by the user
'''
Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs()
if args:
subLogger.error("Found the following positional args '%s', but we only accept switches" % args)
subLogger.error("Please, check documentation below")
Script.showHelp()
DIRACExit(1)
switches = dict(Script.getUnprocessedSwitches())
switches.setdefault('statusType', None)
for key in ('element', 'name', 'status', 'reason'):
if key not in switches:
subLogger.error("%s Switch missing" % key)
subLogger.error("Please, check documentation below")
Script.showHelp()
DIRACExit(1)
if not switches['element'] in ('Site', 'Resource', 'Node'):
subLogger.error("Found %s as element switch" % switches['element'])
subLogger.error("Please, check documentation below")
Script.showHelp()
DIRACExit(1)
statuses = StateMachine.RSSMachine(None).getStates()
if not switches['status'] in statuses:
subLogger.error("Found %s as status switch" % switches['status'])
subLogger.error("Please, check documentation below")
Script.showHelp()
DIRACExit(1)
subLogger.debug("The switches used are:")
map(subLogger.debug, switches.iteritems())
return switches
#...............................................................................
def checkStatusTypes(statusTypes):
'''
To check if values for 'statusType' are valid
'''
opsH = Operations().getValue('ResourceStatus/Config/StatusTypes/StorageElement')
acceptableStatusTypes = opsH.replace(',', ' ').split()
for statusType in statusTypes:
if statusType not in acceptableStatusTypes and statusType != 'all':
acceptableStatusTypes.append('all')
subLogger.error("'%s' is a wrong value for switch 'statusType'.\n\tThe acceptable values are:\n\t%s"
% (statusType, str(acceptableStatusTypes)))
if 'all' in statusTypes:
return acceptableStatusTypes
return statusTypes
def unpack(switchDict):
'''
To split and process comma-separated list of values for 'name' and 'statusType'
'''
switchDictSet = []
names = []
statusTypes = []
if switchDict['name'] is not None:
names = filter(None, switchDict['name'].split(','))
if switchDict['statusType'] is not None:
statusTypes = filter(None, switchDict['statusType'].split(','))
statusTypes = checkStatusTypes(statusTypes)
if len(names) > 0 and len(statusTypes) > 0:
combinations = [(a, b) for a in names for b in statusTypes]
for combination in combinations:
n, s = combination
switchDictClone = switchDict.copy()
switchDictClone['name'] = n
switchDictClone['statusType'] = s
switchDictSet.append(switchDictClone)
elif len(names) > 0 and len(statusTypes) == 0:
for name in names:
switchDictClone = switchDict.copy()
switchDictClone['name'] = name
switchDictSet.append(switchDictClone)
elif len(names) == 0 and len(statusTypes) > 0:
for statusType in statusTypes:
switchDictClone = switchDict.copy()
switchDictClone['statusType'] = statusType
switchDictSet.append(switchDictClone)
elif len(names) == 0 and len(statusTypes) == 0:
switchDictClone = switchDict.copy()
switchDictClone['name'] = None
switchDictClone['statusType'] = None
switchDictSet.append(switchDictClone)
return switchDictSet
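# Sketch of what unpack() returns (example values are hypothetical): for
# switchDict = {'element': 'Resource', 'name': 'A,B', 'statusType': 'ReadAccess', ...}
# it yields one clone of switchDict per (name, statusType) combination, i.e. two
# dicts here, so that run() below is executed once per combination.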
def getTokenOwner():
'''
Function that gets the userName from the proxy
'''
proxyInfo = getProxyInfo()
if not proxyInfo['OK']:
return proxyInfo
userName = proxyInfo['Value']['username']
return S_OK(userName)
def setStatus(switchDict, tokenOwner):
'''
Function that gets the user token, sets the validity for it. Gets the elements
in the database for a given name and statusType(s). Then updates the status
of all them adding a reason and the token.
'''
rssClient = ResourceStatusClient.ResourceStatusClient()
elements = rssClient.selectStatusElement(switchDict['element'], 'Status',
name=switchDict['name'],
statusType=switchDict['statusType'],
meta={'columns': ['Status', 'StatusType']})
if not elements['OK']:
return elements
elements = elements['Value']
if not elements:
subLogger.warn('Nothing found for %s, %s, %s' % (switchDict['element'],
switchDict['name'],
switchDict['statusType']))
return S_OK()
tomorrow = datetime.utcnow().replace(microsecond=0) + timedelta(days=1)
for status, statusType in elements:
subLogger.debug('%s %s' % (status, statusType))
if switchDict['status'] == status:
subLogger.notice('Status for %s (%s) is already %s. Ignoring..' % (switchDict['name'], statusType, status))
continue
result = rssClient.modifyStatusElement(switchDict['element'], 'Status',
name=switchDict['name'],
statusType=statusType,
status=switchDict['status'],
reason=switchDict['reason'],
tokenOwner=tokenOwner,
tokenExpiration=tomorrow)
if not result['OK']:
return result
return S_OK()
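# Note on the return convention used above (standard DIRAC style, not specific to
# this script): helpers return S_OK(value) == {'OK': True, 'Value': value} or
# S_ERROR(message) == {'OK': False, 'Message': message}, and callers branch on
# result['OK'] before reading result['Value'].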
#...............................................................................
def run(switchDict):
'''
Main function of the script
'''
tokenOwner = getTokenOwner()
if not tokenOwner['OK']:
subLogger.error(tokenOwner['Message'])
DIRACExit(1)
tokenOwner = tokenOwner['Value']
subLogger.notice('TokenOwner is %s' % tokenOwner)
result = setStatus(switchDict, tokenOwner)
if not result['OK']:
subLogger.error(result['Message'])
DIRACExit(1)
#...............................................................................
if __name__ == "__main__":
subLogger = gLogger.getSubLogger(__file__)
# Script initialization
registerSwitches()
registerUsageMessage()
switchDict = parseSwitches()
switchDictSets = unpack(switchDict)
# Run script
for switchDict in switchDictSets:
run(switchDict)
# Bye
DIRACExit(0)
################################################################################
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
andresailer/DIRAC
|
ResourceStatusSystem/scripts/dirac-rss-set-status.py
|
Python
|
gpl-3.0
| 8,219
|
[
"DIRAC"
] |
4049089350b3497d86c93dfd2694b544ad85ac084a7d5b3c0fe98ebff3607cef
|
""" :mod: SRM2Storage
=================
.. module: python
:synopsis: SRM v2 interface to StorageElement
"""
# # imports
import os
import re
import time
import errno
from types import StringType, StringTypes, ListType, IntType
from stat import S_ISREG, S_ISDIR, S_IMODE, ST_MODE, ST_SIZE
# # from DIRAC
from DIRAC import gLogger, gConfig
from DIRAC.Core.Utilities import DErrno, DError
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
from DIRAC.Resources.Storage.Utilities import checkArgumentFormat
from DIRAC.Resources.Storage.StorageBase import StorageBase
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.Core.Utilities.Subprocess import pythonCall
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.Core.Utilities.File import getSize
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
# # RCSID
__RCSID__ = "$Id$"
class SRM2Storage( StorageBase ):
""" .. class:: SRM2Storage
SRM v2 interface to StorageElement using lcg_util and gfal
"""
def __init__( self, storageName, parameters ):
""" c'tor
:param self: self reference
:param str storageName: SE name
:param dict parameters: dictionary of protocol parameters
"""
StorageBase.__init__( self, storageName, parameters )
self.spaceToken = self.protocolParameters['SpaceToken']
self.log = gLogger.getSubLogger( "SRM2Storage", True )
self.isok = True
# # placeholder for gfal reference
self.gfal = None
# # placeholder for lcg_util reference
self.lcg_util = None
# # save c'tor params
self.pluginName = 'SRM2'
# # stage limit - 12h
self.stageTimeout = gConfig.getValue( '/Resources/StorageElements/StageTimeout', 12 * 60 * 60 )
# # 1 file timeout
self.fileTimeout = gConfig.getValue( '/Resources/StorageElements/FileTimeout', 30 )
# # nb of surls per gfal call
self.filesPerCall = gConfig.getValue( '/Resources/StorageElements/FilesPerCall', 20 )
# # gfal timeout
self.gfalTimeout = gConfig.getValue( "/Resources/StorageElements/GFAL_Timeout", 100 )
# # gfal long timeout
self.gfalLongTimeOut = gConfig.getValue( "/Resources/StorageElements/GFAL_LongTimeout", 1200 )
# # gfal retry on errno.ECONN
self.gfalRetry = gConfig.getValue( "/Resources/StorageElements/GFAL_Retry", 3 )
# # should busy files be considered to exist
self.busyFilesExist = gConfig.getValue( "/Resources/StorageElements/SRMBusyFilesExist", False )
# # set checksum type, by default this is 0 (GFAL_CKSM_NONE)
checksumType = gConfig.getValue( "/Resources/StorageElements/ChecksumType", '' )
# enum gfal_cksm_type, all in lcg_util
# GFAL_CKSM_NONE = 0,
# GFAL_CKSM_CRC32,
# GFAL_CKSM_ADLER32,
# GFAL_CKSM_MD5,
# GFAL_CKSM_SHA1
# GFAL_CKSM_NULL = 0
self.checksumTypes = { "CRC32" : 1, "ADLER32" : 2,
"MD5" : 3, "SHA1" : 4, "NONE" : 0, "NULL" : 0 }
self.checksumType = self.checksumTypes.get( checksumType.upper(), 0 )
if self.checksumType:
gLogger.debug( "SRM2Storage: will use %s checksum check" % checksumType )
elif checksumType:
gLogger.warn( "SRM2Storage: unknown checksum, check disabled", checksumType )
else:
self.log.debug( "SRM2Storage: will use no checksum" )
# setting some variables for use with lcg_utils
self.nobdii = 1
self.defaulttype = 2
self.voName = None
ret = getProxyInfo( disableVOMS = True )
if ret['OK'] and 'group' in ret['Value']:
self.voName = getVOForGroup( ret['Value']['group'] )
# enable lcg-utils debugging for debug level DEBUG
lcgdebuglevel = 0
dlevel = self.log.getLevel()
if dlevel == 'DEBUG':
lcgdebuglevel = 999
self.verbose = lcgdebuglevel
self.conf_file = 'ignored'
self.insecure = 0
self.defaultLocalProtocols = gConfig.getValue( '/Resources/StorageElements/DefaultProtocols', [] )
self.MAX_SINGLE_STREAM_SIZE = 1024 * 1024 * 10 # 10 MB ???
self.MIN_BANDWIDTH = 0.5 * ( 1024 * 1024 ) # 0.5 MB/s ???
def __importExternals( self ):
""" import lcg_util and gfalthr or gfal
:param self: self reference
"""
if ( self.lcg_util ) and ( self.gfal ):
return S_OK()
# # get lcg_util
try:
import lcg_util
self.log.debug( "Using lcg_util version %s from %s" % ( lcg_util.lcg_util_version(),
lcg_util.__file__ ) )
except ImportError, error:
gLogger.exception( "__importExternals: Failed to import lcg_util", "", error )
return DError( DErrno.EIMPERR, error )
# # and gfalthr
try:
import gfalthr as gfal
self.log.debug( 'Using gfalthr version %s from %s' % ( gfal.gfal_version(),
gfal.__file__ ) )
except ImportError, error:
self.log.warn( "__importExternals: Failed to import gfalthr: %s." % error )
# # so gfal maybe?
try:
import gfal
self.log.debug( "Using gfal version %s from %s" % ( gfal.gfal_version(),
gfal.__file__ ) )
except ImportError, error:
gLogger.exception( "__importExternals: Failed to import gfal", "", error )
return DError( DErrno.EIMPERR, error )
self.lcg_util = lcg_util
self.gfal = gfal
return S_OK()
################################################################################
#
# The methods below are URL manipulation methods
#
################################################################################
def __convertRandomSRMOutputIntoAFullURL( self, srmPath ):
""" When calling gfal operation, srm sometimes returns as a surl just the physical path on the storage
without the host, port and else. Sometimes it is the full surl. Sometimes it doesn't have the WSUrl.
So we correct all this and make sure that we return to the caller a full surl.
/my/base/path/the/lfn.raw -> srm://host:port/srm/v2/server?SFN=/my/base/path/the/lfn.raw
"""
from DIRAC.Core.Utilities.Pfn import pfnunparse, pfnparse
# if self.isURL( srmPath )['Value']:
if ':' in srmPath:
dic = pfnparse( srmPath )['Value']
dic['WSUrl'] = self.protocolParameters['WSUrl']
srmPath = pfnunparse( dic )['Value']
return S_OK( srmPath )
urlDict = dict( self.protocolParameters )
urlDict['Path'] = ''
unp = pfnunparse( urlDict )['Value']
unp = os.path.join( unp, srmPath.lstrip( '/' ) )
return S_OK( unp )
#############################################################
#
# These are the methods for directory manipulation
#
######################################################################
#
# This has to be updated once the new gfal_makedir() becomes available
# TODO: isn't it there? when somebody made above comment?
#
def createDirectory( self, path ):
""" mkdir -p path on storage
:param self: self reference
:param str path:
"""
urls = checkArgumentFormat( path )
if not urls['OK']:
return urls
urls = urls['Value']
successful = {}
failed = {}
self.log.debug( "createDirectory: Attempting to create %s directories." % len( urls ) )
for url in urls:
strippedUrl = url.rstrip( '/' )
res = self.__makeDirs( strippedUrl )
if res['OK']:
self.log.debug( "createDirectory: Successfully created directory on storage: %s" % url )
successful[url] = True
else:
self.log.error( "createDirectory: Failed to create directory on storage.",
"\n%s: \n%s" % ( url, res['Message'] ) )
failed[url] = res['Message']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
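# Note: as with the other public methods of this class, per-URL outcomes are
# reported inside S_OK( { 'Failed' : {url: message}, 'Successful' : {url: result} } ),
# so callers can handle partial failures without the whole call returning an error.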
def __makeDir( self, path ):
""" create the directory implicitly: upload a small 'dirac_directory' marker file into path (which forces the SRM to create the directory), then remove the marker again
:param self: self reference
:param str path:
"""
srcFile = os.path.join( os.environ.get( 'TMPDIR', os.environ.get( 'TMP', '/tmp' ) ), 'dirac_directory' )
if not os.path.exists( srcFile ):
dfile = open( srcFile, 'w' )
dfile.write( " " )
dfile.close()
destFile = os.path.join( path, 'dirac_directory.%s' % time.time() )
res = self.__putFile( srcFile, destFile, 0, checkExists = False )
if res['OK']:
self.__executeOperation( destFile, 'removeFile' )
return res
def __makeDirs( self, path ):
""" recursively create path (mkdir -p behaviour): ensure the parent directory exists, creating it first if needed, then create the leaf via __makeDir
:param self: self reference
:param str path: dir name
"""
res = self.__executeOperation( path, 'exists' )
if not res['OK']:
return res
if res['Value']:
return S_OK()
# directory doesn't exist, create it
dirName = os.path.dirname( path )
res = self.__executeOperation( dirName, 'exists' )
if not res['OK']:
return res
if not res['Value']:
res = self.__makeDirs( dirName )
if not res['OK']:
return res
return self.__makeDir( path )
################################################################################
#
# The methods below use the new generic methods for executing operations
#
################################################################################
def removeFile( self, path ):
""" rm path on storage
:param self: self reference
:param str path: file path
"""
log = self.log.getSubLogger( 'removeFile' )
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "removeFile: Performing the removal of %s file(s)" % len( urls ) )
resDict = self.__gfaldeletesurls_wrapper( urls )
if not resDict["OK"]:
self.log.error( "Failed removeFile", "%s" % resDict["Message"] )
return resDict
resDict = resDict['Value']
failed = resDict['Failed']
allResults = resDict['AllResults']
successful = {}
for urlDict in allResults:
if urlDict.get( 'surl' ):
pathSURL = urlDict['surl']
if urlDict['status'] == 0:
self.log.debug( "removeFile: Successfully removed file: %s" % pathSURL )
successful[pathSURL] = True
elif urlDict['status'] == 2:
# This is the case where the file doesn't exist.
self.log.debug( "removeFile: File did not exist, successfully removed: %s" % pathSURL )
successful[pathSURL] = True
else:
errStr = "removeFile: Failed to remove file."
errMessage = urlDict['ErrorMessage']
self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
failed[pathSURL] = "%s %s" % ( errStr, errMessage )
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def getTransportURL( self, path, protocols = False ):
""" obtain the tURLs for the supplied path and protocols
:param self: self reference
:param str path: path on storage
:param mixed protocols: protocols to use
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
if not protocols:
protocols = self.__getProtocols()
if not protocols['OK']:
return protocols
listProtocols = protocols['Value']
elif type( protocols ) == StringType:
listProtocols = [protocols]
elif type( protocols ) == ListType:
listProtocols = protocols
else:
return DError( errno.EPROTO, "getTransportURL: Must supply desired protocols to this plug-in." )
if self.protocolParameters['Protocol'] in listProtocols:
successful = {}
failed = {}
for url in urls:
if self.isURL( url )['Value']:
successful[url] = url
else:
failed[url] = 'getTransportURL: Failed to obtain turls.'
return S_OK( {'Successful' : successful, 'Failed' : failed} )
if not self.se.getStatus().get( 'Value', {} ).get( 'Read' ):
return S_ERROR( "SRM2Storage.getTransportURL: Read access not currently permitted." )
# Here we must go out to the SRM service
self.log.debug( "getTransportURL: Obtaining tURLs for %s file(s)." % len( urls ) )
resDict = self.__gfalturlsfromsurls_wrapper( urls, listProtocols )
if not resDict["OK"]:
self.log.error( "Failed getTransportURL", "%s" % resDict["Message"] )
return resDict
resDict = resDict['Value']
failed = resDict['Failed']
allResults = resDict['AllResults']
successful = {}
for urlDict in allResults:
if urlDict.get( 'surl' ):
pathSURL = urlDict['surl']
if urlDict['status'] == 0:
self.log.debug( "getTransportURL: Obtained tURL for file. %s" % pathSURL )
successful[pathSURL] = urlDict['turl']
elif urlDict['status'] == 2:
errMessage = "getTransportURL: File does not exist."
self.log.error( errMessage, pathSURL )
failed[pathSURL] = errMessage
else:
errStr = "getTransportURL: Failed to obtain turls."
errMessage = urlDict['ErrorMessage']
self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
failed[pathSURL] = "%s %s" % ( errStr, errMessage )
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def prestageFile( self, path, lifetime = 86400 ):
""" Issue prestage request for file
:param self: self reference
:param str path: PFN path
:param int lifetime: prestage lifetime in seconds (default 24h)
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "prestageFile: Attempting to issue stage requests for %s file(s)." % len( urls ) )
resDict = self.__gfal_prestage_wrapper( urls, lifetime )
if not resDict["OK"]:
self.log.error( "Failed prestageFile", "%s" % resDict["Message"] )
return resDict
resDict = resDict["Value"]
failed = resDict['Failed']
allResults = resDict['AllResults']
successful = {}
for urlDict in allResults:
if urlDict.get( 'surl' ):
pathSURL = urlDict['surl']
if urlDict['status'] == 0:
self.log.debug( "prestageFile: Issued stage request for file %s." % pathSURL )
successful[pathSURL] = urlDict['SRMReqID']
elif urlDict['status'] == 1:
self.log.debug( "prestageFile: File found to be already staged.", pathSURL )
successful[pathSURL] = urlDict['SRMReqID']
# It can be 11 or 22 depending on the srm-ifce version...
elif urlDict['status'] in ( 11, 22 ):
self.log.debug( "prestageFile: Stage request for file %s queued.", pathSURL )
successful[pathSURL] = urlDict['SRMReqID']
elif urlDict['status'] == 2:
errMessage = "prestageFile: File does not exist."
self.log.error( errMessage, pathSURL )
failed[pathSURL] = errMessage
else:
errStr = "prestageFile: Failed issue stage request."
errMessage = urlDict['ErrorMessage']
self.log.error( errStr, "%s: %s" % ( errMessage, pathSURL ) )
failed[pathSURL] = "%s %s" % ( errStr, errMessage )
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def prestageFileStatus( self, path ):
""" Monitor prestage request for files
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "prestageFileStatus: Attempting to get status "
"of stage requests for %s file(s)." % len( urls ) )
resDict = self.__gfal_prestagestatus_wrapper( urls )
if not resDict["OK"]:
self.log.error( "Failed prestageFileStatus", "%s" % resDict["Message"] )
return resDict
resDict = resDict["Value"]
failed = resDict['Failed']
allResults = resDict['AllResults']
successful = {}
for urlDict in allResults:
if urlDict.get( 'surl' ):
pathSURL = urlDict['surl']
if urlDict['status'] == 1:
self.log.debug( "SRM2Storage.prestageFileStatus: File found to be staged %s." % pathSURL )
successful[pathSURL] = True
elif urlDict['status'] == 0:
self.log.debug( "SRM2Storage.prestageFileStatus: File not staged %s." % pathSURL )
successful[pathSURL] = False
elif urlDict['status'] == 2:
errMessage = "SRM2Storage.prestageFileStatus: File does not exist."
self.log.error( errMessage, pathSURL )
failed[pathSURL] = errMessage
else:
errStr = "SRM2Storage.prestageFileStatus: Failed get prestage status."
errMessage = urlDict['ErrorMessage']
self.log.error( errStr, "%s: %s" % ( errMessage, pathSURL ) )
failed[pathSURL] = "%s %s" % ( errStr, errMessage )
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def getFileMetadata( self, path ):
""" Get metadata associated to the file
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
failed = {}
self.log.debug( "getFileMetadata: Obtaining metadata for %s file(s)." % len( urls ) )
resDict = self.__gfal_ls_wrapper( urls, 0 )
if not resDict["OK"]:
self.log.error( "Failed getFileMetadata:", "%s" % resDict["Message"] )
return resDict
resDict = resDict["Value"]
failed.update( resDict['Failed'] )
listOfResults = resDict['AllResults']
successful = {}
for urlDict in listOfResults:
if urlDict.get( 'surl' ):
# Get back the input value for that surl
path = urlDict['surl']
if urlDict['status'] == 0:
statDict = self.__parse_file_metadata( urlDict )
if statDict['File']:
successful[path] = statDict
else:
errStr = "getFileMetadata: Supplied path is not a file."
self.log.error( errStr, path )
failed[path] = errStr
elif urlDict['status'] == 2:
errMessage = "getFileMetadata: File does not exist."
self.log.error( errMessage, path )
failed[path] = errMessage
else:
errStr = "SRM2Storage.getFileMetadata: Failed to get file metadata."
errMessage = "%s: %s" % ( path, urlDict['ErrorMessage'] )
self.log.error( errStr, errMessage )
failed[path] = "%s %s" % ( errStr, urlDict['ErrorMessage'] )
else:
errStr = "getFileMetadata: Returned element does not contain surl."
self.log.fatal( errStr, self.name )
return DError( errno.ENOMSG, errStr )
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def isFile( self, path ):
"""Check if the given path exists and it is a file
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "isFile: Checking whether %s path(s) are file(s)." % len( urls ) )
resDict = self.__gfal_ls_wrapper( urls, 0 )
if not resDict["OK"]:
self.log.error( "Failed isFile:", "%s" % resDict["Message"] )
return resDict
resDict = resDict["Value"]
failed = resDict['Failed']
listOfResults = resDict['AllResults']
successful = {}
for urlDict in listOfResults:
if urlDict.get( 'surl' ):
pathSURL = urlDict['surl']
if urlDict['status'] == 0:
statDict = self.__parse_file_metadata( urlDict )
if statDict['File']:
successful[pathSURL] = True
else:
self.log.debug( "isFile: Path is not a file: %s" % pathSURL )
successful[pathSURL] = False
elif urlDict['status'] == 2:
errMessage = "isFile: File does not exist."
self.log.error( errMessage, pathSURL )
failed[pathSURL] = errMessage
else:
errStr = "isFile: Failed to get file metadata."
errMessage = urlDict['ErrorMessage']
self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
failed[pathSURL] = "%s %s" % ( errStr, errMessage )
else:
errStr = "isFile: Returned element does not contain surl."
self.log.fatal( errStr, self.name )
return DError( errno.ENOMSG, errStr )
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def pinFile( self, path, lifetime = 86400 ):
""" Pin a file with a given lifetime
:param self: self reference
:param str path: PFN path
:param int lifetime: pin lifetime in seconds (default 24h)
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "pinFile: Attempting to pin %s file(s)." % len( urls ) )
resDict = self.__gfal_pin_wrapper( urls, lifetime )
if not resDict["OK"]:
self.log.error( "Failed pinFile:", "%s" % resDict["Message"] )
return resDict
resDict = resDict["Value"]
failed = resDict['Failed']
allResults = resDict['AllResults']
successful = {}
for urlDict in allResults:
if urlDict.get( 'surl' ):
pathSURL = urlDict['surl']
if urlDict['status'] == 0:
self.log.debug( "pinFile: Issued pin request for file %s." % pathSURL )
successful[pathSURL] = urlDict['SRMReqID']
elif urlDict['status'] == 2:
errMessage = "pinFile: File does not exist."
self.log.error( errMessage, pathSURL )
failed[pathSURL] = errMessage
else:
errStr = "pinFile: Failed issue pin request."
errMessage = urlDict['ErrorMessage']
self.log.error( errStr, "%s: %s" % ( errMessage, pathSURL ) )
failed[pathSURL] = "%s %s" % ( errStr, errMessage )
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def releaseFile( self, path ):
""" Release a pinned file
:param self: self reference
:param str path: PFN path
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "releaseFile: Attempting to release %s file(s)." % len( urls ) )
resDict = self.__gfal_release_wrapper( urls )
if not resDict["OK"]:
self.log.error( "Failed releaseFile:", "%s" % resDict["Message"] )
return resDict
resDict = resDict["Value"]
failed = resDict['Failed']
allResults = resDict['AllResults']
successful = {}
for urlDict in allResults:
if urlDict.get( 'surl' ):
pathSURL = urlDict['surl']
if urlDict['status'] == 0:
self.log.debug( "Failed releaseFile:", "Issued release request for file %s." % pathSURL )
successful[pathSURL] = urlDict['SRMReqID']
elif urlDict['status'] == 2:
errMessage = "releaseFile: File does not exist."
self.log.error( errMessage, pathSURL )
failed[pathSURL] = errMessage
else:
errStr = "releaseFile: Failed issue release request."
errMessage = urlDict['ErrorMessage']
self.log.error( errStr, "%s: %s" % ( errMessage, pathSURL ) )
failed[pathSURL] = "%s %s" % ( errStr, errMessage )
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def exists( self, path ):
""" Check if the given path exists. """
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "SRM2Storage.exists: Checking the existance of %s path(s)" % len( urls ) )
resDict = self.__gfal_ls_wrapper( urls, 0 )
if not resDict["OK"]:
self.log.error( "Failed exists:", "%s" % resDict["Message"] )
return resDict
resDict = resDict["Value"]
failed = resDict['Failed']
listOfResults = resDict['AllResults']
successful = {}
for urlDict in listOfResults:
if urlDict.get( 'surl' ):
pathSURL = urlDict['surl']
if urlDict['status'] == 0:
self.log.debug( "SRM2Storage.exists: Path exists: %s" % pathSURL )
successful[pathSURL] = True
elif urlDict['status'] in ( 16, 22 ) and self.busyFilesExist:
self.log.debug( "SRM2Storage.exists: Path exists, file busy (e.g., stage-out): %s" % pathSURL )
successful[pathSURL] = True
elif urlDict['status'] == 2:
self.log.debug( "SRM2Storage.exists: Path does not exist: %s" % pathSURL )
successful[pathSURL] = False
else:
errStr = "SRM2Storage.exists: Failed to get path metadata."
errMessage = urlDict['ErrorMessage']
self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
failed[pathSURL] = "%s %s" % ( errStr, errMessage )
else:
errStr = "SRM2Storage.exists: Returned element does not contain surl."
self.log.fatal( errStr, self.name )
return DError( errno.ENOMSG, errStr )
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def getFileSize( self, path ):
"""Get the physical size of the given file
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "SRM2Storage.getFileSize: Obtaining the size of %s file(s)." % len( urls ) )
resDict = self.__gfal_ls_wrapper( urls, 0 )
if not resDict["OK"]:
self.log.error( "Failed getFileSize:", "%s" % resDict["Message"] )
return resDict
resDict = resDict["Value"]
failed = resDict['Failed']
listOfResults = resDict['AllResults']
successful = {}
for urlDict in listOfResults:
if urlDict.get( 'surl' ):
pathSURL = urlDict['surl']
if urlDict['status'] == 0:
statDict = self.__parse_file_metadata( urlDict )
if statDict['File']:
successful[pathSURL] = statDict['Size']
else:
errStr = "SRM2Storage.getFileSize: Supplied path is not a file."
self.log.verbose( errStr, pathSURL )
failed[pathSURL] = errStr
elif urlDict['status'] == 2:
errMessage = "SRM2Storage.getFileSize: File does not exist."
self.log.verbose( errMessage, pathSURL )
failed[pathSURL] = errMessage
else:
errStr = "SRM2Storage.getFileSize: Failed to get file metadata."
errMessage = urlDict['ErrorMessage']
self.log.verbose( errStr, "%s: %s" % ( pathSURL, errMessage ) )
failed[pathSURL] = "%s %s" % ( errStr, errMessage )
else:
errStr = "SRM2Storage.getFileSize: Returned element does not contain surl."
self.log.error( errStr, self.name )
return DError( errno.ENOMSG, errStr )
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def putFile( self, path, sourceSize = 0 ):
""" Put local file(s) to the storage, creating the destination directory when needed
:param self: self reference
:param path: dict { dest_url : local source path } (or any format accepted by checkArgumentFormat)
:param int sourceSize: source file size in B (mandatory when replicating from another SRM)
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
failed = {}
successful = {}
for dest_url, src_file in urls.items():
# Create destination directory
res = self.__executeOperation( os.path.dirname( dest_url ), 'createDirectory' )
if not res['OK']:
failed[dest_url] = res['Message']
else:
res = self.__putFile( src_file, dest_url, sourceSize )
if res['OK']:
successful[dest_url] = res['Value']
else:
failed[dest_url] = res['Message']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __putFile( self, src_file, dest_url, sourceSize, checkExists = True ):
""" put :src_file: to :dest_url:
:param self: self reference
:param str src_file: file path in local fs
:param str dest_url: destination url on storage
:param int sourceSize: :src_file: size in B
"""
if checkExists:
# Pre-transfer check
res = self.__executeOperation( dest_url, 'exists' )
if not res['OK']:
self.log.debug( "__putFile: Failed to find pre-existance of destination file." )
return res
if res['Value']:
res = self.__executeOperation( dest_url, 'removeFile' )
if not res['OK']:
self.log.debug( "__putFile: Failed to remove remote file %s." % dest_url )
else:
self.log.debug( "__putFile: Removed remote file %s." % dest_url )
dsttype = self.defaulttype
src_spacetokendesc = ''
dest_spacetokendesc = self.spaceToken
if re.search( 'srm:', src_file ):
src_url = src_file
srctype = 2
if not sourceSize:
return DError( errno.EINVAL, "__putFile: For file replication the source file size must be provided." )
else:
if not os.path.exists( src_file ):
errStr = "__putFile: The source local file does not exist."
self.log.error( errStr, src_file )
return DError( errno.ENOENT, errStr )
sourceSize = getSize( src_file )
if sourceSize == -1:
errStr = "__putFile: Failed to get file size."
self.log.error( errStr, src_file )
return DError( DErrno.EFILESIZE, errStr )
src_url = 'file:%s' % src_file
srctype = 0
if sourceSize == 0:
errStr = "__putFile: Source file is zero size."
self.log.error( errStr, src_file )
return DError( DErrno.EFILESIZE, errStr )
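# Transfer tuning used below (as coded): the timeout is sourceSize / MIN_BANDWIDTH plus a fixed
# 300 s overhead, and files larger than MAX_SINGLE_STREAM_SIZE are copied with 4 parallel streams
# while smaller ones use a single stream.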
timeout = int( sourceSize / self.MIN_BANDWIDTH + 300 )
if sourceSize > self.MAX_SINGLE_STREAM_SIZE:
nbstreams = 4
else:
nbstreams = 1
self.log.info( "__putFile: Executing transfer of %s to %s using %s streams" % ( src_url, dest_url, nbstreams ) )
res = pythonCall( ( timeout + 10 ), self.__lcg_cp_wrapper, src_url, dest_url,
srctype, dsttype, nbstreams, timeout, src_spacetokendesc, dest_spacetokendesc )
if not res['OK']:
# Remove the failed replica, just in case
result = self.__executeOperation( dest_url, 'removeFile' )
if result['OK']:
self.log.debug( "__putFile: Removed remote file remnant %s." % dest_url )
else:
self.log.debug( "__putFile: Unable to remove remote file remnant %s." % dest_url )
return res
res = res['Value']
if not res['OK']:
# Remove the failed replica, just in case
result = self.__executeOperation( dest_url, 'removeFile' )
if result['OK']:
self.log.debug( "__putFile: Removed remote file remnant %s." % dest_url )
else:
self.log.debug( "__putFile: Unable to remove remote file remnant %s." % dest_url )
return res
errCode, errStr = res['Value']
if errCode == 0:
self.log.info( '__putFile: Successfully put file to storage.' )
# # checksum check? return!
if self.checksumType:
return S_OK( sourceSize )
# # else compare sizes
res = self.__executeOperation( dest_url, 'getFileSize' )
if res['OK']:
destinationSize = res['Value']
if sourceSize == destinationSize :
self.log.debug( "__putFile: Post transfer check successful." )
return S_OK( destinationSize )
errorMessage = "__putFile: Source and destination file sizes do not match."
errObj = DError( DErrno.EFILESIZE, errorMessage )
self.log.error( errorMessage, src_url )
else:
errorMessage = "__putFile: Failed to put file to storage."
errObj = DError( errCode, errorMessage )
if errCode > 0:
errStr = "%s %s" % ( errStr, os.strerror( errCode ) )
self.log.error( errorMessage, errStr )
res = self.__executeOperation( dest_url, 'removeFile' )
if res['OK']:
self.log.debug( "__putFile: Removed remote file remnant %s." % dest_url )
else:
self.log.debug( "__putFile: Unable to remove remote file remnant %s." % dest_url )
return errObj
def __lcg_cp_wrapper( self, src_url, dest_url, srctype, dsttype, nbstreams,
timeout, src_spacetokendesc, dest_spacetokendesc ):
""" lcg_util.lcg_cp wrapper
:param self: self reference
:param str src_url: source SURL
:param str dest_url: destination SURL
:param srctype: source SE type
:param dsttype: destination SE type
:param int nbstreams: nb of streams used for transfer
:param int timeout: timeout in seconds
:param str src_spacetokendesc: source space token descriptor
:param str dest_spacetokendesc: destination space token descriptor
"""
try:
errCode, errStr = self.lcg_util.lcg_cp4( src_url,
dest_url,
self.defaulttype,
srctype,
dsttype,
self.nobdii,
self.voName,
nbstreams,
self.conf_file,
self.insecure,
self.verbose,
timeout,
src_spacetokendesc,
dest_spacetokendesc,
self.checksumType )
if type( errCode ) != IntType:
self.log.error( "__lcg_cp_wrapper: Returned errCode was not an integer",
"%s %s" % ( errCode, type( errCode ) ) )
if type( errCode ) == ListType:
msg = []
for err in errCode:
msg.append( '%s of type %s' % ( err, type( err ) ) )
self.log.error( "__lcg_cp_wrapper: Returned errCode was List:\n" , "\n".join( msg ) )
return DError( DErrno.EGFAL, "__lcg_cp_wrapper: Returned errCode was not an integer %s" % msg )
if type( errStr ) not in StringTypes:
self.log.error( "__lcg_cp_wrapper: Returned errStr was not a string",
"%s %s" % ( errCode, type( errStr ) ) )
return DError( DErrno.EGFAL, "__lcg_cp_wrapper: Returned errStr was not a string" )
return S_OK( ( errCode, errStr ) )
except Exception, error:
self.log.exception( "__lcg_cp_wrapper", "", error )
return DError( DErrno.EGFAL, "__lcg_cp_wrapper:Exception while attempting file upload %s" % error )
def getFile( self, path, localPath = False ):
""" make a local copy of a storage :path:
:param self: self reference
:param str path: path on storage
:param mixed localPath: if not specified, os.getcwd()
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
failed = {}
successful = {}
for src_url in urls:
fileName = os.path.basename( src_url )
if localPath:
dest_file = "%s/%s" % ( localPath, fileName )
else:
dest_file = "%s/%s" % ( os.getcwd(), fileName )
res = self.__getFile( src_url, dest_file )
if res['OK']:
successful[src_url] = res['Value']
else:
failed[src_url] = res['Message']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __getFile( self, src_url, dest_file ):
""" do a real copy of storage file :src_url: to local fs under :dest_file:
:param self: self reference
:param str src_url: SE url to cp
:param str dest_file: local fs path
"""
if not os.path.exists( os.path.dirname( dest_file ) ):
os.makedirs( os.path.dirname( dest_file ) )
if os.path.exists( dest_file ):
self.log.debug( "__getFile: Local file already exists %s. Removing..." % dest_file )
os.remove( dest_file )
srctype = self.defaulttype
src_spacetokendesc = self.spaceToken
dsttype = 0
dest_spacetokendesc = ''
dest_url = 'file:%s' % dest_file
res = self.__executeOperation( src_url, 'getFileSize' )
if not res['OK']:
return res
remoteSize = res['Value']
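# Download timeout used below (as coded): remoteSize / MIN_BANDWIDTH * 4 + 300 s, i.e. four times
# more generous than the upload case; downloads always use a single stream.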
timeout = int( remoteSize / self.MIN_BANDWIDTH * 4 + 300 )
nbstreams = 1
self.log.info( "__getFile: Using %d streams" % nbstreams )
self.log.info( "__getFile: Executing transfer of %s to %s" % ( src_url, dest_url ) )
res = pythonCall( ( timeout + 10 ), self.__lcg_cp_wrapper, src_url, dest_url, srctype, dsttype,
nbstreams, timeout, src_spacetokendesc, dest_spacetokendesc )
if not res['OK']:
return res
res = res['Value']
if not res['OK']:
return res
errCode, errStr = res['Value']
if errCode == 0:
self.log.debug( '__getFile: Got a file from storage.' )
localSize = getSize( dest_file )
if localSize == remoteSize:
self.log.debug( "__getFile: Post transfer check successful." )
return S_OK( localSize )
errorMessage = "__getFile: Source and destination file sizes do not match."
self.log.error( errorMessage, src_url )
else:
errorMessage = "__getFile: Failed to get file from storage."
if errCode > 0:
errStr = "%s %s" % ( errStr, os.strerror( errCode ) )
self.log.error( errorMessage, errStr )
if os.path.exists( dest_file ):
self.log.debug( "__getFile: Removing local file %s." % dest_file )
os.remove( dest_file )
return S_ERROR( errorMessage )
def __executeOperation( self, url, method ):
""" executes the requested :method: with the supplied url
:param self: self reference
:param str url: SE url
:param str method: fcn name
"""
fcn = None
if hasattr( self, method ) and callable( getattr( self, method ) ):
fcn = getattr( self, method )
if not fcn:
return DError( DErrno.ENOMETH, "Unable to invoke %s, it isn't a member funtion of SRM2Storage" % method )
res = fcn( url )
if not res['OK']:
return res
elif url not in res['Value']['Successful']:
if url not in res['Value']['Failed']:
if res['Value']['Failed'].values():
return S_ERROR( res['Value']['Failed'].values()[0] )
elif res['Value']['Successful'].values():
return S_OK( res['Value']['Successful'].values()[0] )
else:
self.log.error( 'Wrong Return structure', str( res['Value'] ) )
return S_ERROR( 'Wrong Return structure' )
return S_ERROR( res['Value']['Failed'][url] )
return S_OK( res['Value']['Successful'][url] )
############################################################################################
#
# Directory based methods
#
def isDirectory( self, path ):
""" isdir on storage path
:param self: self reference
:param str path: SE path
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "SRM2Storage.isDirectory: Checking whether %s path(s) are directory(ies)" % len( urls ) )
resDict = self.__gfal_ls_wrapper( urls, 0 )
if not resDict["OK"]:
self.log.error( "Failed isDirectory:", "%s" % resDict["Message"] )
return resDict
resDict = resDict["Value"]
failed = resDict['Failed']
listOfResults = resDict['AllResults']
successful = {}
for urlDict in listOfResults:
if urlDict.get( 'surl' ):
dirSURL = urlDict['surl']
if urlDict['status'] == 0:
statDict = self.__parse_file_metadata( urlDict )
if statDict['Directory']:
successful[dirSURL] = True
else:
self.log.debug( "SRM2Storage.isDirectory: Path is not a directory: %s" % dirSURL )
successful[dirSURL] = False
elif urlDict['status'] == 2:
self.log.debug( "SRM2Storage.isDirectory: Supplied path does not exist: %s" % dirSURL )
failed[dirSURL] = DError( errno.ENOENT, '%s path does not exist' % dirSURL )
else:
errStr = "SRM2Storage.isDirectory: Failed to get file metadata."
errMessage = urlDict['ErrorMessage']
self.log.error( errStr, "%s: %s" % ( dirSURL, errMessage ) )
failed[dirSURL] = DError( DErrno.EGFAL, "Failed to get file metadata %s" % errMessage )
else:
errStr = "SRM2Storage.isDirectory: Returned element does not contain surl."
self.log.fatal( errStr, self.name )
return DError( errno.ENOMSG, errStr )
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def getDirectoryMetadata( self, path ):
""" get the metadata for the directory :path:
:param self: self reference
:param str path: SE path
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "getDirectoryMetadata: Attempting to obtain metadata for %s directories." % len( urls ) )
resDict = self.__gfal_ls_wrapper( urls, 0 )
if not resDict["OK"]:
self.log.error( "Failed getDirectoryMetadata:", "%s" % resDict["Message"] )
return resDict
resDict = resDict["Value"]
failed = resDict['Failed']
listOfResults = resDict['AllResults']
successful = {}
for urlDict in listOfResults:
if "surl" in urlDict and urlDict["surl"]:
pathSURL = urlDict['surl']
if urlDict['status'] == 0:
statDict = self.__parse_file_metadata( urlDict )
if statDict['Directory']:
statDict['Exists'] = True
statDict['Type'] = 'Directory'
successful[pathSURL] = statDict
else:
errStr = "SRM2Storage.getDirectoryMetadata: Supplied path is not a directory."
self.log.error( errStr, pathSURL )
failed[pathSURL] = errStr
elif urlDict['status'] == 2:
errMessage = "SRM2Storage.getDirectoryMetadata: Directory does not exist."
self.log.error( errMessage, pathSURL )
failed[pathSURL] = DError( errno.ENOENT, 'SRM2Storage.getDirectoryMetadata: %s does not exist' % pathSURL )
else:
errStr = "SRM2Storage.getDirectoryMetadata: Failed to get directory metadata."
errMessage = urlDict['ErrorMessage']
self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
failed[pathSURL] = DError( DErrno.EGFAL, "Failed to get file metadata %s" % errMessage )
else:
errStr = "SRM2Storage.getDirectoryMetadata: Returned element does not contain surl."
self.log.fatal( errStr, self.name )
return DError( errno.ENOMSG, errStr )
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def getDirectorySize( self, path ):
""" Get the size of the directory on the storage
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "SRM2Storage.getDirectorySize: Attempting to get size of %s directories." % len( urls ) )
res = self.listDirectory( urls )
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
for directory, dirDict in res['Value']['Successful'].items():
directorySize = 0
directoryFiles = 0
filesDict = dirDict['Files']
for fileDict in filesDict.itervalues():
directorySize += fileDict['Size']
directoryFiles += 1
self.log.debug( "SRM2Storage.getDirectorySize: Successfully obtained size of %s." % directory )
subDirectories = len( dirDict['SubDirs'] )
successful[directory] = { 'Files' : directoryFiles, 'Size' : directorySize, 'SubDirs' : subDirectories }
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def listDirectory( self, path, internalCall = False ):
""" List the contents of the directory on the storage
:param internalCall: if this method is called from within
this class, the results are indexed by SURL rather than LFN.
Do not set it to True for a normal call, unless you really
know what you are doing !!
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "SRM2Storage.listDirectory: Attempting to list %s directories." % len( urls ) )
# The gfal method returns a URL, while we want to return an LFN to the user
urlStart = self.getURLBase( withWSUrl = True )['Value']
res = self.isDirectory( urls )
if not res['OK']:
return res
failed = res['Value']['Failed']
directories = {}
for url, isDirectory in res['Value']['Successful'].items():
if isDirectory:
directories[url] = False
else:
errStr = "SRM2Storage.listDirectory: Directory does not exist."
self.log.error( errStr, url )
failed[url] = errStr
resDict = self.__gfal_lsdir_wrapper( directories )
if not resDict["OK"]:
self.log.error( "Failed listDirectory:", "%s" % resDict["Message"] )
return resDict
resDict = resDict["Value"]
# resDict = self.__gfalls_wrapper(directories,1)['Value']
failed.update( resDict['Failed'] )
listOfResults = resDict['AllResults']
successful = {}
for urlDict in listOfResults:
if "surl" in urlDict and urlDict["surl"]:
pathSURL = urlDict['surl']
if urlDict['status'] == 0:
successful[pathSURL] = {}
self.log.debug( "SRM2Storage.listDirectory: Successfully listed directory %s" % pathSURL )
subPathDirs = {}
subPathFiles = {}
if "subpaths" in urlDict:
subPaths = urlDict['subpaths']
# Parse the subpaths for the directory
for subPathDict in subPaths:
subPathSURL = subPathDict['surl']
if subPathDict['status'] == 22:
self.log.error( "File found with status 22", subPathDict )
elif subPathDict['status'] == 0:
statDict = self.__parse_file_metadata( subPathDict )
# Replace the URL with an LFN in normal cases, but return the SURL if it is an internal call
subPathLFN = subPathSURL if internalCall else subPathSURL.replace( urlStart, '' )
if statDict['File']:
subPathFiles[subPathLFN] = statDict
elif statDict['Directory']:
subPathDirs[subPathLFN] = statDict
# Keep the information about this path's subpaths
successful[pathSURL]['SubDirs'] = subPathDirs
successful[pathSURL]['Files'] = subPathFiles
else:
errStr = "SRM2Storage.listDirectory: Failed to list directory."
errMessage = urlDict['ErrorMessage']
self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
failed[pathSURL] = "%s %s" % ( errStr, errMessage )
else:
errStr = "SRM2Storage.listDirectory: Returned element does not contain surl."
self.log.fatal( errStr, self.name )
return DError( errno.ENOMSG, errStr )
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def putDirectory( self, path ):
""" cp -R local SE
puts a local directory to the physical storage together with all its files and subdirectories
:param self: self reference
:param str path: local fs path
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
successful = {}
failed = {}
self.log.debug( "SRM2Storage.putDirectory: Attemping to put %s directories to remote storage." % len( urls ) )
for destDir, sourceDir in urls.items():
res = self.__putDir( sourceDir, destDir )
if res['OK']:
if res['Value']['AllPut']:
self.log.debug( "SRM2Storage.putDirectory: Successfully put directory to remote storage: %s" % destDir )
successful[destDir] = { 'Files' : res['Value']['Files'], 'Size' : res['Value']['Size']}
else:
self.log.error( "SRM2Storage.putDirectory: Failed to put entire directory to remote storage.", destDir )
failed[destDir] = { 'Files' : res['Value']['Files'], 'Size' : res['Value']['Size']}
else:
self.log.error( "SRM2Storage.putDirectory: Completely failed to put directory to remote storage.", destDir )
failed[destDir] = { "Files" : 0, "Size" : 0 }
return S_OK( { "Failed" : failed, "Successful" : successful } )
def __putDir( self, src_directory, dest_directory ):
""" Black magic contained within...
"""
filesPut = 0
sizePut = 0
# Check the local directory exists
if not os.path.isdir( src_directory ):
errStr = "SRM2Storage.__putDir: The supplied directory does not exist."
self.log.error( errStr, src_directory )
return DError( errno.ENOENT, errStr )
# Get the local directory contents
contents = os.listdir( src_directory )
allSuccessful = True
directoryFiles = {}
for fileName in contents:
localPath = '%s/%s' % ( src_directory, fileName )
remotePath = '%s/%s' % ( dest_directory, fileName )
if not os.path.isdir( localPath ):
directoryFiles[remotePath] = localPath
else:
res = self.__putDir( localPath, remotePath )
if not res['OK']:
errStr = "SRM2Storage.__putDir: Failed to put directory to storage."
self.log.error( errStr, res['Message'] )
else:
if not res['Value']['AllPut']:
allSuccessful = False
filesPut += res['Value']['Files']
sizePut += res['Value']['Size']
if directoryFiles:
res = self.putFile( directoryFiles )
if not res['OK']:
self.log.error( "SRM2Storage.__putDir: Failed to put files to storage.", res['Message'] )
allSuccessful = False
else:
for fileSize in res['Value']['Successful'].itervalues():
filesPut += 1
sizePut += fileSize
if res['Value']['Failed']:
allSuccessful = False
return S_OK( { 'AllPut' : allSuccessful, 'Files' : filesPut, 'Size' : sizePut } )
def getDirectory( self, path, localPath = False ):
""" Get a local copy in the current directory of a physical file specified by its path
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
failed = {}
successful = {}
self.log.debug( "SRM2Storage.getDirectory: Attempting to get local copies of %s directories." % len( urls ) )
for src_dir in urls:
dirName = os.path.basename( src_dir )
if localPath:
dest_dir = "%s/%s" % ( localPath, dirName )
else:
dest_dir = "%s/%s" % ( os.getcwd(), dirName )
res = self.__getDir( src_dir, dest_dir )
if res['OK']:
if res['Value']['AllGot']:
self.log.debug( "SRM2Storage.getDirectory: Successfully got local copy of %s" % src_dir )
successful[src_dir] = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
else:
self.log.error( "SRM2Storage.getDirectory: Failed to get entire directory.", src_dir )
failed[src_dir] = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
else:
self.log.error( "SRM2Storage.getDirectory: Completely failed to get local copy of directory.", src_dir )
failed[src_dir] = {'Files':0, 'Size':0}
return S_OK( {'Failed' : failed, 'Successful' : successful } )
def __getDir( self, srcDirectory, destDirectory ):
""" Black magic contained within...
"""
filesGot = 0
sizeGot = 0
# Check the remote directory exists
res = self.__executeOperation( srcDirectory, 'isDirectory' )
if not res['OK']:
self.log.error( "SRM2Storage.__getDir: Failed to find the supplied source directory.", srcDirectory )
return res
if not res['Value']:
errStr = "SRM2Storage.__getDir: The supplied source path is not a directory."
self.log.error( errStr, srcDirectory )
return DError( errno.ENOTDIR, errStr )
# Check the local directory exists and create it if not
if not os.path.exists( destDirectory ):
os.makedirs( destDirectory )
# Get the remote directory contents
res = self.__getDirectoryContents( srcDirectory )
if not res['OK']:
errStr = "SRM2Storage.__getDir: Failed to list the source directory."
self.log.error( errStr, srcDirectory )
return res
filesToGet = res['Value']['Files']
subDirs = res['Value']['SubDirs']
allSuccessful = True
res = self.getFile( filesToGet.keys(), destDirectory )
if not res['OK']:
self.log.error( "SRM2Storage.__getDir: Failed to get files from storage.", res['Message'] )
allSuccessful = False
else:
for fileSize in res['Value']['Successful'].itervalues():
filesGot += 1
sizeGot += fileSize
if res['Value']['Failed']:
allSuccessful = False
for subDir in subDirs:
subDirName = os.path.basename( subDir )
localPath = '%s/%s' % ( destDirectory, subDirName )
res = self.__getDir( subDir, localPath )
if res['OK']:
if not res['Value']['AllGot']:
allSuccessful = False
filesGot += res['Value']['Files']
sizeGot += res['Value']['Size']
return S_OK( { 'AllGot' : allSuccessful, 'Files' : filesGot, 'Size' : sizeGot } )
def removeDirectory( self, path, recursive = False ):
""" Remove a directory
"""
if recursive:
return self.__removeDirectoryRecursive( path )
else:
return self.__removeDirectory( path )
def __removeDirectory( self, directory ):
""" This function removes the directory on the storage
"""
res = checkArgumentFormat( directory )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "SRM2Storage.__removeDirectory: Attempting to remove %s directories." % len( urls ) )
resDict = self.__gfal_removedir_wrapper( urls )
if not resDict["OK"]:
self.log.error( "Failed __removeDirectory:", "%s" % resDict["Message"] )
return resDict
resDict = resDict["Value"]
failed = resDict['Failed']
allResults = resDict['AllResults']
successful = {}
for urlDict in allResults:
if "surl" in urlDict:
pathSURL = urlDict['surl']
if urlDict['status'] == 0:
self.log.debug( "__removeDirectory: Successfully removed directory: %s" % pathSURL )
successful[pathSURL] = True
elif urlDict['status'] == 2:
# This is the case where the file doesn't exist.
self.log.debug( "__removeDirectory: Directory did not exist, sucessfully removed: %s" % pathSURL )
successful[pathSURL] = True
else:
errStr = "removeDirectory: Failed to remove directory."
errMessage = urlDict['ErrorMessage']
self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
failed[pathSURL] = "%s %s" % ( errStr, errMessage )
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __removeDirectoryRecursive( self, directory ):
""" Recursively removes the directory and sub dirs. Repeatedly calls itself to delete recursively.
"""
res = checkArgumentFormat( directory )
if not res['OK']:
return res
urls = res['Value']
successful = {}
failed = {}
self.log.debug( "SRM2Storage.__removeDirectory: Attempting to recursively remove %s directories." % len( urls ) )
for directory in urls:
self.log.debug( "SRM2Storage.removeDirectory: Attempting to remove %s" % directory )
res = self.__getDirectoryContents( directory )
resDict = {'FilesRemoved':0, 'SizeRemoved':0}
if not res['OK']:
failed[directory] = resDict
else:
filesToRemove = res['Value']['Files']
subDirs = res['Value']['SubDirs']
# Remove all the files in the directory
res = self.__removeDirectoryFiles( filesToRemove )
resDict['FilesRemoved'] += res['FilesRemoved']
resDict['SizeRemoved'] += res['SizeRemoved']
allFilesRemoved = res['AllRemoved']
# Remove all the sub-directories
res = self.__removeSubDirectories( subDirs )
resDict['FilesRemoved'] += res['FilesRemoved']
resDict['SizeRemoved'] += res['SizeRemoved']
allSubDirsRemoved = res['AllRemoved']
# If all the files and sub-directories are removed then remove the directory
allRemoved = False
if allFilesRemoved and allSubDirsRemoved:
self.log.debug( "SRM2Storage.removeDirectory: Successfully removed all files and sub-directories." )
res = self.__removeDirectory( directory )
if res['OK']:
if directory in res['Value']['Successful']:
self.log.debug( "SRM2Storage.removeDirectory: Successfully removed the directory %s." % directory )
allRemoved = True
# Report the result
if allRemoved:
successful[directory] = resDict
else:
failed[directory] = resDict
return S_OK ( { 'Failed' : failed, 'Successful' : successful } )
def __getDirectoryContents( self, directory ):
""" ls of storage element :directory:
:param self: self reference
:param str directory: SE path
"""
directory = directory.rstrip( '/' )
errMessage = "SRM2Storage.__getDirectoryContents: Failed to list directory."
res = self.listDirectory( directory, internalCall = True )
if not res['OK']:
self.log.error( errMessage, res['Message'] )
return res
if directory in res['Value']['Failed']:
self.log.error( errMessage, res['Value']['Failed'][directory] )
return S_ERROR( errMessage )
surlsDict = res['Value']['Successful'][directory]['Files']
subDirsDict = res['Value']['Successful'][directory]['SubDirs']
filesToRemove = dict( [ ( url, surlsDict[url]['Size'] ) for url in surlsDict ] )
return S_OK ( { 'Files' : filesToRemove, 'SubDirs' : subDirsDict.keys() } )
def __removeDirectoryFiles( self, filesToRemove ):
""" rm files from SE
:param self: self reference
:param dict filesToRemove: dict with surls as keys
"""
resDict = { 'FilesRemoved' : 0, 'SizeRemoved' : 0, 'AllRemoved' : True }
if len( filesToRemove ) > 0:
res = self.removeFile( filesToRemove.keys() )
if res['OK']:
for removedSurl in res['Value']['Successful']:
resDict['FilesRemoved'] += 1
resDict['SizeRemoved'] += filesToRemove[removedSurl]
if res['Value']['Failed']:
resDict['AllRemoved'] = False
self.log.debug( "SRM2Storage.__removeDirectoryFiles:",
"Removed %s files of size %s bytes." % ( resDict['FilesRemoved'], resDict['SizeRemoved'] ) )
return resDict
def __removeSubDirectories( self, subDirectories ):
""" rm -rf sub-directories
:param self: self reference
:param dict subDirectories: dict with surls as keys
"""
resDict = { 'FilesRemoved' : 0, 'SizeRemoved' : 0, 'AllRemoved' : True }
if len( subDirectories ) > 0:
res = self.__removeDirectoryRecursive( subDirectories )
if res['OK']:
for removedSubDir, removedDict in res['Value']['Successful'].items():
resDict['FilesRemoved'] += removedDict['FilesRemoved']
resDict['SizeRemoved'] += removedDict['SizeRemoved']
self.log.debug( "SRM2Storage.__removeSubDirectories:",
"Removed %s files of size %s bytes from %s." % ( removedDict['FilesRemoved'],
removedDict['SizeRemoved'],
removedSubDir ) )
for removedSubDir, removedDict in res['Value']['Failed'].items():
resDict['FilesRemoved'] += removedDict['FilesRemoved']
resDict['SizeRemoved'] += removedDict['SizeRemoved']
self.log.debug( "SRM2Storage.__removeSubDirectories:",
"Removed %s files of size %s bytes from %s." % ( removedDict['FilesRemoved'],
removedDict['SizeRemoved'],
removedSubDir ) )
if len( res['Value']['Failed'] ) != 0:
resDict['AllRemoved'] = False
return resDict
@staticmethod
def __parse_stat( stat ):
""" get size, ftype and mode from stat struct
:param stat: stat struct
"""
statDict = { 'File' : False, 'Directory' : False }
if S_ISREG( stat[ST_MODE] ):
statDict['File'] = True
statDict['Size'] = stat[ST_SIZE]
if S_ISDIR( stat[ST_MODE] ):
statDict['Directory'] = True
statDict['Mode'] = S_IMODE( stat[ST_MODE] )
return statDict
def __parse_file_metadata( self, urlDict ):
""" parse and save bits and pieces of metadata info
:param self: self reference
:param urlDict: gfal call results
"""
statDict = self.__parse_stat( urlDict['stat'] )
if statDict['File']:
statDict.setdefault( "Checksum", "" )
if "checksum" in urlDict and ( urlDict['checksum'] != '0x' ):
statDict["Checksum"] = urlDict["checksum"]
if 'locality' in urlDict:
urlLocality = urlDict['locality']
if re.search( 'ONLINE', urlLocality ):
statDict['Cached'] = 1
else:
statDict['Cached'] = 0
if re.search( 'NEARLINE', urlLocality ):
statDict['Migrated'] = 1
else:
statDict['Migrated'] = 0
statDict['Lost'] = 0
if re.search( 'LOST', urlLocality ):
statDict['Lost'] = 1
statDict['Unavailable'] = 0
if re.search( 'UNAVAILABLE', urlLocality ):
statDict['Unavailable'] = 1
return statDict
def __getProtocols( self ):
""" returns list of protocols to use at a given site
:warn: priority is given to a protocols list defined in the CS
:param self: self reference
"""
sections = gConfig.getSections( '/Resources/StorageElements/%s/' % ( self.name ) )
if not sections['OK']:
return sections
protocolsList = []
for section in sections['Value']:
path = '/Resources/StorageElements/%s/%s/PluginName' % ( self.name, section )
if gConfig.getValue( path, '' ) == self.pluginName:
protPath = '/Resources/StorageElements/%s/%s/ProtocolsList' % ( self.name, section )
siteProtocols = gConfig.getValue( protPath, [] )
if siteProtocols:
self.log.debug( 'Found SE protocols list to override defaults:', ', '.join( siteProtocols, ) )
protocolsList = siteProtocols
if not protocolsList:
self.log.debug( "SRM2Storage.getTransportURL: No protocols provided, using defaults." )
protocolsList = gConfig.getValue( '/Resources/StorageElements/DefaultProtocols', [] )
if not protocolsList:
return DError( DErrno.ECONF, "SRM2Storage.getTransportURL: No local protocols defined and no defaults found" )
return S_OK( protocolsList )
#######################################################################
#
# These methods wrap the gfal functionality with the accounting. All these are based on __gfal_operation_wrapper()
#
#######################################################################
def __gfal_lsdir_wrapper( self, urls ):
""" This is a hack because the structures returned by the different SEs are different
"""
step = 200
gfalDict = {}
gfalDict['defaultsetype'] = 'srmv2'
gfalDict['no_bdii_check'] = 1
gfalDict['srmv2_lslevels'] = 1
gfalDict['srmv2_lscount'] = step
failed = {}
successful = []
for url in urls:
allResults = []
gfalDict['surls'] = [url]
gfalDict['nbfiles'] = 1
gfalDict['timeout'] = self.gfalLongTimeOut
allObtained = False
iteration = 0
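# The directory is listed in pages of 'step' entries: srmv2_lsoffset advances by 'step' on each
# iteration, and the loop stops once a page returns fewer than 'step' entries or once the SE
# answers with SRM_FAILURE (which some implementations return when the offset runs past the end).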
while not allObtained:
gfalDict['srmv2_lsoffset'] = iteration * step
iteration += 1
res = self.__gfal_operation_wrapper( 'gfal_ls', gfalDict )
# gDataStoreClient.addRegister( res['AccountingOperation'] )
if not res['OK']:
if re.search( '\[SE\]\[Ls\]\[SRM_FAILURE\]', res['Message'] ):
allObtained = True
else:
failed[url] = res['Message']
else:
results = res['Value']
tempStep = step
if len( results ) == 1:
for result in results:
if 'subpaths' in result:
results = result['subpaths']
tempStep = step - 1
elif re.search( re.escape( result['surl'] ), url ):
results = []
allResults.extend( results )
if len( results ) < tempStep:
allObtained = True
for urlDict in allResults:
if 'surl' in urlDict:
urlDict['surl'] = self.__convertRandomSRMOutputIntoAFullURL( urlDict['surl'] )['Value']
successful.append( { 'surl' : url, 'status' : 0, 'subpaths' : allResults } )
# gDataStoreClient.commit()
return S_OK( { "AllResults" : successful, "Failed" : failed } )
def __gfal_ls_wrapper( self, urls, depth ):
""" gfal_ls wrapper
:param self: self reference
:param list urls: urls to check
:param int depth: srmv2_lslevel (0 or 1)
"""
gfalDict = {}
gfalDict['defaultsetype'] = 'srmv2'
gfalDict['no_bdii_check'] = 1
gfalDict['srmv2_lslevels'] = depth
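# The gfal call is driven through this plain dict; 'surls', 'nbfiles' and 'timeout' are filled in
# per chunk below, the URL list being split into chunks of 'filesPerCall' with the timeout scaled
# by the chunk size.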
allResults = []
failed = {}
listOfLists = breakListIntoChunks( urls.keys(), self.filesPerCall )
for urls in listOfLists:
gfalDict['surls'] = urls
gfalDict['nbfiles'] = len( urls )
gfalDict['timeout'] = self.fileTimeout * len( urls )
res = self.__gfal_operation_wrapper( 'gfal_ls', gfalDict )
# gDataStoreClient.addRegister( res['AccountingOperation'] )
if not res['OK']:
for url in urls:
failed[url] = res['Message']
else:
allResults.extend( res['Value'] )
# gDataStoreClient.commit()
return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfal_prestage_wrapper( self, urls, lifetime ):
""" gfal_prestage wrapper
:param self: self reference
:param list urls: urls to prestage
:param int lifetime: prestage lifetime
"""
gfalDict = {}
gfalDict['defaultsetype'] = 'srmv2'
gfalDict['no_bdii_check'] = 1
gfalDict['srmv2_spacetokendesc'] = self.spaceToken
gfalDict['srmv2_desiredpintime'] = lifetime
gfalDict['protocols'] = self.defaultLocalProtocols
allResults = []
failed = {}
listOfLists = breakListIntoChunks( urls.keys(), self.filesPerCall )
for urls in listOfLists:
gfalDict['surls'] = urls
gfalDict['nbfiles'] = len( urls )
gfalDict['timeout'] = self.stageTimeout
res = self.__gfal_operation_wrapper( 'gfal_prestage',
gfalDict,
timeout_sendreceive = self.fileTimeout * len( urls ) )
gDataStoreClient.addRegister( res['AccountingOperation'] )
if not res['OK']:
for url in urls:
failed[url] = res['Message']
else:
allResults.extend( res['Value'] )
# gDataStoreClient.commit()
return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfalturlsfromsurls_wrapper( self, urls, listProtocols ):
""" This is a function that can be reused everywhere to perform the gfal_turlsfromsurls
"""
gfalDict = {}
gfalDict['defaultsetype'] = 'srmv2'
gfalDict['no_bdii_check'] = 1
gfalDict['protocols'] = listProtocols
gfalDict['srmv2_spacetokendesc'] = self.spaceToken
allResults = []
failed = {}
listOfLists = breakListIntoChunks( urls.keys(), self.filesPerCall )
for urls in listOfLists:
gfalDict['surls'] = urls
gfalDict['nbfiles'] = len( urls )
gfalDict['timeout'] = self.fileTimeout * len( urls )
res = self.__gfal_operation_wrapper( 'gfal_turlsfromsurls', gfalDict )
gDataStoreClient.addRegister( res['AccountingOperation'] )
if not res['OK']:
for url in urls:
failed[url] = res['Message']
else:
allResults.extend( res['Value'] )
# gDataStoreClient.commit()
return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfaldeletesurls_wrapper( self, urls ):
""" This is a function that can be reused everywhere to perform the gfal_deletesurls
"""
gfalDict = {}
gfalDict['defaultsetype'] = 'srmv2'
gfalDict['no_bdii_check'] = 1
allResults = []
failed = {}
listOfLists = breakListIntoChunks( urls.keys(), self.filesPerCall )
for urls in listOfLists:
gfalDict['surls'] = urls
gfalDict['nbfiles'] = len( urls )
gfalDict['timeout'] = self.fileTimeout * len( urls )
res = self.__gfal_operation_wrapper( 'gfal_deletesurls', gfalDict )
gDataStoreClient.addRegister( res['AccountingOperation'] )
if not res['OK']:
for url in urls:
failed[url] = res['Message']
else:
allResults.extend( res['Value'] )
# gDataStoreClient.commit()
return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfal_removedir_wrapper( self, urls ):
""" This is a function that can be reused everywhere to perform the gfal_removedir
"""
gfalDict = {}
gfalDict['defaultsetype'] = 'srmv2'
gfalDict['no_bdii_check'] = 1
gfalDict['srmv2_spacetokendesc'] = self.spaceToken
allResults = []
failed = {}
listOfLists = breakListIntoChunks( urls.keys(), self.filesPerCall )
for urls in listOfLists:
gfalDict['surls'] = urls
gfalDict['nbfiles'] = len( urls )
gfalDict['timeout'] = self.fileTimeout * len( urls )
res = self.__gfal_operation_wrapper( 'gfal_removedir', gfalDict )
gDataStoreClient.addRegister( res['AccountingOperation'] )
if not res['OK']:
for url in urls:
failed[url] = res['Message']
else:
allResults.extend( res['Value'] )
# gDataStoreClient.commit()
return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfal_pin_wrapper( self, urls, lifetime ):
""" gfal_pin wrapper
:param self: self reference
:param dict urls: dict { url : srmRequestID }
:param int lifetime: pin lifetime in seconds
"""
gfalDict = {}
gfalDict['defaultsetype'] = 'srmv2'
gfalDict['no_bdii_check'] = 0
gfalDict['srmv2_spacetokendesc'] = self.spaceToken
gfalDict['srmv2_desiredpintime'] = lifetime
allResults = []
failed = {}
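# Group the input URLs by their SRM request ID so that each gfal call carries a single request
# token; within every group the URLs are further split into chunks of 'filesPerCall'.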
srmRequestFiles = {}
for url, srmRequestID in urls.items():
if srmRequestID not in srmRequestFiles:
srmRequestFiles[srmRequestID] = []
srmRequestFiles[srmRequestID].append( url )
for srmRequestID, urls in srmRequestFiles.items():
listOfLists = breakListIntoChunks( urls, self.filesPerCall )
for urls in listOfLists:
gfalDict['surls'] = urls
gfalDict['nbfiles'] = len( urls )
gfalDict['timeout'] = self.fileTimeout * len( urls )
res = self.__gfal_operation_wrapper( 'gfal_pin', gfalDict, srmRequestID = srmRequestID )
gDataStoreClient.addRegister( res['AccountingOperation'] )
if not res['OK']:
for url in urls:
failed[url] = res['Message']
else:
allResults.extend( res['Value'] )
# gDataStoreClient.commit()
return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfal_prestagestatus_wrapper( self, urls ):
""" gfal_prestagestatus wrapper
:param self: self reference
:param dict urls: dict { url : srmRequestID }
"""
gfalDict = {}
gfalDict['defaultsetype'] = 'srmv2'
gfalDict['no_bdii_check'] = 0
gfalDict['srmv2_spacetokendesc'] = self.spaceToken
allResults = []
failed = {}
srmRequestFiles = {}
for url, srmRequestID in urls.items():
if srmRequestID not in srmRequestFiles:
srmRequestFiles[srmRequestID] = []
srmRequestFiles[srmRequestID].append( url )
for srmRequestID, urls in srmRequestFiles.items():
listOfLists = breakListIntoChunks( urls, self.filesPerCall )
for urls in listOfLists:
gfalDict['surls'] = urls
gfalDict['nbfiles'] = len( urls )
gfalDict['timeout'] = self.fileTimeout * len( urls )
res = self.__gfal_operation_wrapper( 'gfal_prestagestatus', gfalDict, srmRequestID = srmRequestID )
gDataStoreClient.addRegister( res['AccountingOperation'] )
if not res['OK']:
for url in urls:
failed[url] = res['Message']
else:
allResults.extend( res['Value'] )
# gDataStoreClient.commit()
return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfal_release_wrapper( self, urls ):
""" gfal_release wrapper
:param self: self reference
:param dict urls: dict { url : srmRequestID }
"""
gfalDict = {}
gfalDict['defaultsetype'] = 'srmv2'
gfalDict['no_bdii_check'] = 0
allResults = []
failed = {}
srmRequestFiles = {}
for url, srmRequestID in urls.items():
if srmRequestID not in srmRequestFiles:
srmRequestFiles[srmRequestID] = []
srmRequestFiles[srmRequestID].append( url )
for srmRequestID, urls in srmRequestFiles.items():
listOfLists = breakListIntoChunks( urls, self.filesPerCall )
for urls in listOfLists:
gfalDict['surls'] = urls
gfalDict['nbfiles'] = len( urls )
gfalDict['timeout'] = self.fileTimeout * len( urls )
res = self.__gfal_operation_wrapper( 'gfal_release', gfalDict, srmRequestID = srmRequestID )
gDataStoreClient.addRegister( res['AccountingOperation'] )
if not res['OK']:
for url in urls:
failed[url] = res['Message']
else:
allResults.extend( res['Value'] )
# gDataStoreClient.commit()
return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfal_operation_wrapper( self, operation, gfalDict, srmRequestID = None, timeout_sendreceive = None ):
""" gfal fcn call wrapper
:param self: self reference
:param str operation: gfal fcn name
:param dict gfalDict: gfal dict passed to create gfal object
:param srmRequestID: srmRequestID
:param int timeout_sendreceive: gfal sendreceive timeout in seconds
"""
# Create an accounting DataOperation record for each operation
oDataOperation = self.__initialiseAccountingObject( operation, self.name, gfalDict['nbfiles'] )
oDataOperation.setStartTime()
start = time.time()
res = self.__importExternals()
if not res['OK']:
oDataOperation.setEndTime()
oDataOperation.setValueByKey( 'TransferTime', 0. )
oDataOperation.setValueByKey( 'TransferOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
res['AccountingOperation'] = oDataOperation
return res
# # timeout for one gfal_exec call
timeout = gfalDict['timeout'] if not timeout_sendreceive else timeout_sendreceive
# # pythonCall timeout: const + timeout * ( 2 ** retry )
pyTimeout = 300 + ( timeout * ( 2 ** self.gfalRetry ) )
res = pythonCall( pyTimeout, self.__gfal_wrapper, operation, gfalDict, srmRequestID, timeout_sendreceive )
end = time.time()
oDataOperation.setEndTime()
oDataOperation.setValueByKey( 'TransferTime', end - start )
if not res['OK']:
oDataOperation.setValueByKey( 'TransferOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
res['AccountingOperation'] = oDataOperation
return res
res = res['Value']
if not res['OK']:
oDataOperation.setValueByKey( 'TransferOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
else:
for urlDict in res['Value']:
if 'surl' in urlDict:
urlDict['surl'] = self.__convertRandomSRMOutputIntoAFullURL( urlDict['surl'] )['Value']
res['AccountingOperation'] = oDataOperation
return res
def __gfal_wrapper( self, operation, gfalDict, srmRequestID = None, timeout_sendreceive = None ):
""" execute gfal :operation:
1. create gfalObject from gfalDict
2. set srmRequestID
3. call __gfal_exec
4. get gfal ids
5. get gfal results
6. destroy gfal object
:param self: self reference
:param str operation: fcn to call
:param dict gfalDict: gfal config dict
:param srmRequestID: srm request id
:param int timeout_sendreceive: timeout for gfal send request and receive results in seconds
"""
gfalObject = self.__create_gfal_object( gfalDict )
if not gfalObject["OK"]:
return gfalObject
gfalObject = gfalObject['Value']
if srmRequestID:
res = self.__gfal_set_ids( gfalObject, srmRequestID )
if not res['OK']:
return res
res = self.__gfal_exec( gfalObject, operation, timeout_sendreceive )
if not res['OK']:
return res
gfalObject = res['Value']
res = self.__gfal_get_ids( gfalObject )
if not res['OK']:
newSRMRequestID = srmRequestID
else:
newSRMRequestID = res['Value']
res = self.__get_results( gfalObject )
if not res['OK']:
return res
resultList = []
pfnRes = res['Value']
for myDict in pfnRes:
myDict['SRMReqID'] = newSRMRequestID
resultList.append( myDict )
self.__destroy_gfal_object( gfalObject )
return S_OK( resultList )
@staticmethod
def __initialiseAccountingObject( operation, se, files ):
""" create DataOperation accounting object
:param str operation: operation performed
:param str se: destination SE name
:param int files: nb of files
"""
import DIRAC
accountingDict = {}
accountingDict['OperationType'] = operation
result = getProxyInfo()
if not result['OK']:
userName = 'system'
else:
userName = result['Value'].get( 'username', 'unknown' )
accountingDict['User'] = userName
accountingDict['Protocol'] = 'gfal'
accountingDict['RegistrationTime'] = 0.0
accountingDict['RegistrationOK'] = 0
accountingDict['RegistrationTotal'] = 0
accountingDict['Destination'] = se
accountingDict['TransferTotal'] = files
accountingDict['TransferOK'] = files
accountingDict['TransferSize'] = files
accountingDict['TransferTime'] = 0.0
accountingDict['FinalStatus'] = 'Successful'
accountingDict['Source'] = DIRAC.siteName()
oDataOperation = DataOperation()
oDataOperation.setValuesFromDict( accountingDict )
return oDataOperation
#######################################################################
#
# The following methods provide the interaction with gfal functionality
#
#######################################################################
def __create_gfal_object( self, gfalDict ):
""" create gfal object by calling gfal.gfal_init
:param self: self reference
:param dict gfalDict: gfal params dict
"""
self.log.debug( "SRM2Storage.__create_gfal_object: Performing gfal_init." )
errCode, gfalObject, errMessage = self.gfal.gfal_init( gfalDict )
if not errCode == 0:
errStr = "SRM2Storage.__create_gfal_object: Failed to perform gfal_init."
if not errMessage:
errMessage = os.strerror( self.gfal.gfal_get_errno() )
self.log.error( errStr, errMessage )
return DError( self.gfal.gfal_get_errno(), errMessage )
else:
self.log.debug( "SRM2Storage.__create_gfal_object: Successfully performed gfal_init." )
return S_OK( gfalObject )
def __gfal_set_ids( self, gfalObject, srmRequestID ):
""" set :srmRequestID:
:param self: self reference
:param gfalObject: gfal object
:param str srmRequestID: srm request id
"""
self.log.debug( "SRM2Storage.__gfal_set_ids: Performing gfal_set_ids." )
errCode, gfalObject, errMessage = self.gfal.gfal_set_ids( gfalObject, None, 0, str( srmRequestID ) )
if not errCode == 0:
errStr = "SRM2Storage.__gfal_set_ids: Failed to perform gfal_set_ids."
if not errMessage:
errMessage = os.strerror( errCode )
self.log.error( errStr, errMessage )
return DError( errCode, errMessage )
else:
self.log.debug( "SRM2Storage.__gfal_set_ids: Successfully performed gfal_set_ids." )
return S_OK( gfalObject )
def __gfal_exec( self, gfalObject, method, timeout_sendreceive = None ):
"""
In gfal, for every method (synchronous or asynchronous), you can define a sendreceive timeout and a connect timeout.
The connect timeout sets the maximum amount of time a client accepts to wait before establishing a successful TCP
connection to SRM (default 60 seconds).
The sendreceive timeout allows a client to set the maximum time the send
of a request to SRM can take (normally all send operations return immediately unless there is no free TCP buffer)
and the maximum time to receive a reply (a token for example). Default 0, i.e. no timeout.
The srm timeout for asynchronous requests defaults to 3600 seconds
gfal_set_timeout_connect (int value)
gfal_set_timeout_sendreceive (int value)
gfal_set_timeout_bdii (int value)
gfal_set_timeout_srm (int value)
"""
self.log.debug( "SRM2Storage.__gfal_exec(%s): Starting" % method )
fcn = None
if hasattr( self.gfal, method ) and callable( getattr( self.gfal, method ) ):
fcn = getattr( self.gfal, method )
if not fcn:
return DError( DErrno.ENOMETH, "%s is not a member function of gfal" % method )
# return S_ERROR( "Unable to invoke %s for gfal, it isn't a member function" % method )
# # retry
retry = self.gfalRetry if self.gfalRetry else 1
# # initial timeout
timeout = timeout_sendreceive if timeout_sendreceive else self.gfalTimeout
# # errCode, errMessage, errNo
errCode, errMessage, errNo = 0, "", 0
for _i in range( retry ):
self.gfal.gfal_set_timeout_sendreceive( timeout )
errCode, gfalObject, errMessage = fcn( gfalObject )
if not errCode:
break
errNo = self.gfal.gfal_get_errno()
if errCode == -1 and errNo == errno.ECOMM:
timeout *= 2
self.log.debug( "SRM2Storage.__gfal_exec(%s): got ECOMM, extending timeout to %s s" % ( method, timeout ) )
if errCode:
errStr = "SRM2Storage.__gfal_exec(%s): Execution failed." % method
if not errMessage:
errMessage = os.strerror( errNo ) if errNo else "UNKNOWN ERROR"
self.log.error( errStr, errMessage )
return DError( errCode, errMessage )
self.log.debug( "SRM2Storage.__gfal_exec(%s): Successfully invoked." % method )
return S_OK( gfalObject )
def __get_results( self, gfalObject ):
""" retrive gfal results
:param self: self reference
:param gfalObject: gfal object
"""
self.log.debug( "SRM2Storage.__get_results: Performing gfal_get_results" )
numberOfResults, gfalObject, listOfResults = self.gfal.gfal_get_results( gfalObject )
if numberOfResults <= 0:
errObj = DError( DErrno.EGFAL, "SRM2Storage.__get_results: Did not obtain results with gfal_get_results." )
self.log.error( errObj )
return errObj
else:
self.log.debug( "SRM2Storage.__get_results: Retrieved %s results from gfal_get_results." % numberOfResults )
for result in listOfResults:
if result['status'] != 0:
errMessage = "Unknown error"
if result['explanation']:
errMessage = result['explanation']
elif result['status'] > 0:
errMessage = os.strerror( result['status'] )
result['ErrorMessage'] = errMessage
return S_OK( listOfResults )
def __gfal_get_ids( self, gfalObject ):
""" get srmRequestToken
:param self: self reference
:param gfalObject: gfalObject
"""
self.log.debug( "SRM2Storage.__gfal_get_ids: Performing gfal_get_ids." )
numberOfResults, gfalObject, _srm1RequestID, _srm1FileIDs, srmRequestToken = self.gfal.gfal_get_ids( gfalObject )
if numberOfResults <= 0:
errObj = DError( DErrno.EGFAL, "__gfal_get_ids could not obtain request ID" )
self.log.error( errObj )
return errObj
else:
self.log.debug( "SRM2Storage.__get_gfal_ids: Retrieved SRM request ID %s." % srmRequestToken )
return S_OK( srmRequestToken )
def __destroy_gfal_object( self, gfalObject ):
""" del gfal object by calling gfal.gfal_internal_free
:param self: self reference
:param gfalObject: gfalObject
"""
self.log.debug( "SRM2Storage.__destroy_gfal_object: Performing gfal_internal_free." )
self.gfal.gfal_internal_free( gfalObject )
return S_OK()
|
vmendez/DIRAC
|
Resources/Storage/SRM2Storage.py
|
Python
|
gpl-3.0
| 84,297
|
[
"DIRAC"
] |
db4d1d55db8d4b15635e6ff7febafc02a38759022e52f4acf80b5d64acd0b447
|
u"""
setup.py: Install ODIN
"""
import os, sys, re
from os.path import join as pjoin
from glob import glob
#try:
#from setuptools import Extension, setup
#except:
from distutils.extension import Extension
from distutils.core import setup
from Cython.Distutils import build_ext
import numpy
import subprocess
from subprocess import CalledProcessError
# ------------------------------------------------------------------------------
# HEADER
#
VERSION = "0.0.1"
ISRELEASED = False
__author__ = "TJ Lane"
__version__ = VERSION
metadata = {
'name': 'odin',
'version': VERSION,
'author': __author__,
'author_email': 'tjlane@stanford.edu',
'license': 'GPL v3.0',
'url': 'https://github.com/tjlane/odin',
'download_url': 'https://github.com/tjlane/odin',
'install_requires': ['numpy', 'scipy', 'matplotlib', 'pyyaml', 'mdtraj',
'nose', 'cython>=0.16', 'tables'],
'dependency_links' : ['https://github.com/kif/fabio/tarball/master#egg=fabio-0.1.3'],
'platforms': ['Linux', 'OSX'],
'zip_safe': False,
'test_suite': "nose.collector",
'description': "Code for Structure Determination",
'long_description': """ODIN is a simulation toolpackage for producing
models of biomolecular structures consistent with a large set of experimental
data."""}
# ------------------------------------------------------------------------------
# HELPER FUNCTIONS -- path finding, git, python version, readthedocs
#
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def print_warning(string):
print bcolors.WARNING + string + bcolors.ENDC
def find_in_path(name, path):
"Find a file in a search path"
#adapted from http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
def git_version():
"""
Return the git revision as a string.
Copied from numpy setup.py
"""
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd, stdout = subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
# ------------------------------------------------------------------------------
# GPU FUNCTION WRAPPING -- nvcc support
# python distutils doesn't have NVCC by default
#
def locate_cuda():
"""
Locate the CUDA environment on the system
Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
and values giving the absolute path to each directory.
Starts by looking for the CUDA_HOME env variable. If not found, everything
is based on finding 'nvcc' in the PATH.
"""
# first check if the CUDA_HOME env variable is in use
if 'CUDA_HOME' in os.environ:
home = os.environ['CUDA_HOME']
nvcc = pjoin(home, 'bin', 'nvcc')
else:
# otherwise, search the PATH for NVCC
nvcc = find_in_path('nvcc', os.environ['PATH'])
if nvcc is None:
print_warning('The nvcc binary could not be located in your $PATH. '
                      'Either add it to your path, or set $CUDA_HOME.')
return False
home = os.path.dirname(os.path.dirname(nvcc))
cudaconfig = {'home':home, 'nvcc':nvcc,
'include': pjoin(home, 'include'),
'lib64': pjoin(home, 'lib64')}
print "CUDA config:", cudaconfig
for k, v in cudaconfig.iteritems():
if not os.path.exists(v):
print_warning('The CUDA %s path could not be located in %s' % (k, v))
return False
return cudaconfig
CUDA = locate_cuda()
if CUDA == False:
CUDA_SUCCESS = False
else:
CUDA_SUCCESS = True
def customize_compiler_for_nvcc(self):
"""
Inject deep into distutils to customize how the dispatch
to gcc/nvcc works.
If you subclass UnixCCompiler, it's not trivial to get your subclass
injected in, and still have the right customizations (i.e.
distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note: it's kind of like a weird functional
subclassing going on.
"""
    # tell the compiler it can process .cu source files
self.src_extensions.append('.cu')
    # save references to the default compiler_so and _compile methods
default_compiler_so = self.compiler_so
super = self._compile
# now redefine the _compile method. This gets executed for each
# object but distutils doesn't have the ability to change compilers
# based on source extension: we add it.
def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
if os.path.splitext(src)[1] == '.cu':
# use the cuda for .cu files
self.set_executable('compiler_so', CUDA['nvcc'])
# use only a subset of the extra_postargs, which are 1-1 translated
# from the extra_compile_args in the Extension class
postargs = extra_postargs['nvcc']
else:
postargs = extra_postargs['gcc']
super(obj, src, ext, cc_args, postargs, pp_opts)
# reset the default compiler_so, which we might have changed for cuda
self.compiler_so = default_compiler_so
# inject our redefined _compile method into the class
self._compile = _compile
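# NOTE (illustrative sketch, not part of the original build script): extensions
# compiled through this customization are expected to pass extra_compile_args
# as a dict keyed by compiler, matching the lookup in _compile above, e.g.
#   Extension('odin._gpu',                     # hypothetical module name
#             sources=['src/gpu/kernel.cu'],   # hypothetical source path
#             extra_compile_args={'gcc':  ['-O3'],
#                                 'nvcc': ['-c', '--compiler-options', '-fPIC']})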
class custom_build_ext(build_ext):
def build_extensions(self):
customize_compiler_for_nvcc(self.compiler)
build_ext.build_extensions(self)
# ------------------------------------------------------------------------------
metadata['packages'] = ['odin', 'odin.scripts', 'odin.smfret']
metadata['package_dir'] = {'odin' : 'src/python',
'odin.scripts' : 'scripts',
'odin.smfret' : 'src/python/smfret'}
metadata['ext_modules'] = []
metadata['scripts'] = [s for s in glob('scripts/*') if not s.endswith('__.py')]
metadata['data_files'] = [('reference', glob('./reference/*'))]
metadata['cmdclass'] = {'build_ext': custom_build_ext}
# ------------------------------------------------------------------------------
if __name__ == '__main__':
setup(**metadata) # ** will unpack dictionary 'metadata' providing the values as arguments
|
tjlane/odin
|
setup.py
|
Python
|
gpl-2.0
| 7,114
|
[
"MDTraj"
] |
facb8bbc2705ca00e4416313ad634cf2339a438bdee2f04498777cce3c2876f6
|
# CREATED:2013-08-13 12:02:42 by Brian McFee <brm2132@columbia.edu>
'''
Evaluation criteria for structural segmentation fall into two categories:
boundary annotation and structural annotation. Boundary annotation is the task
of predicting the times at which structural changes occur, such as when a verse
transitions to a refrain. Metrics for boundary annotation compare estimated
segment boundaries to reference boundaries. Structural annotation is the task
of assigning labels to detected segments. The estimated labels may be
arbitrary strings - such as A, B, C, - and they need not describe functional
concepts. Metrics for structural annotation are similar to those used for
clustering data.
Conventions
-----------
Both boundary and structural annotation metrics require two dimensional arrays
with two columns, one for boundary start times and one for boundary end times.
Structural annotation further requires lists of reference and estimated segment
labels whose length equals the number of rows in the
corresponding list of boundary edges. In both tasks, we assume that
annotations express a partitioning of the track into intervals. The function
:func:`mir_eval.util.adjust_intervals` can be used to pad or crop the segment
boundaries to span the duration of the entire track.
Metrics
-------
* :func:`mir_eval.segment.detection`: An estimated boundary is considered
correct if it falls within a window around a reference boundary
* :func:`mir_eval.segment.deviation`: Computes the median absolute time
difference from a reference boundary to its nearest estimated boundary, and
vice versa
* :func:`mir_eval.segment.pairwise`: For classifying pairs of sampled time
instants as belonging to the same structural component
* :func:`mir_eval.segment.rand_index`: Clusters reference and estimated
annotations and compares them by the Rand Index
* :func:`mir_eval.segment.ari`: Computes the Rand index, adjusted for chance
* :func:`mir_eval.segment.nce`: Interprets sampled reference and estimated
labels as samples of random variables :math:`Y_R, Y_E` from which the
conditional entropy of :math:`Y_R` given :math:`Y_E` (Under-Segmentation) and
:math:`Y_E` given :math:`Y_R` (Over-Segmentation) are estimated
* :func:`mir_eval.segment.mutual_information`: Computes the standard,
normalized, and adjusted mutual information of sampled reference and
estimated segments
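For example (an illustrative sketch; the ``.lab`` file names are placeholders),
all of the above can be computed at once via :func:`mir_eval.segment.evaluate`:
>>> ref_intervals, ref_labels = mir_eval.io.load_labeled_intervals('ref.lab')
>>> est_intervals, est_labels = mir_eval.io.load_labeled_intervals('est.lab')
>>> scores = mir_eval.segment.evaluate(ref_intervals, ref_labels,
                                       est_intervals, est_labels)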
'''
import numpy as np
import scipy.stats
import scipy.sparse
import scipy.misc
import scipy.special
import collections
import warnings
from mir_eval import util
# For hierarchical eval
import tree
def validate_boundary(reference_intervals, estimated_intervals, trim):
'''Checks that the input annotations to a segment boundary estimation
metric (i.e. one that only takes in segment intervals) look like valid
segment times, and throws helpful errors if not.
:parameters:
- reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
- trim : bool
will the start and end events be trimmed?
:raises:
- ValueError
Thrown when the provided annotations are not valid.
'''
if trim:
# If we're trimming, then we need at least 2 intervals
min_size = 2
else:
# If we're not trimming, then we only need one interval
min_size = 1
if len(reference_intervals) < min_size:
warnings.warn("Reference intervals are empty.")
if len(estimated_intervals) < min_size:
warnings.warn("Estimated intervals are empty.")
for intervals in [reference_intervals, estimated_intervals]:
util.validate_intervals(intervals)
def validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels):
'''Checks that the input annotations to a structure estimation metric (i.e.
one that takes in both segment boundaries and their labels) look like valid
segment times and labels, and throws helpful errors if not.
:parameters:
- reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
:raises:
- ValueError
Thrown when the provided annotations are not valid.
'''
for (intervals, labels) in [(reference_intervals, reference_labels),
(estimated_intervals, estimated_labels)]:
util.validate_intervals(intervals)
if intervals.shape[0] != len(labels):
raise ValueError('Number of intervals does not match number '
'of labels')
# Check only when intervals are non-empty
if intervals.size > 0:
# Make sure intervals start at 0
if not np.allclose(intervals[0, 0], 0.0):
raise ValueError('Segment intervals do not start at 0')
if reference_intervals.size == 0:
warnings.warn("Reference intervals are empty.")
if estimated_intervals.size == 0:
warnings.warn("Estimated intervals are empty.")
# Check only when intervals are non-empty
if reference_intervals.size > 0 and estimated_intervals.size > 0:
if not np.allclose(reference_intervals[-1, 1],
estimated_intervals[-1, 1]):
raise ValueError('End times do not match')
def detection(reference_intervals, estimated_intervals,
window=0.5, beta=1.0, trim=False):
'''Boundary detection hit-rate.
    A hit is counted whenever a reference boundary is within ``window`` of an
estimated boundary. Note that each boundary is matched at most once: this
is achieved by computing the size of a maximal matching between reference
and estimated boundary points, subject to the window constraint.
:usage:
>>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')
>>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')
>>> # With 0.5s windowing
>>> P05, R05, F05 = mir_eval.boundary.detection(ref_intervals,
est_intervals,
window=0.5)
>>> # With 3s windowing
>>> P3, R3, F3 = mir_eval.boundary.detection(ref_intervals,
est_intervals,
window=3)
>>> # Ignoring hits for the beginning and end of track
>>> P, R, F = mir_eval.boundary.detection(ref_intervals,
est_intervals,
window=0.5,
trim=True)
:parameters:
- reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
- window : float > 0
        size of the window of 'correctness' around ground-truth boundaries
(in seconds)
- beta : float > 0
weighting constant for F-measure.
- trim : boolean
if ``True``, the first and last boundary times are ignored.
Typically, these denote start (0) and end-markers.
:returns:
- precision : float
precision of estimated predictions
- recall : float
        recall of reference boundaries
- f_measure : float
F-measure (weighted harmonic mean of ``precision`` and ``recall``)
:raises:
- ValueError
Thrown when the provided annotations are not valid.
:references:
.. [#] D. Turnbull, G. Lanckriet, E. Pampalk, and M. Goto. A supervised
approach for detecting boundaries in music using difference
features and boosting. In Proceedings of the 8th International
Society for Music Information Retrieval Conference (ISMIR), pages
51-54, 2007.
'''
validate_boundary(reference_intervals, estimated_intervals, trim)
# Convert intervals to boundaries
reference_boundaries = util.intervals_to_boundaries(reference_intervals)
estimated_boundaries = util.intervals_to_boundaries(estimated_intervals)
# Suppress the first and last intervals
if trim:
reference_boundaries = reference_boundaries[1:-1]
estimated_boundaries = estimated_boundaries[1:-1]
# If we have no boundaries, we get no score.
if len(reference_boundaries) == 0 or len(estimated_boundaries) == 0:
return 0.0, 0.0, 0.0
matching = util.match_events(reference_boundaries,
estimated_boundaries,
window)
precision = float(len(matching)) / len(estimated_boundaries)
recall = float(len(matching)) / len(reference_boundaries)
f_measure = util.f_measure(precision, recall, beta=beta)
return precision, recall, f_measure
def deviation(reference_intervals, estimated_intervals, trim=False):
'''Compute the median deviations between reference
and estimated boundary times.
:usage:
>>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')
>>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')
>>> r_to_e, e_to_r = mir_eval.boundary.deviation(ref_intervals,
est_intervals)
:parameters:
- reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
- trim : boolean
if ``True``, the first and last intervals are ignored.
Typically, these denote start (0.0) and end-of-track markers.
:returns:
- reference_to_estimated : float
median time from each reference boundary to the
closest estimated boundary
- estimated_to_reference : float
median time from each estimated boundary to the
closest reference boundary
:raises:
- ValueError
Thrown when the provided annotations are not valid.
:references:
.. [#] D. Turnbull, G. Lanckriet, E. Pampalk, and M. Goto. A supervised
approach for detecting boundaries in music using difference
features and boosting. In Proceedings of the 8th International
Society for Music Information Retrieval Conference (ISMIR), pages
51-54, 2007.
'''
validate_boundary(reference_intervals, estimated_intervals, trim)
# Convert intervals to boundaries
reference_boundaries = util.intervals_to_boundaries(reference_intervals)
estimated_boundaries = util.intervals_to_boundaries(estimated_intervals)
# Suppress the first and last intervals
if trim:
reference_boundaries = reference_boundaries[1:-1]
estimated_boundaries = estimated_boundaries[1:-1]
# If we have no boundaries, we get no score.
if len(reference_boundaries) == 0 or len(estimated_boundaries) == 0:
return np.nan, np.nan
dist = np.abs(np.subtract.outer(reference_boundaries,
estimated_boundaries))
estimated_to_reference = np.median(dist.min(axis=0))
reference_to_estimated = np.median(dist.min(axis=1))
return reference_to_estimated, estimated_to_reference
def pairwise(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=0.1, beta=1.0):
'''Frame-clustering segmentation evaluation by pair-wise agreement.
:usage:
>>> (ref_intervals,
ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> # Trim or pad the estimate to match reference timing
>>> (ref_intervals,
ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
ref_labels,
t_min=0)
>>> (est_intervals,
est_labels) = mir_eval.util.adjust_intervals(est_intervals,
est_labels,
t_min=0,
t_max=ref_intervals.max())
>>> precision, recall, f = mir_eval.structure.pairwise(ref_intervals,
ref_labels,
est_intervals,
est_labels)
:parameters:
- reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- frame_size : float > 0
length (in seconds) of frames for clustering
- beta : float > 0
beta value for F-measure
:returns:
- precision : float > 0
Precision of detecting whether frames belong in the same cluster
- recall : float > 0
Recall of detecting whether frames belong in the same cluster
- f : float > 0
F-measure of detecting whether frames belong in the same cluster
:raises:
- ValueError
Thrown when the provided annotations are not valid.
:references:
.. [#] M. Levy and M. Sandler. Structural segmentation of musical audio
by constrained clustering. IEEE Transactions on Audio, Speech, and
Language Processing, 16(2):318-326, 2008.
'''
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0., 0., 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
# Build the reference label agreement matrix
agree_ref = np.equal.outer(y_ref, y_ref)
# Count the unique pairs
n_agree_ref = (agree_ref.sum() - len(y_ref)) / 2.0
# Repeat for estimate
agree_est = np.equal.outer(y_est, y_est)
n_agree_est = (agree_est.sum() - len(y_est)) / 2.0
# Find where they agree
matches = np.logical_and(agree_ref, agree_est)
n_matches = (matches.sum() - len(y_ref)) / 2.0
precision = n_matches / n_agree_est
recall = n_matches / n_agree_ref
f_measure = util.f_measure(precision, recall, beta=beta)
return precision, recall, f_measure
def rand_index(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=0.1, beta=1.0):
'''(Non-adjusted) Rand index.
:usage:
>>> (ref_intervals,
ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> # Trim or pad the estimate to match reference timing
>>> (ref_intervals,
ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
ref_labels,
t_min=0)
>>> (est_intervals,
est_labels) = mir_eval.util.adjust_intervals(est_intervals,
est_labels,
t_min=0,
t_max=ref_intervals.max())
>>> rand_index = mir_eval.structure.rand_index(ref_intervals,
ref_labels,
est_intervals,
est_labels)
:parameters:
- reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- frame_size : float > 0
length (in seconds) of frames for clustering
- beta : float > 0
beta value for F-measure
:returns:
- rand_index : float > 0
Rand index
:raises:
- ValueError
Thrown when the provided annotations are not valid.
:references:
.. [#] W. M. Rand. Objective criteria for the evaluation of clustering
methods. Journal of the American Statistical association,
66(336):846-850, 1971.
'''
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
        return 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
# Build the reference label agreement matrix
agree_ref = np.equal.outer(y_ref, y_ref)
# Repeat for estimate
agree_est = np.equal.outer(y_est, y_est)
# Find where they agree
matches_pos = np.logical_and(agree_ref, agree_est)
# Find where they disagree
matches_neg = np.logical_and(~agree_ref, ~agree_est)
n_pairs = len(y_ref) * (len(y_ref) - 1) / 2.0
n_matches_pos = (matches_pos.sum() - len(y_ref)) / 2.0
n_matches_neg = matches_neg.sum() / 2.0
rand = (n_matches_pos + n_matches_neg) / n_pairs
return rand
def _contingency_matrix(reference_indices, estimated_indices):
'''
Computes the contingency matrix of a true labeling vs an estimated one.
:parameters:
- reference_indices : np.ndarray
Array of reference indices
- estimated_indices : np.ndarray
Array of estimated indices
:returns:
- contingency_matrix : np.ndarray
Contingency matrix, shape=(#reference indices, #estimated indices)
.. note:: Based on sklearn.metrics.cluster.contingency_matrix
'''
ref_classes, ref_class_idx = np.unique(reference_indices,
return_inverse=True)
est_classes, est_class_idx = np.unique(estimated_indices,
return_inverse=True)
n_ref_classes = ref_classes.shape[0]
n_est_classes = est_classes.shape[0]
# Using coo_matrix is faster than histogram2d
return scipy.sparse.coo_matrix((np.ones(ref_class_idx.shape[0]),
(ref_class_idx, est_class_idx)),
shape=(n_ref_classes, n_est_classes),
dtype=np.int).toarray()
def _adjusted_rand_index(reference_indices, estimated_indices):
'''
    Compute the Rand index, adjusted for chance.
:parameters:
- reference_indices : np.ndarray
Array of reference indices
- estimated_indices : np.ndarray
Array of estimated indices
:returns:
- ari : float
Adjusted Rand index
.. note:: Based on sklearn.metrics.cluster.adjusted_rand_score
'''
n_samples = len(reference_indices)
ref_classes = np.unique(reference_indices)
est_classes = np.unique(estimated_indices)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (ref_classes.shape[0] == est_classes.shape[0] == 1
or ref_classes.shape[0] == est_classes.shape[0] == 0
or (ref_classes.shape[0] == est_classes.shape[0] ==
len(reference_indices))):
return 1.0
contingency = _contingency_matrix(reference_indices, estimated_indices)
# Compute the ARI using the contingency data
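    # The adjusted index has the form (Index - Expected) / (MaxIndex - Expected):
    # Index sums C(n_ij, 2) over the contingency cells (sum_comb below),
    # Expected is prod_comb, and MaxIndex is taken as mean_comb.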
sum_comb_c = sum(scipy.misc.comb(n_c, 2, exact=1) for n_c in
contingency.sum(axis=1))
sum_comb_k = sum(scipy.misc.comb(n_k, 2, exact=1) for n_k in
contingency.sum(axis=0))
sum_comb = sum((scipy.misc.comb(n_ij, 2, exact=1) for n_ij in
contingency.flatten()))
prod_comb = (sum_comb_c * sum_comb_k)/float(scipy.misc.comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c)/2.
return ((sum_comb - prod_comb)/(mean_comb - prod_comb))
def ari(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=0.1):
'''Adjusted Rand Index (ARI) for frame clustering segmentation evaluation.
:usage:
>>> (ref_intervals,
ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> # Trim or pad the estimate to match reference timing
>>> (ref_intervals,
ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
ref_labels,
t_min=0)
>>> (est_intervals,
est_labels) = mir_eval.util.adjust_intervals(est_intervals,
est_labels,
t_min=0,
t_max=ref_intervals.max())
>>> ari_score = mir_eval.structure.ari(ref_intervals, ref_labels,
est_intervals, est_labels)
:parameters:
- reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- frame_size : float > 0
length (in seconds) of frames for clustering
:returns:
- ari_score : float > 0
Adjusted Rand index between segmentations.
:raises:
- ValueError
Thrown when the provided annotations are not valid.
:references:
.. [#] W. M. Rand. Objective criteria for the evaluation of clustering
methods. Journal of the American Statistical association,
66(336):846-850, 1971.
.. note::
It is assumed that ``intervals[-1]`` == length of song
.. note::
Segment intervals will be rounded down to the nearest multiple
of frame_size.
'''
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
        return 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
return _adjusted_rand_index(y_ref, y_est)
def _mutual_info_score(reference_indices, estimated_indices, contingency=None):
'''
Compute the mutual information between two sequence labelings.
:parameters:
- reference_indices : np.ndarray
Array of reference indices
- estimated_indices : np.ndarray
Array of estimated indices
- contingency : np.ndarray
Pre-computed contingency matrix. If None, one will be computed.
:returns:
- mi : float
Mutual information
.. note:: Based on sklearn.metrics.cluster.mutual_info_score
'''
if contingency is None:
contingency = _contingency_matrix(reference_indices,
estimated_indices).astype(float)
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
log_outer = -np.log(outer[nnz]) + np.log(pi.sum()) + np.log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - np.log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
def _entropy(labels):
'''
Calculates the entropy for a labeling.
:parameters:
- labels : list-like
List of labels.
:returns:
- entropy : float
Entropy of the labeling.
.. note:: Based on sklearn.metrics.cluster.entropy
'''
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = np.bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - np.log(pi_sum)))
def _adjusted_mutual_info_score(reference_indices, estimated_indices):
'''
Compute the mutual information between two sequence labelings, adjusted for
chance.
:parameters:
- reference_indices : np.ndarray
Array of reference indices
- estimated_indices : np.ndarray
Array of estimated indices
:returns:
- ami : float <= 1.0
          Adjusted mutual information
.. note:: Based on sklearn.metrics.cluster.adjusted_mutual_info_score
and sklearn.metrics.cluster.expected_mutual_info_score
'''
n_samples = len(reference_indices)
ref_classes = np.unique(reference_indices)
est_classes = np.unique(estimated_indices)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (ref_classes.shape[0] == est_classes.shape[0] == 1
or ref_classes.shape[0] == est_classes.shape[0] == 0):
return 1.0
contingency = _contingency_matrix(reference_indices,
estimated_indices).astype(float)
# Calculate the MI for the two clusterings
mi = _mutual_info_score(reference_indices, estimated_indices,
contingency=contingency)
# The following code is based on
# sklearn.metrics.cluster.expected_mutual_information
R, C = contingency.shape
N = float(n_samples)
a = np.sum(contingency, axis=1).astype(np.int32)
b = np.sum(contingency, axis=0).astype(np.int32)
# There are three major terms to the EMI equation, which are multiplied to
# and then summed over varying nij values.
# While nijs[0] will never be used, having it simplifies the indexing.
nijs = np.arange(0, max(np.max(a), np.max(b)) + 1, dtype='float')
    # Stops divide-by-zero warnings. As it's not used, this is not an issue.
nijs[0] = 1
# term1 is nij / N
term1 = nijs / N
# term2 is log((N*nij) / (a * b)) == log(N * nij) - log(a * b)
# term2 uses the outer product
log_ab_outer = np.log(np.outer(a, b))
# term2 uses N * nij
log_Nnij = np.log(N * nijs)
    # term3 is large, and involves many factorials. Calculate these in log
# space to stop overflows.
gln_a = scipy.special.gammaln(a + 1)
gln_b = scipy.special.gammaln(b + 1)
gln_Na = scipy.special.gammaln(N - a + 1)
gln_Nb = scipy.special.gammaln(N - b + 1)
gln_N = scipy.special.gammaln(N + 1)
gln_nij = scipy.special.gammaln(nijs + 1)
# start and end values for nij terms for each summation.
start = np.array([[v - N + w for w in b] for v in a], dtype='int')
start = np.maximum(start, 1)
end = np.minimum(np.resize(a, (C, R)).T, np.resize(b, (R, C))) + 1
# emi itself is a summation over the various values.
emi = 0
for i in range(R):
for j in range(C):
for nij in range(start[i, j], end[i, j]):
term2 = log_Nnij[nij] - log_ab_outer[i, j]
# Numerators are positive, denominators are negative.
gln = (gln_a[i] + gln_b[j] + gln_Na[i] + gln_Nb[j]
- gln_N - gln_nij[nij]
- scipy.special.gammaln(a[i] - nij + 1)
- scipy.special.gammaln(b[j] - nij + 1)
- scipy.special.gammaln(N - a[i] - b[j] + nij + 1))
term3 = np.exp(gln)
emi += (term1[nij] * term2 * term3)
# Calculate entropy for each labeling
h_true, h_pred = _entropy(reference_indices), _entropy(estimated_indices)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def _normalized_mutual_info_score(reference_indices, estimated_indices):
'''
    Compute the mutual information between two sequence labelings, normalized
    by the geometric mean of the labelings' entropies.
:parameters:
- reference_indices : np.ndarray
Array of reference indices
- estimated_indices : np.ndarray
Array of estimated indices
:returns:
- nmi : float <= 1.0
Normalized mutual information
.. note:: Based on sklearn.metrics.cluster.normalized_mutual_info_score
'''
ref_classes = np.unique(reference_indices)
est_classes = np.unique(estimated_indices)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (ref_classes.shape[0] == est_classes.shape[0] == 1
or ref_classes.shape[0] == est_classes.shape[0] == 0):
return 1.0
contingency = _contingency_matrix(reference_indices,
estimated_indices).astype(float)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = _mutual_info_score(reference_indices, estimated_indices,
contingency=contingency)
    # Calculate entropy for each labeling
h_true, h_pred = _entropy(reference_indices), _entropy(estimated_indices)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def mutual_information(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=0.1):
'''Frame-clustering segmentation: mutual information metrics.
:usage:
>>> (ref_intervals,
ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> # Trim or pad the estimate to match reference timing
>>> (ref_intervals,
ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
ref_labels,
t_min=0)
>>> (est_intervals,
est_labels) = mir_eval.util.adjust_intervals(est_intervals,
est_labels,
t_min=0,
t_max=ref_intervals.max())
>>> mi, ami, nmi = mir_eval.structure.mutual_information(ref_intervals,
ref_labels,
est_intervals,
est_labels)
:parameters:
- reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- frame_size : float > 0
length (in seconds) of frames for clustering
:returns:
- MI : float > 0
Mutual information between segmentations
- AMI : float
Adjusted mutual information between segmentations.
- NMI : float > 0
          Normalized mutual information between segmentations
:raises:
- ValueError
Thrown when the provided annotations are not valid.
.. note::
It is assumed that `intervals[-1] == length of song`
.. note::
Segment intervals will be rounded down to the nearest multiple
of frame_size.
'''
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0., 0., 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
# Mutual information
mutual_info = _mutual_info_score(y_ref, y_est)
# Adjusted mutual information
adj_mutual_info = _adjusted_mutual_info_score(y_ref, y_est)
# Normalized mutual information
norm_mutual_info = _normalized_mutual_info_score(y_ref, y_est)
return mutual_info, adj_mutual_info, norm_mutual_info
def nce(reference_intervals, reference_labels, estimated_intervals,
estimated_labels, frame_size=0.1, beta=1.0):
'''Frame-clustering segmentation: normalized conditional entropy
Computes cross-entropy of cluster assignment, normalized by the
max-entropy.
:usage:
>>> (ref_intervals,
ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> # Trim or pad the estimate to match reference timing
>>> (ref_intervals,
ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
ref_labels,
t_min=0)
>>> (est_intervals,
est_labels) = mir_eval.util.adjust_intervals(est_intervals,
est_labels,
t_min=0,
t_max=ref_intervals.max())
>>> S_over, S_under, S_F = mir_eval.structure.nce(ref_intervals,
ref_labels,
est_intervals,
est_labels)
:parameters:
- reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- frame_size : float > 0
length (in seconds) of frames for clustering
- beta : float > 0
beta for F-measure
:returns:
- S_over
Over-clustering score:
``1 - H(y_est | y_ref) / log(|y_est|)``
If `|y_est|==1`, then `S_over` will be 0.
- S_under
Under-clustering score:
``1 - H(y_ref | y_est) / log(|y_ref|)``
If `|y_ref|==1`, then `S_under` will be 0.
- S_F
F-measure for (S_over, S_under)
:raises:
- ValueError
Thrown when the provided annotations are not valid.
:references:
.. [#] Hanna M. Lukashevich. "Towards Quantitative Measures of
Evaluating Song Segmentation," in Proceedings of the 9th
International Society for Music Information Retrieval Conference,
2007, pp. 375-380.
'''
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0., 0., 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
# Make the contingency table: shape = (n_ref, n_est)
contingency = _contingency_matrix(y_ref, y_est).astype(float)
# Normalize by the number of frames
contingency = contingency / len(y_ref)
# Compute the marginals
p_est = contingency.sum(axis=0)
p_ref = contingency.sum(axis=1)
# H(true | prediction) = sum_j P[estimated = j] *
# sum_i P[true = i | estimated = j] log P[true = i | estimated = j]
# entropy sums over axis=0, which is true labels
true_given_est = p_est.dot(scipy.stats.entropy(contingency, base=2))
pred_given_ref = p_ref.dot(scipy.stats.entropy(contingency.T, base=2))
score_under = 0.0
if contingency.shape[0] > 1:
score_under = 1. - true_given_est / np.log2(contingency.shape[0])
score_over = 0.0
if contingency.shape[1] > 1:
score_over = 1. - pred_given_ref / np.log2(contingency.shape[1])
f_measure = util.f_measure(score_over, score_under, beta=beta)
return score_over, score_under, f_measure
def evaluate(ref_intervals, ref_labels, est_intervals, est_labels, **kwargs):
'''
Compute all metrics for the given reference and estimated annotations.
:usage:
>>> (ref_intervals,
ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> scores = mir_eval.segment.evaluate(ref_intervals, ref_labels,
est_intervals, est_labels)
:parameters:
- ref_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- ref_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- est_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- est_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- kwargs
Additional keyword arguments which will be passed to the
appropriate metric or preprocessing functions.
:returns:
- scores : dict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
:raises:
- ValueError
Thrown when the provided annotations are not valid.
'''
# Adjust timespan of estimations relative to ground truth
ref_intervals, ref_labels = \
util.adjust_intervals(ref_intervals, labels=ref_labels, t_min=0.0)
est_intervals, est_labels = \
util.adjust_intervals(est_intervals, labels=est_labels, t_min=0.0,
t_max=ref_intervals[-1, -1])
# Now compute all the metrics
scores = collections.OrderedDict()
# Boundary detection
# Force these values for window
kwargs['window'] = .5
scores['Precision@0.5'], scores['Recall@0.5'], scores['F-measure@0.5'] = \
util.filter_kwargs(detection, ref_intervals, est_intervals, **kwargs)
kwargs['window'] = 3.0
scores['Precision@3.0'], scores['Recall@3.0'], scores['F-measure@3.0'] = \
util.filter_kwargs(detection, ref_intervals, est_intervals, **kwargs)
# Boundary deviation
scores['Ref-to-est deviation'], scores['Est-to-ref deviation'] = \
util.filter_kwargs(deviation, ref_intervals, est_intervals, **kwargs)
# Pairwise clustering
(scores['Pairwise Precision'],
scores['Pairwise Recall'],
scores['Pairwise F-measure']) = util.filter_kwargs(pairwise,
ref_intervals,
ref_labels,
est_intervals,
est_labels, **kwargs)
# Rand index
scores['Rand Index'] = util.filter_kwargs(rand_index, ref_intervals,
ref_labels, est_intervals,
est_labels, **kwargs)
# Adjusted rand index
scores['Adjusted Rand Index'] = util.filter_kwargs(ari, ref_intervals,
ref_labels,
est_intervals,
est_labels, **kwargs)
# Mutual information metrics
(scores['Mutual Information'],
scores['Adjusted Mutual Information'],
scores['Normalized Mutual Information']) = \
util.filter_kwargs(mutual_information, ref_intervals, ref_labels,
est_intervals, est_labels, **kwargs)
# Conditional entropy metrics
scores['NCE Over'], scores['NCE Under'], scores['NCE F-measure'] = \
util.filter_kwargs(nce, ref_intervals, ref_labels, est_intervals,
est_labels, **kwargs)
return scores
def hmeasure(ref_tree, est_tree, transitive=True, window=10, res=0.1,
beta=1.0):
'''
Computes the h-measure for the hierarchical segment annotations.
:parameters:
- ref_tree : tree.SegmentTree
reference hierarchical tree.
- est_tree : tree.SegmentTree
estimated hierarchical tree.
- transitive : bool
whether to compute the h-measures using transitivity or not.
- window : int
size of the window (in frames) to compute the h-measures.
- res : float > 0
frame rate in seconds.
- beta : float > 0
beta parameter for the F-measure.
:returns:
- h_over
Hierarchical oversegmentation score.
- h_under
Hierarchical undersegmentation score.
- h_measure
F-measure for (h_over, h_under)
'''
def round_time(t, res=0.1):
v = int(t / float(res)) * res
return v
def lca_matrix(tree, res):
'''
Input: a segment tree
Output: an n-by-n integer matrix indicating the height of the least
common ancestor of each pair of frames (i, j).
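        For example, two frames that fall inside the same depth-d segment get
        the value d, while frames that only share the root segment get 0.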
'''
# Figure out how many frames we need
n = int((round_time(tree.root.segment.end, res=res) -
round_time(tree.root.segment.start, res=res)) / res)
# Build a mapping of level->height
height_map = {}
# Initialize the LCA matrix
H = np.zeros( (n, n), dtype=np.uint8)
# Breadth-first traversal of the tree
queue = [tree.root]
while queue:
node = queue.pop(0)
# Get the node's level
if node.parent is not None:
height_map[node] = 1 + height_map[node.parent]
else:
height_map[node] = 0
s = int(round_time(node.segment.start, res=res) / res)
t = int(round_time(node.segment.end, res=res) / res)
H[s:t, s:t] = height_map[node]
queue.extend(node.children)
return H
def tree_gauc(H_ref, H_est, transitive, window, res):
# First, build the LCA matrices
# Make sure we have the right number of frames
assert H_ref.shape == H_est.shape
# How many frames?
n = H_ref.shape[0]
# By default, the window covers the entire track
if window is None:
window = n
# Initialize the score
score = 0.0
# Iterate over query frames
n_f = 0
for q in range(n):
# Find all pairs i,j such that H_ref[q, i] > H_ref[q, j]
R = H_ref[q, max(0, q-window):min(n, q+window)]
# And the same for the estimation
E = H_est[q, max(0, q-window):min(n, q+window)]
if transitive:
# Transitive: count comparisons across any level
S_ref = np.greater.outer(R, R)
else:
# Non-transitive: count comparisons only across immediate levels
S_ref = np.equal.outer(R, R+1)
S_est = np.greater.outer(E, E)
# Don't count (q,q) as a result
idx = min(q, window)
S_ref[idx, :] = False
S_ref[:, idx] = False
# Compute normalization constant
Z = float(S_ref.sum())
# Add up agreement for frames
if Z > 0:
score += np.sum(np.logical_and(S_ref, S_est)) / Z
n_f += 1.0
if n_f:
return score / n_f
# Convention: 0/0 = 0
return score
H_ref = lca_matrix(ref_tree, res)
H_est = lca_matrix(est_tree, res)
h_under = tree_gauc(H_ref, H_est, transitive, window, res)
h_over = tree_gauc(H_est, H_ref, transitive, window, res)
f_measure = util.f_measure(h_over, h_under, beta=beta)
return h_over, h_under, f_measure
|
urinieto/hier_eval
|
segment_tree.py
|
Python
|
gpl-3.0
| 51,887
|
[
"Brian"
] |
90c4c800da71e731e5212ec65b373376ef791e9675c74c2da3115c9ecadc273b
|
"""
Transfer functions with more complex dependencies.
$Id: basic.py 10790 2009-11-21 17:51:33Z antolikjan $
"""
__version__='$Revision: 10790 $'
import copy
import numpy, numpy.random
from numpy import ones
import param
import topo
import topo.base.functionfamily
from topo.base.arrayutil import clip_lower,array_argmax
from topo.base.patterngenerator import PatternGenerator,Constant
from topo.base.boundingregion import BoundingBox
from topo.base.sheetcoords import SheetCoordinateSystem
from topo.transferfn.basic import TransferFn,TransferFnWithState
from topo.pattern.basic import Gaussian
# Not suitable for basic.py due to its dependence on patterns.
class PatternCombine(TransferFn):
"""
Combine the supplied pattern with one generated using a
PatternGenerator.
Useful for operations like adding noise or masking out lesioned
items or around the edges of non-rectangular shapes.
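    For example (an illustrative sketch using the Gaussian pattern imported
    above), PatternCombine(generator=Gaussian(),operator=numpy.add) would add
    a Gaussian-shaped bump to each incoming activity matrix.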
"""
generator = param.ClassSelector(PatternGenerator,
default=Constant(), doc="""
Pattern to combine with the supplied matrix.""")
operator = param.Parameter(numpy.multiply,precedence=0.98,doc="""
Binary Numeric function used to combine the two patterns.
Any binary Numeric array "ufunc" returning the same type of
array as the operands and supporting the reduce operator is
allowed here. See topo.pattern.basic.Composite.operator for
more details.
""")
def __call__(self,x):
###JABHACKALERT: Need to set it up to be independent of
#density; right now only things like random numbers work
#reasonably
rows,cols = x.shape
bb = BoundingBox(points=((0,0), (rows,cols)))
generated_pattern = self.generator(bounds=bb,xdensity=1,ydensity=1).transpose()
new_pattern = self.operator(x, generated_pattern)
x *= 0.0
x += new_pattern
# Not suitable for basic.py due to its dependence on patterns.
class KernelMax(TransferFn):
"""
Replaces the given matrix with a kernel function centered around the maximum value.
This operation is usually part of the Kohonen SOM algorithm, and
approximates a series of lateral interactions resulting in a
single activity bubble.
The radius of the kernel (i.e. the surround) is specified by the
parameter 'radius', which should be set before using __call__.
The shape of the surround is determined by the
neighborhood_kernel_generator, and can be any PatternGenerator
instance, or any function accepting bounds, density, radius, and
height to return a kernel matrix.
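    For example (an illustrative sketch; the parameter values are arbitrary),
    KernelMax(kernel_radius=0.1,density=24) would replace the activity matrix
    with a Gaussian bubble of radius 0.1 sheet units, centered on the winning
    unit, for a sheet of density 24.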
"""
kernel_radius = param.Number(default=0.0,bounds=(0,None),doc="""
Kernel radius in Sheet coordinates.""")
neighborhood_kernel_generator = param.ClassSelector(PatternGenerator,
default=Gaussian(x=0.0,y=0.0,aspect_ratio=1.0),
doc="Neighborhood function")
crop_radius_multiplier = param.Number(default=3.0,doc="""
Factor by which the radius should be multiplied, when deciding
how far from the winner to keep evaluating the kernel.""")
density=param.Number(1.0,bounds=(0,None),doc="""
Density of the Sheet whose matrix we act on, for use
in converting from matrix to Sheet coordinates.""")
def __call__(self,x):
rows,cols = x.shape
radius = self.density*self.kernel_radius
crop_radius = int(max(1.25,radius*self.crop_radius_multiplier))
# find out the matrix coordinates of the winner
wr,wc = array_argmax(x)
# convert to sheet coordinates
wy = rows-wr-1
# Optimization: Calculate the bounding box around the winner
# in which weights will be changed
cmin = max(wc-crop_radius, 0)
cmax = min(wc+crop_radius+1,cols)
rmin = max(wr-crop_radius, 0)
rmax = min(wr+crop_radius+1,rows)
ymin = max(wy-crop_radius, 0)
ymax = min(wy+crop_radius+1,rows)
bb = BoundingBox(points=((cmin,ymin), (cmax,ymax)))
# generate the kernel matrix and insert it into the correct
# part of the output array
kernel = self.neighborhood_kernel_generator(bounds=bb,xdensity=1,ydensity=1,
size=2*radius,x=wc+0.5,y=wy+0.5)
x *= 0.0
x[rmin:rmax,cmin:cmax] = kernel
class HalfRectify(TransferFn):
"""
Transfer function that applies a half-wave rectification (clips at zero)
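    Concretely (as implemented in __call__ below), the incoming activity x is
    replaced by gain * max(0, x - t), where the threshold t starts at t_init
    and may be perturbed by additive noise at initialization.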
"""
    t_init = param.Number(default=0.0,doc="""The initial value of threshold at which output becomes non-zero.""")
gain = param.Number(default=1.0,doc="""The neuronal gain""")
randomized_init = param.Boolean(False,doc="""
Whether to randomize the initial t parameter.""")
noise_magnitude = param.Number(default=0.1,doc="""
The magnitude of the additive noise to apply to the t_init
parameter at initialization.""")
def __init__(self,**params):
super(TransferFn,self).__init__(**params)
self.first_call = True
def __call__(self,x):
if self.first_call:
self.first_call = False
if self.randomized_init:
self.t = ones(x.shape, x.dtype.char) * self.t_init + \
(topo.pattern.random.UniformRandom()(xdensity=x.shape[0],ydensity=x.shape[1])-0.5)*self.noise_magnitude*2
else:
self.t = ones(x.shape, x.dtype.char) * self.t_init
x -= self.t
clip_lower(x,0)
x *= self.gain
class HomeostaticResponse(TransferFnWithState):
"""
Adapts the parameters of a linear threshold function to maintain a
constant desired average activity.
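    Concretely (as implemented in __call__ below), the response is
    linear_slope * max(0, x - t); when plasticity is enabled, y_avg tracks an
    exponential moving average of the activity (weighted by smoothing) and the
    threshold t is nudged by learning_rate * (y_avg - target_activity).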
"""
target_activity = param.Number(default=0.024,doc="""
The target average activity.""")
linear_slope = param.Number(default=1.0,doc="""
Slope of the linear portion above threshold.""")
t_init = param.Number(default=0.15,doc="""
Initial value of the threshold.""")
learning_rate = param.Number(default=0.001,doc="""
Learning rate for homeostatic plasticity.""")
smoothing = param.Number(default=0.999,doc="""
Weighting of previous activity vs. current activity when
calculating the average.""")
randomized_init = param.Boolean(False,doc="""
Whether to randomize the initial t parameter.""")
noise_magnitude = param.Number(default=0.1,doc="""
The magnitude of the additive noise to apply to the t_init
parameter at initialization.""")
def __init__(self,**params):
super(HomeostaticResponse,self).__init__(**params)
self.first_call = True
self.__current_state_stack=[]
self.t=None # To allow state_push at init
self.y_avg=None # To allow state_push at init
def __call__(self,x):
if self.first_call:
self.first_call = False
if self.randomized_init:
# CEBALERT: UniformRandom's seed should be available
# as a parameter.
self.t = ones(x.shape, x.dtype.char) * self.t_init + \
(topo.pattern.random.UniformRandom() \
(xdensity=x.shape[0],ydensity=x.shape[1]) \
-0.5)*self.noise_magnitude*2
else:
self.t = ones(x.shape, x.dtype.char) * self.t_init
self.y_avg = ones(x.shape, x.dtype.char) * self.target_activity
x_orig = copy.copy(x)
x -= self.t
clip_lower(x,0)
x *= self.linear_slope
# CEBALERT: this line is at best confusing; needs to be
# commented or simplified!
if self.plastic & (float(topo.sim.time()) % 1.0 >= 0.54):
self.y_avg = (1.0-self.smoothing)*x + self.smoothing*self.y_avg
self.t += self.learning_rate * (self.y_avg - self.target_activity)
def state_push(self):
self.__current_state_stack.append((copy.copy(self.t),
copy.copy(self.y_avg),
copy.copy(self.first_call)))
super(HomeostaticResponse, self).state_push()
def state_pop(self):
self.t, self.y_avg, self.first_call = self.__current_state_stack.pop()
super(HomeostaticResponse, self).state_pop()
class AttributeTrackingTF(TransferFnWithState):
"""
Keeps track of attributes of a specified Parameterized over time, for analysis or plotting.
Useful objects to track include sheets (e.g. "topo.sim['V1']"),
projections ("topo.sim['V1'].projections['LateralInhibitory']"),
or an output_function.
Any attribute whose value is a matrix the same size as the
activity matrix can be tracked. Only specified units within this
matrix will be tracked.
If no object is specified, this function will keep track of the
incoming activity over time.
The results are stored in a dictionary named 'values', as (time,
value) pairs indexed by the parameter name and unit. For
instance, if the value of attribute 'x' is v for unit (0.0,0.0)
at time t, values['x'][(0.0,0.0)]=(t,v).
Updating of the tracked values can be disabled temporarily using
the plastic parameter.
"""
# ALERT: Need to make this read-only, because it can't be changed
# after instantiation unless _object is also changed. Or else
# need to make _object update whenever object is changed and
# _object has already been set.
object = param.Parameter(default=None, doc="""
Parameterized instance whose parameters will be tracked.
If this parameter's value is a string, it will be evaluated first
(by calling Python's eval() function). This feature is designed to
allow circular references, so that the OF can track the object that
owns it, without causing problems for recursive traversal (as for
script_repr()).""")
# There may be some way to achieve the above without using eval(), which would be better.
#JLALERT When using this function snapshots cannot be saved because of problem with eval()
attrib_names = param.List(default=[], doc="""
List of names of the function object's parameters that should be stored.""")
units = param.List(default=[(0.0,0.0)], doc="""
Sheet coordinates of the unit(s) for which parameter values will be stored.""")
step = param.Number(default=1, doc="""
How often to update the tracked values.
For instance, step=1 means to update them every time this OF is
called; step=2 means to update them every other time.""")
coordframe = param.Parameter(default=None,doc="""
The SheetCoordinateSystem to use to convert the position
into matrix coordinates. If this parameter's value is a string,
it will be evaluated first(by calling Python's eval() function).
This feature is designed to allow circular references,
so that the OF can track the object that
owns it, without causing problems for recursive traversal (as for
script_repr()).""")
def __init__(self,**params):
super(AttributeTrackingTF,self).__init__(**params)
self.values={}
self.n_step = 0
self._object=None
self._coordframe=None
for p in self.attrib_names:
self.values[p]={}
for u in self.units:
self.values[p][u]=[]
def __call__(self,x):
        if self._object is None:
if isinstance(self.object,str):
self._object=eval(self.object)
else:
self._object=self.object
        if self._coordframe is None:
if isinstance(self.coordframe,str) and isinstance(self._object,SheetCoordinateSystem):
            raise ValueError(str(self._object)+" is already a coordframe, no need to specify coordframe")
elif isinstance(self._object,SheetCoordinateSystem):
self._coordframe=self._object
elif isinstance(self.coordframe,str):
self._coordframe=eval(self.coordframe)
else:
raise ValueError("A coordinate frame (e.g. coordframe=topo.sim['V1']) must be specified in order to track"+str(self._object))
#collect values on each appropriate step
self.n_step += 1
if self.n_step == self.step:
self.n_step = 0
if self.plastic:
for p in self.attrib_names:
if p=="x":
value_matrix=x
else:
value_matrix= getattr(self._object, p)
for u in self.units:
mat_u=self._coordframe.sheet2matrixidx(u[0],u[1])
self.values[p][u].append((topo.sim.time(),value_matrix[mat_u]))
__all__ = list(set([k for k,v in locals().items() if isinstance(v,type) and issubclass(v,TransferFn)]))
|
jesuscript/topo-mpi
|
topo/transferfn/misc.py
|
Python
|
bsd-3-clause
| 13,244
|
[
"Gaussian"
] |
fbf59cd43595673f14161cd47c46e9b05878710ad445a80bca460462d4ca8f3b
|
#!/usr/bin/env python
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""
Simple DCGAN implementation for generating MNIST images.
"""
import os
from datetime import datetime
from neon.callbacks.callbacks import Callbacks, GANCostCallback
from neon.callbacks.plotting_callbacks import GANPlotCallback
from neon.data.image import MNIST
from neon.initializers import Gaussian
from neon.layers import GeneralizedGANCost, Sequential, Conv, Deconv
from neon.layers.container import GenerativeAdversarial
from neon.models.model import GAN
from neon.optimizers import Adam
from neon.transforms import Rectlin, Logistic, GANCost
from neon.util.argparser import NeonArgparser
from neon.util.persist import ensure_dirs_exist
# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--kbatch', type=int, default=1,
help='number of data batches per noise batch in training')
parser.add_argument('--subset_pct', type=float, default=100,
help='subset percentage of training dataset to use')
args = parser.parse_args()
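# Example invocation (a sketch; only the flags added above are shown, the
# remaining options come from the standard NeonArgparser defaults):
#   python mnist_dcgan.py --kbatch 5 --subset_pct 50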
# load up the mnist data set
dataset = MNIST(path=args.data_dir, subset_pct=args.subset_pct, size=27)
train_set = dataset.train_iter
valid_set = dataset.valid_iter
# setup weight initialization function
init = Gaussian(scale=0.05)
# generator using "deconvolution" layers
relu = Rectlin(slope=0) # relu for generator
conv = dict(init=init, batch_norm=True, activation=relu)
convp1 = dict(init=init, batch_norm=True, activation=relu, padding=1)
convp2 = dict(init=init, batch_norm=True, activation=relu, padding=2)
convp1s2 = dict(init=init, batch_norm=True, activation=relu, padding=1, strides=2)
G_layers = [Deconv((1, 1, 16), name="G11", **conv),
Deconv((3, 3, 192), name="G12", **convp1),
Deconv((3, 3, 192), name="G21", **convp1s2),
Deconv((3, 3, 192), name="G22", **convp1),
Deconv((3, 3, 96), name="G31", **convp1s2),
Deconv((3, 3, 96), name="G32", **conv),
Deconv((3, 3, 1), name="G_out",
init=init, batch_norm=False, padding=1,
activation=Logistic(shortcut=False))]
# discriminator using convolution layers
lrelu = Rectlin(slope=0.1) # leaky relu for discriminator
conv = dict(init=init, batch_norm=True, activation=lrelu)
convp1 = dict(init=init, batch_norm=True, activation=lrelu, padding=1)
convp1s2 = dict(init=init, batch_norm=True, activation=lrelu, padding=1, strides=2)
D_layers = [Conv((3, 3, 96), name="D11", **convp1),
Conv((3, 3, 96), name="D12", **convp1s2),
Conv((3, 3, 192), name="D21", **convp1),
Conv((3, 3, 192), name="D22", **convp1s2),
Conv((3, 3, 192), name="D31", **convp1),
Conv((1, 1, 16), name="D32", **conv),
Conv((7, 7, 1), name="D_out",
init=init, batch_norm=False,
activation=Logistic(shortcut=False))]
layers = GenerativeAdversarial(generator=Sequential(G_layers, name="Generator"),
discriminator=Sequential(D_layers, name="Discriminator"))
# setup cost function as CrossEntropy
cost = GeneralizedGANCost(costfunc=GANCost(func="modified"))
# setup optimizer
optimizer = Adam(learning_rate=0.0005, beta_1=0.5)
# initialize model
noise_dim = (2, 7, 7)
gan = GAN(layers=layers, noise_dim=noise_dim, k=args.kbatch)
# configure callbacks
callbacks = Callbacks(gan, eval_set=valid_set, **args.callback_args)
fdir = ensure_dirs_exist(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'results/'))
fname = os.path.splitext(os.path.basename(__file__))[0] +\
'_[' + datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + ']'
im_args = dict(filename=os.path.join(fdir, fname), hw=27,
num_samples=args.batch_size, nchan=1, sym_range=True)
callbacks.add_callback(GANPlotCallback(**im_args))
callbacks.add_callback(GANCostCallback())
# run fit
gan.fit(train_set, optimizer=optimizer,
num_epochs=args.epochs, cost=cost, callbacks=callbacks)
|
NervanaSystems/neon
|
examples/gan/mnist_dcgan.py
|
Python
|
apache-2.0
| 4,727
|
[
"Gaussian"
] |
28c4299ba749dad2f4c5b7a03919d7f9e2dcf3d4bb7539af0b5b3210a7e89e60
|
import nose
from pandas import DataFrame
import numpy as np
import pandas.util.testing as tm
from pandas.io.json import json_normalize, nested_to_record
def _assert_equal_data(left, right):
if not left.columns.equals(right.columns):
left = left.reindex(columns=right.columns)
tm.assert_frame_equal(left, right)
class TestJSONNormalize(tm.TestCase):
def setUp(self):
self.state_data = [
{'counties': [{'name': 'Dade', 'population': 12345},
{'name': 'Broward', 'population': 40000},
{'name': 'Palm Beach', 'population': 60000}],
'info': {'governor': 'Rick Scott'},
'shortname': 'FL',
'state': 'Florida'},
{'counties': [{'name': 'Summit', 'population': 1234},
{'name': 'Cuyahoga', 'population': 1337}],
'info': {'governor': 'John Kasich'},
'shortname': 'OH',
'state': 'Ohio'}]
def test_simple_records(self):
recs = [{'a': 1, 'b': 2, 'c': 3},
{'a': 4, 'b': 5, 'c': 6},
{'a': 7, 'b': 8, 'c': 9},
{'a': 10, 'b': 11, 'c': 12}]
result = json_normalize(recs)
expected = DataFrame(recs)
tm.assert_frame_equal(result, expected)
def test_simple_normalize(self):
result = json_normalize(self.state_data[0], 'counties')
expected = DataFrame(self.state_data[0]['counties'])
tm.assert_frame_equal(result, expected)
result = json_normalize(self.state_data, 'counties')
expected = []
for rec in self.state_data:
expected.extend(rec['counties'])
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
result = json_normalize(self.state_data, 'counties', meta='state')
expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2])
tm.assert_frame_equal(result, expected)
def test_more_deeply_nested(self):
data = [{'country': 'USA',
'states': [{'name': 'California',
'cities': [{'name': 'San Francisco',
'pop': 12345},
{'name': 'Los Angeles',
'pop': 12346}]
},
{'name': 'Ohio',
'cities': [{'name': 'Columbus',
'pop': 1234},
{'name': 'Cleveland',
'pop': 1236}]}
]
},
{'country': 'Germany',
'states': [{'name': 'Bayern',
'cities': [{'name': 'Munich', 'pop': 12347}]
},
{'name': 'Nordrhein-Westfalen',
'cities': [{'name': 'Duesseldorf', 'pop': 1238},
{'name': 'Koeln', 'pop': 1239}]}
]
}
]
result = json_normalize(data, ['states', 'cities'],
meta=['country', ['states', 'name']])
# meta_prefix={'states': 'state_'})
ex_data = {'country': ['USA'] * 4 + ['Germany'] * 3,
'states.name': ['California', 'California', 'Ohio', 'Ohio',
'Bayern', 'Nordrhein-Westfalen',
'Nordrhein-Westfalen'],
'name': ['San Francisco', 'Los Angeles', 'Columbus',
'Cleveland', 'Munich', 'Duesseldorf', 'Koeln'],
'pop': [12345, 12346, 1234, 1236, 12347, 1238, 1239]}
expected = DataFrame(ex_data, columns=result.columns)
tm.assert_frame_equal(result, expected)
def test_shallow_nested(self):
data = [{'state': 'Florida',
'shortname': 'FL',
'info': {
'governor': 'Rick Scott'
},
'counties': [{'name': 'Dade', 'population': 12345},
{'name': 'Broward', 'population': 40000},
{'name': 'Palm Beach', 'population': 60000}]},
{'state': 'Ohio',
'shortname': 'OH',
'info': {
'governor': 'John Kasich'
},
'counties': [{'name': 'Summit', 'population': 1234},
{'name': 'Cuyahoga', 'population': 1337}]}]
result = json_normalize(data, 'counties',
['state', 'shortname',
['info', 'governor']])
ex_data = {'name': ['Dade', 'Broward', 'Palm Beach', 'Summit',
'Cuyahoga'],
'state': ['Florida'] * 3 + ['Ohio'] * 2,
'shortname': ['FL', 'FL', 'FL', 'OH', 'OH'],
'info.governor': ['Rick Scott'] * 3 + ['John Kasich'] * 2,
'population': [12345, 40000, 60000, 1234, 1337]}
expected = DataFrame(ex_data, columns=result.columns)
tm.assert_frame_equal(result, expected)
def test_meta_name_conflict(self):
data = [{'foo': 'hello',
'bar': 'there',
'data': [{'foo': 'something', 'bar': 'else'},
{'foo': 'something2', 'bar': 'else2'}]}]
self.assertRaises(ValueError, json_normalize, data,
'data', meta=['foo', 'bar'])
result = json_normalize(data, 'data', meta=['foo', 'bar'],
meta_prefix='meta')
for val in ['metafoo', 'metabar', 'foo', 'bar']:
self.assertTrue(val in result)
def test_record_prefix(self):
result = json_normalize(self.state_data[0], 'counties')
expected = DataFrame(self.state_data[0]['counties'])
tm.assert_frame_equal(result, expected)
result = json_normalize(self.state_data, 'counties',
meta='state',
record_prefix='county_')
expected = []
for rec in self.state_data:
expected.extend(rec['counties'])
expected = DataFrame(expected)
expected = expected.rename(columns=lambda x: 'county_' + x)
expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2])
tm.assert_frame_equal(result, expected)
class TestNestedToRecord(tm.TestCase):
def test_flat_stays_flat(self):
recs = [dict(flat1=1, flat2=2),
dict(flat1=3, flat2=4),
]
result = nested_to_record(recs)
expected = recs
self.assertEqual(result, expected)
def test_one_level_deep_flattens(self):
data = dict(flat1=1,
dict1=dict(c=1, d=2))
result = nested_to_record(data)
expected = {'dict1.c': 1,
'dict1.d': 2,
'flat1': 1}
self.assertEqual(result, expected)
def test_nested_flattens(self):
data = dict(flat1=1,
dict1=dict(c=1, d=2),
nested=dict(e=dict(c=1, d=2),
d=2))
result = nested_to_record(data)
expected = {'dict1.c': 1,
'dict1.d': 2,
'flat1': 1,
'nested.d': 2,
'nested.e.c': 1,
'nested.e.d': 2}
self.assertEqual(result, expected)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb',
'--pdb-failure', '-s'], exit=False)
|
pjryan126/solid-start-careers
|
store/api/zillow/venv/lib/python2.7/site-packages/pandas/io/tests/test_json_norm.py
|
Python
|
gpl-2.0
| 7,841
|
[
"COLUMBUS"
] |
2ddf68a871536293983ea3a7707d20cb7979880952a36e8d6b77148f4e5646df
|
# Copyright 1999-2000 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
This module provides code to work with PubMed from the NCBI.
http://www.ncbi.nlm.nih.gov/PubMed/
Online documentation for linking to PubMed is available at:
http://www.ncbi.nlm.nih.gov/PubMed/linking.html
Classes:
Dictionary Access PubMed articles using a dictionary interface.
Functions:
search_for Search PubMed.
find_related Find related articles in PubMed.
download_many Download many articles from PubMed in batch mode.
"""
import string
import re
import sgmllib
from Bio import File
from Bio.WWW import RequestLimiter
from Bio.WWW import NCBI
from Bio import Medline
class Dictionary:
"""Access PubMed using a read-only dictionary interface.
Methods:
"""
def __init__(self, delay=5.0, parser=None):
"""Dictionary(delay=5.0, parser=None)
Create a new Dictionary to access PubMed. parser is an optional
parser (e.g. Medline.RecordParser) object to change the results
into another form. If set to None, then the raw contents of the
file will be returned. delay is the number of seconds to wait
between each query.
"""
self.parser = parser
self.limiter = RequestLimiter(delay)
def __len__(self):
raise NotImplementedError, "PubMed contains lots of entries"
def clear(self):
raise NotImplementedError, "This is a read-only dictionary"
def __setitem__(self, key, item):
raise NotImplementedError, "This is a read-only dictionary"
def update(self):
raise NotImplementedError, "This is a read-only dictionary"
def copy(self):
raise NotImplementedError, "You don't need to do this..."
def keys(self):
raise NotImplementedError, "You don't really want to do this..."
def items(self):
raise NotImplementedError, "You don't really want to do this..."
def values(self):
raise NotImplementedError, "You don't really want to do this..."
def has_key(self, id):
"""S.has_key(id) -> bool"""
try:
self[id]
except KeyError:
return 0
return 1
def get(self, id, failobj=None):
try:
return self[id]
except KeyError:
return failobj
raise "How did I get here?"
def __getitem__(self, id):
"""S.__getitem__(id) -> object
Return the Medline entry. id is either the Medline Unique ID
or the Pubmed ID of the article. Raises a KeyError if there's an
error.
"""
# First, check to see if enough time has passed since my
# last query.
self.limiter.wait()
try:
handle = NCBI.efetch(
db="pubmed", id=id, retmode='text', rettype='medlars')
except IOError, x:
# raise a KeyError instead of an IOError
# XXX I really should distinguish between a real IOError and
# if the id is not in the database.
raise KeyError, x
if self.parser is not None:
return self.parser.parse(handle)
return handle.read()
def search_for(search, reldate=None, mindate=None, maxdate=None,
batchsize=100, delay=2, callback_fn=None,
start_id=0, max_ids=None):
"""search_for(search[, reldate][, mindate][, maxdate]
[, batchsize][, delay][, callback_fn][, start_id][, max_ids]) -> ids
Search PubMed and return a list of the PMID's that match the
criteria. search is the search string used to search the
database. reldate is the number of days before the current
date to restrict the search. mindate and maxdate are the dates to
restrict the search, e.g. 2002/01/01. batchsize specifies the
number of ids to return at one time (default 100). delay is the
number of seconds to wait between queries (default 2).
callback_fn is an optional callback function that will be called
with each PMID as results are retrieved. start_id specifies the
index of the first id to retrieve and max_ids specifies the
maximum number of id's to retrieve.
retrieve.
XXX The date parameters don't seem to be working with NCBI's
script. Please let me know if you can get it to work.
"""
class ResultParser(sgmllib.SGMLParser):
# Parse the ID's out of the XML-formatted page that PubMed
# returns. The format of the page is:
# [...]
# <Id>...</Id>
# [...]
def __init__(self):
sgmllib.SGMLParser.__init__(self)
self.ids = []
self.in_id = 0
def start_id(self, attributes):
self.in_id = 1
def end_id(self):
self.in_id = 0
_not_pmid_re = re.compile(r'\D')
def handle_data(self, data):
if not self.in_id:
return
# If data is just whitespace, then ignore it.
data = string.strip(data)
if not data:
return
# Everything here should be a PMID. Check and make sure
# data really is one. A PMID should be a string consisting
# of only integers. Should I check to make sure it
# meets a certain minimum length?
if self._not_pmid_re.search(data):
raise ValueError, \
"I expected an ID, but %s doesn't look like one." % \
repr(data)
self.ids.append(data)
params = {
'db' : 'pubmed',
'term' : search,
'reldate' : reldate,
'mindate' : mindate,
'maxdate' : maxdate
}
for k, v in params.items():
if v is None:
del params[k]
limiter = RequestLimiter(delay)
ids = []
while max_ids is None or len(ids) < max_ids:
parser = ResultParser()
# Check to make sure enough time has passed before my
# last search. If not, then wait.
limiter.wait()
start = start_id + len(ids)
max = batchsize
if max_ids is not None and max > max_ids - len(ids):
max = max_ids - len(ids)
params['retstart'] = start
params['retmax'] = max
h = NCBI.esearch(**params)
parser.feed(h.read())
ids.extend(parser.ids)
if callback_fn is not None:
# Call the callback function with each of the new ID's.
for id in parser.ids:
callback_fn(id)
if len(parser.ids) < max or not parser.ids: # no more id's to read
break
return ids
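# Rough usage sketch for search_for (hypothetical query string; the callback
# simply collects PMIDs as each batch is parsed):
#
#     collected = []
#     ids = search_for("aspirin AND pharmacokinetics",
#                      batchsize=100, delay=2,
#                      callback_fn=collected.append, max_ids=250)
#     # len(ids) <= 250, and 'collected' holds the same ids in arrival order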
def find_related(pmid):
"""find_related(pmid) -> ids
Search PubMed for a list of citations related to pmid. pmid can
be a PubMed ID, a MEDLINE UID, or a list of those.
"""
class ResultParser(sgmllib.SGMLParser):
# Parse the ID's out of the HTML-formatted page that PubMed
# returns. The format of the page is:
# [...]
# <Link>
# <Id>######</Id>
# <Score>######</Score>
# [...]
# </Link>
# [...]
def __init__(self):
sgmllib.SGMLParser.__init__(self)
self.ids = []
self.in_link = 0
self.in_id = 0
def start_id(self, attributes):
self.in_id = 1
def end_id(self):
self.in_id = 0
def start_link(self, attributes):
self.in_link = 1
def end_link(self):
self.in_link = 0
_not_pmid_re = re.compile(r'\D')
def handle_data(self, data):
if not self.in_link or not self.in_id:
return
# Everything here should be a PMID. Check and make sure
# data really is one. A PMID should be a string consisting
# of only integers. Should I check to make sure it
# meets a certain minimum length?
if self._not_pmid_re.search(data):
raise ValueError, \
"I expected an ID, but '%s' doesn't look like one." % \
repr(data)
self.ids.append(data)
parser = ResultParser()
if type(pmid) is type([]):
pmid = string.join(pmid, ',')
h = NCBI.elink(dbfrom='pubmed', id=pmid)
parser.feed(h.read())
return parser.ids
def download_many(ids, callback_fn, broken_fn=None, delay=120.0, faildelay=5.0,
batchsize=500, parser=None):
"""download_many(ids, callback_fn[, broken_fn][, delay][, faildelay][, batchsize])
Download many records from PubMed. ids is a list of either the
Medline Unique ID or the PubMed ID's of the articles. Each time a
record is downloaded, callback_fn is called with the text of the
record. broken_fn is an optional function that is called with the
id of records that were not able to be downloaded. delay is the
number of seconds to wait between requests. batchsize is the
number of records to request each time.
"""
# parser is an undocumented parameter that allows people to
# specify an optional parser to handle each record. This is
# dangerous because the results may be malformed, and exceptions
# in the parser may disrupt the whole download process.
if batchsize > 500 or batchsize < 1:
raise ValueError, "batchsize must be between 1 and 500"
limiter = RequestLimiter(delay)
current_batchsize = batchsize
# Loop until all the ids are processed. We want to process as
# many as possible with each request. Unfortunately, errors can
# occur. Some id may be incorrect, or the server may be
# unresponsive. In addition, one broken id out of a list of id's
# can cause a non-specific error. Thus, the strategy I'm going to
# take is to start by downloading as many as I can. If the
# request fails, I'm going to halve the number of records I try to
# get. If there's only one more record, then I'll report it as
# broken and move on. If the request succeeds, I'll double the
# number of records until I get back up to the batchsize.
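# Worked example of that recovery strategy (illustrative numbers only):
# with batchsize=500, one failed request drops the next attempt to 250,
# then 125, and so on down to 1; a single failing id at size 1 is handed
# to broken_fn and skipped. After the second consecutive success, the size
# doubles (and keeps doubling on later successes) until it is back at batchsize.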
nsuccesses = 0
while ids:
if current_batchsize > len(ids):
current_batchsize = len(ids)
id_str = ','.join(ids[:current_batchsize])
# Make sure enough time has passed before I do another query.
if not nsuccesses:
limiter.wait(faildelay)
else:
limiter.wait()
try:
# Query PubMed. If one or more of the id's are broken,
# this will raise an IOError.
handle = NCBI.efetch(
db="pubmed", id=id_str, retmode='text', rettype='medlars')
# I'm going to check to make sure PubMed returned the same
# number of id's as I requested. If it didn't then I'm going
# to raise an exception. This could take a lot of memory if
# the batchsize is large.
results = handle.read()
num_ids = 0
for x in Medline.Iterator(File.StringHandle(results)):
num_ids = num_ids + 1
if num_ids != current_batchsize:
raise IOError
handle = File.StringHandle(results)
except IOError: # Query did not work.
if current_batchsize == 1:
# There was only 1 id in the query. Report it as
# broken and move on.
id = ids.pop(0)
if broken_fn is not None:
broken_fn(id)
else:
# I don't know which one is broken. Try again with
# fewer id's.
current_batchsize = current_batchsize / 2
nsuccesses = 0
continue
nsuccesses = nsuccesses + 1
# Iterate through the results and pass the records to the
# callback.
idnum = 0
for rec in Medline.Iterator(handle, parser):
callback_fn(ids[idnum], rec)
idnum = idnum + 1
ids = ids[current_batchsize:]
# If I'm not downloading the maximum number of articles,
# double the number for next time.
if nsuccesses >= 2 and current_batchsize < batchsize:
current_batchsize = current_batchsize * 2
if current_batchsize > batchsize:
current_batchsize = batchsize
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/PubMed.py
|
Python
|
apache-2.0
| 12,647
|
[
"Biopython"
] |
f6d86d4ff998a49dc4151b5ac1060ea75225898d2ef7dfce5344162945699058
|
#!/usr/bin/python
import sys #need sys to use system variables
import numpy as np # need numpy for arrays and the like
import Bio.SeqIO
import Bio.Seq
#Array of single letter amino acid codes for use in arrays.
IDS = np.zeros([22,1],dtype=object)
IDS[:,0]=['G', 'P', 'A', 'V', 'L', 'I', 'M', 'C', 'F', 'Y', 'W', 'H', 'K', 'R', 'Q', 'N', 'E', 'D', 'S', 'T', '-', 'other']
#make a list of data types for suggested mutation tuples wt= wild type (given) residue, res=residue number, sug= suggested mutation (consensus residue), freq= frequency of consensus residue (0-1), wtfreq= frequency of wild type (given) residue (0-1)
TYPES = [('wt', 'S1'), ('res', int), ('sug', 'S1'),('freq', float),('wtfreq', float)]
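#Example record (illustrative values only): np.array([('A', 42, 'G', 0.85, 0.05)], dtype=TYPES)
#reads as "position 42 is A in the query; 85% of homologs have G, only 5% have A".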
#Given an alignment as a list of Bio SeqRecord objects,
#Returns an array of sequences with each amino acid as an element.
def aaaray(sequences, filename=None):
entries = len(sequences) #how many sequences do we have
LENGTH = len(sequences[0].seq)
#make array from file with each AA as an element
AAA = np.zeros((entries,LENGTH), dtype=np.str)
for i in range(entries):
x=str(sequences[i].seq)
for position in range(len(x)):
AAA[i,position] = (x[position])
for index in range(len(AAA[0,:])) : #make sure everything is upper case
AAA[0,index] = AAA[0,index].upper()
return AAA
#Given an alignment of sequences as arrays with each amino acid as an element
#Returns array of amino acids with any gaps in the first sequence deleted from all sequences, i.e. all trimmed to the length of the first sequence. If filename and sequences (for sequence ids, given as list of SeqRecord objects) are both given, will write fasta formatted file with names from sequences.
def trimmer(AAA, sequenceids=None, filename=None):
print('\nTrimming gaps from alignment')
entries,LENGTH=(AAA.shape)
# delete all positions that correspond to gaps in the first sequence
for index in range(LENGTH):#at each position
if AAA[0,(LENGTH-1-index)] == "-":#LENGTH-1-index will start at the end, -1 to account for 0 based indexing, and find gaps
AAA = np.delete(AAA, (LENGTH-1-index), 1)#delete the gaps
# if filename and sequences given, format array of amino acids as list of fasta formatted sequences and save
if filename and sequenceids:
for index in range(len(AAA[:,0])): #alternate ">[GI number]" and sequences
sequenceids[index].seq = Bio.Seq.Seq(''.join(AAA[(index),:]))
Bio.SeqIO.write(sequenceids, filename, "fasta")
return AAA
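#Usage sketch (hypothetical file names): with sequences = list(Bio.SeqIO.parse("aligned.fasta", "fasta")),
#AAA = aaaray(sequences) followed by trimmer(AAA, sequenceids=sequences, filename="trimmed.fasta")
#returns the alignment trimmed to the first (query) sequence and also writes it out as fasta.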
#Returns an array of amino acid counts given an array of aligned sequences with each element a single position.
#If given a filename, counts array is exported as a csv.
def aacounts(AAA, filename=None):
COUNTS = np.zeros([22, len(AAA[0,:])],dtype=object) #makes array the length of the alignment with 22 rows (20 AAs + "-" + "other")
for index in range(len(AAA[0,:])): # for each position along the alignment, count occurrences of each AA
COUNTS[0,index]=AAA[:,index].tolist().count("G")
COUNTS[1,index]=AAA[:,index].tolist().count("P")
COUNTS[2,index]=AAA[:,index].tolist().count("A")
COUNTS[3,index]=AAA[:,index].tolist().count("V")
COUNTS[4,index]=AAA[:,index].tolist().count("L")
COUNTS[5,index]=AAA[:,index].tolist().count("I")
COUNTS[6,index]=AAA[:,index].tolist().count("M")
COUNTS[7,index]=AAA[:,index].tolist().count("C")
COUNTS[8,index]=AAA[:,index].tolist().count("F")
COUNTS[9,index]=AAA[:,index].tolist().count("Y")
COUNTS[10,index]=AAA[:,index].tolist().count("W")
COUNTS[11,index]=AAA[:,index].tolist().count("H")
COUNTS[12,index]=AAA[:,index].tolist().count("K")
COUNTS[13,index]=AAA[:,index].tolist().count("R")
COUNTS[14,index]=AAA[:,index].tolist().count("Q")
COUNTS[15,index]=AAA[:,index].tolist().count("N")
COUNTS[16,index]=AAA[:,index].tolist().count("E")
COUNTS[17,index]=AAA[:,index].tolist().count("D")
COUNTS[18,index]=AAA[:,index].tolist().count("S")
COUNTS[19,index]=AAA[:,index].tolist().count("T")
COUNTS[20,index]=AAA[:,index].tolist().count("-") #empty spaces
COUNTS[21,index]=(len(AAA[:,index]) - sum(COUNTS[:,index].tolist())) #other, not counted above
IDCOUNTS = np.hstack((IDS,COUNTS)) #make list with AA counts and names of AAs
if filename:
np.savetxt((filename),IDCOUNTS,delimiter=",",fmt="%s") #save file with AA names and counts
return COUNTS
#Returns a frequency array from an array of amino acid counts.
#Frequencies represented as a decimal.
#If given a filename, frequency array is exported as a csv.
def aafrequencies(COUNTS, filename=None):
print('\nCalculating amino acid frequencies')
FREQS = np.zeros_like(COUNTS) #make an array for calculating frequencies of each AA
FREQS = np.float64(FREQS) #it needs to be numbers
for index in range(len(FREQS[0,:])): # calculate the frequency of each AA as [occurrences]/[occurrences of all AAs], "-" and "other" not counted in total
FREQS[0,index]=np.float64(COUNTS[0,index])/sum(np.float64(COUNTS[:20,index]))
FREQS[1,index]=np.float64(COUNTS[1,index])/sum(np.float64(COUNTS[:20,index]))
FREQS[2,index]=np.float64(COUNTS[2,index])/sum(np.float64(COUNTS[:20,index]))
FREQS[3,index]=np.float64(COUNTS[3,index])/sum(np.float64(COUNTS[:20,index]))
FREQS[4,index]=np.float64(COUNTS[4,index])/sum(np.float64(COUNTS[:20,index]))
FREQS[5,index]=np.float64(COUNTS[5,index])/sum(np.float64(COUNTS[:20,index]))
FREQS[6,index]=np.float64(COUNTS[6,index])/sum(np.float64(COUNTS[:20,index]))
FREQS[7,index]=np.float64(COUNTS[7,index])/sum(np.float64(COUNTS[:20,index]))
FREQS[8,index]=np.float64(COUNTS[8,index])/sum(np.float64(COUNTS[:20,index]))
FREQS[9,index]=np.float64(COUNTS[9,index])/sum(np.float64(COUNTS[:20,index]))
FREQS[10,index]=np.float64(COUNTS[10,index])/sum(np.float64(COUNTS[:20,index]))
FREQS[11,index]=np.float64(COUNTS[11,index])/sum(np.float64(COUNTS[:20,index]))
FREQS[12,index]=np.float64(COUNTS[12,index])/sum(np.float64(COUNTS[:20,index]))
FREQS[13,index]=np.float64(COUNTS[13,index])/sum(np.float64(COUNTS[:20,index]))
FREQS[14,index]=np.float64(COUNTS[14,index])/sum(np.float64(COUNTS[:20,index]))
FREQS[15,index]=np.float64(COUNTS[15,index])/sum(np.float64(COUNTS[:20,index]))
FREQS[16,index]=np.float64(COUNTS[16,index])/sum(np.float64(COUNTS[:20,index]))
FREQS[17,index]=np.float64(COUNTS[17,index])/sum(np.float64(COUNTS[:20,index]))
FREQS[18,index]=np.float64(COUNTS[18,index])/sum(np.float64(COUNTS[:20,index]))
FREQS[19,index]=np.float64(COUNTS[19,index])/sum(np.float64(COUNTS[:20,index]))
FREQS[20,index]=np.float64(COUNTS[20,index])/sum(np.float64(COUNTS[:,index])) #frequency of gaps "-" as fraction of all seqs
FREQS[21,index]=np.float64(COUNTS[21,index])/sum(np.float64(COUNTS[:,index])) #frequency of "other" as fraction of all seqs
#IDS=aaletters()
IDFREQS = np.hstack((IDS,FREQS)) #make list with names and AA frequencies
if filename:
np.savetxt((filename),IDFREQS,delimiter=",",fmt="%s") #save file with AA names and frequencies
return FREQS
#Calculates the consensus sequence from given amino acid frequency array.
#Returns consensus sequence as an array of amino acid one letter codes.
#If given a filename, consensus sequence is saved in FASTA format to filename.
def consensus(FREQS, filename=None):
CONSENSUS_SEQ = np.zeros([1, len(FREQS[0,:])],dtype=object) #make an array to store consensus sequence
for index in range(len(FREQS[0,:])): #for each AA position
CONSENSUS_SEQ[0,index] = IDS[np.argmax(FREQS[:20,index]),0] #find the largest value, get the corresponding AA from IDS, and add it to CONSENSUS_SEQ
CONSENSUS=""
for index in range(len(CONSENSUS_SEQ[0,:])):
CONSENSUS=CONSENSUS+str(CONSENSUS_SEQ[0,index])
CONSENSUS=">consensus_sequence",CONSENSUS # add header for FASTA format
if filename:
np.savetxt((filename),CONSENSUS,delimiter="",fmt="%s") #save file with AA sequence of consensus sequence
return CONSENSUS_SEQ
#Returns a list of suggested amino acid mutations when given a query sequence,
#frequency array, and ratio. Will suggest mutating to the consensus when a query amino
#acid differs from the consensus (i.e. highest frequency) residue and the consensus
#frequency is at least ratio times the query residue's frequency.
#If given a filename, suggested mutations will be saved as a txt file. (A worked example follows the function below.)
def ratioconsensus(query, FREQS, ratio):
MUTATIONS_ARRAY=np.empty([0,])
MUTATIONS_ARRAY=np.array(MUTATIONS_ARRAY, dtype=TYPES)
aalist = IDS.flatten().tolist()
for index in range(len(FREQS[0,:])): #for each AA position
wtaa = query[index]
consensus = IDS[np.argmax(FREQS[:20,index]),0]
if wtaa != consensus: #check if the consensus residue is different from the query residue
wtfreq = float(FREQS[(aalist.index(wtaa)),index])
consensusfreq = float(FREQS[(aalist.index(consensus)),index])
if (ratio * wtfreq) < consensusfreq: #if the consensus frequency exceeds ratio times the query residue's frequency
print "Residue number " + str(int(index) + 1)
print str(int(100*consensusfreq)) + '% is at least ' + str(ratio) + ' times greater than ' + str(int(100*wtfreq)) + '%'
thissuggestion=np.array([(wtaa, (index + 1), consensus, consensusfreq, wtfreq)], dtype=TYPES)
MUTATIONS_ARRAY = np.append(MUTATIONS_ARRAY,thissuggestion, axis=0)#add new suggestion on to any existing "MUTATIONS_ARRAY"
return MUTATIONS_ARRAY
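#Worked example: if the query has S at a position where FREQS gives S a frequency
#of 0.08 and the consensus residue T a frequency of 0.55, then with ratio=5 the
#test (5 * 0.08) < 0.55 holds, so T is suggested and the record is stored as
#('S', position, 'T', 0.55, 0.08) in TYPES format.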
#Returns a list of suggested amino acid mutations when given a query sequence,
#frequency array, and cutoff for the consensus threshold. Will suggest mutating to the consensus when a query amino
#acid differs from the consensus residue and the consensus frequency exceeds the cutoff.
#If given a filename, suggested mutations will be saved as a txt file.
def cutoffconsensus(query, FREQS, cutoff):
MUTATIONS_ARRAY=np.empty([0,])
MUTATIONS_ARRAY=np.array(MUTATIONS_ARRAY, dtype=TYPES)
aalist = IDS.flatten().tolist()
for index in range(len(FREQS[0,:])): #for each AA position
wtaa = query[index]
consensus = IDS[np.argmax(FREQS[:20,index]),0]
if wtaa != consensus: #check if the consensus residue is different from the query residue
wtfreq = float(FREQS[(aalist.index(wtaa)),index])
consensusfreq = float(FREQS[(aalist.index(consensus)),index])
if float(cutoff) < consensusfreq: #if the consensus of a residue is greater than the threshold
print "Residue number " + str(int(index) + 1)
print str(int(100*max(FREQS[:20,index]))) + "% is greater than or equal to " + str(int(100*float(cutoff))) + "%"
thissuggestion=np.array([(wtaa, (index + 1), consensus, consensusfreq, wtfreq)], dtype=TYPES)
MUTATIONS_ARRAY = np.append(MUTATIONS_ARRAY,thissuggestion, axis=0)#add new suggestion on to any existing "MUTATIONS_ARRAY"
return MUTATIONS_ARRAY
#Takes array of suggested mutations in TYPES format, sorts by % conserved, removes duplicates.
#Returns the modified mutations array with TYPES data types, and a human readable suggested mutations list.
#If given a filename, will save the human readable suggested mutation list as a text file.
def formatmutations(MUTATIONS_ARRAY):
MUTATIONS_ARRAY = np.unique(MUTATIONS_ARRAY) #remove duplicate entries
MUTATIONS_ARRAY[::-1].sort(order = 'freq')
SUGGESTED_MUTATIONS=[]
SUGGESTED_MUTATIONS.append("These mutations may stabilize your protein since they differ from the consensus residue")
if not len(MUTATIONS_ARRAY[:,]):
SUGGESTED_MUTATIONS.append("No mutations found. Try reducing the ConsensusRatio or ConsensusThreshold in the config file. You could also try changing the BLAST parameters to adjust the number of sequences being returned (MaximumSequences and BlastEValue).")
else:
for index in range(len(MUTATIONS_ARRAY[:,])): #for each suggested mutation
SUGGESTED_MUTATIONS.append("Change " + MUTATIONS_ARRAY[index,]['wt'] + " " + str(MUTATIONS_ARRAY[index,]['res']) + " to " + MUTATIONS_ARRAY[index,]['sug'] + " (" + str(int(100*MUTATIONS_ARRAY[index,]['freq'])) + "% of similar proteins have " + MUTATIONS_ARRAY[index,]['sug'] +", only " + str(int(100*MUTATIONS_ARRAY[index,]['wtfreq'])) + "% have "+ MUTATIONS_ARRAY[index,]['wt'] + ")" ) #add new suggestion on to any existing "SUGGESTED_MUTATIONS"
return MUTATIONS_ARRAY, SUGGESTED_MUTATIONS
#define mutation list based on settings attributes of RATIO and/or CONSENSUSTHRESHOLD, using the trimmed alignment of sequences to identify the query sequence (first sequence in alignment) and an array of amino acid frequencies matching amino acid positions.
def mutations(settings, alignment, freqs):
print('\nIdentifying suggested mutations')
if settings.RATIO:
ratiomutations = ratioconsensus(alignment[0], freqs, settings.RATIO)
if settings.CONSENSUSTHRESHOLD:
thresholdmutations = cutoffconsensus(alignment[0], freqs, settings.CONSENSUSTHRESHOLD)
mutations = thresholdmutations
if settings.RATIO:
mutations = np.append(mutations, ratiomutations)
else:
mutations = ratiomutations
mutations, output = formatmutations(mutations)
return mutations, output
#Save output suggestions file with any warnings that have been added
def saveoutput(settings, warnings, output, filename):
file = open(filename,'wb')
file.write(''.join(warnings) + '\n')# 's16\n')
np.savetxt(file, output, delimiter=",", fmt='%s')
|
bryanjjones/pyConsensusFinder
|
modules/analyze.py
|
Python
|
gpl-2.0
| 13,814
|
[
"BLAST"
] |
dc1f750b688ed61dc2b2ea094026530915c1d98e0ca556533d6c87ec86eae024
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
##
## (C) 2007, 2008, 2013 Muthiah Annamalai,
## Licensed under GPL Version 3
##
## Module has elements of PARSE-TREE AST
## in the classes Expr, ExprCall, ExprList, Stmt, ReturnStmt,
## BreakStmt, ContinueStmt, ElseStmt, IfStmt, WhileStmt,
## ForStmt, AssignStmt, PrintStmt, EvalStmt, ArgList,
## ValueList, Function, StmtList, Identifier, String, Number,
## Array, Dict, NoOp
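##
## Rough illustration of how these pieces fit together (hedged: Token
## construction depends on scanner.py, so `plus_tok` below stands for a
## hypothetical Token whose .kind is Token.PLUS, and `env` for an
## Environment set up by the interpreter):
##
##   one, two = Number(1), Number(2)
##   expr = Expr(one, plus_tok, two, 0, 0)   # AST for "1 + 2"
##   expr.evaluate(env)                      # => 3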
import keyword
import copy
import math
import sys
PYTHON3 = (sys.version[0] == '3')
if PYTHON3:
unicode = str
## scanner for exprs language
from .scanner import Token, Lexeme, Lex
## runtime elements
from .runtime import Environment, BuiltinFunction, \
BlindBuiltins
## exceptions
from .errors import RuntimeException, ParseException
##
## ATOMS
##
class Identifier(object):
def __init__(self, id, l, c, dbg=False):
object.__init__(self)
self.id = id
self.debug = dbg
self.line = l
self.col = c
def __unicode__(self):
return u"" + self.id
def dbg_msg(self, msg):
""" handy to print debug messages """
if (self.debug):
print(u"## " + msg)
return
def __repr__(self):
return u"\n\t [Identifier [" + unicode(self.id) + u"]]"
def evaluate(self, env):
if (env.has_id(self.id)):
val = env.get_id(self.id)
if (hasattr(val, 'evaluate')):
val = val.evaluate(env)
elif (val.__class__ == str):
#val = val
pass
else:
#val = val
pass
self.dbg_msg(unicode(self) + " = val [" + unicode(val) + "]")
return val
note = ''
if self.id in keyword.kwlist:
note = 'Did you possibly confuse the Python English keyword %s for an Ezhil keyword?' % self.id
note = "Cannot Find Identifier %s at Line %d, col %d." % (
self.id, self.line, self.col) + ' ' + note
raise RuntimeException(note)
def visit(self, walker):
""" visitor - do something with a identifier """
walker.visit_identifier(self)
return
class String(object):
def __init__(self, s, l=0, c=-1, dbg=False):
object.__init__(self)
self.string = s
self.debug = dbg
self.line = l
self.col = c
def __repr__(self):
return u" [String [" + unicode(self.string) + u"]] "
def __str__(self):
return self.string
def __unicode__(self):
return (self.string)
def evaluate(self, env):
return self.string
def visit(self, walker):
walker.visit_string(self)
return
class Number(object):
def __init__(self, n, l=0, c=-1, dbg=False):
object.__init__(self)
self.num = n
self.debug = dbg
self.line = l
self.col = c
def __int__(self):
return int(self.num)
def __float__(self):
return float(self.num)
def __repr__(self):
return u" [Number [" + unicode(self.num) + u"]]"
def __str__(self):
return self.num.__str__()
def __unicode__(self):
return unicode(self.num)
def evaluate(self, env):
return self.num
def visit(self, walker):
walker.visit_number(self)
return
class Boolean(Number):
def __init__(self, n, l=0, c=-1, dbg=False):
Number.__init__(self, n, l, c, dbg)
def __unicode__(self):
return self.__str__()
def __str__(self):
if (self.num):
return u"மெய் (T)"
return u"பொய் (F)"
class Dict(dict):
def __init__(self):
dict.__init__(self)
def base_evaluate(self, env):
rval = {}
for x, y in self.items():
rval.update({x.evaluate(env): y.evaluate(env)})
return rval
def __unicode__(self):
fmt = u"{"
for k, v in self.items():
fmt = fmt + unicode(k) + u" : " + unicode(v) + u",\n"
fmt = fmt + u"}"
return fmt
def visit(self, walker):
return walker.visit_dict(self)
def evaluate(self, env):
""" how do you evaluate dictionaries? just return the favor """
return self.base_evaluate(env)
class Array(list):
def __init__(self):
pass
def base_evaluate(self, env):
rval = []
for v in self:
rval.append(v.evaluate(env))
return rval
def __unicode__(self):
return u", ".join([unicode(item) for item in self])
def visit(self, walker):
return walker.visit_array(self)
def evaluate(self, env):
return self.base_evaluate(env)
class ExprCall(object):
"""handle function call statement etc."""
def __init__(self, func_id, arglist, l, c, dbg=False):
object.__init__(self)
self.func_id = func_id
self.fname = func_id.id
self.arglist = arglist
self.debug = dbg
self.line = l
self.col = c
self.parent = None
def dbg_msg(self, msg):
if (self.debug):
print("## ", msg)
return
def __unicode__(self):
return u"Line %d, Column %d : Function call [%s] with [%d] args" % (
self.line, self.col, unicode(self.fname), len(self.arglist))
def __repr__(self):
return u"\n\t [ExprCall[ "+unicode(self.fname)+u" (" \
+unicode(self.arglist)+u")]]"
def evaluate(self, env):
#self.dbg_msg( unicode(env) )
if (self.debug):
print(u"\n".join(env.builtin_map.keys()))
print("*" * 60)
print(u"\t".join(env.function_map.keys()))
print(self.fname, " ==?== ", env.builtin_map.get(self.fname, None))
if (env.has_function(self.fname)):
self.dbg_msg("calling function " + self.fname +
" with %d args" % len(self.arglist))
fval = env.get_function(self.fname)
#("Check arguments accepted by function against supplied by call site")
if hasattr(fval, 'arglist'):
# only check the argument matching for custom functions
# 'BlindBuiltins' or 'BlindFunctions' do not get argument checked.
expected_args = len(fval.arglist)
actual_args = len(self.arglist)
if expected_args != actual_args:
raise RuntimeException(
u"Function '%s' expects %d arguments but received only %d at site:\n\t %s\n"
% (self.fname, expected_args, actual_args,
unicode(self)))
## use applicative order evaluation.
eval_arglist = [i.evaluate(env) for i in self.arglist.get_list()]
env.set_args(eval_arglist)
try:
rval = fval.evaluate(env)
except Exception as e:
raise RuntimeException(str(e))
self.dbg_msg(u"function retval =" + unicode(rval) +
unicode(type(rval)))
else:
raise RuntimeException(u"undefined function: %s near ( %d, %d )" %
(self.fname, self.line, self.col))
return rval
def visit(self, walker):
walker.visit_expr_call(self)
return
class ExprList(object):
def __init__(self, exprs, l, c, dbg=False):
object.__init__(self)
self.exprs = exprs
self.debug = dbg
self.line = l
self.col = c
self.parent = None
def __repr__(self):
return u"\n\t [ExprList[ " + u", ".join(map(unicode,
self.exprs)) + u"]]"
def evaluate(self, env):
"""evaluate a, b, c ... z to a string w/o commas"""
z = []
for exp_itr in self.exprs:
z.append(exp_itr.evaluate(env))
return u" ".join(map(unicode, z))
def visit(self, walker):
walker.visit_expr_list(self)
return
class Stmt(object):
def __init__(self, l=0, c=0, dbg=False):
""" implements an empty statement"""
object.__init__(self)
self.line = l
self.col = c
self.class_name = u"Stmt"
self.debug = dbg
self.parent = None
def __unicode__(self):
self.dbg_msg(u" ".join([u"stmt => ",
unicode(self.__class__)
])) #we're headed toward assertion
return self.__repr__()
def __repr__(self):
print("//#//" * 50)
print(u"stmt => ",
unicode(self.__class__)) #we're headed toward assertion
self.dbg_msg(u"stmt => " + unicode(self.__class__))
raise Exception(
u"FATAL : Class %s did not implement the __repr__ method, nor inherits a concrete implementation."
% unicode(self.__class__))
def dbg_msg(self, msg):
""" handy to print debug messages """
if (self.debug):
print(msg)
return
def get_pos(self):
return u"line %d, col %d" % (self.line, self.col)
def evaluate(self, env):
""" empty statement """
return None
def is_true_value(self, val):
""" Decide if the val is agreeable to True.
Right now keep it simple however."""
rval = False
self.dbg_msg(u"is_true_value? " + unicode(val.__class__))
try:
if (hasattr(val, 'evaluate')):
fval = val.evaluate(None)
elif (isinstance(val, float) or isinstance(val, int)):
fval = val
else:
raise Exception(u"Unknown case, cannot identify truth @ " +
self.get_pos() + u" for value " + unicode(val))
if (fval > 0.0):
rval = True
## all other cases later.
except Exception as pyEx:
""" objects where is_true_value() is not supported """
print(pyEx)
raise RuntimeException(pyEx)
self.dbg_msg(u"Is True Value? " + unicode(rval) +
unicode(val.__class__))
return rval
def visit(self, walker):
walker.visit_stmt(self)
return
class DeclarationStmt(Stmt):
""" hold function declaration statements; have visit option,
but no evaluation options. """
def __init__(self, fcn):
if isinstance(fcn, Function):
Stmt.__init__(self, fcn.line, fcn.col, fcn.debug)
self.class_name = u"Declaration_Statement"
self.fcn = fcn #FunctionStmt object
else:
raise Exception(
u"declaration statement can only hold FunctionStmt objects")
def visit(self, walker):
""" delegate visitor to holding function """
walker.visit_function(self.fcn)
return
def __repr__(self):
return self.fcn.__repr__()
class ImportStmt(Stmt):
""" hold function declaration statements; have visit option,
but no evaluation options. """
def __init__(self, line, col, debug, fname):
Stmt.__init__(self, line, col, debug)
self.class_name = u"Import_Statement"
self.filename = fname
def evaluate(self, env):
# make a function call to ezhil_execute
#self.dbg_msg(" add call : execute(\"begin\")")
[l, c] = self.line, self.col
fname = ValueList([self.filename], l, c, self.debug)
import_via_execute = ExprCall(Identifier("execute", l, c), fname, l, c,
self.debug)
import_via_execute.evaluate(env)
return
def visit(self, walker):
""" delegate visitor to holding function """
walker.visit_import(self)
return
def __repr__(self):
return u"ImportStmt @ %s" % unicode(self.filename)
class UnaryExpr(Stmt):
def __init__(self, t, op, l, c, dbg=False):
Stmt.__init__(self, l, c, dbg)
self.term = t
self.unaryop = op
def __repr__(self):
return self.__unicode__()
def __unicode__(self):
return u"[UnaryExpr[" + unicode(self.unaryop) + "," + unicode(
self.term) + "]]"
def visit(self, walker):
""" delegate visitor to transformer/walker"""
walker.visit_unaryexpr(self)
return
def do_unaryop(self, tval):
if (self.unaryop.kind == Token.LOGICAL_NOT):
if not tval:
return Boolean(True)
else:
return Boolean(False)
elif (self.unaryop.kind == Token.BITWISE_COMPLEMENT):
# do the unary bitwise complement
return ~tval
else:
raise RuntimeException(" unknown Unary Operation - " +
unicode(self.unaryop) + " not supported")
return
def evaluate(self, env):
term = self.term.evaluate(env)
self.dbg_msg(u"unaryop=> " + unicode(term) + u" " +
unicode(term.__class__))
if self.unaryop.kind in Token.UNARYOP:
tval = Expr.normalize_values(self, term, env)
if (self.debug): print(tval, type(tval))
term = self.do_unaryop(tval)
else:
raise RuntimeException(" unknown Unary Operation - " +
unicode(self.unaryop) + " not supported")
self.dbg_msg(u"unaryop=> " + u"term = " + unicode(term) + u" " +
unicode(term.__class__))
return term
class Expr(Stmt):
One = (1)
Zero = (0)
def __init__(self, t, op, next_expr, l, c, dbg=False):
Stmt.__init__(self, l, c, dbg)
self.term = t
self.binop = op
self.next_expr = next_expr
def __len__(self):
""" expr is unit length always, as opposed to ExprList which is a n-len thing """
return 1
def __repr__(self):
return u"\n\t [Expr[ "+ unicode(self.term)+ u"] " + \
Token.token_types[self.binop.kind] + \
u"\t NextExpr [" + unicode(self.next_expr) + u"]]"
def do_binop(self, opr1, opr2, binop):
self.dbg_msg(u" Doing binary operator " + Token.token_types[binop])
if binop == Token.PLUS:
self.dbg_msg("addition")
val = (opr1 + opr2)
elif binop == Token.MINUS:
self.dbg_msg("subtraction")
val = (opr1 - opr2)
elif binop == Token.PROD:
self.dbg_msg("multiplication")
val = (opr1 * opr2)
elif binop == Token.DIV:
self.dbg_msg("division")
val = (opr1 / opr2)
elif binop == Token.MOD:
self.dbg_msg("modulo")
val = (math.fmod(opr1, opr2))
elif binop == Token.EXP:
self.dbg_msg("exponent")
val = (math.pow(opr1, opr2))
elif binop == Token.GT:
self.dbg_msg("GT > ")
val = self.Zero
if (opr1 > opr2):
val = self.One
elif binop == Token.GTEQ:
self.dbg_msg("GTEQ >= ")
val = self.Zero
if (opr1 >= opr2):
val = self.One
elif binop == Token.LT:
self.dbg_msg("LT < ")
val = self.Zero
if (opr1 < opr2):
val = self.One
elif binop == Token.LTEQ:
self.dbg_msg("LT <= ")
val = self.Zero
if (opr1 <= opr2):
val = self.One
elif binop == Token.NEQ:
self.dbg_msg("NEQ != ")
val = self.Zero
if (opr1 != opr2):
val = self.One
elif binop == Token.EQUALITY:
## FIXME: do many equality tests than just value
## based tests.
self.dbg_msg("EQUALITY == ")
val = self.Zero
if (opr1 == opr2):
val = self.One
elif binop == Token.LOGICAL_AND:
self.dbg_msg("LOGICAL AND")
val = self.Zero
if (opr1 and opr2):
val = self.One
elif binop == Token.LOGICAL_OR:
self.dbg_msg("LOGICAL OR")
val = self.Zero
if (opr1 or opr2):
val = self.One
elif binop == Token.BITWISE_AND:
val = opr1 & opr2
elif binop == Token.BITWISE_OR:
val = opr1 | opr2
elif binop == Token.BITWISE_LSHIFT:
val = opr1 << opr2
elif binop == Token.BITWISE_RSHIFT:
val = opr1 >> opr2
else:
raise SyntaxError("Binary operator syntax not OK @ " +
self.get_pos())
self.dbg_msg("value = " + unicode(val))
return val
@staticmethod
def normalize_values(obj, term, env):
if (hasattr(term, 'evaluate')):
if (isinstance(
term,
Number)): #work for both Number, and derived Boolean class
tval = term.num
elif (isinstance(term, String)):
tval = term.string
else:
raise RuntimeException(
" cannot normalize token; unknown clause," +
unicode(term) + ", to evaluate @ " + obj.get_pos())
else:
tval = term #float cast not required.
return tval
def evaluate(self, env):
term = self.term.evaluate(env)
self.dbg_msg(u" " + unicode(term) + u" " + unicode(term.__class__))
if self.binop.kind in Token.BINOP:
tnext = self.next_expr.evaluate(env)
tval = Expr.normalize_values(self, term, env)
tval2 = Expr.normalize_values(self, tnext, env)
self.dbg_msg(u" " + unicode(tval) + " " + unicode(tval2) + u" " +
unicode(tval2.__class__))
try:
term = self.do_binop(tval, tval2, self.binop.kind)
except Exception as binOp_Except:
raise RuntimeException(u"binary operation " +
unicode(self.term) +
unicode(self.binop) +
unicode(self.next_expr) +
u" failed with exception " +
unicode(binOp_Except))
else:
raise RuntimeException(
u" unknown Binary Operation - Binary operation " +
unicode(self.binop) + u" not supported")
self.dbg_msg(u"term = " + unicode(term) + u" " +
unicode(term.__class__))
return term
def visit(self, walker):
if self.binop.kind in Token.BINOP:
walker.visit_binary_expr(self)
else:
walker.visit_expr(self)
return
class ReturnStmt(Stmt):
""" return expr """
def __init__(self, rval, l, c, dbg=False):
Stmt.__init__(self, l, c, dbg)
self.rvalue = rval
def __repr__(self):
return u"\n\t [ReturnStmt[ " + unicode(self.rvalue) + u"]]\n"
def evaluate(self, env):
rhs = self.rvalue.evaluate(env)
self.dbg_msg(u"return statement evaluated to " + unicode(rhs))
env.set_retval(rhs)
return rhs
def visit(self, walker):
walker.visit_return_stmt(self)
return
class BreakStmt(Stmt):
""" return expr """
def __init__(self, l, c, dbg=False):
Stmt.__init__(self, l, c, dbg)
def __repr__(self):
return u"\n\t [BreakStmt]\n"
def evaluate(self, env):
self.dbg_msg(u"break statement")
env.set_break()
return None
def visit(self, walker):
walker.visit_break_stmt(self)
return
class ContinueStmt(Stmt):
""" return expr """
def __init__(self, l, c, dbg=False):
Stmt.__init__(self, l, c, dbg)
def __repr__(self):
return u"\n\t [ContinueStmt]\n"
def evaluate(self, env):
self.dbg_msg(u"continue statement")
env.set_continue()
return None
def visit(self, walker):
walker.visit_continue_stmt(self)
return
class ElseStmt(Stmt):
def __init__(self, stmt, l, c, dbg):
Stmt.__init__(self, l, c, dbg)
self.stmt = stmt
self.class_name = u"ElseStmt"
def __repr__(self):
return u"\t [ElseStmt [" + unicode(self.stmt) + u"]]\n"
def evaluate(self, env):
return self.stmt.evaluate(env)
def visit(self, walker):
walker.visit_else_stmt(self)
return
class IfStmt(Stmt):
""" if ( op ) stmtlist {else | elseif ( op )| stmt } end"""
def __init__(self, expr, body, next_stmt, l, c, dbg=False):
Stmt.__init__(self, l, c, dbg)
self.expr = expr
self.body = body
self.class_name = "IfStmt"
## this is either another IfStmt or an Else Stmt.
if not next_stmt:
self.next_stmt = []
else:
self.next_stmt = next_stmt
def __repr__(self):
rval = u"\t\n [IfStmt[[" + unicode(self.expr) + u"]] " + unicode(
self.body)
if (self.next_stmt):
try:
self.dbg_msg(u" ".join([
unicode(self.next_stmt),
unicode(self.next_stmt.__class__), u"***"
]))
rval = rval + u"<<Nxt>>" + unicode(self.next_stmt)
except UnicodeEncodeError as uc_err:
print(unicode(uc_err))
raise uc_err
pass
rval = rval + u"]"
return rval
def set_body(self, body):
self.body = body
def append_stmt(self, stmt):
self.next_stmt.append(stmt)
return
def set_next_stmt(self, stmt):
self.next_stmt = stmt
def evaluate(self, env):
self.dbg_msg(u"Eval-if-stmt" + unicode(self.expr))
rval = None
self.dbg_msg(u"eval-if stmt")
if (self.is_true_value(self.expr.evaluate(env))):
self.dbg_msg(u"ifstmt: true condition")
rval = self.body.evaluate(env)
return rval
self.dbg_msg(u"ifstmt: false condition")
for elseif_or_else in self.next_stmt:
if (isinstance(elseif_or_else, IfStmt)):
if (self.is_true_value(elseif_or_else.expr.evaluate(env))):
rval = elseif_or_else.body.evaluate(env)
return rval
else:
# elseif branch was found to be false. Continue
continue
elif (isinstance(elseif_or_else, ElseStmt)):
rval = elseif_or_else.evaluate(env)
return rval
else:
raise RuntimeException(
"IF-ELSEIF-ELSE was parsed wrongly, unknown construct found"
)
# its perfectly legal to not have an else statement
return rval
def visit(self, walker):
walker.visit_if_elseif_stmt(self)
return
class WhileStmt(Stmt):
""" while ( exp ) stmtlist end"""
def __init__(self, expr, body, l, c, dbg=False):
Stmt.__init__(self, l, c, dbg)
self.expr = expr
self.body = body
self.class_name = "WhileStmt"
def __repr__(self):
rval = u"\t\n [%s[[" % unicode('WhileStmt') + unicode(
self.expr) + u"]] " + unicode(self.body) + u"]"
return rval
def evaluate(self, env):
rval = None
self.dbg_msg("eval-While stmt")
while (self.is_true_value(self.expr.evaluate(env))
and not env.get_break_return()):
## every time through the loop, clear any continues
env.clear_continue()
self.dbg_msg("ifstmt: true condition")
rval = self.body.evaluate(env)
## clear break if-any
env.clear_break()
self.dbg_msg("exiting While-stmt with rval=" + unicode(rval))
return rval
def visit(self, walker):
walker.visit_while_stmt(self)
return
class DoWhileStmt(WhileStmt):
""" do stmtlist while ( exp )"""
def __init__(self, expr, body, l, c, dbg=False):
WhileStmt.__init__(self, expr, body, l, c, dbg)
def __repr__(self):
return u"[DoWhileStmt[expr=" + unicode(
self.expr) + u",body=" + unicode(self.body) + "]]"
def evaluate(self, env):
""" first run is on the house, but then we keep count. Dog bites American style """
rval = None
first_time = True
self.dbg_msg("eval-Do-While stmt")
while (first_time or self.is_true_value(self.expr.evaluate(env))
and not env.get_break_return()):
## every time through the loop, clear any continues
env.clear_continue()
self.dbg_msg("ifstmt: true condition")
rval = self.body.evaluate(env)
first_time = False
## clear break if-any
env.clear_break()
self.dbg_msg("exiting Do-While-stmt with rval=" + unicode(rval))
return rval
class ForStmt(Stmt):
""" For ( exp1 ; exp2 ; exp3 ) stmtlist end"""
def __init__(self, expr1, expr2, expr3, body, l, c, dbg=False):
Stmt.__init__(self, l, c, dbg)
self.expr_init = expr1
self.expr_cond = expr2
self.expr_update = expr3
self.body = body
self.class_name = "ForStmt"
def __repr__(self):
rval = u"\t\n [ForStmt[[ ("+unicode(self.expr_init)+"; "+\
unicode(self.expr_cond) + "; " +\
unicode(self.expr_update)+") ]] " + unicode(self.body) +"]"
return rval
def evaluate(self, env):
self.dbg_msg("Eval-For-stmt: ")
rval = None
self.dbg_msg("eval-For-stmt")
rval = self.expr_init.evaluate(env)
while (self.is_true_value(self.expr_cond.evaluate(env))
and not env.get_break_return()):
## every time through the loop, clear any continues
env.clear_continue()
rval = self.body.evaluate(env)
# update happens after body evaluates - this is C-style
self.expr_update.evaluate(env)
## clear break if-any
env.clear_break()
self.dbg_msg(u"exiting For-stmt with rval=" + unicode(rval))
return rval
def visit(self, walker):
walker.visit_for_stmt(self)
return
class AssignStmt(Stmt):
""" lhs = rhs """
def __init__(self, lvalue, op, rvalue, l, c, dbg=False):
Stmt.__init__(self, l, c, dbg)
self.lvalue = lvalue
self.assignop = op
self.rvalue = rvalue
self.class_name = "AssignStmt"
def __repr__(self):
return u"\n\t [AssignStmt[ "+ unicode(self.lvalue)+u"] " + \
Token.token_types[self.assignop.kind] + \
u"\t Expr [" + unicode(self.rvalue) + u"]]"
def do_assignop(self, lvalue, rhs, kind, env):
rval = None
if (kind == Token.EQUALS):
env.set_id(lvalue.id, rhs)
rval = rhs
else:
raise Exception("Unknown assign operator @ " + self.get_pos())
return rval
def evaluate(self, env):
if self.assignop.kind in Token.ASSIGNOP:
self.dbg_msg(u"assignop: rhs = " + unicode(self.rvalue))
rhs = self.rvalue.evaluate(env)
self.do_assignop(self.lvalue, rhs, self.assignop.kind, env)
self.dbg_msg(u"assignop lvalue ["+unicode(self.lvalue) \
+u"] = ["+unicode(rhs) + \
u"( saved as ) " + \
unicode(self.lvalue))
#unicode(env.get_id(self.lvalue.id)) )
return rhs
raise Exception("Unknown assign operator @ " + self.get_pos())
def visit(self, walker):
walker.visit_assign_stmt(self)
return
class PrintStmt(Stmt):
""" print EXPR """
def __init__(self, exprlst, l, c, dbg):
Stmt.__init__(self, l, c, dbg)
self.exprlst = exprlst
def __repr__(self):
return u"\n\t [PrintStmt[ " + unicode(self.exprlst) + u"]]"
def do_printop(self, env):
val = self.exprlst.evaluate(env)
print(val) #this prints to output
return val
def evaluate(self, env):
self.do_printop(env)
return None
def visit(self, walker):
walker.visit_print_stmt(self)
return
class EvalStmt(Stmt):
""" EXPR """
def __init__(self, expr, l, c, dbg=False):
Stmt.__init__(self, l, c, dbg)
self.expr = expr
def __repr__(self):
return u"\n\t [EvalStmt[ " + unicode(self.expr) + u"/" + unicode(
(self.expr.__class__)) + u"]]"
def evaluate(self, env):
if (self.debug):
print(u"evaluating EvalStmt : %s" % str(self.expr))
print(self.expr.__class__)
return self.expr.evaluate(env)
def visit(self, walker):
walker.visit_eval_stmt(self)
return
## PLACEHOLDER
class ArgList:
""" defines argument list in a function definition """
def __init__(self, argvals, l, c, dbg=False):
""" to get self.args, use get_list() method """
self.args = argvals
self.line = l
self.col = c
self.parent = None
def __len__(self):
return len(self.args)
def get_list(self):
return self.args
def __repr__(self):
return "\n\t [ArgList[" + ",".join(map(unicode, self.args)) + "]]"
def visit(self, walker):
walker.visit_arg_list(self)
return
#TODO : derive from 'list' and 'Stmt' class and update code
class ValueList:
""" defines value list in a function definition """
def __init__(self, argvals, l, c, dbg=False):
""" to get self.args, use get_list() method """
self.args = argvals
self.debug = dbg
self.line = l
self.col = c
self.parent = None
def append(self, obj):
self.args.append(obj)
return
def __len__(self):
return len(self.args)
def __getitem__(self, idx):
""" index into the object like a list : @idx - caveat emptor """
return self.args[idx]
def get_list(self):
return self.args
#def evaluate(self):
# if ( isinstance(self.args,list) and len(self.args) == 1):
# return self.args[0]
# return self.args
def __repr__(self):
return "\n\t [ValueList[" + ",".join(map(unicode, self.args)) + "]]"
def visit(self, walker):
walker.visit_value_list(self)
return
class StmtList(Stmt):
def __init__(self, stmt=[], dbg=False, istoplevel=False):
Stmt.__init__(self, 0, 0, dbg)
self.List = copy.copy(stmt)
self.toplevel = istoplevel
def __len__(self):
return len(self.List)
def append(self, stmt_x):
self.dbg_msg(u"==>" + unicode(stmt_x.__class__))
self.dbg_msg(u"adding new statement " + unicode(stmt_x.__class__))
self.List.append(stmt_x)
return
def __repr__(self):
rval = u"\t [StmtList[ " + u"\n ".join(map(unicode,
self.List)) + u"]]\n"
return rval
def evaluate(self, env):
rval = None
for stmt in self.List:
self.dbg_msg(u"STMTLIST => STMT")
if (env.break_return_continue()):
break
self.dbg_msg(stmt.__class__)
rval = stmt.evaluate(env)
return rval
def visit(self, walker):
""" visit stmt list method """
if self.toplevel:
walker.visit_program_or_script(self)
else:
walker.visit_stmt_list(self)
return
class Function(Stmt):
""" function definition itself """
def __init__(self, fname, arglist, body, l, c, dbg=False):
Stmt.__init__(self, l, c, dbg)
self.name = fname
self.arglist = arglist
self.body = body
self.dbg_msg(u"function " + fname + u" was defined")
def dbg_msg(self, msg):
if (self.debug):
print(u"## ", msg)
return
def __repr__(self):
return u"\n\t [Function[ "+ unicode(self.name)+u"( " + \
unicode(self.arglist) + u")]\n" + \
u"\t Body [" + unicode(self.body) + u"]]\n"
def evaluate(self, env):
## push stuff into the call-stack
env.call_function(u"%s" % (self.name), u" at %s" % (self.get_pos()))
## check arguments match, otherwise raise error
args = env.get_args() #.get_list()
fargs = self.arglist.get_list()
if (len(args) != len(fargs)):
raise Exception("Call Arguments donot match with" + \
"function definition @ "+self.get_pos())
## create local variables on the stack in order of definitions
lut = {}
for idx in range(0, len(fargs)):
varname = fargs[idx]
value = args[idx]
lut[varname] = value
env.set_local(lut)
## invoke the function
rval = self.body.evaluate(env)
## get the value from the stack.
rval = env.get_retval()
env.clear_call()
## pop stuff into the call-stack
env.return_function(self.name)
return rval
def visit(self, walker):
walker.visit_function(self)
return
|
arcturusannamalai/Ezhil-Lang
|
ezhil/ast.py
|
Python
|
gpl-3.0
| 33,386
|
[
"VisIt"
] |
e9789f5f0f00af0b088a839a4169103ff60cdc15bbaa3adceaa4f71757c790da
|
from __future__ import unicode_literals
from django.db import models
from django.conf import settings
import pprint
from Bio.Phylo.TreeConstruction import _DistanceMatrix, DistanceTreeConstructor
from Bio import Phylo
import StringIO
import json
STATUS = {'PENDING':1,'RUNNING':2,'ERROR':3,'COMPLETE':4}
STATUS_CHOICES = [
(STATUS['PENDING'], 'Pending'),
(STATUS['RUNNING'], 'Running'),
(STATUS['ERROR'], 'Error'),
(STATUS['COMPLETE'], 'Complete'),
]
VIRULENCE_FACTORS = {
'VFDB': 'Virulence factors',
'ARDB': 'Resistance genes',
'PAG': 'Pathogen-associated genes'
}
VIRULENCE_FACTOR_CATEGORIES = {
'VFDB': 'VFDB',
'Victors': 'VFDB',
'PATRIC_VF': 'VFDB',
'ARDB': 'ARDB',
'CARD': 'ARDB',
'BLAST': 'BLAST',
'RGI': 'RGI',
'PAG': 'PAG'
}
MODULES = ['Prepare', 'Distance', 'Sigi', 'Dimob', 'Islandpick', 'Virulence', 'Summary']
class CustomGenome(models.Model):
cid = models.AutoField(primary_key=True)
name = models.CharField(max_length=60)
cds_num = models.IntegerField(default=0)
rep_size = models.IntegerField(default=0)
filename = models.CharField(max_length=60)
formats = models.CharField(max_length=50)
contigs = models.IntegerField(default=1)
genome_status = models.IntegerField()
submit_date = models.DateTimeField('date submitted')
class Meta:
db_table = "CustomGenome"
class NameCache(models.Model):
cid = models.CharField(max_length=15)
name = models.CharField(max_length=60)
cds_num = models.IntegerField(default=0)
rep_size = models.IntegerField(default=0)
isvalid = models.IntegerField(default=1)
class Meta:
db_table = "NameCache"
class Analysis(models.Model):
CUSTOM = 1
MICROBEDB = 2
ATYPE_CHOICES = (
(CUSTOM, 'Custom'),
(MICROBEDB, 'MicrobeDB'),
)
aid = models.AutoField(primary_key=True)
atype = models.IntegerField(choices=ATYPE_CHOICES,
default=CUSTOM)
ext_id = models.CharField(max_length=15)
owner_id = models.IntegerField(default=0)
token = models.CharField(max_length=22, blank=True)
default_analysis = models.BooleanField(default=True)
status = models.IntegerField(choices=STATUS_CHOICES,
default=STATUS['PENDING'])
workdir = models.CharField(max_length=50)
microbedb_ver = models.IntegerField(default=0)
start_date = models.DateTimeField('date started')
complete_date = models.DateTimeField('date completed')
# Specialty function to find an analysis with the same
# Islandpick settings (comparison genomes, min_gi_size)
@classmethod
def find_islandpick(cls, ext_id, genomes, min_gi_size):
min_gi_size = int(min_gi_size)
if settings.DEBUG:
print "Testing for existing islandpick using, ext_id {}, using: ".format(ext_id)
print "Looking for min_gi_size: {} and genomes {}".format(min_gi_size, genomes)
analysis = Analysis.objects.filter(ext_id = ext_id)
for a in analysis:
a_parameters_json = None
for task in a.tasks.all():
if task.prediction_method == 'Islandpick':
a_parameters_json = task.parameters
break
# We found an Islandpick in that analysis
if a_parameters_json:
if settings.DEBUG:
print "Checking Islandpick in analysis {}".format(a.aid)
a_parameters = json.loads(a_parameters_json)
if settings.DEBUG:
print "Found parameters:"
pprint.pprint(a_parameters)
# First check we have the right fields...
if 'comparison_genomes' not in a_parameters or 'MIN_GI_SIZE' not in a_parameters:
if settings.DEBUG:
print "Either comparison_genomes or min_gi_size aren't in the db for analysis {}, skipping".format(a.aid)
continue
# Next check the comparison genomes
if sorted(a_parameters['comparison_genomes'].split(' ')) != sorted(genomes):
if settings.DEBUG:
print "comparison_genomes for analysis {} don't match, skipping".format(a.aid)
continue
# Finally, does the min_gi_size match?
if int(a_parameters['MIN_GI_SIZE']) != min_gi_size:
if settings.DEBUG:
print "min_gi_size for analysis {} doesn't match, skipping".format(a.aid)
continue
# We made it this far, we must have a match, return this aid
return (a.aid, a.token)
# We exited the loop without returning, we must not have
# a match, return None
return None
def find_reference_genome(self):
a_parameters_json = None
for task in self.tasks.all():
if task.prediction_method == 'Prepare':
a_parameters_json = task.parameters
break
# We found a Prepare task, let's see if it has a ref_accnum
if a_parameters_json:
a_parameters = json.loads(a_parameters_json)
if settings.DEBUG:
print "Found parameters:"
pprint.pprint(a_parameters)
if 'ref_accnum' in a_parameters:
return a_parameters['ref_accnum']
return None
@classmethod
def lookup_genome(cls, accnum):
try:
float(accnum)
except ValueError:
# It's not a custom genome...
genome = NameCache.objects.get(cid=accnum)
return genome
# It's a custom genome
genome = CustomGenome.objects.get(cid=accnum)
return genome
@classmethod
def last_modified(cls, request, aid):
return Analysis.objects.get(aid=aid).complete_date
class Meta:
db_table = "Analysis"
class GIAnalysisTask(models.Model):
taskid = models.AutoField(primary_key=True)
aid = models.ForeignKey(Analysis, related_name='tasks')
prediction_method = models.CharField(max_length=15)
status = models.IntegerField(choices=STATUS_CHOICES,
default=STATUS['PENDING'])
parameters = models.CharField(max_length=15)
start_date = models.DateTimeField('date started')
complete_date = models.DateTimeField('date completed')
@classmethod
def fetch_parameters(cls, aid, method):
try:
if settings.DEBUG:
print "Checking method {} in analysis {}".format(method,aid)
task = GIAnalysisTask.objects.get(aid=aid, prediction_method=method)
a_parameters_json = task.parameters
a_parameters = json.loads(a_parameters_json)
if settings.DEBUG:
print "Found parameters:"
pprint.pprint(a_parameters)
except Exception as e:
if settings.DEBUG:
print e
raise e
return a_parameters
class Meta:
db_table = "GIAnalysisTask"
class GenomicIsland(models.Model):
gi = models.AutoField(primary_key=True)
aid = models.ForeignKey(Analysis)
start = models.IntegerField(default=0)
end = models.IntegerField(default=0)
prediction_method = models.CharField(max_length=15)
details = models.CharField(max_length=20)
@classmethod
def sqltodict(cls, query,param):
from django.db import connection
cursor = connection.cursor()
cursor.execute(query,param)
fieldnames = [name[0] for name in cursor.description]
result = []
for row in cursor.fetchall():
rowset = []
for field in zip(fieldnames, row):
rowset.append(field)
result.append(dict(rowset))
return result
class Meta:
db_table = "GenomicIsland"
class GC(models.Model):
ext_id = models.CharField(primary_key=True,max_length=15)
min = models.FloatField()
max = models.FloatField()
mean = models.FloatField()
gc = models.TextField()
class Meta:
db_table = "GC"
class Genes(models.Model):
ext_id = models.CharField(max_length=15)
start = models.IntegerField(default=0)
end = models.IntegerField(default=0)
strand = models.IntegerField()
name = models.CharField(max_length=14)
gene = models.CharField(max_length=10)
product = models.CharField(max_length=100)
locus = models.CharField(max_length=10)
class Meta:
db_table = "Genes"
class IslandGenes(models.Model):
gi = models.IntegerField()
gene = models.ForeignKey(Genes)
class Meta:
db_table = "IslandGenes"
class Distance(models.Model):
rep_accnum1 = models.CharField(max_length=15)
rep_accnum2 = models.CharField(max_length=15)
distance = models.FloatField()
@classmethod
def find_genomes(cls, accnum, *args, **kwargs):
#pprint.pprint(kwargs)
if 'min_cutoff' in kwargs:
min_cutoff = kwargs['min_cutoff']
else:
min_cutoff = 0.1
if 'max_cutoff' in kwargs:
max_cutoff = kwargs['max_cutoff']
else:
max_cutoff = 0.42
params = [accnum, accnum, min_cutoff, max_cutoff]
sql = "SELECT id, rep_accnum1, rep_accnum2, distance from Distance WHERE (rep_accnum1 = %s or rep_accnum2 = %s) AND "
sql_dist = "(distance >= %s AND distance <= %s)"
if 'extra_genomes' in kwargs:
rep_list = ','.join("'" + rep + "'" for rep in kwargs['extra_genomes'])
sql += "(" + sql_dist + " OR (rep_accnum1 IN ({}) OR rep_accnum2 IN ({})))".format(rep_list, rep_list)
else:
sql += sql_dist
#sql += ' ORDER BY distance'
dists = Distance.objects.raw(sql, params)
#dists = Distance.objects.filter(models.Q(rep_accnum1=accnum) | models.Q(rep_accnum2=accnum), distance__gte=min_cutoff, distance__lte=max_cutoff).order_by('distance')
genomes = [(g.rep_accnum1, g.distance) if g.rep_accnum1 != accnum else (g.rep_accnum2, g.distance) for g in dists]
return genomes
@classmethod
def distance_matrix(cls, cluster_list):
print cluster_list
dists = Distance.objects.filter(rep_accnum1__in=cluster_list, rep_accnum2__in=cluster_list)
distance_pairs = {g.rep_accnum1 + '_' + g.rep_accnum2: g.distance for g in dists.all()}
matrix = []
for i in range(0,len(cluster_list)):
matrix_iteration = []
for j in range(0,i+1):
if i == j:
matrix_iteration.append(0)
elif cluster_list[i] + '_' + cluster_list[j] in distance_pairs:
matrix_iteration.append(distance_pairs[cluster_list[i] + '_' + cluster_list[j]])
elif cluster_list[j] + '_' + cluster_list[i] in distance_pairs:
matrix_iteration.append(distance_pairs[cluster_list[j] + '_' + cluster_list[i]])
else:
raise("Error, can't find pair!")
matrix.append(matrix_iteration)
#print matrix_iteration
cluster_list = [s.encode('ascii', 'ignore') for s in cluster_list]
matrix_obj = _DistanceMatrix(names=cluster_list, matrix=matrix)
constructor = DistanceTreeConstructor()
tree = constructor.nj(matrix_obj)
tree.ladderize()
#Phylo.draw_ascii(tree)
output = StringIO.StringIO()
Phylo.write(tree, output, 'newick')
tree_str = output.getvalue()
#print tree_str
return tree_str
class Meta:
db_table = "Distance"
class DistanceAttempts(models.Model):
rep_accnum1 = models.CharField(max_length=15)
rep_accnum2 = models.CharField(max_length=15)
status = models.IntegerField()
run_date = models.DateTimeField(auto_now_add=True)
class Meta:
db_table = "DistanceAttempts"
class UploadGenome(models.Model):
id = models.AutoField(primary_key=True)
filename = models.CharField(max_length=120)
ip_addr = models.GenericIPAddressField()
genome_name = models.CharField(max_length=40)
email = models.EmailField()
cid = models.IntegerField(default=0)
date_uploaded = models.DateTimeField(auto_now_add=True)
class Meta:
db_table = "UploadGenome"
class Notification(models.Model):
analysis = models.ForeignKey(Analysis, related_name='notifications')
email = models.EmailField()
status = models.IntegerField(default=0)
class Meta:
db_table = "Notification"
class SiteStatus(models.Model):
status = models.IntegerField(default=0, primary_key=True)
message = models.CharField(max_length=500)
class Meta:
managed = False
db_table = 'SiteStatus'
class Virulence(models.Model):
protein_accnum = models.CharField(max_length=18,primary_key=True)
external_id = models.CharField(max_length=18)
source = models.CharField(max_length=4, blank=True)
type = models.CharField(max_length=20, blank=False)
class Meta:
managed = False
db_table = 'virulence'
'''
MicrobeDB models
'''
class Genomeproject(models.Model):
gpv_id = models.IntegerField(primary_key=True)
assembly_accession = models.CharField(max_length=20)
asm_name = models.CharField(max_length=24)
genome_name = models.TextField()
version_id = models.IntegerField()
bioproject = models.CharField(max_length=14)
biosample = models.CharField(max_length=14)
taxid = models.IntegerField(blank=True, null=True)
species_taxid = models.IntegerField(blank=True, null=True)
org_name = models.TextField(blank=True)
infraspecific_name = models.CharField(max_length=24, null=True)
submitter = models.TextField(blank=True)
release_date = models.DateField(blank=True, null=True)
gpv_directory = models.TextField(blank=True)
filename = models.CharField(max_length=75)
file_types = models.TextField(blank=True)
prev_gpv = models.IntegerField()
class Meta:
managed = False
db_table = 'genomeproject'
class Genomeproject_Checksum(models.Model):
version_id = models.IntegerField(primary_key=True)
filename = models.CharField(max_length=64)
checksum = models.CharField(max_length=32)
gpv_id = models.IntegerField()
class Meta:
managed = False
db_table = 'genomeproject_checksum'
class Genomeproject_Meta(models.Model):
gpv_id = models.IntegerField(primary_key=True)
gram_stain = models.CharField(max_length=7, blank=True)
genome_gc = models.FloatField(blank=True, null=True)
patho_status = models.CharField(max_length=11, blank=True)
disease = models.TextField(blank=True)
genome_size = models.FloatField(blank=True, null=True)
pathogenic_in = models.TextField(blank=True)
temp_range = models.CharField(max_length=17, blank=True)
habitat = models.CharField(max_length=15, blank=True)
shape = models.TextField(blank=True)
arrangement = models.TextField(blank=True)
endospore = models.CharField(max_length=7, blank=True)
motility = models.CharField(max_length=7, blank=True)
salinity = models.TextField(blank=True)
oxygen_req = models.CharField(max_length=15, blank=True)
centre = models.TextField(blank=True)
chromosome_num = models.IntegerField(blank=True, null=True)
plasmid_num = models.IntegerField(blank=True, null=True)
contig_num = models.IntegerField(blank=True, null=True)
class Meta:
managed = False
db_table = 'genomeproject_meta'
class Replicon(models.Model):
rpv_id = models.IntegerField(primary_key=True)
gpv_id = models.IntegerField()
version_id = models.IntegerField()
rep_accnum = models.CharField(max_length=20, blank=True)
rep_version = models.IntegerField()
definition = models.TextField(blank=True)
rep_type = models.CharField(max_length=10, blank=True)
rep_ginum = models.TextField(blank=True)
file_name = models.TextField(blank=True)
file_types = models.TextField(blank=True)
cds_num = models.IntegerField(blank=True, null=True)
gene_num = models.IntegerField(blank=True, null=True)
rep_size = models.IntegerField(blank=True, null=True)
rna_num = models.IntegerField(blank=True, null=True)
class Meta:
managed = False
db_table = 'replicon'
class Taxonomy(models.Model):
taxon_id = models.IntegerField(primary_key=True)
superkingdom = models.TextField(blank=True)
phylum = models.TextField(blank=True)
class_field = models.TextField(db_column='class', blank=True) # Field renamed because it was a Python reserved word.
order = models.TextField(blank=True)
family = models.TextField(blank=True)
genus = models.TextField(blank=True)
species = models.TextField(blank=True)
other = models.TextField(blank=True)
synonyms = models.TextField(blank=True)
class Meta:
managed = False
db_table = 'taxonomy'
class Version(models.Model):
version_id = models.IntegerField(primary_key=True)
dl_directory = models.TextField(blank=True)
version_date = models.DateField()
used_by = models.TextField(blank=True)
is_current = models.IntegerField()
class Meta:
managed = False
db_table = 'version'
|
lairdm/islandviewer-ui
|
webui/models.py
|
Python
|
gpl-3.0
| 17,795
|
[
"BLAST"
] |
24e7b7f0064feaa64cf3dda319b95dfebdd167b8863a6011b7d9519c766f6c10
|
# Orca
#
# Copyright 2005-2008 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Provides debug utilities for Orca. Debugging is managed by a debug
level, which is held in the debugLevel field. All other methods take
a debug level, which is compared to the current debug level to
determine if the content should be output."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2008 Sun Microsystems Inc."
__license__ = "LGPL"
import inspect
import time
import traceback
import os
import pyatspi
import subprocess
import sys
# Used to turn off all debugging.
#
LEVEL_OFF = 10000
# Used to describe events of considerable importance and which will prevent
# normal program execution.
#
LEVEL_SEVERE = 1000
# Used to describe events of interest to end users or system managers or which
# indicate potential problems, but which Orca can deal with without crashing.
#
LEVEL_WARNING = 900
# Used to indicate reasonably significant messages that make sense to end users
# and system managers.
#
# For the purposes of Orca, LEVEL_INFO means display the text being sent to
# speech and braille.
#
LEVEL_INFO = 800
# Used to indicate static configuration information to assist in debugging
# problems that may be associated with a particular configuration.
#
# For the purposes of Orca, LEVEL_CONFIGURATION means display the various
# aspects of whether a particular feature (e.g., speech, braille, etc.)
# is enabled or not as well as details about that feature.
#
LEVEL_CONFIGURATION = 700
# Used for lowest volume of detailed tracing information.
#
# For the purposes of Orca, this is braille and keyboard input, script
# activation and deletion, locus of focus changes, and visual changes
# to the locus of focus.
#
LEVEL_FINE = 600
# Used for medium volume of detailed tracing information.
#
# For the purposes of Orca, this is for debugging speech and braille
# generators and tracking the synthesis of device events.
#
LEVEL_FINER = 500
# Used for maximum volume of detailed tracing information.
#
# For the purposes of Orca, this is for tracking all AT-SPI object
# events. NOTE that one can up the debug level of AT-SPI object
# events by setting the eventDebugLevel. In addition, one can filter
# events by setting eventDebugFilter to a regular expression that
# matches event type names.
#
LEVEL_FINEST = 400
# Used for all detailed tracing information, even finer than LEVEL_FINEST
#
LEVEL_ALL = 0
debugLevel = LEVEL_SEVERE
# The debug file. If this is not set, then all debug output is done
# via stdout. If this is set, then all debug output is sent to the
# file. This can be useful for debugging because one can pass in a
# non-buffered file to better track down hangs.
#
debugFile = None
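# A minimal sketch of turning that on from calling code (the import form, the
# path and the buffering choice below are assumptions, not part of this module):
#
#   from orca import debug
#   debug.debugLevel = debug.LEVEL_ALL
#   debug.debugFile = open('/tmp/orca-debug.log', 'w', buffering=1)
#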
# The debug filter should be either None (which means to match all
# events) or a compiled regular expression from the 're' module (see
# http://www.amk.ca/python/howto/regex/). The regular expression will
# be used as a matching function - if the event type creates a match
# in the regular expression, then it will be considered for output. A
# typical call to this method might look like:
#
# debug.eventDebugFilter = re.compile('focus:|window:activate')
#
eventDebugLevel = LEVEL_FINEST
eventDebugFilter = None
# If True, we output debug information for the event queue. We
# use this in addition to log level to prevent debug logic from
# bogging down event handling.
#
debugEventQueue = False
# What module(s) should be traced if traceit is being used. By default
# we'll just attend to ourself. (And by default, we will not enable
# traceit.) Note that enabling this functionality will drag your system
# to a complete and utter halt and should only be used in extreme
# desperation by developers who are attempting to reproduce a very
# specific, immediate issue. Trust me. :-) Disabling braille monitor in
# this case is also strongly advised.
#
TRACE_MODULES = ['orca']
# Specific modules to ignore with traceit.
#
TRACE_IGNORE_MODULES = ['traceback', 'linecache', 'locale', 'gettext',
'logging', 'UserDict', 'encodings', 'posixpath',
'genericpath', 're']
# Specific apps to trace with traceit.
#
TRACE_APPS = []
# What AT-SPI event(s) should be traced if traceit is being used. By
# default, we'll trace everything. Examples of what you might wish to
# do to narrow things down include:
#
# TRACE_EVENTS = ['object:state-changed', 'focus:']
# (for any and all object:state-changed events plus focus: events)
# TRACE_EVENTS = ['object:state-changed:selected']
# (if you know the exact event type of interest)
#
TRACE_EVENTS = []
# What pyatspi role(s) should be traced if traceit is being used. By
# default, we'll trace everything. An example of what you might wish
# to do to narrow things down, if you know buttons trigger the problem:
#
# TRACE_ROLES = [pyatspi.ROLE_PUSH_BUTTON, pyatspi.ROLE_TOGGLE_BUTTON]
#
TRACE_ROLES = []
# Whether or not traceit should only trace the work being done when
# processing an actual event. This is when most bad things happen.
# So we'll default to True.
#
TRACE_ONLY_PROCESSING_EVENTS = True
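# A minimal sketch of how traceit might be enabled from a driver script (the
# app name and event type below are only examples, not defaults of this module):
#
#   import sys
#   from orca import debug
#   debug.TRACE_APPS = ['gedit']
#   debug.TRACE_EVENTS = ['object:state-changed']
#   sys.settrace(debug.traceit)
#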
objEvent = None
def printException(level):
"""Prints out information regarding the current exception.
Arguments:
- level: the accepted debug level
"""
if level >= debugLevel:
println(level)
traceback.print_exc(100, debugFile)
println(level)
def printStack(level):
"""Prints out the current stack.
Arguments:
- level: the accepted debug level
"""
if level >= debugLevel:
println(level)
traceback.print_stack(None, 100, debugFile)
println(level)
def println(level, text="", timestamp=False):
"""Prints the text to stderr unless debug is enabled.
If debug is enabled the text will be redirected to the
file debugFile.
Arguments:
- level: the accepted debug level
- text: the text to print (default is a blank line)
"""
if level >= debugLevel:
text = text.replace("\ufffc", "[OBJ]")
if timestamp:
text = "%s - %s" % (time.strftime("%H:%M:%S"), text)
if debugFile:
try:
debugFile.writelines([text, "\n"])
except TypeError:
text = "TypeError when trying to write text"
debugFile.writelines([text, "\n"])
except:
text = "Exception when trying to write text"
debugFile.writelines([text, "\n"])
else:
try:
sys.stderr.writelines([text, "\n"])
except TypeError:
text = "TypeError when trying to write text"
sys.stderr.writelines([text, "\n"])
except:
text = "Exception when trying to write text"
sys.stderr.writelines([text, "\n"])
def printResult(level, result=None):
"""Prints the return result, along with information about the
method, arguments, and any errors encountered."""
if level < debugLevel:
return
stack = inspect.stack()
current, prev = stack[1], stack[2]
frame = current[0]
# To better print arguments which are accessible objects
args = inspect.getargvalues(frame)
for key, value in list(args.locals.items()):
args.locals[key] = str(value)
fArgs = str.replace(inspect.formatargvalues(*args), "'", "")
callString = 'CALL: %s.%s (line %s) -> %s.%s%s' % (
inspect.getmodulename(prev[1]), prev[3], prev[2],
inspect.getmodulename(current[1]), current[3], fArgs)
string = '%s\n%s %s' % (callString, 'RESULT:', result)
println(level, '%s' % string)
def printObjectEvent(level, event, sourceInfo=None, timestamp=False):
"""Prints out an Python Event object. The given level may be
overridden if the eventDebugLevel is greater. Furthermore, only
events with event types matching the eventDebugFilter regular
expression will be printed.
Arguments:
- level: the accepted debug level
- event: the Python Event to print
- sourceInfo: additional string to print out
"""
if eventDebugFilter and not eventDebugFilter.match(event.type):
return
level = max(level, eventDebugLevel)
text = "OBJECT EVENT: %s (%d, %d, %s)" \
% (event.type, event.detail1, event.detail2, event.any_data)
println(level, text, timestamp)
if sourceInfo:
println(level, " %s" % sourceInfo, timestamp)
def printInputEvent(level, string, timestamp=False):
"""Prints out an input event. The given level may be overridden
if the eventDebugLevel (see setEventDebugLevel) is greater.
Arguments:
- level: the accepted debug level
- string: the string representing the input event
"""
println(max(level, eventDebugLevel), string, timestamp)
def printDetails(level, indent, accessible, includeApp=True, timestamp=False):
"""Lists the details of the given accessible with the given
indentation.
Arguments:
- level: the accepted debug level
- indent: a string containing spaces for indentation
- accessible: the accessible whose details are to be listed
- includeApp: if True, include information about the app
"""
if level >= debugLevel and accessible:
println(level,
getAccessibleDetails(level, accessible, indent, includeApp),
timestamp)
def getAccessibleDetails(level, acc, indent="", includeApp=True):
"""Returns a string, suitable for printing, that describes the
given accessible.
Arguments:
- indent: A string to prefix the output with
- includeApp: If True, include information about the app
for this accessible.
"""
if level < debugLevel:
return ""
if includeApp:
try:
app = acc.getApplication()
except:
string = indent + "app=(exception getting app) "
app = None
else:
if app:
try:
string = indent + "app.name='%s' " % app.name
except (LookupError, RuntimeError):
string = indent + "app.name='(exception getting name)' "
else:
string = indent + "app=None "
else:
string = indent
# create the States string
try:
stateSet = acc.getState()
except:
string += "(exception getting state set)"
try:
states = stateSet.getStates()
except:
string += "(exception getting states)"
states = []
state_strings = []
for state in states:
state_strings.append(pyatspi.stateToString(state))
state_string = ' '.join(state_strings)
# create the relations string
try:
relations = acc.getRelationSet()
except:
string += "(exception getting relation set)"
relations = None
if relations:
relation_strings = []
for relation in relations:
relation_strings.append( \
pyatspi.relationToString(relation.getRelationType()))
rel_string = ' '.join(relation_strings)
else:
rel_string = ''
try:
iface_string = " ".join(pyatspi.utils.listInterfaces(acc))
except:
iface_string = "(exception calling listInterfaces)"
try:
string += "name='%s' role='%s' state='%s' \n%srelations='%s' interfaces='%s'" \
% (acc.name or 'None', acc.getRoleName(),
state_string, indent, rel_string, iface_string)
except:
string += "(exception fetching data)"
return string
# The following code originated from the following URL:
#
# http://www.dalkescientific.com/writings/diary/archive/ \
# 2005/04/20/tracing_python_code.html
#
import linecache
def _getFileAndModule(frame):
filename, module = None, None
try:
filename = frame.f_globals["__file__"]
module = frame.f_globals["__name__"]
except:
pass
else:
if (filename.endswith(".pyc") or filename.endswith(".pyo")):
filename = filename[:-1]
return filename, module
def _shouldTraceIt():
if not objEvent:
return not TRACE_ONLY_PROCESSING_EVENTS
eventSource = objEvent.source
if TRACE_APPS:
try:
app = objEvent.host_application or eventSource.getApplication()
except:
pass
else:
if not app.name in TRACE_APPS:
return False
if TRACE_ROLES and not eventSource.getRole() in TRACE_ROLES:
return False
if TRACE_EVENTS and \
not [x for x in map(objEvent.type.startswith, TRACE_EVENTS) if x]:
return False
return True
def traceit(frame, event, arg):
"""Line tracing utility to output all lines as they are executed by
the interpreter. This is to be used by sys.settrace and is for
debugging purposes.
Arguments:
- frame: is the current stack frame
- event: 'call', 'line', 'return', 'exception', 'c_call', 'c_return',
or 'c_exception'
- arg: depends on the event type (see docs for sys.settrace)
"""
if not _shouldTraceIt():
return None
filename, module = _getFileAndModule(frame)
if not (filename and module):
return traceit
if module in TRACE_IGNORE_MODULES:
return traceit
if TRACE_MODULES and not module.split('.')[0] in TRACE_MODULES:
return traceit
if not event in ['call', 'line', 'return']:
return traceit
lineno = frame.f_lineno
line = linecache.getline(filename, lineno).rstrip()
output = 'TRACE %s:%s: %s' % (module, lineno, line)
if event == 'call':
argvals = inspect.getargvalues(frame)
keys = [x for x in argvals[0] if x != 'self']
try:
values = list(map(argvals[3].get, keys))
except TypeError:
if len(keys) == 1 and isinstance(keys[0], list):
values = list(map(argvals[3].get, keys[0]))
else:
return traceit
for i, key in enumerate(keys):
output += '\n ARG %s=%s' % (key, values[i])
lineElements = line.strip().split()
if lineElements and lineElements[0] == 'return':
if event == 'line':
return traceit
output = '%s (rv: %s)' % (output, arg)
println(LEVEL_ALL, output)
return traceit
def getOpenFDCount(pid):
procs = subprocess.check_output([ 'lsof', '-w', '-Ff', '-p', str(pid)])
procs = procs.decode('UTF-8').split('\n')
files = list(filter(lambda s: s and s[0] == 'f' and s[1:].isdigit(), procs))
return len(files)
def getCmdline(pid):
try:
openFile = os.popen('cat /proc/%s/cmdline' % pid)
cmdline = openFile.read()
openFile.close()
except:
cmdline = '(Could not obtain cmdline)'
cmdline = cmdline.replace('\x00', ' ')
return cmdline
def pidOf(procName):
openFile = subprocess.Popen('pgrep %s' % procName,
shell=True,
stdout=subprocess.PIPE).stdout
pids = openFile.read()
openFile.close()
return [int(p) for p in pids.split()]
def examineProcesses():
desktop = pyatspi.Registry.getDesktop(0)
println(LEVEL_ALL, 'INFO: Desktop has %i apps:' % desktop.childCount)
for i, app in enumerate(desktop):
pid = app.get_process_id()
cmd = getCmdline(pid)
fds = getOpenFDCount(pid)
try:
name = app.name
except:
name = 'ERROR: Could not get name'
else:
if name == '':
name = 'WARNING: Possible hang'
println(LEVEL_ALL, '%3i. %s (pid: %s) %s file descriptors: %i' \
% (i+1, name, pid, cmd, fds))
# Other 'suspect' processes which might not show up as accessible apps.
otherApps = ['apport']
for app in otherApps:
pids = pidOf(app)
if not pids:
println(LEVEL_ALL, 'INFO: no pid for %s' % app)
continue
for pid in pids:
cmd = getCmdline(pid)
fds = getOpenFDCount(pid)
println(LEVEL_ALL, 'INFO: %s (pid: %s) %s file descriptors: %i' \
% (app, pid, cmd, fds))
|
chrys87/orca-beep
|
src/orca/debug.py
|
Python
|
lgpl-2.1
| 17,180
|
[
"ORCA"
] |
9ddf4f8cf945271e489c31637fee5a11ec82a6a09b3e1c582274cc54b4598064
|
import datetime
import sys
import numpy as np
import mygis
from bunch import Bunch
def write_file(date,info,erai):
"""writes ERAi input data to a netcdf file"""
filename=str(date).replace(" ","_")
dims = ("time", "level","lat","lon")
dims2dt = ("time", "lat","lon")
extra_vars=[]
# 3D variables
# cloud,ice,qv,u,v,t,p, z
# 2D variables
# hgt, latent_heat, PBL_height, sensible_heat, sfc_hgt (sfc_hgt is not currently used; it should be ~the same as hgt)
# 1D variables / coordinates
# lat, lon
atts=Bunch(long_name="Cloud liquid water content",units="kg kg**-1")
extra_vars.append(Bunch(name="cloud",data=erai["cloud"],dims=dims,dtype="f",attributes=atts))
atts=Bunch(long_name="Cloud ice water content",units="kg kg**-1")
extra_vars.append(Bunch(name="ice",data=erai["ice"],dims=dims,dtype="f",attributes=atts))
# used as primary variable in io.write
# atts=Bunch(long_name="Specific Humidity",units="kg kg**-1")
# extra_vars.append(Bunch(name="qv",data=erai["qv"],dims=dims,dtype="f",attributes=atts))
atts=Bunch(long_name="U (E/W) wind speed",units="m s**-1")
extra_vars.append(Bunch(name="u",data=erai["u"],dims=dims,dtype="f",attributes=atts))
atts=Bunch(long_name="V (N/S) wind speed",units="m s**-1")
extra_vars.append(Bunch(name="v",data=erai["v"],dims=dims,dtype="f",attributes=atts))
atts=Bunch(long_name="Potential Temperature",units="K")
extra_vars.append(Bunch(name="theta",data=erai["t"],dims=dims,dtype="f",attributes=atts))
atts=Bunch(long_name="Pressure",units="Pa")
extra_vars.append(Bunch(name="p",data=erai["p"],dims=dims,dtype="f",attributes=atts))
atts=Bunch(long_name="Atmospheric Elevation",units="m",positive="up")
extra_vars.append(Bunch(name="z",data=erai["z"],dims=dims,dtype="f",attributes=atts))
atts=Bunch(long_name="Topographic Height",units="m")
extra_vars.append(Bunch(name="hgt",data=erai["hgt"],dims=dims[2:],dtype="f",attributes=atts))
atts=Bunch(long_name="Surface solar radiation (downwards)",units="W m**-2")
extra_vars.append(Bunch(name="swdown",data=erai["sw"],dims=dims2dt,dtype="f",attributes=atts))
atts=Bunch(long_name="Surface longwave radiation (downwards)",units="W m**-2")
extra_vars.append(Bunch(name="lwdown",data=erai["lw"],dims=dims2dt,dtype="f",attributes=atts))
atts=Bunch(long_name="Surface Latent Heat flux (positive up)",units="W m**-2")
extra_vars.append(Bunch(name="latent_heat",data=erai["latent_heat"],dims=dims2dt,dtype="f",attributes=atts))
atts=Bunch(long_name="Surface Sensible Heat flux (positive up)",units="W m**-2")
extra_vars.append(Bunch(name="sensible_heat",data=erai["sensible_heat"],dims=dims2dt,dtype="f",attributes=atts))
atts=Bunch(long_name="Planetary Boundary Layer Height",units="m")
extra_vars.append(Bunch(name="PBL_height",data=erai["PBL_height"],dims=dims2dt,dtype="f",attributes=atts))
atts=Bunch(long_name="Skin Temperature",units="K")
extra_vars.append(Bunch(name="tskin",data=erai["tskin"],dims=dims2dt,dtype="f",attributes=atts))
atts=Bunch(long_name="Convective precipitation",units="mm")
extra_vars.append(Bunch(name="cp",data=erai["cp"],dims=dims2dt,dtype="f",attributes=atts))
atts=Bunch(long_name="latitude",units="degrees_north")
extra_vars.append(Bunch(name="lat",data=info.lat_data[:,0],dims=("lat",),dtype="f",attributes=atts))
atts=Bunch(long_name="longitude",units="degrees_east")
extra_vars.append(Bunch(name="lon",data=info.lon_data[0,:],dims=("lon",),dtype="f",attributes=atts))
time_since_1900 = date - datetime.datetime(1900,1,1,0,0,0)
time = time_since_1900.days + np.float64(time_since_1900.seconds/86400.0)
atts=Bunch(long_name="time",units="days since 1900-01-01", calendar="gregorian")
extra_vars.append(Bunch(name="time",data=time,dims=(dims[0],),dtype="d",attributes=atts))
qvatts=Bunch(long_name="Specific Humidity",units="kg kg**-1")
# write to output file
mygis.write(filename=filename,varname="qv",data=erai.qv,attributes=qvatts,dtype="f",dims=dims,
extravars=extra_vars,history=" Produced by erai2icar v."+info.version+" "+" ".join(sys.argv))
|
NCAR/icar
|
helpers/erai/output.py
|
Python
|
mit
| 4,218
|
[
"NetCDF"
] |
3c06cecd37199ba16383858082f315748fba25cd58d55ca61114997d9cbe65d1
|
########################################
# Read ECMWF netcdf files for heat fluxes
#
# Created by: Peter Willetts
# Created on: 12/06/2014
#
# ECMWF heat and radiation flux - Read from netcdf
# filter by date, latitude and longitude
# calculate mean and total heat flux
# BEWARE!!! ECMWF flux descriptions may be the wrong way round, as well as upwards/downwards signs
# This script is designed to work with the total accumulated time-integrated fluxes at 0 timesteps - every 12 hours
# So average of 12 hourly accumulations in J/m2, divided by seconds, minutes and 12 hours gives Wm^-2
#
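# Worked example of that conversion with an illustrative number: a 12 hour
# accumulation of 4.32e6 J/m2 corresponds to a mean flux of
#   4.32e6 / (60 * 60 * 12) = 100 W/m2
# which is the same division applied to the 'slhf' field below.
#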
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
from netCDF4 import Dataset
import glob
import re
import os
import pickle
import datetime
#first_month=8
#first_day_of_month=
#last_month=9
#last_day=
time_min=datetime.datetime(2011,8,18,0,0,0,0)
time_max=datetime.datetime(2011,9,8,0,0,0,0)
lon_max = 116
lon_min = 34
lat_max= 40.
lat_min=-11.25
nc = Dataset('/nfs/a90/eepdw/Data/ERA_Iinterim_Heat_Rad_Fluxes/era_interim_netcdf_heat_rad_flux_evap_precip_00_timestep.nc')
hours_since=datetime.datetime(1900,1,1,0,0,0,0)
# Get min and max index positions for latitude and longitude
datetimes = np.array([datetime.timedelta(hours=float(i))+hours_since for i in nc.variables['time'][:]])
time_index= np.where((datetimes<=time_max) & (datetimes >= time_min))
la_index = np.where((nc.variables['latitude'][:]<=lat_max) & (nc.variables['latitude'][:] >= lat_min))
lo_index = np.where((nc.variables['longitude'][:]<=lon_max) & (nc.variables['longitude'][:] >= lon_min))
la_i_max = np.max(la_index)
la_i_min = np.min(la_index)
lo_i_max = np.max(lo_index)
lo_i_min = np.min(lo_index)
t_i_max = np.max(time_index)
t_i_min = np.min(time_index)
lat_amounts=la_i_max-la_i_min
lon_amounts=lo_i_max-lo_i_min
print nc
latent_in = nc.variables['slhf'][t_i_min:t_i_max+1,la_i_min:la_i_max+1, lo_i_min:lo_i_max+1]/(60*60*12)
sensible_in = nc.variables['sshf'][t_i_min:t_i_max+1,la_i_min:la_i_max+1, lo_i_min:lo_i_max+1]/(60*60*6)
lwave_in = nc.variables['str'][t_i_min:t_i_max+1,la_i_min:la_i_max+1, lo_i_min:lo_i_max+1]/(60*60*6)
swave_in = nc.variables['ssr'][t_i_min:t_i_max+1,la_i_min:la_i_max+1, lo_i_min:lo_i_max+1]/(60*60*6)
latitude_in = nc.variables['latitude'][la_index]
longitude_in = nc.variables['longitude'][lo_index]
time_in = datetimes[time_index]
##
latent_mean = -np.mean(latent_in, axis=0, dtype=np.float64)
sensible_mean = -np.mean(sensible_in, axis=0, dtype=np.float64)
swave_mean = np.mean(swave_in, axis=0, dtype=np.float64)
lwave_mean = np.mean(lwave_in, axis=0, dtype=np.float64)
# I don't think the ECMWF data is very well documented
# According to ECMWF descriptions 'swave_in' is solar (longwave - sounds wrong to me) downward radiation
# 'lwave_in' is thermal (shortwave - again sounds wrong) upward radiation
# 'latent_mean' is upward
# 'sensible_mean' is upward
# From UM calc - pcubetotal = Downward shortwave + Downward longwave flux - Upward sensible - Upward latent heat
# Even though the ECMWF latent/sensible heat flux is 'upwards', the signs are opposite to those in the EMBRACE data etc
total_mean = swave_mean + lwave_mean - sensible_mean - latent_mean
np.save('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_latent_mean', latent_mean)
np.save('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_sensible_mean', sensible_mean)
np.save('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_swave_mean', swave_mean)
np.save('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_lwave_mean', lwave_mean)
np.save('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_total_mean', total_mean)
np.save('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_lats', latitude_in)
np.save('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_longs', longitude_in)
np.save('/nfs/a90/eepdw/Data/Saved_data/era_i/Fluxes/era_i_emb_times', time_in)
if __name__ == '__main__':
    # All of the processing above already runs at module level, so nothing
    # further is needed when this file is executed as a script.
    pass
|
peterwilletts24/Python-Scripts
|
era_interim/netcdf_fileread_heat_fluxes.py
|
Python
|
mit
| 4,199
|
[
"NetCDF"
] |
298c26bcb8c97ec3b5c2a467d5ab96b96a78807c4c02f93c73f5dfbc2c19258e
|
import socket
import time
import sys
import subprocess
import codecs
from octopus.modules import dictdiffer
from unittest import TestCase
from octopus.lib import plugin
import os
class FunctionalTestServer(TestCase):
"""
FIXME: don't use this, it doesn't work. Leaving it here for later diagnosis.
"""
def setUp(self):
super(FunctionalTestServer, self).setUp()
if self.config and self.cfg_file and self.flask_app:
mod = plugin.load_module(self.flask_app)
make_config(self.config, self.cfg_file)
self.test_server = TestServer(port=None, index=None, python_app_module_path=os.path.abspath(mod.__file__), cfg_file=self.cfg_file)
self.test_server.spawn_with_config()
def tearDown(self):
super(FunctionalTestServer, self).tearDown()
self.test_server.terminate()
os.remove(self.cfg_file)
def make_config(cfg, filepath):
with codecs.open(filepath, "wb") as out:
for k, v in cfg.iteritems():
if isinstance(v, basestring):
# if the value is a string, wrap it in quotes
out.write(k + " = '" + v + "'\n")
else:
# otherwise it's probably an int, float or bool so just stringify it
out.write(k + " = " + str(v) + "\n")
# NOTE: this would not handle dicts and lists, so you might get errors, in which
# case you'll need to work out what to do next
def get_first_free_port():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0)) # let OS pick the first available port
free_port = sock.getsockname()[1] # which port did the OS pick?
sock.close()
return free_port
class TestServer(object):
def __init__(self, port, index, python_app_module_path='service/web.py', cfg_file=None):
self.port = port
self.index = index
self.python_app_module_path = python_app_module_path
self.cfg_file = cfg_file
self._process = None
def get_server_url(self):
"""
Return the url of the test server
"""
return 'http://localhost:{0}'.format(self.port)
def spawn(self):
# sys.executable is the full, absolute path to the current Python interpreter
# This is used so that the new process with the test app in it runs properly in a virtualenv.
self._process = subprocess.Popen([sys.executable, self.python_app_module_path, '--port', str(self.port), '--index', self.index, '--no-logging'])
# we must wait for the server to start listening
time.sleep(1)
def spawn_with_config(self):
self._process = subprocess.Popen([sys.executable, self.python_app_module_path, "--config", self.cfg_file])
# we must wait for the server to start listening
time.sleep(3)
def terminate(self):
if self._process:
self._process.terminate()
time.sleep(1)
def diff_dicts(d1, d2, d1_label='d1', d2_label='d2', print_unchanged=False):
"""
Diff two dictionaries - prints changed, added and removed keys and the changed values. DOES NOT DO NESTED DICTS!
:param d1: First dict - we compare this with d2
:param d2: Second dict - we compare against this one
:param d1_label: Will be used instead of "d1" in debugging output to make it more helpful.
:param d2_label: Will be used instead of "d2" in debugging output to make it more helpful.
:param print_unchanged: - should we print set of unchanged keys (can be long and useless). Default: False.
:return: nothing, prints results to STDOUT
"""
differ = dictdiffer.DictDiffer(d1, d2)
print 'Added :: keys present in {d1} which are not in {d2}'.format(d1=d1_label, d2=d2_label)
print differ.added()
print
print 'Removed :: keys present in {d2} which are not in {d1}'.format(d1=d1_label, d2=d2_label)
print differ.removed()
print
print 'Changed :: keys which are the same in {d1} and {d2} but whose values are different'.format(d1=d1_label, d2=d2_label)
print differ.changed()
print
if differ.changed():
print 'Changed values :: the values of keys which have changed. Format is as follows:'
print ' Key name:'
print ' value in {d1}'.format(d1=d1_label)
print ' value in {d2}'.format(d2=d2_label)
print
for key in differ.changed():
print ' ', key + ':'
print ' ', d1[key]
print ' ', d2[key]
print
print
if print_unchanged:
print 'Unchanged :: keys which are the same in {d1} and {d2} and whose values are also the same'.format(d1=d1_label, d2=d2_label)
print differ.unchanged()
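# A minimal usage sketch (hypothetical dicts, illustrative only):
#
#   old = {"a": 1, "b": 2}
#   new = {"a": 1, "b": 3, "c": 4}
#   diff_dicts(old, new, d1_label="old", d2_label="new")
#
# which prints the added, removed and changed keys between the two dicts.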
|
JiscPER/magnificent-octopus
|
octopus/modules/test/helpers.py
|
Python
|
apache-2.0
| 4,753
|
[
"Octopus"
] |
320ddea1842946a9e84a6f05ee017fdd8385f1383b00e15ccaa5791124ccbf87
|
# Orca
#
# Copyright 2011 The Orca Team.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom script for xfwm4."""
from .script import Script
|
ruibarreira/linuxtrail
|
usr/lib/python3/dist-packages/orca/scripts/apps/xfwm4/__init__.py
|
Python
|
gpl-3.0
| 830
|
[
"ORCA"
] |
9a923140881b341155aaaa3cf78d55befc00473c877dbe2cab375dbbd4472085
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Reference-free tGBS related functions.
"""
import os.path as op
import logging
import sys
from collections import defaultdict
from itertools import combinations
from jcvi.formats.fasta import Fasta, SeqIO
from jcvi.formats.fastq import iter_fastq
from jcvi.formats.base import must_open, write_file
from jcvi.formats.bed import Bed, mergeBed
from jcvi.utils.counter import Counter
from jcvi.apps.cdhit import uclust, deduplicate
from jcvi.apps.base import OptionParser, ActionDispatcher, need_update, sh, iglob
class HaplotypeResolver (object):
def __init__(self, haplotype_set, maf=.1):
self.haplotype_set = haplotype_set
self.nind = len(haplotype_set)
self.notmissing = sum(1 for x in haplotype_set if x)
counter = Counter()
for haplotypes in haplotype_set:
counter.update(Counter(haplotypes))
self.counter = {}
for h, c in counter.items():
if c >= self.notmissing * maf:
self.counter[h] = c
def __str__(self):
return "N={0} M={1} C={2}".format(len(self.counter), \
self.notmissing, self.counter)
def solve(self, fw):
haplotype_counts = self.counter.items()
for (a, ai), (b, bi) in combinations(haplotype_counts, 2):
abi = sum(1 for haplotypes in self.haplotype_set \
if a in haplotypes and b in haplotypes)
pct = max(abi * 100 / ai, abi * 100 / bi)
print >> fw, a, b, "A={0}".format(ai), "B={0}".format(bi), \
"AB={0}".format(abi), "{0}%".format(pct), \
"compatible" if pct < 50 else ""
fw.flush()
alignsh = r"""
ls *.gz | sed 's/\..*//' | sort -u | \
awk '{{printf("SNP_Discovery-short.pl -native %s.*native.gz \
-o %s.SNPs_Het.txt -a 2 -ac 0.3 -c 0.8\n",$0,$0)}}' \
> SNP.call.sh
parallel -j {0} < SNP.call.sh
ls *.gz | sed 's/\..*//' | sort -u | \
awk '{{printf("extract_reference_alleles.pl --native %s.*native.gz \
--genotype %s.SNPs_Het.txt --allgenotypes *.SNPs_Het.txt \
--fasta {1} --output %s.equal\n",$0,$0,$0)}}' \
> SNP.equal.sh
parallel -j {0} < SNP.equal.sh
generate_matrix.pl --tables *SNPs_Het.txt --equal *equal \
--fasta {1} --output snps.matrix.txt
ls *.gz | sed 's/\..*//' | sort -u | \
awk '{{printf("count_reads_per_allele.pl -m snps.matrix.txt -s %s \
--native %s.*native.gz \
-o %s.SNPs_Het.allele_counts\n",$0,$0,$0)}}' \
> SNP.count.sh
parallel -j {0} < SNP.count.sh
"""
def main():
actions = (
('snp', 'run SNP calling on GSNAP output'),
('bam', 'convert GSNAP output to BAM'),
('novo', 'reference-free tGBS pipeline'),
('resolve', 'separate repeats on collapsed contigs'),
('count', 'count the number of reads in all clusters'),
('track', 'track and contrast read mapping in two bam files'),
('weblogo', 'extract base composition for reads'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def weblogo(args):
"""
%prog weblogo [fastafile|fastqfile]
Extract base composition for reads
"""
import numpy as np
from jcvi.utils.progressbar import ProgressBar, Percentage, Bar, ETA
p = OptionParser(weblogo.__doc__)
p.add_option("-N", default=10, type="int",
help="Count the first and last N bases")
p.add_option("--nreads", default=1000000, type="int",
help="Parse first N reads")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
N = opts.N
nreads = opts.nreads
pat = "ATCG"
L = np.zeros((4, N), dtype="int32")
R = np.zeros((4, N), dtype="int32")
p = dict((a, i) for (i, a) in enumerate(pat))
L4, R3 = Counter(), Counter()
widgets = ['Parse reads: ', Percentage(), ' ',
Bar(marker='>', left='[', right=']'), ' ', ETA()]
pr = ProgressBar(maxval=nreads, term_width=60, widgets=widgets).start()
k = 0
fw_L = open("L.fasta", "w")
fw_R = open("R.fasta", "w")
fastq = fastqfile.endswith(".fastq")
it = iter_fastq(fastqfile) if fastq else \
SeqIO.parse(must_open(fastqfile), "fasta")
for rec in it:
k += 1
if k % 1000 == 0:
pr.update(k)
if k > nreads:
break
if rec is None:
break
s = str(rec.seq)
for i, a in enumerate(s[:N]):
if a in p:
a = p[a]
L[a][i] += 1
for j, a in enumerate(s[-N:][::-1]):
if a in p:
a = p[a]
R[a][N - 1 - j] += 1
l4, r3 = s[:4], s[-3:]
L4[l4] += 1
R3[r3] += 1
print >> fw_L, ">{0}\n{1}".format(k, s[:N])
print >> fw_R, ">{0}\n{1}".format(k, s[-N:])
fw_L.close()
fw_R.close()
cmd = "weblogo -F png -s large -f {0}.fasta -o {0}.png"
cmd += " --color-scheme classic --composition none -U probability"
cmd += " --title {1}"
sh(cmd.format('L', "First_10_bases"))
sh(cmd.format('R', "Last_10_bases"))
np.savetxt("L.{0}.csv".format(pat), L, delimiter=',', fmt="%d")
np.savetxt("R.{0}.csv".format(pat), R, delimiter=',', fmt="%d")
fw = open("L4.common", "w")
for p, c in L4.most_common(N):
print >> fw, "\t".join((p, str(c)))
fw.close()
fw = open("R3.common", "w")
for p, c in R3.most_common(N):
print >> fw, "\t".join((p, str(c)))
fw.close()
def bed_store(bedfile, sorted=False):
bedfile = mergeBed(bedfile, s=True, nms=True, sorted=sorted)
bed = Bed(bedfile)
reads, reads_r = {}, defaultdict(list)
for b in bed:
target = "{0}:{1}".format(b.seqid, b.start)
for accn in b.accn.split(","):
reads[accn] = target
reads_r[target].append(accn)
return reads, reads_r
def contrast_stores(bed1_store_r, bed2_store, minreads=10, minpct=.1, prefix="AB"):
for target, reads in bed1_store_r.iteritems():
nreads = len(reads)
if nreads < minreads:
continue
good_mapping = max(minreads / 2, minpct * nreads)
bed2_targets = Counter(bed2_store.get(r) for r in reads)
c = dict((k, v) for (k, v) in bed2_targets.items() if v >= good_mapping)
ctag = "|".join("{0}({1})".format(k, v) for (k, v) in c.items())
print prefix, target, nreads, ctag, len(set(c.keys()) - set([None]))
def track(args):
"""
%prog track bed1 bed2
Track and contrast read mapping in two bam files.
"""
p = OptionParser(track.__doc__)
p.add_option("--sorted", default=False, action="store_true",
help="BED already sorted")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bed1, bed2 = args
sorted = opts.sorted
bed1_store, bed1_store_r = bed_store(bed1, sorted=sorted)
bed2_store, bed2_store_r = bed_store(bed2, sorted=sorted)
contrast_stores(bed1_store_r, bed2_store)
contrast_stores(bed2_store_r, bed1_store, prefix="BA")
def resolve(args):
"""
%prog resolve matrixfile fastafile bamfolder
Separate repeats along collapsed contigs. First scan the matrixfile for
largely heterozygous sites. For each heterozygous site, we scan each bam to
retrieve distinct haplotypes. The frequency of each haplotype is then
computed, the haplotype with the highest frequency, assumed to be
paralogous, is removed.
"""
import pysam
from collections import defaultdict
from itertools import groupby
p = OptionParser(resolve.__doc__)
p.add_option("--missing", default=.5, type="float",
help="Max level of missing data")
p.add_option("--het", default=.5, type="float",
help="Min level of heterozygous calls")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
matrixfile, fastafile, bamfolder = args
#f = Fasta(fastafile)
fp = open(matrixfile)
for row in fp:
if row[0] != '#':
break
header = row.split()
ngenotypes = len(header) - 4
nmissing = int(round(opts.missing * ngenotypes))
logging.debug("A total of {0} individuals scanned".format(ngenotypes))
logging.debug("Look for markers with < {0} missing and > {1} het".\
format(opts.missing, opts.het))
bamfiles = iglob(bamfolder, "*.bam")
logging.debug("Folder `{0}` contained {1} bam files".\
format(bamfolder, len(bamfiles)))
data = []
for row in fp:
if row[0] == '#':
continue
atoms = row.split()
seqid, pos, ref, alt = atoms[:4]
genotypes = atoms[4:]
c = Counter(genotypes)
c0 = c.get('0', 0)
c3 = c.get('3', 0)
if c0 >= nmissing:
continue
hetratio = c3 * 1. / (ngenotypes - c0)
if hetratio <= opts.het:
continue
pos = int(pos)
data.append((seqid, pos, ref, alt, c, hetratio))
data.sort()
logging.debug("A total of {0} target markers in {1} contigs.".\
format(len(data), len(set(x[0] for x in data))))
samfiles = [pysam.AlignmentFile(x, "rb") for x in bamfiles]
samfiles = [(op.basename(x.filename).split(".")[0], x) for x in samfiles]
samfiles.sort()
logging.debug("BAM files grouped to {0} individuals".\
format(len(set(x[0] for x in samfiles))))
fw = must_open(opts.outfile, "w")
for seqid, d in groupby(data, lambda x: x[0]):
d = list(d)
nmarkers = len(d)
logging.debug("Process contig {0} ({1} markers)".format(seqid, nmarkers))
haplotype_set = []
for pf, sf in groupby(samfiles, key=lambda x: x[0]):
haplotypes = []
for pfi, samfile in sf:
reads = defaultdict(list)
positions = []
for s, pos, ref, alt, c, hetratio in d:
for c in samfile.pileup(seqid):
if c.reference_pos != pos - 1:
continue
for r in c.pileups:
rname = r.alignment.query_name
rbase = r.alignment.query_sequence[r.query_position]
reads[rname].append((pos, rbase))
positions.append(pos)
for read in reads.values():
hap = ['-'] * nmarkers
for p, rbase in read:
hap[positions.index(p)] = rbase
hap = "".join(hap)
if "-" in hap:
continue
haplotypes.append(hap)
haplotypes = set(haplotypes)
haplotype_set.append(haplotypes)
hr = HaplotypeResolver(haplotype_set)
print >> fw, seqid, hr
hr.solve(fw)
def count(args):
"""
%prog count cdhit.consensus.fasta
Scan the headers for the consensus clusters and count the number of reads.
"""
from jcvi.graphics.histogram import stem_leaf_plot
from jcvi.utils.cbook import SummaryStats
p = OptionParser(count.__doc__)
p.add_option("--csv", help="Write depth per contig to file")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
csv = open(opts.csv, "w") if opts.csv else None
f = Fasta(fastafile, lazy=True)
sizes = []
for desc, rec in f.iterdescriptions_ordered():
if desc.startswith("singleton"):
sizes.append(1)
continue
# consensus_for_cluster_0 with 63 sequences
name, w, size, seqs = desc.split()
if csv:
print >> csv, "\t".join(str(x) for x in (name, size, len(rec)))
assert w == "with"
sizes.append(int(size))
if csv:
csv.close()
logging.debug("File written to `{0}`".format(opts.csv))
s = SummaryStats(sizes)
print >> sys.stderr, s
stem_leaf_plot(s.data, 0, 100, 20, title="Cluster size")
def novo(args):
"""
%prog novo reads.fastq
Reference-free tGBS pipeline.
"""
from jcvi.assembly.kmer import jellyfish, histogram
from jcvi.assembly.preprocess import diginorm
from jcvi.formats.fasta import filter as fasta_filter, format
from jcvi.apps.cdhit import filter as cdhit_filter
p = OptionParser(novo.__doc__)
p.add_option("--technology", choices=("illumina", "454", "iontorrent"),
default="iontorrent", help="Sequencing platform")
p.add_option("--dedup", choices=("uclust", "cdhit"),
default="cdhit", help="Dedup algorithm")
p.set_depth(depth=50)
p.set_align(pctid=96)
p.set_home("cdhit", default="/usr/local/bin/")
p.set_home("fiona", default="/usr/local/bin/")
p.set_home("jellyfish", default="/usr/local/bin/")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
cpus = opts.cpus
depth = opts.depth
pf, sf = fastqfile.rsplit(".", 1)
diginormfile = pf + ".diginorm." + sf
if need_update(fastqfile, diginormfile):
diginorm([fastqfile, "--single", "--depth={0}".format(depth)])
keepabund = fastqfile + ".keep.abundfilt"
sh("cp -s {0} {1}".format(keepabund, diginormfile))
jf = pf + "-K23.histogram"
if need_update(diginormfile, jf):
jellyfish([diginormfile, "--prefix={0}".format(pf),
"--cpus={0}".format(cpus),
"--jellyfish_home={0}".format(opts.jellyfish_home)])
genomesize = histogram([jf, pf, "23"])
fiona = pf + ".fiona.fa"
if need_update(diginormfile, fiona):
cmd = op.join(opts.fiona_home, "fiona")
cmd += " -g {0} -nt {1} --sequencing-technology {2}".\
format(genomesize, cpus, opts.technology)
cmd += " -vv {0} {1}".format(diginormfile, fiona)
logfile = pf + ".fiona.log"
sh(cmd, outfile=logfile, errfile=logfile)
dedup = opts.dedup
pctid = opts.pctid
cons = fiona + ".P{0}.{1}.consensus.fasta".format(pctid, dedup)
if need_update(fiona, cons):
if dedup == "cdhit":
deduplicate([fiona, "--consensus", "--reads",
"--pctid={0}".format(pctid),
"--cdhit_home={0}".format(opts.cdhit_home)])
else:
uclust([fiona, "--pctid={0}".format(pctid)])
filteredfile = pf + ".filtered.fasta"
if need_update(cons, filteredfile):
covfile = pf + ".cov.fasta"
cdhit_filter([cons, "--outfile={0}".format(covfile),
"--minsize={0}".format(depth / 5)])
fasta_filter([covfile, "50", "--outfile={0}".format(filteredfile)])
finalfile = pf + ".final.fasta"
if need_update(filteredfile, finalfile):
format([filteredfile, finalfile, "--sequential=replace",
"--prefix={0}_".format(pf)])
def bam(args):
"""
%prog snp input.gsnap ref.fasta
Convert GSNAP output to BAM.
"""
from jcvi.formats.sizes import Sizes
from jcvi.formats.sam import index
p = OptionParser(bam.__doc__)
p.set_home("eddyyeh")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
gsnapfile, fastafile = args
EYHOME = opts.eddyyeh_home
pf = gsnapfile.rsplit(".", 1)[0]
uniqsam = pf + ".unique.sam"
if need_update((gsnapfile, fastafile), uniqsam):
cmd = op.join(EYHOME, "gsnap2gff3.pl")
sizesfile = Sizes(fastafile).filename
cmd += " --format sam -i {0} -o {1}".format(gsnapfile, uniqsam)
cmd += " -u -l {0} -p {1}".format(sizesfile, opts.cpus)
sh(cmd)
index([uniqsam])
def snp(args):
"""
%prog snp reference.fasta
Run SNP calling on GSNAP native output after apps.gsnap.align --snp. Files
*native.gz in the current folder will be used as input.
"""
p = OptionParser(snp.__doc__)
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
ref, = args
runfile = "align.sh"
write_file(runfile, alignsh.format(opts.cpus, ref))
if __name__ == '__main__':
main()
|
sgordon007/jcvi_062915
|
projects/tgbs.py
|
Python
|
bsd-2-clause
| 16,624
|
[
"pysam"
] |
78721f800f700801332ab0c1574bff74b96a899282f91a06df5c659e6b08e117
|
"""Tests for core.groups.requires decorator
"""
import numpy as np
from numpy.testing import (
assert_,
assert_raises,
)
from MDAnalysis.core.groups import requires
from MDAnalysis import NoDataError
from MDAnalysisTests import make_Universe
class TestRequires(object):
def test_requires_failure_singular(self):
@requires('masses')
def mass_multiplier(ag1, ag2, scalar):
return (ag1.masses + ag2.masses) * scalar
u = make_Universe(('charges',))
assert_raises(NoDataError, mass_multiplier, u.atoms[:10], u.atoms[20:30], 4.0)
def test_requires_failure_multiple(self):
@requires('masses', 'charges')
def mass_multiplier(ag1, ag2, scalar):
return (ag1.masses + ag2.charges) * scalar
u = make_Universe(('masses', 'types'))
assert_raises(NoDataError, mass_multiplier, u.atoms[:10], u.atoms[20:30], 4.0)
def test_requires_success(self):
@requires('masses')
def mass_multiplier(ag1, ag2, scalar):
return (ag1.masses + ag2.masses) * scalar
u = make_Universe(('masses',))
result = mass_multiplier(u.atoms[:10], u.atoms[20:30], 4.0)
assert_(isinstance(result, np.ndarray))
def test_failure_errormessage(self):
        # failures should list all required attributes, not
# just the first one
@requires('cats', 'dogs', 'frogs')
def animal_print(ag):
return len(ag.cats), len(ag.dogs), len(ag.frogs)
u = make_Universe()
try:
animal_print(u.atoms)
except NoDataError as e:
message = e.args[0]
# Test function name gets returned (for debug)
assert_('animal_print' in message)
assert_('cats' in message)
assert_('dogs' in message)
assert_('frogs' in message)
else:
raise AssertionError("Should raise NoDataError")
|
alejob/mdanalysis
|
testsuite/MDAnalysisTests/core/test_requires.py
|
Python
|
gpl-2.0
| 1,949
|
[
"MDAnalysis"
] |
3ba5c14abec2ff824eb71fdf2dd2eb72da08af244dfe6d304bec32b7e19d5dab
|
from multiprocessing import Pool
import itertools
import time
import numpy as np
import os
import glob
import mdtraj as md
import fahmunge
import pandas as pd
import signal
import sys
# Reads in a list of project details from a CSV file with Core17/18 FAH projects and munges them.
projects = pd.read_csv("./projects.csv", index_col=0)
output_path = "/data/choderalab/fah/munged/"
num_processes = 10
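# Expected projects.csv layout, inferred from the usage below (index_col=0 and
# itertuples() rows unpacked as project, location, pdb); the column names and
# paths here are purely illustrative:
#
#     project,location,pdb
#     10466,/data/choderalab/fah/PROJ10466/,/data/choderalab/fah/PROJ10466/system.pdb
#     10467,/data/choderalab/fah/PROJ10467/,/data/choderalab/fah/PROJ10467/system.pdb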
def init_work():
signal.signal(signal.SIGINT, signal.SIG_IGN)
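# Worker processes ignore SIGINT (via the initializer above), so Ctrl-C is
# delivered only to the parent, which then terminates the pool in the
# KeyboardInterrupt handler of the main loop below (an inference from how
# init_work and the except block interact).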
def munge(inputs):
project, location, pdb = inputs
    print project, location, pdb
allatom_output_path = os.path.join(output_path, "all-atoms/", "%s/" % project)
protein_output_path = os.path.join(output_path, "no-solvent/", "%s/" % project)
fahmunge.automation.make_path(allatom_output_path)
fahmunge.automation.make_path(protein_output_path)
fahmunge.automation.merge_fah_trajectories(location, allatom_output_path, pdb)
fahmunge.automation.strip_water(allatom_output_path, protein_output_path)
if __name__ == "__main__":
print "Creating thread pool..."
pool = Pool(num_processes, init_work)
for iteration in itertools.count():
print "Starting asynchronous map operations..."
job = pool.map_async(munge, projects.itertuples())
while(not job.ready()):
try:
print "Sleeping for 10 seconds..."
time.sleep(10)
except KeyboardInterrupt:
print "Caught KeyboardInterrupt, terminating workers"
pool.terminate()
pool.join()
sys.exit(1)
output = job.get()
print output
print("Finished iteration %d, sleeping." % iteration)
time.sleep(3600)
|
steven-albanese/FAHMunge
|
scripts/munge_fah_data_parallel.py
|
Python
|
lgpl-2.1
| 1,726
|
[
"MDTraj"
] |
fe5f065e72b76a126c6321ac587c5875a8158528440638e34bcbc90e9370218e
|
#!/usr/bin/env python
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation
Required: Python 2.1 or later
Recommended: Python 2.3 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "4.2-pre-" + "$Revision: 308 $"[11:14] + "-svn"
__license__ = """Copyright (c) 2002-2008, Mark Pilgrim, All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>",
"Sam Ruby <http://intertwingly.net/>",
"Ade Oshineye <http://blog.oshineye.com/>"]
_debug = 0
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1
# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
# ---------- required modules (should come with any Python distribution) ----------
import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2
try:
from cStringIO import StringIO as _StringIO
except:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except:
gzip = None
try:
import zlib
except:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
from xml.sax.saxutils import escape as _xmlescape
_XML_AVAILABLE = 1
except:
_XML_AVAILABLE = 0
def _xmlescape(data,entities={}):
        data = data.replace('&', '&amp;')
        data = data.replace('>', '&gt;')
        data = data.replace('<', '&lt;')
        # iterate (char, entity) pairs; callers such as strattrs pass a dict
        for char, entity in entities.items():
data = data.replace(char, entity)
return data
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except:
base64 = binascii = None
# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
import cjkcodecs.aliases
except:
pass
try:
import iconv_codec
except:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
if _debug:
import chardet.constants
chardet.constants._debug = 1
except:
chardet = None
# reversible htmlentitydefs mappings for Python 2.2
try:
from htmlentitydefs import name2codepoint, codepoint2name
except:
import htmlentitydefs
name2codepoint={}
codepoint2name={}
for (name,codepoint) in htmlentitydefs.entitydefs.iteritems():
if codepoint.startswith('&#'): codepoint=unichr(int(codepoint[2:-1]))
name2codepoint[name]=ord(codepoint)
codepoint2name[ord(codepoint)]=name
# BeautifulSoup parser used for parsing microformats from embedded HTML content
# http://www.crummy.com/software/BeautifulSoup/
# feedparser is tested with BeautifulSoup 3.0.x, but it might work with the
# older 2.x series. If it doesn't, and you can figure out why, I'll accept a
# patch and modify the compatibility statement accordingly.
try:
import BeautifulSoup
except:
BeautifulSoup = None
# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(\d+|x[0-9a-fA-F]+);')
if sgmllib.endbracket.search(' <').start(0):
class EndBracketHack:
endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
def search(self,string,index=0):
m = self.endbracket.match(string,index)
if m is not None:
return EndBracketMatch(m)
else:
return None
class EndBracketMatch:
def __init__(self,match):
self.match = match
def start(self,n):
return self.match.end(n)
sgmllib.endbracket = EndBracketHack()
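# The EndBracketHack above (a reading of the code, not an upstream comment)
# widens sgmllib's notion of where a start tag ends, so that '<' or '>'
# characters inside quoted attribute values do not terminate the tag
# prematurely.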
SUPPORTED_VERSIONS = {'': 'unknown',
'rss090': 'RSS 0.90',
'rss091n': 'RSS 0.91 (Netscape)',
'rss091u': 'RSS 0.91 (Userland)',
'rss092': 'RSS 0.92',
'rss093': 'RSS 0.93',
'rss094': 'RSS 0.94',
'rss20': 'RSS 2.0',
'rss10': 'RSS 1.0',
'rss': 'RSS (unknown version)',
'atom01': 'Atom 0.1',
'atom02': 'Atom 0.2',
'atom03': 'Atom 0.3',
'atom10': 'Atom 1.0',
'atom': 'Atom (unknown version)',
'cdf': 'CDF',
'hotrss': 'Hot RSS'
}
try:
UserDict = dict
except NameError:
# Python 2.1 does not have dict
from UserDict import UserDict
def dict(aList):
rc = {}
for k, v in aList:
rc[k] = v
return rc
class FeedParserDict(UserDict):
keymap = {'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['subtitle', 'summary'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail'}
def __getitem__(self, key):
if key == 'category':
return UserDict.__getitem__(self, 'tags')[0]['term']
if key == 'enclosures':
norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
return [norel(link) for link in UserDict.__getitem__(self, 'links') if link['rel']=='enclosure']
if key == 'license':
for link in UserDict.__getitem__(self, 'links'):
if link['rel']=='license' and link.has_key('href'):
return link['href']
if key == 'categories':
return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
realkey = self.keymap.get(key, key)
if type(realkey) == types.ListType:
for k in realkey:
if UserDict.has_key(self, k):
return UserDict.__getitem__(self, k)
if UserDict.has_key(self, key):
return UserDict.__getitem__(self, key)
return UserDict.__getitem__(self, realkey)
def __setitem__(self, key, value):
for k in self.keymap.keys():
if key == k:
key = self.keymap[k]
if type(key) == types.ListType:
key = key[0]
return UserDict.__setitem__(self, key, value)
def get(self, key, default=None):
if self.has_key(key):
return self[key]
else:
return default
def setdefault(self, key, value):
if not self.has_key(key):
self[key] = value
return self[key]
def has_key(self, key):
try:
return hasattr(self, key) or UserDict.has_key(self, key)
except AttributeError:
return False
def __getattr__(self, key):
try:
return self.__dict__[key]
except KeyError:
pass
try:
assert not key.startswith('_')
return self.__getitem__(key)
except:
raise AttributeError, "object has no attribute '%s'" % key
def __setattr__(self, key, value):
if key.startswith('_') or key == 'data':
self.__dict__[key] = value
else:
return self.__setitem__(key, value)
def __contains__(self, key):
return self.has_key(key)
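# Small illustration of the keymap aliasing above (a sketch, not part of the
# upstream test suite): legacy keys are transparently mapped to their modern
# names, so reads and writes through either spelling hit the same slot.
#
#     >>> d = FeedParserDict()
#     >>> d['modified'] = 'Mon, 01 Jan 2008 00:00:00 GMT'
#     >>> d['updated'] == d['modified'] == d.updated
#     True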
def zopeCompatibilityHack():
global FeedParserDict
del FeedParserDict
def FeedParserDict(aDict=None):
rc = {}
if aDict:
rc.update(aDict)
return rc
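# zopeCompatibilityHack swaps FeedParserDict for a plain-dict factory; callers
# that cannot use the custom mapping type (the function name suggests Zope)
# may invoke it before parsing. This note is an inference from the code, not
# from upstream documentation.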
_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
global _ebcdic_to_ascii_map
if not _ebcdic_to_ascii_map:
emap = (
0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
)
import string
_ebcdic_to_ascii_map = string.maketrans( \
''.join(map(chr, range(256))), ''.join(map(chr, emap)))
return s.translate(_ebcdic_to_ascii_map)
_cp1252 = {
unichr(128): unichr(8364), # euro sign
unichr(130): unichr(8218), # single low-9 quotation mark
unichr(131): unichr( 402), # latin small letter f with hook
unichr(132): unichr(8222), # double low-9 quotation mark
unichr(133): unichr(8230), # horizontal ellipsis
unichr(134): unichr(8224), # dagger
unichr(135): unichr(8225), # double dagger
unichr(136): unichr( 710), # modifier letter circumflex accent
unichr(137): unichr(8240), # per mille sign
unichr(138): unichr( 352), # latin capital letter s with caron
unichr(139): unichr(8249), # single left-pointing angle quotation mark
unichr(140): unichr( 338), # latin capital ligature oe
unichr(142): unichr( 381), # latin capital letter z with caron
unichr(145): unichr(8216), # left single quotation mark
unichr(146): unichr(8217), # right single quotation mark
unichr(147): unichr(8220), # left double quotation mark
unichr(148): unichr(8221), # right double quotation mark
unichr(149): unichr(8226), # bullet
unichr(150): unichr(8211), # en dash
unichr(151): unichr(8212), # em dash
unichr(152): unichr( 732), # small tilde
unichr(153): unichr(8482), # trade mark sign
unichr(154): unichr( 353), # latin small letter s with caron
unichr(155): unichr(8250), # single right-pointing angle quotation mark
unichr(156): unichr( 339), # latin small ligature oe
unichr(158): unichr( 382), # latin small letter z with caron
unichr(159): unichr( 376)} # latin capital letter y with diaeresis
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
try:
return urlparse.urljoin(base, uri)
except:
uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)])
return urlparse.urljoin(base, uri)
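# _urifixer above collapses stray slashes immediately after the scheme before
# the URI is handed to urlparse; a small illustration (not an upstream
# doctest):
#
#     >>> _urljoin('http://example.org/feed/', 'http:////example.org/entry')
#     'http://example.org/entry'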
class _FeedParserMixin:
namespaces = {'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
#Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
'http://search.yahoo.com/mrss/': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/1999/xlink': 'xlink',
'http://www.w3.org/XML/1998/namespace': 'xml'
}
_matchnamespaces = {}
can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo']
can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
html_types = ['text/html', 'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
if _debug: sys.stderr.write('initializing FeedParser\n')
if not self._matchnamespaces:
for k, v in self.namespaces.items():
self._matchnamespaces[k.lower()] = v
self.feeddata = FeedParserDict() # feed-level data
self.encoding = encoding # character encoding
self.entries = [] # list of entry-level data
self.version = '' # feed type/version, see SUPPORTED_VERSIONS
self.namespacesInUse = {} # dictionary of namespaces defined by the feed
# the following are used internally to track state;
# this is really out of control and should be refactored
self.infeed = 0
self.inentry = 0
self.incontent = 0
self.intextinput = 0
self.inimage = 0
self.inauthor = 0
self.incontributor = 0
self.inpublisher = 0
self.insource = 0
self.sourcedata = FeedParserDict()
self.contentparams = FeedParserDict()
self._summaryKey = None
self.namespacemap = {}
self.elementstack = []
self.basestack = []
self.langstack = []
self.baseuri = baseuri or ''
self.lang = baselang or None
self.svgOK = 0
self.hasTitle = 0
if baselang:
self.feeddata['language'] = baselang.replace('_','-')
def unknown_starttag(self, tag, attrs):
if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
# normalize attrs
attrs = [(k.lower(), v) for k, v in attrs]
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
if type(baseuri) != type(u''):
try:
baseuri = unicode(baseuri, self.encoding)
except:
baseuri = unicode(baseuri, 'iso-8859-1')
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang.replace('_','-')
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
if tag in ['xhtml:div', 'div']: return # typepad does this 10/2007
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
if tag.find(':') <> -1:
prefix, tag = tag.split(':', 1)
namespace = self.namespacesInUse.get(prefix, '')
if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrs.append(('xmlns',namespace))
if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
attrs.append(('xmlns',namespace))
if tag == 'svg': self.svgOK += 1
return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# special hack for better tracking of empty textinput/image elements in illformed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
# Since there's no handler or something has gone wrong we explicitly add the element and its attributes
unknown_tag = prefix + suffix
if len(attrsD) == 0:
                # No attributes, so merge it into the enclosing dictionary
return self.push(unknown_tag, 1)
else:
# Has attributes so create it in its own dictionary
context = self._getContext()
context[unknown_tag] = attrsD
def unknown_endtag(self, tag):
if _debug: sys.stderr.write('end %s\n' % tag)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
if suffix == 'svg' and self.svgOK: self.svgOK -= 1
# call special handler (if defined) or default handler
methodname = '_end_' + prefix + suffix
try:
if self.svgOK: raise AttributeError()
method = getattr(self, methodname)
method()
except AttributeError:
self.pop(prefix + suffix)
# track inline content
if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
# element declared itself as escaped markup, but it isn't really
if tag in ['xhtml:div', 'div']: return # typepad does this 10/2007
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
tag = tag.split(':')[-1]
self.handle_data('</%s>' % tag, escape=0)
# track xml:base and xml:lang going out of scope
if self.basestack:
self.basestack.pop()
if self.basestack and self.basestack[-1]:
self.baseuri = self.basestack[-1]
if self.langstack:
self.langstack.pop()
if self.langstack: # and (self.langstack[-1] is not None):
self.lang = self.langstack[-1]
def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
if not self.elementstack: return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
if not self.elementstack: return
if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
elif ref in self.entities.keys():
text = self.entities[ref]
if text.startswith('&#') and text.endswith(';'):
return self.handle_entityref(text)
else:
try: name2codepoint[ref]
except KeyError: text = '&%s;' % ref
else: text = unichr(name2codepoint[ref]).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack: return
if escape and self.contentparams.get('type') == 'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
def handle_comment(self, text):
# called for each comment, e.g. <!-- insert message here -->
pass
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
pass
def handle_decl(self, text):
pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if _debug: sys.stderr.write('entering parse_declaration\n')
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
# CDATA block began but didn't finish
k = len(self.rawdata)
return k
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
if k >= 0:
return k+1
else:
                # We have an incomplete declaration (no terminating '>').
return k
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text':
contentType = 'text/plain'
elif contentType == 'html':
contentType = 'text/html'
elif contentType == 'xhtml':
contentType = 'application/xhtml+xml'
return contentType
def trackNamespace(self, prefix, uri):
loweruri = uri.lower()
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
self.version = 'rss090'
if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
self.version = 'rss10'
if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
self.version = 'atom10'
if loweruri.find('backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
uri = 'http://backend.userland.com/rss'
loweruri = uri
if self._matchnamespaces.has_key(loweruri):
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or '', uri)
def decodeEntities(self, element, data):
return data
def strattrs(self, attrs):
        return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'&quot;'})) for t in attrs])
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
if not self.elementstack: return
if self.elementstack[-1][0] != element: return
element, expectingText, pieces = self.elementstack.pop()
if self.version == 'atom10' and self.contentparams.get('type','text') == 'application/xhtml+xml':
# remove enclosing child element, but only if it is a <div> and
# only if all the remaining content is nested underneath it.
# This means that the divs would be retained in the following:
# <div>foo</div><div>bar</div>
while pieces and len(pieces)>1 and not pieces[-1].strip():
del pieces[-1]
while pieces and len(pieces)>1 and not pieces[0].strip():
del pieces[0]
if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
depth = 0
for piece in pieces[:-1]:
if piece.startswith('</'):
depth -= 1
if depth == 0: break
elif piece.startswith('<') and not piece.endswith('/>'):
depth += 1
else:
pieces = pieces[1:-1]
output = ''.join(pieces)
if stripWhitespace:
output = output.strip()
if not expectingText: return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
try:
output = base64.decodestring(output)
except binascii.Error:
pass
except binascii.Incomplete:
pass
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
output = self.resolveURI(output)
# decode entities within embedded markup
if not self.contentparams.get('base64', 0):
output = self.decodeEntities(element, output)
if self.lookslikehtml(output):
self.contentparams['type']='text/html'
# remove temporary cruft from contentparams
try:
del self.contentparams['mode']
except KeyError:
pass
try:
del self.contentparams['base64']
except KeyError:
pass
is_htmlish = self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types
# resolve relative URIs within embedded markup
if is_htmlish and RESOLVE_RELATIVE_URIS:
if element in self.can_contain_relative_uris:
output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', 'text/html'))
# parse microformats
# (must do this before sanitizing because some microformats
# rely on elements that we sanitize)
if is_htmlish and element in ['content', 'description', 'summary']:
mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
if mfresults:
for tag in mfresults.get('tags', []):
self._addTag(tag['term'], tag['scheme'], tag['label'])
for enclosure in mfresults.get('enclosures', []):
self._start_enclosure(enclosure)
for xfn in mfresults.get('xfn', []):
self._addXFN(xfn['relationships'], xfn['href'], xfn['name'])
vcard = mfresults.get('vcard')
if vcard:
self._getContext()['vcard'] = vcard
# sanitize embedded markup
if is_htmlish and SANITIZE_HTML:
if element in self.can_contain_dangerous_markup:
output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', 'text/html'))
if self.encoding and type(output) != type(u''):
try:
output = unicode(output, self.encoding)
except:
pass
# address common error where people take data that is already
# utf-8, presume that it is iso-8859-1, and re-encode it.
if self.encoding=='utf-8' and type(output) == type(u''):
try:
output = unicode(output.encode('iso-8859-1'), 'utf-8')
except:
pass
# map win-1252 extensions to the proper code points
if type(output) == type(u''):
output = u''.join([c in _cp1252.keys() and _cp1252[c] or c for c in output])
# categories/tags/keywords/whatever are handled in _end_category
if element == 'category':
return output
if element == 'title' and self.hasTitle:
return output
# store output in appropriate place(s)
if self.inentry and not self.insource:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element].append(contentparams)
elif element == 'link':
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element + '_detail'] = contentparams
elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
context = self._getContext()
if element == 'description':
element = 'subtitle'
context[element] = output
if element == 'link':
context['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
context[element + '_detail'] = contentparams
return output
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
if self.lang: self.lang=self.lang.replace('_','-')
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
# a number of elements in a number of RSS variants are nominally plain
# text, but this is routinely ignored. This is an attempt to detect
# the most common cases. As false positives often result in silent
# data loss, this function errs on the conservative side.
def lookslikehtml(self, str):
if self.version.startswith('atom'): return
if self.contentparams.get('type','text/html') != 'text/plain': return
        # must have a close tag or an entity reference to qualify
if not (re.search(r'</(\w+)>',str) or re.search("&#?\w+;",str)): return
# all tags must be in a restricted subset of valid HTML tags
if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
re.findall(r'</?(\w+)',str)): return
# all entities must have been defined as valid HTML entities
from htmlentitydefs import entitydefs
if filter(lambda e: e not in entitydefs.keys(),
re.findall(r'&(\w+);',str)): return
return 1
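    # Informal examples of the heuristic above (not upstream doctests), for
    # plain-text, non-Atom content: '5 &lt; 6' and 'see <a href="x">this</a>'
    # both look HTML-ish, while 'AT&T' or 'a < b' do not, since the latter
    # contain neither a close tag nor a recognizable entity reference.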
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith('text/'):
return 0
if self.contentparams['type'].endswith('+xml'):
return 0
if self.contentparams['type'].endswith('/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value):
context = self._getContext()
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': 'rss091u',
'0.92': 'rss092',
'0.93': 'rss093',
'0.94': 'rss094'}
#If we're here then this is an RSS feed.
#If we don't have a version or have a version that starts with something
#other than RSS then there's been a mistake. Correct it.
if not self.version or not self.version.startswith('rss'):
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = 'rss20'
else:
self.version = 'rss'
def _start_dlhottitles(self, attrsD):
self.version = 'hotrss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
_start_feedinfo = _start_channel
def _cdf_common(self, attrsD):
if attrsD.has_key('lastmod'):
self._start_modified({})
self.elementstack[-1][-1] = attrsD['lastmod']
self._end_modified()
if attrsD.has_key('href'):
self._start_link({})
self.elementstack[-1][-1] = attrsD['href']
self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': 'atom01',
'0.2': 'atom02',
'0.3': 'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = 'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrsD):
context = self._getContext()
context.setdefault('image', FeedParserDict())
self.inimage = 1
self.hasTitle = 0
self.push('image', 0)
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrsD):
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
self.intextinput = 1
self.hasTitle = 0
self.push('textinput', 0)
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
def _start_author(self, attrsD):
self.inauthor = 1
self.push('author', 1)
_start_managingeditor = _start_author
_start_dc_author = _start_author
_start_dc_creator = _start_author
_start_itunes_author = _start_author
def _end_author(self):
self.pop('author')
self.inauthor = 0
self._sync_author_detail()
_end_managingeditor = _end_author
_end_dc_author = _end_author
_end_dc_creator = _end_author
_end_itunes_author = _end_author
def _start_itunes_owner(self, attrsD):
self.inpublisher = 1
self.push('publisher', 0)
def _end_itunes_owner(self):
self.pop('publisher')
self.inpublisher = 0
self._sync_author_detail('publisher')
def _start_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
def _end_contributor(self):
self.pop('contributor')
self.incontributor = 0
def _start_dc_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('name', 0)
def _end_dc_contributor(self):
self._end_name()
self.incontributor = 0
def _start_name(self, attrsD):
self.push('name', 0)
_start_itunes_name = _start_name
def _end_name(self):
value = self.pop('name')
if self.inpublisher:
self._save_author('name', value, 'publisher')
elif self.inauthor:
self._save_author('name', value)
elif self.incontributor:
self._save_contributor('name', value)
elif self.intextinput:
context = self._getContext()
context['name'] = value
_end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['height'] = value
def _start_url(self, attrsD):
self.push('href', 1)
_start_homepage = _start_url
_start_uri = _start_url
def _end_url(self):
value = self.pop('href')
if self.inauthor:
self._save_author('href', value)
elif self.incontributor:
self._save_contributor('href', value)
_end_homepage = _end_url
_end_uri = _end_url
def _start_email(self, attrsD):
self.push('email', 0)
_start_itunes_email = _start_email
def _end_email(self):
value = self.pop('email')
if self.inpublisher:
self._save_author('email', value, 'publisher')
elif self.inauthor:
self._save_author('email', value)
elif self.incontributor:
self._save_contributor('email', value)
_end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inimage:
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
def _save_author(self, key, value, prefix='author'):
context = self._getContext()
context.setdefault(prefix + '_detail', FeedParserDict())
context[prefix + '_detail'][key] = value
self._sync_author_detail()
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = '%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author: return
emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, '')
author = author.replace('()', '')
                author = author.replace('&lt;&gt;', '')
author = author.replace('<>', '')
author = author.strip()
if author and (author[0] == '('):
author = author[1:]
if author and (author[-1] == ')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, FeedParserDict())
if author:
context['%s_detail' % key]['name'] = author
if email:
context['%s_detail' % key]['email'] = email
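    # Rough sketch of the fallback branch above (illustrative values, not an
    # upstream test): given context['author'] == 'Jane Doe (jane@example.org)',
    # the e-mail address is split out so that author_detail ends up as
    # {'name': 'Jane Doe', 'email': 'jane@example.org'}.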
def _start_subtitle(self, attrsD):
self.pushContent('subtitle', attrsD, 'text/plain', 1)
_start_tagline = _start_subtitle
_start_itunes_subtitle = _start_subtitle
def _end_subtitle(self):
self.popContent('subtitle')
_end_tagline = _end_subtitle
_end_itunes_subtitle = _end_subtitle
def _start_rights(self, attrsD):
self.pushContent('rights', attrsD, 'text/plain', 1)
_start_dc_rights = _start_rights
_start_copyright = _start_rights
def _end_rights(self):
self.popContent('rights')
_end_dc_rights = _end_rights
_end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
self.hasTitle = 0
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
_start_product = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
def _start_dc_language(self, attrsD):
self.push('language', 1)
_start_language = _start_dc_language
def _end_dc_language(self):
self.lang = self.pop('language')
_end_language = _end_dc_language
def _start_dc_publisher(self, attrsD):
self.push('publisher', 1)
_start_webmaster = _start_dc_publisher
def _end_dc_publisher(self):
self.pop('publisher')
self._sync_author_detail('publisher')
_end_webmaster = _end_dc_publisher
def _start_published(self, attrsD):
self.push('published', 1)
_start_dcterms_issued = _start_published
_start_issued = _start_published
def _end_published(self):
value = self.pop('published')
self._save('published_parsed', _parse_date(value))
_end_dcterms_issued = _end_published
_end_issued = _end_published
def _start_updated(self, attrsD):
self.push('updated', 1)
_start_modified = _start_updated
_start_dcterms_modified = _start_updated
_start_pubdate = _start_updated
_start_dc_date = _start_updated
def _end_updated(self):
value = self.pop('updated')
parsed_value = _parse_date(value)
self._save('updated_parsed', parsed_value)
_end_modified = _end_updated
_end_dcterms_modified = _end_updated
_end_pubdate = _end_updated
_end_dc_date = _end_updated
def _start_created(self, attrsD):
self.push('created', 1)
_start_dcterms_created = _start_created
def _end_created(self):
value = self.pop('created')
self._save('created_parsed', _parse_date(value))
_end_dcterms_created = _end_created
def _start_expirationdate(self, attrsD):
self.push('expired', 1)
def _end_expirationdate(self):
self._save('expired_parsed', _parse_date(self.pop('expired')))
def _start_cc_license(self, attrsD):
context = self._getContext()
value = self._getAttribute(attrsD, 'rdf:resource')
attrsD = FeedParserDict()
attrsD['rel']='license'
if value: attrsD['href']=value
context.setdefault('links', []).append(attrsD)
def _start_creativecommons_license(self, attrsD):
self.push('license', 1)
_start_creativeCommons_license = _start_creativecommons_license
def _end_creativecommons_license(self):
value = self.pop('license')
context = self._getContext()
attrsD = FeedParserDict()
attrsD['rel']='license'
if value: attrsD['href']=value
context.setdefault('links', []).append(attrsD)
del context['license']
_end_creativeCommons_license = _end_creativecommons_license
def _addXFN(self, relationships, href, name):
context = self._getContext()
xfn = context.setdefault('xfn', [])
value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name})
if value not in xfn:
xfn.append(value)
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label): return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(value)
def _start_category(self, attrsD):
if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
term = attrsD.get('term')
scheme = attrsD.get('scheme', attrsD.get('domain'))
label = attrsD.get('label')
self._addTag(term, scheme, label)
self.push('category', 1)
_start_dc_subject = _start_category
_start_keywords = _start_category
def _end_itunes_keywords(self):
for term in self.pop('itunes_keywords').split():
self._addTag(term, 'http://www.itunes.com/', None)
def _start_itunes_category(self, attrsD):
self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
self.push('category', 1)
def _end_category(self):
value = self.pop('category')
if not value: return
context = self._getContext()
tags = context['tags']
if value and len(tags) and not tags[-1]['term']:
tags[-1]['term'] = value
else:
self._addTag(value, None, None)
_end_dc_subject = _end_category
_end_keywords = _end_category
_end_itunes_category = _end_category
def _start_cloud(self, attrsD):
self._getContext()['cloud'] = FeedParserDict(attrsD)
def _start_link(self, attrsD):
attrsD.setdefault('rel', 'alternate')
if attrsD['rel'] == 'self':
attrsD.setdefault('type', 'application/atom+xml')
else:
attrsD.setdefault('type', 'text/html')
context = self._getContext()
attrsD = self._itsAnHrefDamnIt(attrsD)
if attrsD.has_key('href'):
attrsD['href'] = self.resolveURI(attrsD['href'])
if attrsD.get('rel')=='enclosure' and not context.get('id'):
context['id'] = attrsD.get('href')
expectingText = self.infeed or self.inentry or self.insource
context.setdefault('links', [])
context['links'].append(FeedParserDict(attrsD))
if attrsD.has_key('href'):
expectingText = 0
if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
context['link'] = attrsD['href']
else:
self.push('link', expectingText)
_start_producturl = _start_link
def _end_link(self):
value = self.pop('link')
context = self._getContext()
_end_producturl = _end_link
def _start_guid(self, attrsD):
self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
self.push('id', 1)
def _end_guid(self):
value = self.pop('id')
self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
if self.guidislink:
# guid acts as link, but only if 'ispermalink' is not present or is 'true',
# and only if the item doesn't already have a link element
self._save('link', value)
def _start_title(self, attrsD):
if self.svgOK: return self.unknown_starttag('title', attrsD.items())
self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
_start_dc_title = _start_title
_start_media_title = _start_title
def _end_title(self):
if self.svgOK: return
value = self.popContent('title')
if not value: return
context = self._getContext()
self.hasTitle = 1
_end_dc_title = _end_title
def _end_media_title(self):
hasTitle = self.hasTitle
self._end_title()
self.hasTitle = hasTitle
def _start_description(self, attrsD):
context = self._getContext()
if context.has_key('summary'):
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
_start_dc_description = _start_description
def _start_abstract(self, attrsD):
self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
def _end_description(self):
if self._summaryKey == 'content':
self._end_content()
else:
value = self.popContent('description')
self._summaryKey = None
_end_abstract = _end_description
_end_dc_description = _end_description
def _start_info(self, attrsD):
self.pushContent('info', attrsD, 'text/plain', 1)
_start_feedburner_browserfriendly = _start_info
def _end_info(self):
self.popContent('info')
_end_feedburner_browserfriendly = _end_info
def _start_generator(self, attrsD):
if attrsD:
attrsD = self._itsAnHrefDamnIt(attrsD)
if attrsD.has_key('href'):
attrsD['href'] = self.resolveURI(attrsD['href'])
self._getContext()['generator_detail'] = FeedParserDict(attrsD)
self.push('generator', 1)
def _end_generator(self):
value = self.pop('generator')
context = self._getContext()
if context.has_key('generator_detail'):
context['generator_detail']['name'] = value
def _start_admin_generatoragent(self, attrsD):
self.push('generator', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('generator')
self._getContext()['generator_detail'] = FeedParserDict({'href': value})
def _start_admin_errorreportsto(self, attrsD):
self.push('errorreportsto', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('errorreportsto')
def _start_summary(self, attrsD):
context = self._getContext()
if context.has_key('summary'):
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self._summaryKey = 'summary'
self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
_start_itunes_summary = _start_summary
def _end_summary(self):
if self._summaryKey == 'content':
self._end_content()
else:
self.popContent(self._summaryKey or 'summary')
self._summaryKey = None
_end_itunes_summary = _end_summary
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
context = self._getContext()
attrsD['rel']='enclosure'
context.setdefault('links', []).append(FeedParserDict(attrsD))
href = attrsD.get('href')
if href and not context.get('id'):
context['id'] = href
def _start_source(self, attrsD):
if 'url' in attrsD:
# This means that we're processing a source element from an RSS 2.0 feed
self.sourcedata['href'] = attrsD[u'url']
self.push('source', 1)
self.insource = 1
self.hasTitle = 0
def _end_source(self):
self.insource = 0
value = self.pop('source')
if value:
self.sourcedata['title'] = value
self._getContext()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
def _start_content(self, attrsD):
self.pushContent('content', attrsD, 'text/plain', 1)
src = attrsD.get('src')
if src:
self.contentparams['src'] = src
self.push('content', 1)
def _start_prodlink(self, attrsD):
self.pushContent('content', attrsD, 'text/html', 1)
def _start_body(self, attrsD):
self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrsD):
self.pushContent('content', attrsD, 'text/html', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
value = self.popContent('content')
if copyToDescription:
self._save('description', value)
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
_end_prodlink = _end_content
def _start_itunes_image(self, attrsD):
self.push('itunes_image', 0)
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
_start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
def _end_itunes_explicit(self):
value = self.pop('itunes_explicit', 0)
self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0
def _start_media_content(self, attrsD):
context = self._getContext()
context.setdefault('media_content', [])
context['media_content'].append(attrsD)
def _start_media_thumbnail(self, attrsD):
context = self._getContext()
context.setdefault('media_thumbnail', [])
self.push('url', 1) # new
context['media_thumbnail'].append(attrsD)
def _end_media_thumbnail(self):
url = self.pop('url')
context = self._getContext()
if url != None and len(url.strip()) != 0:
if not context['media_thumbnail'][-1].has_key('url'):
context['media_thumbnail'][-1]['url'] = url
def _start_media_player(self, attrsD):
self.push('media_player', 0)
self._getContext()['media_player'] = FeedParserDict(attrsD)
def _end_media_player(self):
value = self.pop('media_player')
context = self._getContext()
context['media_player']['content'] = value
if _XML_AVAILABLE:
class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
def __init__(self, baseuri, baselang, encoding):
if _debug: sys.stderr.write('trying StrictFeedParser\n')
xml.sax.handler.ContentHandler.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
self.bozo = 0
self.exc = None
def startPrefixMapping(self, prefix, uri):
self.trackNamespace(prefix, uri)
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if lowernamespace.find('backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
namespace = 'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
localname = str(localname).lower()
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
attrsD = {}
if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrsD['xmlns']=namespace
if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
attrsD['xmlns']=namespace
if prefix:
localname = prefix.lower() + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))
for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
attrlocalname = prefix + ':' + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
self.unknown_starttag(localname, attrsD.items())
def characters(self, text):
self.handle_data(text)
def endElementNS(self, name, qname):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = ''
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if prefix:
localname = prefix + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
localname = str(localname).lower()
self.unknown_endtag(localname)
def error(self, exc):
self.bozo = 1
self.exc = exc
def fatalError(self, exc):
self.error(exc)
raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
special = re.compile('''[<>'"]''')
bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
'img', 'input', 'isindex', 'link', 'meta', 'param']
def __init__(self, encoding, type):
self.encoding = encoding
self.type = type
if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
sgmllib.SGMLParser.__init__(self)
def reset(self):
self.pieces = []
sgmllib.SGMLParser.reset(self)
def _shorttag_replace(self, match):
tag = match.group(1)
if tag in self.elements_no_end_tag:
return '<' + tag + ' />'
else:
return '<' + tag + '></' + tag + '>'
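    # _shorttag_replace is applied from feed() below to normalize XML-style
    # empty tags for sgmllib: a void element such as '<br/>' becomes '<br />',
    # while something like '<span/>' is expanded to '<span></span>' (informal
    # examples, not upstream doctests).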
def parse_starttag(self,i):
j=sgmllib.SGMLParser.parse_starttag(self, i)
if self.type == 'application/xhtml+xml':
if j>2 and self.rawdata[j-2:j]=='/>':
self.unknown_endtag(self.lasttag)
return j
def feed(self, data):
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
#data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
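        # Sketch of what the substitution above does (assuming sgmllib-era parsing):
        # self-closing tags are normalized before feeding, e.g.
        #   '<br/>'   -> '<br />'          (void element, kept self-closing)
        #   '<span/>' -> '<span></span>'   (non-void element, expanded)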
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
if self.encoding and type(data) == type(u''):
data = data.encode(self.encoding)
sgmllib.SGMLParser.feed(self, data)
sgmllib.SGMLParser.close(self)
def normalize_attrs(self, attrs):
if not attrs: return attrs
# utility method to be called by descendants
attrs = dict([(k.lower(), v) for k, v in attrs]).items()
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
attrs.sort()
return attrs
def unknown_starttag(self, tag, attrs):
# called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
uattrs = []
strattrs=''
if attrs:
for key, value in attrs:
                value=value.replace('>','&gt;').replace('<','&lt;').replace('"','&quot;')
                value = self.bare_ampersand.sub("&amp;", value)
# thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
if type(value) != type(u''):
try:
value = unicode(value, self.encoding)
except:
value = unicode(value, 'iso-8859-1')
uattrs.append((unicode(key, self.encoding), value))
strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
if self.encoding:
try:
strattrs=strattrs.encode(self.encoding)
except:
pass
if tag in self.elements_no_end_tag:
self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
else:
self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
def unknown_endtag(self, tag):
# called for each end tag, e.g. for </pre>, tag will be 'pre'
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
self.pieces.append("</%(tag)s>" % locals())
def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
# Reconstruct the original character reference.
if ref.startswith('x'):
value = unichr(int(ref[1:],16))
else:
value = unichr(int(ref))
if value in _cp1252.keys():
self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
else:
self.pieces.append('&#%(ref)s;' % locals())
def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
# Reconstruct the original entity reference.
if name2codepoint.has_key(ref):
self.pieces.append('&%(ref)s;' % locals())
else:
            self.pieces.append('&amp;%(ref)s' % locals())
def handle_data(self, text):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_data, text=%s\n' % text)
self.pieces.append(text)
def handle_comment(self, text):
# called for each HTML comment, e.g. <!-- insert Javascript code here -->
# Reconstruct the original comment.
self.pieces.append('<!--%(text)s-->' % locals())
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
self.pieces.append('<?%(text)s>' % locals())
def handle_decl(self, text):
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
# "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
self.pieces.append('<!%(text)s>' % locals())
_new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = self._new_declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.handle_data(rawdata)
# self.updatepos(declstartpos, i)
return None, -1
def convert_charref(self, name):
return '&#%s;' % name
def convert_entityref(self, name):
return '&%s;' % name
def output(self):
'''Return processed HTML as a single string'''
return ''.join([str(p) for p in self.pieces])
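# Rough usage sketch for _BaseHTMLProcessor (illustrative only; the exact output
# depends on sgmllib's tokenization):
#   p = _BaseHTMLProcessor('utf-8', 'text/html')
#   p.feed("<p class='screen'>hello &amp; goodbye</p>")
#   p.output()  # -> '<p class="screen">hello &amp; goodbye</p>'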
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
def __init__(self, baseuri, baselang, encoding, entities):
sgmllib.SGMLParser.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
_BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
self.entities=entities
def decodeEntities(self, element, data):
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
return data
def strattrs(self, attrs):
        return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
class _MicroformatsParser:
STRING = 1
DATE = 2
URI = 3
NODE = 4
EMAIL = 5
known_xfn_relationships = ['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me']
known_binary_extensions = ['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv']
def __init__(self, data, baseuri, encoding):
self.document = BeautifulSoup.BeautifulSoup(data)
self.baseuri = baseuri
self.encoding = encoding
if type(data) == type(u''):
data = data.encode(encoding)
self.tags = []
self.enclosures = []
self.xfn = []
self.vcard = None
def vcardEscape(self, s):
if type(s) in (type(''), type(u'')):
s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n')
return s
def vcardFold(self, s):
s = re.sub(';+$', '', s)
sFolded = ''
iMax = 75
sPrefix = ''
while len(s) > iMax:
sFolded += sPrefix + s[:iMax] + '\n'
s = s[iMax:]
sPrefix = ' '
iMax = 74
sFolded += sPrefix + s
return sFolded
def normalize(self, s):
return re.sub(r'\s+', ' ', s).strip()
def unique(self, aList):
results = []
for element in aList:
if element not in results:
results.append(element)
return results
def toISO8601(self, dt):
return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt)
def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0):
all = lambda x: 1
sProperty = sProperty.lower()
bFound = 0
bNormalize = 1
propertyMatch = {'class': re.compile(r'\b%s\b' % sProperty)}
if bAllowMultiple and (iPropertyType != self.NODE):
snapResults = []
containers = elmRoot(['ul', 'ol'], propertyMatch)
for container in containers:
snapResults.extend(container('li'))
bFound = (len(snapResults) != 0)
if not bFound:
snapResults = elmRoot(all, propertyMatch)
bFound = (len(snapResults) != 0)
if (not bFound) and (sProperty == 'value'):
snapResults = elmRoot('pre')
bFound = (len(snapResults) != 0)
bNormalize = not bFound
if not bFound:
snapResults = [elmRoot]
bFound = (len(snapResults) != 0)
arFilter = []
if sProperty == 'vcard':
snapFilter = elmRoot(all, propertyMatch)
for node in snapFilter:
if node.findParent(all, propertyMatch):
arFilter.append(node)
arResults = []
for node in snapResults:
if node not in arFilter:
arResults.append(node)
bFound = (len(arResults) != 0)
if not bFound:
if bAllowMultiple: return []
elif iPropertyType == self.STRING: return ''
elif iPropertyType == self.DATE: return None
elif iPropertyType == self.URI: return ''
elif iPropertyType == self.NODE: return None
else: return None
arValues = []
for elmResult in arResults:
sValue = None
if iPropertyType == self.NODE:
if bAllowMultiple:
arValues.append(elmResult)
continue
else:
return elmResult
sNodeName = elmResult.name.lower()
if (iPropertyType == self.EMAIL) and (sNodeName == 'a'):
sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0]
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (sNodeName == 'abbr'):
sValue = elmResult.get('title')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (iPropertyType == self.URI):
if sNodeName == 'a': sValue = elmResult.get('href')
elif sNodeName == 'img': sValue = elmResult.get('src')
elif sNodeName == 'object': sValue = elmResult.get('data')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (sNodeName == 'img'):
sValue = elmResult.get('alt')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if not sValue:
sValue = elmResult.renderContents()
sValue = re.sub(r'<\S[^>]*>', '', sValue)
sValue = sValue.replace('\r\n', '\n')
sValue = sValue.replace('\r', '\n')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if not sValue: continue
if iPropertyType == self.DATE:
sValue = _parse_date_iso8601(sValue)
if bAllowMultiple:
arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue)
else:
return bAutoEscape and self.vcardEscape(sValue) or sValue
return arValues
def findVCards(self, elmRoot, bAgentParsing=0):
sVCards = ''
if not bAgentParsing:
arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1)
else:
arCards = [elmRoot]
for elmCard in arCards:
arLines = []
def processSingleString(sProperty):
sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1)
if sValue:
arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue))
return sValue or ''
def processSingleURI(sProperty):
sValue = self.getPropertyValue(elmCard, sProperty, self.URI)
if sValue:
sContentType = ''
sEncoding = ''
sValueKey = ''
if sValue.startswith('data:'):
sEncoding = ';ENCODING=b'
sContentType = sValue.split(';')[0].split('/').pop()
sValue = sValue.split(',', 1).pop()
else:
elmValue = self.getPropertyValue(elmCard, sProperty)
if elmValue:
if sProperty != 'url':
sValueKey = ';VALUE=uri'
sContentType = elmValue.get('type', '').strip().split('/').pop().strip()
sContentType = sContentType.upper()
if sContentType == 'OCTET-STREAM':
sContentType = ''
if sContentType:
sContentType = ';TYPE=' + sContentType.upper()
arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue))
def processTypeValue(sProperty, arDefaultType, arForceType=None):
arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1)
for elmResult in arResults:
arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1)
if arForceType:
arType = self.unique(arForceType + arType)
if not arType:
arType = arDefaultType
sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0)
if sValue:
arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue))
# AGENT
# must do this before all other properties because it is destructive
# (removes nested class="vcard" nodes so they don't interfere with
# this vcard's other properties)
arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1)
for elmAgent in arAgent:
if re.compile(r'\bvcard\b').search(elmAgent.get('class')):
sAgentValue = self.findVCards(elmAgent, 1) + '\n'
sAgentValue = sAgentValue.replace('\n', '\\n')
sAgentValue = sAgentValue.replace(';', '\\;')
if sAgentValue:
arLines.append(self.vcardFold('AGENT:' + sAgentValue))
elmAgent['class'] = ''
elmAgent.contents = []
else:
sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1);
if sAgentValue:
arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue))
# FN (full name)
sFN = processSingleString('fn')
# N (name)
elmName = self.getPropertyValue(elmCard, 'n')
if elmName:
sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1)
sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1)
arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1)
arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1)
arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1)
arLines.append(self.vcardFold('N:' + sFamilyName + ';' +
sGivenName + ';' +
','.join(arAdditionalNames) + ';' +
','.join(arHonorificPrefixes) + ';' +
','.join(arHonorificSuffixes)))
elif sFN:
# implied "N" optimization
# http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization
arNames = self.normalize(sFN).split()
if len(arNames) == 2:
bFamilyNameFirst = (arNames[0].endswith(',') or
len(arNames[1]) == 1 or
((len(arNames[1]) == 2) and (arNames[1].endswith('.'))))
if bFamilyNameFirst:
arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1]))
else:
arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0]))
# SORT-STRING
sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1)
if sSortString:
arLines.append(self.vcardFold('SORT-STRING:' + sSortString))
# NICKNAME
arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1)
if arNickname:
arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname)))
# PHOTO
processSingleURI('photo')
# BDAY
dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE)
if dtBday:
arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday)))
# ADR (address)
arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1)
for elmAdr in arAdr:
arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1)
if not arType:
arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1
sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1)
sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1)
sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1)
sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1)
sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1)
sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1)
sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1)
arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' +
sPostOfficeBox + ';' +
sExtendedAddress + ';' +
sStreetAddress + ';' +
sLocality + ';' +
sRegion + ';' +
sPostalCode + ';' +
sCountryName))
# LABEL
processTypeValue('label', ['intl','postal','parcel','work'])
# TEL (phone number)
processTypeValue('tel', ['voice'])
# EMAIL
processTypeValue('email', ['internet'], ['internet'])
# MAILER
processSingleString('mailer')
# TZ (timezone)
processSingleString('tz')
# GEO (geographical information)
elmGeo = self.getPropertyValue(elmCard, 'geo')
if elmGeo:
sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1)
sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1)
arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude))
# TITLE
processSingleString('title')
# ROLE
processSingleString('role')
# LOGO
processSingleURI('logo')
# ORG (organization)
elmOrg = self.getPropertyValue(elmCard, 'org')
if elmOrg:
sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1)
if not sOrganizationName:
# implied "organization-name" optimization
# http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization
sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1)
if sOrganizationName:
arLines.append(self.vcardFold('ORG:' + sOrganizationName))
else:
arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1)
arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit)))
# CATEGORY
arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1)
if arCategory:
arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory)))
# NOTE
processSingleString('note')
# REV
processSingleString('rev')
# SOUND
processSingleURI('sound')
# UID
processSingleString('uid')
# URL
processSingleURI('url')
# CLASS
processSingleString('class')
# KEY
processSingleURI('key')
if arLines:
arLines = ['BEGIN:vCard','VERSION:3.0'] + arLines + ['END:vCard']
sVCards += '\n'.join(arLines) + '\n'
return sVCards.strip()
def isProbablyDownloadable(self, elm):
attrsD = elm.attrMap
if not attrsD.has_key('href'): return 0
linktype = attrsD.get('type', '').strip()
if linktype.startswith('audio/') or \
linktype.startswith('video/') or \
(linktype.startswith('application/') and not linktype.endswith('xml')):
return 1
path = urlparse.urlparse(attrsD['href'])[2]
if path.find('.') == -1: return 0
fileext = path.split('.').pop().lower()
return fileext in self.known_binary_extensions
def findTags(self):
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile(r'\btag\b')}):
href = elm.get('href')
if not href: continue
urlscheme, domain, path, params, query, fragment = \
urlparse.urlparse(_urljoin(self.baseuri, href))
segments = path.split('/')
tag = segments.pop()
if not tag:
tag = segments.pop()
tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', ''))
if not tagscheme.endswith('/'):
tagscheme += '/'
self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''}))
def findEnclosures(self):
all = lambda x: 1
enclosure_match = re.compile(r'\benclosure\b')
for elm in self.document(all, {'href': re.compile(r'.+')}):
if not enclosure_match.search(elm.get('rel', '')) and not self.isProbablyDownloadable(elm): continue
if elm.attrMap not in self.enclosures:
self.enclosures.append(elm.attrMap)
if elm.string and not elm.get('title'):
self.enclosures[-1]['title'] = elm.string
def findXFN(self):
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}):
rels = elm.get('rel', '').split()
xfn_rels = []
for rel in rels:
if rel in self.known_xfn_relationships:
xfn_rels.append(rel)
if xfn_rels:
self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string})
def _parseMicroformats(htmlSource, baseURI, encoding):
if not BeautifulSoup: return
if _debug: sys.stderr.write('entering _parseMicroformats\n')
p = _MicroformatsParser(htmlSource, baseURI, encoding)
p.vcard = p.findVCards(p.document)
p.findTags()
p.findEnclosures()
p.findXFN()
return {"tags": p.tags, "enclosures": p.enclosures, "xfn": p.xfn, "vcard": p.vcard}
class _RelativeURIResolver(_BaseHTMLProcessor):
relative_uris = [('a', 'href'),
('applet', 'codebase'),
('area', 'href'),
('blockquote', 'cite'),
('body', 'background'),
('del', 'cite'),
('form', 'action'),
('frame', 'longdesc'),
('frame', 'src'),
('iframe', 'longdesc'),
('iframe', 'src'),
('head', 'profile'),
('img', 'longdesc'),
('img', 'src'),
('img', 'usemap'),
('input', 'src'),
('input', 'usemap'),
('ins', 'cite'),
('link', 'href'),
('object', 'classid'),
('object', 'codebase'),
('object', 'data'),
('object', 'usemap'),
('q', 'cite'),
('script', 'src')]
def __init__(self, baseuri, encoding, type):
_BaseHTMLProcessor.__init__(self, encoding, type)
self.baseuri = baseuri
def resolveURI(self, uri):
return _urljoin(self.baseuri, uri.strip())
def unknown_starttag(self, tag, attrs):
if _debug:
sys.stderr.write('tag: [%s] with attributes: [%s]\n' % (tag, str(attrs)))
attrs = self.normalize_attrs(attrs)
attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, type):
if _debug:
sys.stderr.write('entering _resolveRelativeURIs\n')
p = _RelativeURIResolver(baseURI, encoding, type)
p.feed(htmlSource)
return p.output()
class _HTMLSanitizer(_BaseHTMLProcessor):
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'article',
'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button', 'canvas',
'caption', 'center', 'cite', 'code', 'col', 'colgroup', 'command',
'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn', 'dialog', 'dir',
'div', 'dl', 'dt', 'em', 'event-source', 'fieldset', 'figure', 'footer',
'font', 'form', 'header', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i',
'img', 'input', 'ins', 'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map',
'menu', 'meter', 'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup',
'option', 'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong', 'sub',
'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot', 'th', 'thead',
'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data',
'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay',
'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
'prompt', 'pqg', 'radiogroup', 'readonly', 'rel', 'repeat-max',
'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows',
'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src',
'start', 'step', 'summary', 'suppress', 'tabindex', 'target', 'template',
'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign',
'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap',
'xml:lang']
unacceptable_elements_with_end_tag = ['script', 'applet', 'style']
acceptable_css_properties = ['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width']
# survey of common keywords found in feeds
acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow']
valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
'\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
mathml_elements = ['annotation', 'annotation-xml', 'maction', 'math',
'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none', 'semantics']
mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
'columnalign', 'close', 'columnlines', 'columnspacing', 'columnspan', 'depth',
'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows',
'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness',
'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant',
'maxsize', 'minsize', 'open', 'other', 'rowalign', 'rowalign', 'rowalign',
'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
'separator', 'separators', 'stretchy', 'width', 'width', 'xlink:href',
'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink']
# svgtiny - foreignObject + linearGradient + radialGradient + stop
svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
'svg', 'switch', 'text', 'title', 'tspan', 'use']
# svgtiny + class + opacity + offset + xmlns + xmlns:xlink
svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
'min', 'name', 'offset', 'opacity', 'orient', 'origin',
'overline-position', 'overline-thickness', 'panose-1', 'path',
'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv',
'stop-color', 'stop-opacity', 'strikethrough-position',
'strikethrough-thickness', 'stroke', 'stroke-dasharray',
'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage',
'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2',
'underline-position', 'underline-thickness', 'unicode', 'unicode-range',
'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width',
'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
'y2', 'zoomAndPan']
svg_attr_map = None
svg_elem_map = None
acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity']
def reset(self):
_BaseHTMLProcessor.reset(self)
self.unacceptablestack = 0
self.mathmlOK = 0
self.svgOK = 0
def unknown_starttag(self, tag, attrs):
acceptable_attributes = self.acceptable_attributes
keymap = {}
if not tag in self.acceptable_elements or self.svgOK:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
# not otherwise acceptable, perhaps it is MathML or SVG?
if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
self.mathmlOK += 1
if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
self.svgOK += 1
        # choose acceptable attributes based on tag class, else bail
if self.mathmlOK and tag in self.mathml_elements:
acceptable_attributes = self.mathml_attributes
elif self.svgOK and tag in self.svg_elements:
# for most vocabularies, lowercasing is a good idea. Many
# svg elements, however, are camel case
if not self.svg_attr_map:
lower=[attr.lower() for attr in self.svg_attributes]
mix=[a for a in self.svg_attributes if a not in lower]
self.svg_attributes = lower
self.svg_attr_map = dict([(a.lower(),a) for a in mix])
lower=[attr.lower() for attr in self.svg_elements]
mix=[a for a in self.svg_elements if a not in lower]
self.svg_elements = lower
self.svg_elem_map = dict([(a.lower(),a) for a in mix])
acceptable_attributes = self.svg_attributes
tag = self.svg_elem_map.get(tag,tag)
keymap = self.svg_attr_map
elif not tag in self.acceptable_elements:
return
# declare xlink namespace, if needed
if self.mathmlOK or self.svgOK:
if filter(lambda (n,v): n.startswith('xlink:'),attrs):
if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
clean_attrs = []
for key, value in self.normalize_attrs(attrs):
if key in acceptable_attributes:
key=keymap.get(key,key)
clean_attrs.append((key,value))
elif key=='style':
clean_value = self.sanitize_style(value)
if clean_value: clean_attrs.append((key,clean_value))
_BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
def unknown_endtag(self, tag):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
if self.mathmlOK and tag in self.mathml_elements:
if tag == 'math' and self.mathmlOK: self.mathmlOK -= 1
elif self.svgOK and tag in self.svg_elements:
tag = self.svg_elem_map.get(tag,tag)
if tag == 'svg' and self.svgOK: self.svgOK -= 1
else:
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
_BaseHTMLProcessor.handle_data(self, text)
def sanitize_style(self, style):
# disallow urls
style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
# gauntlet
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return ''
# This replaced a regexp that used re.match and was prone to pathological back-tracking.
if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip(): return ''
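        # Illustrative effect of the two checks above (a sketch, not exhaustive):
        #   'color: red; background: url(http://evil.example/x.png)'
        # has the url() stripped, survives the character gauntlet, and ends up as
        # 'color: red;' once only whitelisted properties are re-emitted below.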
clean = []
for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
if not value: continue
if prop.lower() in self.acceptable_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
for keyword in value.split():
if not keyword in self.acceptable_css_keywords and \
not self.valid_css_values.match(keyword):
break
else:
clean.append(prop + ': ' + value + ';')
elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
def _sanitizeHTML(htmlSource, encoding, type):
p = _HTMLSanitizer(encoding, type)
p.feed(htmlSource)
data = p.output()
if TIDY_MARKUP:
# loop through list of preferred Tidy interfaces looking for one that's installed,
# then set up a common _tidy function to wrap the interface-specific API.
_tidy = None
for tidy_interface in PREFERRED_TIDY_INTERFACES:
try:
if tidy_interface == "uTidy":
from tidy import parseString as _utidy
def _tidy(data, **kwargs):
return str(_utidy(data, **kwargs))
break
elif tidy_interface == "mxTidy":
from mx.Tidy import Tidy as _mxtidy
def _tidy(data, **kwargs):
nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
return data
break
except:
pass
if _tidy:
utf8 = type(data) == type(u'')
if utf8:
data = data.encode('utf-8')
data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
if utf8:
data = unicode(data, 'utf-8')
if data.count('<body'):
data = data.split('<body', 1)[1]
if data.count('>'):
data = data.split('>', 1)[1]
if data.count('</body'):
data = data.split('</body', 1)[0]
data = data.strip().replace('\r\n', '\n')
return data
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
if ((code / 100) == 3) and (code != 304):
return self.http_error_302(req, fp, code, msg, headers)
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
infourl.status = code
return infourl
def http_error_302(self, req, fp, code, msg, headers):
if headers.dict.has_key('location'):
infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
else:
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
if not hasattr(infourl, 'status'):
infourl.status = code
return infourl
def http_error_301(self, req, fp, code, msg, headers):
if headers.dict.has_key('location'):
infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
else:
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
if not hasattr(infourl, 'status'):
infourl.status = code
return infourl
http_error_300 = http_error_302
http_error_303 = http_error_302
http_error_307 = http_error_302
def http_error_401(self, req, fp, code, msg, headers):
# Check if
# - server requires digest auth, AND
# - we tried (unsuccessfully) with basic auth, AND
# - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
# If all conditions hold, parse authentication information
# out of the Authorization header we sent the first time
# (for the username and password) and the WWW-Authenticate
# header the server sent back (for the realm) and retry
# the request with the appropriate digest auth headers instead.
# This evil genius hack has been brought to you by Aaron Swartz.
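        # Illustrative example of the headers involved (values are made up):
        #   request  'Authorization: Basic dXNlcjpwYXNz' -> ('user', 'pass')
        #   response 'WWW-Authenticate: Digest realm="feeds", nonce="..."'
        #   -> realm 'feeds', which is fed to self.add_password() below.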
host = urlparse.urlparse(req.get_full_url())[1]
try:
assert sys.version.split()[0] >= '2.3.3'
assert base64 != None
user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
self.add_password(realm, host, user, passw)
retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
self.reset_retry_count()
return retry
except:
return self.http_error_default(req, fp, code, msg, headers)
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
"""URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the modified argument is supplied, it can be a tuple of 9 integers
(as returned by gmtime() in the standard Python time module) or a date
string in any format supported by feedparser. Regardless, it MUST
be in GMT (Greenwich Mean Time). It will be reformatted into an
RFC 1123-compliant date and used as the value of an If-Modified-Since
request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
If the referrer argument is supplied, it will be used as the value of a
Referer[sic] request header.
If handlers is supplied, it is a list of handlers used to build a
urllib2 opener.
"""
if hasattr(url_file_stream_or_string, 'read'):
return url_file_stream_or_string
if url_file_stream_or_string == '-':
return sys.stdin
if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
if not agent:
agent = USER_AGENT
# test for inline user:password for basic auth
auth = None
if base64:
urltype, rest = urllib.splittype(url_file_stream_or_string)
realhost, rest = urllib.splithost(rest)
if realhost:
user_passwd, realhost = urllib.splituser(realhost)
if user_passwd:
url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
auth = base64.encodestring(user_passwd).strip()
# iri support
try:
if isinstance(url_file_stream_or_string,unicode):
url_file_stream_or_string = url_file_stream_or_string.encode('idna')
else:
url_file_stream_or_string = url_file_stream_or_string.decode('utf-8').encode('idna')
except:
pass
# try to open with urllib2 (to use optional headers)
request = urllib2.Request(url_file_stream_or_string)
request.add_header('User-Agent', agent)
if etag:
request.add_header('If-None-Match', etag)
if type(modified) == type(''):
modified = _parse_date(modified)
if modified:
# format into an RFC 1123-compliant timestamp. We can't use
# time.strftime() since the %a and %b directives can be affected
# by the current locale, but RFC 2616 states that dates must be
# in English.
short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
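            # e.g. a gmtime() tuple for 2004-01-01 00:00:00 becomes the header
            # value 'Thu, 01 Jan 2004 00:00:00 GMT' (illustrative).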
if referrer:
request.add_header('Referer', referrer)
if gzip and zlib:
request.add_header('Accept-encoding', 'gzip, deflate')
elif gzip:
request.add_header('Accept-encoding', 'gzip')
elif zlib:
request.add_header('Accept-encoding', 'deflate')
else:
request.add_header('Accept-encoding', '')
if auth:
request.add_header('Authorization', 'Basic %s' % auth)
if ACCEPT_HEADER:
request.add_header('Accept', ACCEPT_HEADER)
request.add_header('A-IM', 'feed') # RFC 3229 support
opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers))
opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
try:
return opener.open(request)
finally:
opener.close() # JohnD
# try to open with native open function (if url_file_stream_or_string is a filename)
try:
return open(url_file_stream_or_string)
except:
pass
# treat url_file_stream_or_string as string
return _StringIO(str(url_file_stream_or_string))
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
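# Illustrative custom handler (not registered by this module): anything that
# returns a 9-tuple in GMT, or None on failure, can be plugged in, e.g.
#   def _parse_date_epoch(dateString):
#       try:
#           return time.gmtime(int(dateString))
#       except ValueError:
#           return None
#   registerDateHandler(_parse_date_epoch)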
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
'-YY-?MM', '-OOO', '-YY',
'--MM-?DD', '--MM',
'---DD',
'CC', '']
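# For example (illustrative), the template 'YYYY-?MM-?DD' expands to a regex
# that matches both '2004-02-28' and '20040228', while '-YY-?MM' matches
# abbreviated forms like '-04-02'; listing the longer templates first keeps the
# match greedy, as noted above.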
_iso8601_re = [
tmpl.replace(
'YYYY', r'(?P<year>\d{4})').replace(
'YY', r'(?P<year>\d\d)').replace(
'MM', r'(?P<month>[01]\d)').replace(
'DD', r'(?P<day>[0123]\d)').replace(
'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
'CC', r'(?P<century>\d\d$)')
+ r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+ r'(:(?P<second>\d{2}))?'
+ r'(\.(?P<fracsecond>\d+))?'
+ r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
for tmpl in _iso8601_tmpl]
del tmpl
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex
def _parse_date_iso8601(dateString):
'''Parse a variety of ISO-8601-compatible formats like 20040105'''
m = None
for _iso8601_match in _iso8601_matches:
m = _iso8601_match(dateString)
if m: break
if not m: return
if m.span() == (0, 0): return
params = m.groupdict()
ordinal = params.get('ordinal', 0)
if ordinal:
ordinal = int(ordinal)
else:
ordinal = 0
year = params.get('year', '--')
if not year or year == '--':
year = time.gmtime()[0]
elif len(year) == 2:
# ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
year = 100 * int(time.gmtime()[0] / 100) + int(year)
else:
year = int(year)
month = params.get('month', '-')
if not month or month == '-':
# ordinals are NOT normalized by mktime, we simulate them
# by setting month=1, day=ordinal
if ordinal:
month = 1
else:
month = time.gmtime()[1]
month = int(month)
day = params.get('day', 0)
if not day:
# see above
if ordinal:
day = ordinal
elif params.get('century', 0) or \
params.get('year', 0) or params.get('month', 0):
day = 1
else:
day = time.gmtime()[2]
else:
day = int(day)
# special case of the century - is the first year of the 21st century
# 2000 or 2001 ? The debate goes on...
if 'century' in params.keys():
year = (int(params['century']) - 1) * 100 + 1
# in ISO 8601 most fields are optional
for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
if not params.get(field, None):
params[field] = 0
hour = int(params.get('hour', 0))
minute = int(params.get('minute', 0))
second = int(float(params.get('second', 0)))
# weekday is normalized by mktime(), we can ignore it
weekday = 0
daylight_savings_flag = -1
tm = [year, month, day, hour, minute, second, weekday,
ordinal, daylight_savings_flag]
# ISO 8601 time zone adjustments
tz = params.get('tz')
if tz and tz != 'Z':
if tz[0] == '-':
tm[3] += int(params.get('tzhour', 0))
tm[4] += int(params.get('tzmin', 0))
elif tz[0] == '+':
tm[3] -= int(params.get('tzhour', 0))
tm[4] -= int(params.get('tzmin', 0))
else:
return None
# Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
# which is guaranteed to normalize d/m/y/h/m/s.
# Many implementations have bugs, but we'll pretend they don't.
return time.localtime(time.mktime(tm))
registerDateHandler(_parse_date_iso8601)
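# Illustrative inputs this handler copes with: '2003-12-31T10:14:55Z',
# '2003-12-31T10:14:55-08:00', '20031231', and ordinal dates such as
# '2003-335'; each yields a time 9-tuple (or None if nothing matches).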
# 8-bit date handling routines written by ytrewq1.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
_korean_onblog_date_re = \
re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
(_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
(_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
'''Parse a string according to the OnBlog 8-bit date format'''
m = _korean_onblog_date_re.match(dateString)
if not m: return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
'''Parse a string according to the Nate 8-bit date format'''
m = _korean_nate_date_re.match(dateString)
if not m: return
hour = int(m.group(5))
ampm = m.group(4)
if (ampm == _korean_pm):
hour += 12
hour = str(hour)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
_mssql_date_re = \
re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
'''Parse a string according to the MS SQL date format'''
m = _mssql_date_re.match(dateString)
if not m: return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)
# Unicode strings for Greek date strings
_greek_months = \
{ \
u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = \
{ \
u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
}
_greek_date_format_re = \
re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
'''Parse a string according to a Greek 8-bit date format.'''
m = _greek_date_format_re.match(dateString)
if not m: return
try:
wday = _greek_wdays[m.group(1)]
month = _greek_months[m.group(3)]
except:
return
rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
{'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
'zonediff': m.group(8)}
if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
u'febru\u00e1ri': u'02', # e1 in iso-8859-2
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
u'm\u00e1ujus': u'05', # e1 in iso-8859-2
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
_hungarian_date_format_re = \
re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
'''Parse a string according to a Hungarian 8-bit date format.'''
m = _hungarian_date_format_re.match(dateString)
if not m: return
try:
month = _hungarian_months[m.group(2)]
day = m.group(3)
if len(day) == 1:
day = '0' + day
hour = m.group(4)
if len(hour) == 1:
hour = '0' + hour
except:
return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
{'year': m.group(1), 'month': month, 'day': day,\
'hour': hour, 'minute': m.group(5),\
'zonediff': m.group(6)}
if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
def __extract_date(m):
year = int(m.group('year'))
if year < 100:
year = 100 * int(time.gmtime()[0] / 100) + int(year)
if year < 1000:
return 0, 0, 0
julian = m.group('julian')
if julian:
julian = int(julian)
month = julian / 30 + 1
day = julian % 30 + 1
jday = None
while jday != julian:
t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
jday = time.gmtime(t)[-2]
diff = abs(jday - julian)
if jday > julian:
if diff < day:
day = day - diff
else:
month = month - 1
day = 31
elif jday < julian:
if day + diff < 28:
day = day + diff
else:
month = month + 1
return year, month, day
month = m.group('month')
day = 1
if month is None:
month = 1
else:
month = int(month)
day = m.group('day')
if day:
day = int(day)
else:
day = 1
return year, month, day
def __extract_time(m):
if not m:
return 0, 0, 0
hours = m.group('hours')
if not hours:
return 0, 0, 0
hours = int(hours)
minutes = int(m.group('minutes'))
seconds = m.group('seconds')
if seconds:
seconds = int(seconds)
else:
seconds = 0
return hours, minutes, seconds
def __extract_tzd(m):
'''Return the Time Zone Designator as an offset in seconds from UTC.'''
if not m:
return 0
tzd = m.group('tzd')
if not tzd:
return 0
if tzd == 'Z':
return 0
hours = int(m.group('tzdhours'))
minutes = m.group('tzdminutes')
if minutes:
minutes = int(minutes)
else:
minutes = 0
offset = (hours*60 + minutes) * 60
if tzd[0] == '+':
return -offset
return offset
__date_re = ('(?P<year>\d\d\d\d)'
'(?:(?P<dsep>-|)'
'(?:(?P<julian>\d\d\d)'
'|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
__tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
__tzd_rx = re.compile(__tzd_re)
__time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
'(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
+ __tzd_re)
__datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
__datetime_rx = re.compile(__datetime_re)
m = __datetime_rx.match(dateString)
if (m is None) or (m.group() != dateString): return
gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
if gmt[0] == 0: return
return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(dateString):
'''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
data = dateString.split()
if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
del data[0]
if len(data) == 4:
s = data[3]
i = s.find('+')
if i > 0:
data[3:] = [s[:i], s[i+1:]]
else:
data.append('')
dateString = " ".join(data)
if len(data) < 5:
dateString += ' 00:00:00 GMT'
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
def _parse_date_perforce(aDateString):
"""parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
# Fri, 2006/09/15 08:19:53 EDT
_my_date_pattern = re.compile( \
r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
dow, year, month, day, hour, minute, second, tz = \
_my_date_pattern.search(aDateString).groups()
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz)
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
registerDateHandler(_parse_date_perforce)
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
if not date9tuple: continue
if len(date9tuple) != 9:
if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
raise ValueError
map(int, date9tuple)
return date9tuple
except Exception, e:
if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
pass
return None
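# Illustrative use of the dispatcher above: handlers are tried newest-first, so
# _parse_date('Thu, 01 Jan 2004 19:48:21 GMT') falls through to the RFC 822
# handler and returns a GMT 9-tuple; unparseable input returns None.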
def _getCharacterEncoding(http_headers, xml_data):
'''Get the character encoding of the XML document
http_headers is a dictionary
xml_data is a raw string (not Unicode)
This is so much trickier than it sounds, it's not even funny.
According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
is application/xml, application/*+xml,
application/xml-external-parsed-entity, or application/xml-dtd,
the encoding given in the charset parameter of the HTTP Content-Type
takes precedence over the encoding given in the XML prefix within the
    document, and defaults to 'utf-8' if neither is specified. But, if
the HTTP Content-Type is text/xml, text/*+xml, or
text/xml-external-parsed-entity, the encoding given in the XML prefix
within the document is ALWAYS IGNORED and only the encoding given in
the charset parameter of the HTTP Content-Type header should be
respected, and it defaults to 'us-ascii' if not specified.
Furthermore, discussion on the atom-syntax mailing list with the
author of RFC 3023 leads me to the conclusion that any document
served with a Content-Type of text/* and no charset parameter
must be treated as us-ascii. (We now do this.) And also that it
must always be flagged as non-well-formed. (We now do this too.)
If Content-Type is unspecified (input was local file or non-HTTP source)
or unrecognized (server just got it totally wrong), then go by the
encoding given in the XML prefix of the document and default to
'iso-8859-1' as per the HTTP specification (RFC 2616).
Then, assuming we didn't find a character encoding in the HTTP headers
(and the HTTP Content-type allowed us to look in the body), we need
to sniff the first few bytes of the XML data and try to determine
whether the encoding is ASCII-compatible. Section F of the XML
specification shows the way here:
http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
If the sniffed encoding is not ASCII-compatible, we need to make it
ASCII compatible so that we can sniff further into the XML declaration
to find the encoding attribute, which will tell us the true encoding.
Of course, none of this guarantees that we will be able to parse the
feed in the declared character encoding (assuming it was declared
correctly, which many are not). CJKCodecs and iconv_codec help a lot;
you should definitely install them if you can.
http://cjkpython.i18n.org/
'''
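    # Worked example of the precedence rules above (illustrative): a response
    # served as 'application/atom+xml; charset=iso-8859-1' is decoded as
    # iso-8859-1 even if the XML declaration says utf-8, while plain 'text/xml'
    # with no charset parameter is forced to us-ascii regardless of the
    # declaration.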
def _parseHTTPContentType(content_type):
'''takes HTTP Content-Type header and returns (content type, charset)
If no charset is specified, returns (content type, '')
If no content type is specified, returns ('', '')
Both return parameters are guaranteed to be lowercase strings
'''
content_type = content_type or ''
content_type, params = cgi.parse_header(content_type)
return content_type, params.get('charset', '').replace("'", '')
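    # e.g. _parseHTTPContentType('text/xml; charset=utf-8') -> ('text/xml', 'utf-8')
    # and _parseHTTPContentType(None) -> ('', '') (illustrative).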
sniffed_xml_encoding = ''
xml_encoding = ''
true_encoding = ''
http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type'))
# Must sniff for non-ASCII-compatible character encodings before
# searching for XML declaration. This heuristic is defined in
# section F of the XML specification:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = _ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
# ASCII-compatible
pass
xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
except:
xml_encoding_match = None
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].lower()
if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
acceptable_content_type = 0
application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
if (http_content_type in application_content_types) or \
(http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
acceptable_content_type = 1
true_encoding = http_encoding or xml_encoding or 'utf-8'
elif (http_content_type in text_content_types) or \
(http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
acceptable_content_type = 1
true_encoding = http_encoding or 'us-ascii'
elif http_content_type.startswith('text/'):
true_encoding = http_encoding or 'us-ascii'
elif http_headers and (not http_headers.has_key('content-type')):
true_encoding = xml_encoding or 'iso-8859-1'
else:
true_encoding = xml_encoding or 'utf-8'
# some feeds claim to be gb2312 but are actually gb18030.
# apparently MSIE and Firefox both do the following switch:
if true_encoding.lower() == 'gb2312':
true_encoding = 'gb18030'
return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
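# Illustrative outcomes of the rules above (a sketch, not an exhaustive spec):
#   Content-Type 'application/atom+xml; charset="utf-16"', XML declares iso-8859-1
#     -> true_encoding 'utf-16' (the HTTP charset wins), acceptable_content_type 1
#   Content-Type 'text/plain' with no charset, XML declares utf-8
#     -> true_encoding 'us-ascii' (RFC 3023 rule for text/*), acceptable_content_type 0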
def _toUTF8(data, encoding):
'''Changes an XML data stream on the fly to specify a new encoding
data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
encoding is a string recognized by encodings.aliases
'''
if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-16be':
sys.stderr.write('trying utf-16be instead\n')
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-16le':
sys.stderr.write('trying utf-16le instead\n')
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == '\xef\xbb\xbf':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-8':
sys.stderr.write('trying utf-8 instead\n')
encoding = 'utf-8'
data = data[3:]
elif data[:4] == '\x00\x00\xfe\xff':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-32be':
sys.stderr.write('trying utf-32be instead\n')
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == '\xff\xfe\x00\x00':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-32le':
sys.stderr.write('trying utf-32le instead\n')
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding)
if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
declmatch = re.compile('^<\?xml[^>]*?>')
newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
if declmatch.search(newdata):
newdata = declmatch.sub(newdecl, newdata)
else:
newdata = newdecl + u'\n' + newdata
return newdata.encode('utf-8')
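# Illustrative behaviour (a sketch; input shown as a byte string):
#   _toUTF8('<?xml version="1.0" encoding="iso-8859-1"?><feed/>', 'iso-8859-1')
# returns the same document as UTF-8 bytes with the declaration rewritten to
# <?xml version='1.0' encoding='utf-8'?>.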
def _stripDoctype(data):
'''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document, minus the DOCTYPE
'''
start = re.search('<\w',data)
start = start and start.start() or -1
head,data = data[:start+1], data[start+1:]
entity_pattern = re.compile(r'^\s*<!ENTITY([^>]*?)>', re.MULTILINE)
entity_results=entity_pattern.findall(head)
head = entity_pattern.sub('', head)
doctype_pattern = re.compile(r'^\s*<!DOCTYPE([^>]*?)>', re.MULTILINE)
doctype_results = doctype_pattern.findall(head)
doctype = doctype_results and doctype_results[0] or ''
if doctype.lower().count('netscape'):
version = 'rss091n'
else:
version = None
# only allow in 'safe' inline entity definitions
replacement=''
if len(doctype_results)==1 and entity_results:
safe_pattern=re.compile('\s+(\w+)\s+"(&#\w+;|[^&"]*)"')
safe_entities=filter(lambda e: safe_pattern.match(e),entity_results)
if safe_entities:
replacement='<!DOCTYPE feed [\n <!ENTITY %s>\n]>' % '>\n <!ENTITY '.join(safe_entities)
data = doctype_pattern.sub(replacement, head) + data
return version, data, dict(replacement and safe_pattern.findall(replacement))
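# Illustrative behaviour (a sketch; the DTD URL is only an example):
#   _stripDoctype('<!DOCTYPE rss SYSTEM "http://my.netscape.com/publish/formats/rss-0.91.dtd"><rss/>')
#   -> ('rss091n', '<rss/>', {})
# because the DOCTYPE mentions 'netscape'; any other (or missing) DOCTYPE yields version None.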
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[], header_defaults=None):
'''Parse a feed from a URL, file, stream, or string'''
result = FeedParserDict()
result['feed'] = FeedParserDict()
result['entries'] = []
if _XML_AVAILABLE:
result['bozo'] = 0
if type(handlers) == types.InstanceType:
handlers = [handlers]
try:
f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
data = f.read()
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = None
f = None
# if feed is gzip-compressed, decompress it
if f and data and hasattr(f, 'headers'):
if gzip and f.headers.get('content-encoding', '') == 'gzip':
try:
data = gzip.GzipFile(fileobj=_StringIO(data)).read()
except Exception, e:
# Some feeds claim to be gzipped but they're not, so
# we get garbage. Ideally, we should re-request the
# feed without the 'Accept-encoding: gzip' header,
# but we don't.
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
elif zlib and f.headers.get('content-encoding', '') == 'deflate':
try:
data = zlib.decompress(data, -zlib.MAX_WBITS)
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
# save HTTP headers
if hasattr(f, 'info'):
info = f.info()
etag = info.getheader('ETag')
if etag:
result['etag'] = etag
last_modified = info.getheader('Last-Modified')
if last_modified:
result['modified'] = _parse_date(last_modified)
if hasattr(f, 'url'):
result['href'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
if hasattr(f, 'headers'):
result['headers'] = f.headers.dict
elif header_defaults:
result['headers'] = header_defaults
if hasattr(f, 'close'):
f.close()
# there are four encodings to keep track of:
# - http_encoding is the encoding declared in the Content-Type HTTP header
# - xml_encoding is the encoding declared in the <?xml declaration
# - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
# - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
http_headers = result.get('headers', {})
result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
_getCharacterEncoding(http_headers, data)
if http_headers and (not acceptable_content_type):
if http_headers.has_key('content-type'):
bozo_message = '%s is not an XML media type' % http_headers['content-type']
else:
bozo_message = 'no Content-type specified'
result['bozo'] = 1
result['bozo_exception'] = NonXMLContentType(bozo_message)
if data is not None:
result['version'], data, entities = _stripDoctype(data)
baseuri = http_headers.get('content-location', result.get('href'))
baselang = http_headers.get('content-language', None)
# if server sent 304, we're done
if result.get('status', 0) == 304:
result['version'] = ''
result['debug_message'] = 'The feed has not changed since you last checked, ' + \
'so the server sent no data. This is a feature, not a bug!'
return result
# if there was a problem downloading, we're done
if data is None:
return result
# determine character encoding
use_strict_parser = 0
known_encoding = 0
tried_encodings = []
# try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
if not proposed_encoding: continue
if proposed_encoding in tried_encodings: continue
tried_encodings.append(proposed_encoding)
try:
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
break
except:
pass
# if no luck and we have auto-detection library, try that
if (not known_encoding) and chardet:
try:
proposed_encoding = chardet.detect(data)['encoding']
if proposed_encoding and (proposed_encoding not in tried_encodings):
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck and we haven't tried utf-8 yet, try that
if (not known_encoding) and ('utf-8' not in tried_encodings):
try:
proposed_encoding = 'utf-8'
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck and we haven't tried windows-1252 yet, try that
if (not known_encoding) and ('windows-1252' not in tried_encodings):
try:
proposed_encoding = 'windows-1252'
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck and we haven't tried iso-8859-2 yet, try that.
if (not known_encoding) and ('iso-8859-2' not in tried_encodings):
try:
proposed_encoding = 'iso-8859-2'
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck, give up
if not known_encoding:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingUnknown( \
'document encoding unknown, I tried ' + \
'%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' % \
(result['encoding'], xml_encoding))
result['encoding'] = ''
elif proposed_encoding != result['encoding']:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingOverride( \
'document declared as %s, but parsed as %s' % \
(result['encoding'], proposed_encoding))
result['encoding'] = proposed_encoding
if not _XML_AVAILABLE:
use_strict_parser = 0
if use_strict_parser:
# initialize the SAX parser
feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
saxparser.setContentHandler(feedparser)
saxparser.setErrorHandler(feedparser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(_StringIO(data))
if hasattr(saxparser, '_ns_stack'):
# work around bug in built-in SAX parser (doesn't recognize xml: namespace)
# PyXML doesn't have this problem, and it doesn't have _ns_stack either
saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
try:
saxparser.parse(source)
except Exception, e:
if _debug:
import traceback
traceback.print_stack()
traceback.print_exc()
sys.stderr.write('xml parsing failed\n')
result['bozo'] = 1
result['bozo_exception'] = feedparser.exc or e
use_strict_parser = 0
if not use_strict_parser:
feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '', entities)
feedparser.feed(data)
result['feed'] = feedparser.feeddata
result['entries'] = feedparser.entries
result['version'] = result['version'] or feedparser.version
result['namespaces'] = feedparser.namespacesInUse
return result
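# A minimal usage sketch (the URL is hypothetical; the keys are the ones populated above):
#   d = parse('http://example.org/feed.xml')
#   d['bozo'], d['version'], len(d['entries'])
#   d['feed'].get('title')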
class Serializer:
def __init__(self, results):
self.results = results
class TextSerializer(Serializer):
def write(self, stream=sys.stdout):
self._writer(stream, self.results, '')
def _writer(self, stream, node, prefix):
if not node: return
if hasattr(node, 'keys'):
keys = node.keys()
keys.sort()
for k in keys:
if k in ('description', 'link'): continue
if node.has_key(k + '_detail'): continue
if node.has_key(k + '_parsed'): continue
self._writer(stream, node[k], prefix + k + '.')
elif type(node) == types.ListType:
index = 0
for n in node:
self._writer(stream, n, prefix[:-1] + '[' + str(index) + '].')
index += 1
else:
try:
s = str(node).encode('utf-8')
s = s.replace('\\', '\\\\')
s = s.replace('\r', '')
s = s.replace('\n', r'\n')
stream.write(prefix[:-1])
stream.write('=')
stream.write(s)
stream.write('\n')
except:
pass
class PprintSerializer(Serializer):
def write(self, stream=sys.stdout):
if self.results.has_key('href'):
stream.write(self.results['href'] + '\n\n')
from pprint import pprint
pprint(self.results, stream)
stream.write('\n')
if __name__ == '__main__':
try:
from optparse import OptionParser
except:
OptionParser = None
if OptionParser:
optionParser = OptionParser(version=__version__, usage="%prog [options] url_or_filename_or_-")
optionParser.set_defaults(format="pprint")
optionParser.add_option("-A", "--user-agent", dest="agent", metavar="AGENT", help="User-Agent for HTTP URLs")
optionParser.add_option("-e", "--referer", "--referrer", dest="referrer", metavar="URL", help="Referrer for HTTP URLs")
optionParser.add_option("-t", "--etag", dest="etag", metavar="TAG", help="ETag/If-None-Match for HTTP URLs")
optionParser.add_option("-m", "--last-modified", dest="modified", metavar="DATE", help="Last-modified/If-Modified-Since for HTTP URLs (any supported date format)")
optionParser.add_option("-f", "--format", dest="format", metavar="FORMAT", help="output results in FORMAT (text, pprint)")
optionParser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="write debugging information to stderr")
(options, urls) = optionParser.parse_args()
if options.verbose:
_debug = 1
if not urls:
optionParser.print_help()
sys.exit(0)
else:
if not sys.argv[1:]:
print __doc__
sys.exit(0)
class _Options:
etag = modified = agent = referrer = None
format = 'pprint'
options = _Options()
urls = sys.argv[1:]
zopeCompatibilityHack()
serializer = globals().get(options.format.capitalize() + 'Serializer', Serializer)
for url in urls:
results = parse(url, etag=options.etag, modified=options.modified, agent=options.agent, referrer=options.referrer)
serializer(results).write(sys.stdout)
|
ltucker/radarpost
|
radarpost/lib/feedparser.py
|
Python
|
gpl-2.0
| 157,161
|
[
"NetCDF",
"VisIt"
] |
66af4b1ab3d489a2f68a15c45bf1abdaf315a068f73c2397993cb8bbb3e88801
|
path={0:{1:10},
1:{2:20},
2:{1:30},
3:{0:-60},
}
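# 'path' above is a hard-coded adjacency mapping: node -> {neighbour: edge weight}
# (note the negative edge 3 -> 0 with weight -60).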
def dfs(start,visit):
for i in path[start]:
if str(i) in visit:
print(start,visit)
pathl=0
for x in visit[visit.index(str(i)):]:
pathl+=path[int(x)][start]
if pathl > pathl+path[start][int(visit[-1])]:
print('have')
else:
print('NO')
break
visit+=str(start)
dfs(i,visit)
dfs(0,'')
#print(path)
|
ytlai4851/Uva
|
Python/Q558.py
|
Python
|
gpl-2.0
| 390
|
[
"VisIt"
] |
80c183a7cd44f56cdaf5598e553c439c19bdff6ec6dc4d3c1d2701d5e4893184
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
"""
Tests for Component objects.
"""
import unittest
import os
from MooseDocs import common
from MooseDocs.common import exceptions
from MooseDocs.extensions import command
class TestTranslator(unittest.TestCase):
def setUp(self):
command.CommandExtension.EXTENSION_COMMANDS.clear()
config = os.path.join('..', 'config.yml')
self.translator, _ = common.load_config(config)
self.translator.init()
def testFindPage(self):
page = self.translator.findPage('core.md')
self.assertEqual(page.local, 'extensions/core.md')
def testFindPageError(self):
with self.assertRaises(exceptions.MooseDocsException) as cm:
page = self.translator.findPage('wrong.md')
self.assertIn('Did you mean', cm.exception.message)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
nuclear-wizard/moose
|
python/MooseDocs/test/base/test_translator.py
|
Python
|
lgpl-2.1
| 1,188
|
[
"MOOSE"
] |
7055cfe1e4ee4491f972637b57c55df5c508ae2447102a3e18429f8ed9bfc86d
|
"""
audfprint_analyze.py
Class to do the analysis of wave files into hash constellations.
2014-09-20 Dan Ellis dpwe@ee.columbia.edu
"""
from __future__ import print_function
import os
import numpy as np
import scipy.signal
# For reading/writing hashes to file
import struct
# For glob2hashtable, localtester
import glob
import time
# For utility, glob2hashtable
import hash_table
import librosa
import audio_read
################ Globals ################
# Special extension indicating precomputed fingerprint
PRECOMPEXT = '.afpt'
# A different precomputed fingerprint is just the peaks
PRECOMPPKEXT = '.afpk'
def locmax(vec, indices=False):
""" Return a boolean vector of which points in vec are local maxima.
End points are peaks if larger than single neighbors.
if indices=True, return the indices of the True values instead
of the boolean vector.
"""
# vec[-1]-1 means last value can be a peak
#nbr = np.greater_equal(np.r_[vec, vec[-1]-1], np.r_[vec[0], vec])
# the np.r_ was killing us, so try an optimization...
nbr = np.zeros(len(vec)+1, dtype=bool)
nbr[0] = True
nbr[1:-1] = np.greater_equal(vec[1:], vec[:-1])
maxmask = (nbr[:-1] & ~nbr[1:])
if indices:
return np.nonzero(maxmask)[0]
else:
return maxmask
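# Illustrative example of locmax() (values chosen for illustration):
#   locmax(np.array([1, 3, 2, 5, 4]))               -> [False, True, False, True, False]
#   locmax(np.array([1, 3, 2, 5, 4]), indices=True) -> [1, 3]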
# Constants for Analyzer
# DENSITY controls the density of landmarks found (approx DENSITY per sec)
DENSITY = 20.0
# OVERSAMP > 1 tries to generate extra landmarks by decaying faster
OVERSAMP = 1
## 512 pt FFT @ 11025 Hz, 50% hop
#t_win = 0.0464
#t_hop = 0.0232
# Just specify n_fft
N_FFT = 512
N_HOP = 256
# spectrogram enhancement
HPF_POLE = 0.98
# Globals defining packing of landmarks into hashes
F1_BITS = 8
DF_BITS = 6
DT_BITS = 6
# derived constants
B1_MASK = (1 << F1_BITS) - 1
B1_SHIFT = DF_BITS + DT_BITS
DF_MASK = (1 << DF_BITS) - 1
DF_SHIFT = DT_BITS
DT_MASK = (1 << DT_BITS) - 1
def landmarks2hashes(landmarks):
"""Convert a list of (time, bin1, bin2, dtime) landmarks
into a list of (time, hash) pairs where the hash combines
the three remaining values.
"""
# build up and return the list of hashed values
return [(time_,
(((bin1 & B1_MASK) << B1_SHIFT)
| (((bin2 - bin1) & DF_MASK) << DF_SHIFT)
| (dtime & DT_MASK)))
for time_, bin1, bin2, dtime in landmarks]
def hashes2landmarks(hashes):
"""Convert the mashed-up landmarks in hashes back into a list
of (time, bin1, bin2, dtime) tuples.
"""
landmarks = []
for time_, hash_ in hashes:
dtime = hash_ & DT_MASK
bin1 = (hash_ >> B1_SHIFT) & B1_MASK
dbin = (hash_ >> DF_SHIFT) & DF_MASK
# Sign extend frequency difference
if dbin >= (1 << (DF_BITS-1)):
dbin -= (1 << DF_BITS)
landmarks.append((time_, bin1, bin1+dbin, dtime))
return landmarks
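# Worked example of the packing above: with F1_BITS=8, DF_BITS=6, DT_BITS=6 a
# landmark (time=5, bin1=100, bin2=110, dtime=3) packs to
# hash = (100 << 12) | ((110 - 100) << 6) | 3 = 410243,
# and hashes2landmarks([(5, 410243)]) recovers [(5, 100, 110, 3)].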
class Analyzer(object):
""" A class to wrap up all the parameters associated with
the analysis of soundfiles into fingerprints """
# Parameters
# optimization: cache pre-calculated Gaussian profile
__sp_width = None
__sp_len = None
__sp_vals = []
def __init__(self, density=DENSITY):
self.density = density
self.target_sr = 11025
self.n_fft = N_FFT
self.n_hop = N_HOP
self.shifts = 1
# how wide to spread peaks
self.f_sd = 30.0
# Maximum number of local maxima to keep per frame
self.maxpksperframe = 5
# Limit the num of pairs we'll make from each peak (Fanout)
self.maxpairsperpeak = 3
# Values controlling peaks2landmarks
# +/- 31 bins in freq (LIMITED TO -32..31 IN LANDMARK2HASH)
self.targetdf = 31
# min time separation (traditionally 1, upped 2014-08-04)
self.mindt = 2
# max lookahead in time (LIMITED TO <64 IN LANDMARK2HASH)
self.targetdt = 63
# global stores duration of most recently-read soundfile
self.soundfiledur = 0.0
# .. and total amount of sound processed
self.soundfiletotaldur = 0.0
# .. and count of files
self.soundfilecount = 0
# Control behavior on file reading error
self.fail_on_error = True
def spreadpeaksinvector(self, vector, width=4.0):
""" Create a blurred version of vector, where each of the local maxes
is spread by a gaussian with SD <width>.
"""
npts = len(vector)
peaks = locmax(vector, indices=True)
return self.spreadpeaks(zip(peaks, vector[peaks]),
npoints=npts, width=width)
def spreadpeaks(self, peaks, npoints=None, width=4.0, base=None):
""" Generate a vector consisting of the max of a set of Gaussian bumps
:params:
peaks : list
list of (index, value) pairs giving the center point and height
of each gaussian
npoints : int
the length of the output vector (needed if base not provided)
width : float
the half-width of the Gaussians to lay down at each point
base : np.array
optional initial lower bound to place Gaussians above
:returns:
vector : np.array(npoints)
the maximum across all the scaled Gaussians
"""
if base is None:
vec = np.zeros(npoints)
else:
npoints = len(base)
vec = np.copy(base)
#binvals = np.arange(len(vec))
#for pos, val in peaks:
# vec = np.maximum(vec, val*np.exp(-0.5*(((binvals - pos)
# /float(width))**2)))
if width != self.__sp_width or npoints != self.__sp_len:
# Need to calculate new vector
self.__sp_width = width
self.__sp_len = npoints
self.__sp_vals = np.exp(-0.5*((np.arange(-npoints, npoints+1)
/ float(width))**2))
# Now the actual function
for pos, val in peaks:
vec = np.maximum(vec, val*self.__sp_vals[np.arange(npoints)
+ npoints - pos])
return vec
def _decaying_threshold_fwd_prune(self, sgram, a_dec):
""" forward pass of findpeaks
initial threshold envelope based on peaks in first 10 frames
"""
(srows, scols) = np.shape(sgram)
sthresh = self.spreadpeaksinvector(
np.max(sgram[:, :np.minimum(10, scols)], axis=1), self.f_sd
)
## Store sthresh at each column, for debug
#thr = np.zeros((srows, scols))
peaks = np.zeros((srows, scols))
# optimization of mask update
__sp_pts = len(sthresh)
__sp_v = self.__sp_vals
for col in range(scols):
s_col = sgram[:, col]
# Find local magnitude peaks that are above threshold
sdmaxposs = np.nonzero(locmax(s_col) * (s_col > sthresh))[0]
# Work down list of peaks in order of their absolute value
# above threshold
valspeaks = sorted(zip(s_col[sdmaxposs], sdmaxposs), reverse=True)
for val, peakpos in valspeaks[:self.maxpksperframe]:
# What we actually want
#sthresh = spreadpeaks([(peakpos, s_col[peakpos])],
# base=sthresh, width=f_sd)
# Optimization - inline the core function within spreadpeaks
sthresh = np.maximum(sthresh,
val*__sp_v[(__sp_pts - peakpos):
(2*__sp_pts - peakpos)])
peaks[peakpos, col] = 1
sthresh *= a_dec
return peaks
def _decaying_threshold_bwd_prune_peaks(self, sgram, peaks, a_dec):
""" backwards pass of findpeaks """
scols = np.shape(sgram)[1]
# Backwards filter to prune peaks
sthresh = self.spreadpeaksinvector(sgram[:, -1], self.f_sd)
for col in range(scols, 0, -1):
pkposs = np.nonzero(peaks[:, col-1])[0]
peakvals = sgram[pkposs, col-1]
for val, peakpos in sorted(zip(peakvals, pkposs), reverse=True):
if val >= sthresh[peakpos]:
# Setup the threshold
sthresh = self.spreadpeaks([(peakpos, val)], base=sthresh,
width=self.f_sd)
# Delete any peak at this bin in the following column (the threshold should already have removed it, but make sure)
if col < scols:
peaks[peakpos, col] = 0
else:
# delete the peak
peaks[peakpos, col-1] = 0
sthresh = a_dec*sthresh
return peaks
def find_peaks(self, d, sr):
""" Find the local peaks in the spectrogram as basis for fingerprints.
Returns a list of (time_frame, freq_bin) pairs.
:params:
d - np.array of float
Input waveform as 1D vector
sr - int
Sampling rate of d (not used)
:returns:
pklist - list of (int, int)
Ordered list of landmark peaks found in STFT. First value of
each pair is the time index (in STFT frames, i.e., units of
n_hop/sr secs), second is the FFT bin (in units of sr/n_fft
Hz).
"""
if len(d) == 0:
return []
# masking envelope decay constant
a_dec = (1.0 - 0.01*(self.density*np.sqrt(self.n_hop/352.8)/35.0)) \
**(1.0/OVERSAMP)
# Take spectrogram
mywin = np.hanning(self.n_fft+2)[1:-1]
sgram = np.abs(librosa.stft(d, n_fft=self.n_fft,
hop_length=int(self.n_hop),
window=mywin))
sgrammax = np.max(sgram)
if sgrammax > 0.0:
sgram = np.log(np.maximum(sgram, np.max(sgram)/1e6))
sgram = sgram - np.mean(sgram)
else:
# The sgram is identically zero, i.e., the input signal was identically
# zero. Not good, but let's let it through for now.
print("find_peaks: Warning: input signal is identically zero.")
# High-pass filter onset emphasis
# [:-1,] discards top bin (nyquist) of sgram so bins fit in 8 bits
sgram = np.array([scipy.signal.lfilter([1, -1],
[1, -(HPF_POLE)** \
(1/OVERSAMP)], s_row)
for s_row in sgram])[:-1,]
# Prune to keep only local maxima in spectrum that appear above an online,
# decaying threshold
peaks = self._decaying_threshold_fwd_prune(sgram, a_dec)
# Further prune these peaks working backwards in time, to remove small peaks
# that are closely followed by a large peak
peaks = self._decaying_threshold_bwd_prune_peaks(sgram, peaks, a_dec)
# build a list of peaks we ended up with
scols = np.shape(sgram)[1]
pklist = []
for col in range(scols):
for bin in np.nonzero(peaks[:, col])[0]:
pklist.append( (col, bin) )
return pklist
def peaks2landmarks(self, pklist):
""" Take a list of local peaks in spectrogram
and form them into pairs as landmarks.
pklist is a column-sorted list of (col, bin) pairs as created
by findpeaks().
Return a list of (col, peak, peak2, col2-col) landmark descriptors.
"""
# Form pairs of peaks into landmarks
landmarks = []
if len(pklist) > 0:
# Find column of the final peak in the list
scols = pklist[-1][0] + 1
# Convert (col, bin) list into peaks_at[col] lists
peaks_at = [[] for col in range(scols)]
for (col, bin) in pklist:
peaks_at[col].append(bin)
# Build list of landmarks <starttime F1 endtime F2>
for col in range(scols):
for peak in peaks_at[col]:
pairsthispeak = 0
for col2 in range(col+self.mindt,
min(scols, col+self.targetdt)):
if pairsthispeak < self.maxpairsperpeak:
for peak2 in peaks_at[col2]:
if abs(peak2-peak) < self.targetdf:
#and abs(peak2-peak) + abs(col2-col) > 2 ):
if pairsthispeak < self.maxpairsperpeak:
# We have a pair!
landmarks.append((col, peak,
peak2, col2-col))
pairsthispeak += 1
return landmarks
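# Illustrative example with the default parameters set in __init__
# (mindt=2, targetdt=63, targetdf=31, maxpairsperpeak=3):
#   peaks2landmarks([(0, 10), (3, 12)]) -> [(0, 10, 12, 3)]
# i.e. the peak in frame 0 at bin 10 pairs with the peak in frame 3 at bin 12.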
def wavfile2peaks(self, filename, shifts=None, thread={'interrupted':False}):
""" Read a soundfile and return its landmark peaks as a
list of (time, bin) pairs. If specified, resample to sr first.
shifts > 1 causes hashes to be extracted from multiple shifts of
waveform, to reduce frame effects. """
ext = ''
metadata = None # default so the precomputed-peaks and read-failure paths below do not hit a NameError at the return
if isinstance(filename, basestring):
ext = os.path.splitext(filename)[1]
if ext == PRECOMPPKEXT:
# short-circuit - precomputed fingerprint file
peaks = peaks_load(filename)
dur = np.max(peaks, axis=0)[0]*self.n_hop/float(self.target_sr)
else:
try:
#[d, sr] = librosa.load(filename, sr=self.target_sr)
d, sr, metadata = audio_read.audio_read(filename, sr=self.target_sr, channels=1, thread=thread)
# except: # audioread.NoBackendError:
except Exception as e: # audioread.NoBackendError:
message = "wavfile2peaks: Error reading " + str(filename) + " " + str(e)
if self.fail_on_error:
raise IOError(message)
print(message, "skipping")
d = []
sr = self.target_sr
# Store duration in a global because it's hard to handle
dur = float(len(d))/sr
if shifts is None or shifts < 2:
peaks = self.find_peaks(d, sr)
else:
# Calculate hashes with optional part-frame shifts
peaklists = []
for shift in range(shifts):
shiftsamps = int(float(shift)/self.shifts*self.n_hop)
peaklists.append(self.find_peaks(d[shiftsamps:], sr))
peaks = peaklists
# instrumentation to track total amount of sound processed
self.soundfiledur = dur
self.soundfiletotaldur += dur
self.soundfilecount += 1
return ( peaks, metadata )
def wavfile2hashes(self, filename, thread):
""" Read a soundfile and return its fingerprint hashes as a
list of (time, hash) pairs. If specified, resample to sr first.
shifts > 1 causes hashes to be extracted from multiple shifts of
waveform, to reduce frame effects. """
ext = ''
metadata = None # default so the precomputed-hashes path below does not hit a NameError at the return
if isinstance(filename, basestring):
ext = os.path.splitext(filename)[1]
if ext == PRECOMPEXT:
# short-circuit - precomputed fingerprint file
hashes = hashes_load(filename)
dur = np.max(hashes, axis=0)[0]*self.n_hop/float(self.target_sr)
# instrumentation to track total amount of sound processed
self.soundfiledur = dur
self.soundfiletotaldur += dur
self.soundfilecount += 1
else:
peaks, metadata = self.wavfile2peaks(filename, self.shifts, thread)
if len(peaks) == 0:
return []
# Did we get returned a list of lists of peaks due to shift?
if isinstance(peaks[0], list):
peaklists = peaks
query_hashes = []
for peaklist in peaklists:
query_hashes += landmarks2hashes(
self.peaks2landmarks(peaklist)
)
else:
query_hashes = landmarks2hashes(self.peaks2landmarks(peaks))
# remove duplicate elements by pushing through a set
hashes = sorted(list(set(query_hashes)))
#print("wavfile2hashes: read", len(hashes), "hashes from", filename)
return ( hashes, metadata )
########### functions to link to actual hash table index database #######
def ingest(self, hashtable, filename, thread={'interrupted':False}):
""" Read an audio file and add it to the database
:params:
hashtable : HashTable object
the hash table to add to
filename : str
name of the soundfile to add
:returns:
dur : float
the duration of the track
nhashes : int
the number of hashes it mapped into
"""
#sr = 11025
#print("ingest: sr=",sr)
#d, sr = librosa.load(filename, sr=sr)
# librosa.load on mp3 files prepends 396 samples compared
# to Matlab audioread ??
#hashes = landmarks2hashes(peaks2landmarks(find_peaks(d, sr,
# density=density,
# n_fft=n_fft,
# n_hop=n_hop)))
hashes, metadata = self.wavfile2hashes(filename, thread)
hashtable.store(filename, hashes, metadata)
#return (len(d)/float(sr), len(hashes))
#return (np.max(hashes, axis=0)[0]*n_hop/float(sr), len(hashes))
# soundfiledur is set up in wavfile2hashes, use result here
return self.soundfiledur, len(hashes)
########### functions to read/write hashes to file for a single track #####
# Format string for writing binary data to file
HASH_FMT = '<2i'
HASH_MAGIC = 'audfprinthashV00' # 16 chars, FWIW
PEAK_FMT = '<2i'
PEAK_MAGIC = 'audfprintpeakV00' # 16 chars, FWIW
def hashes_save(hashfilename, hashes):
""" Write out a list of (time, hash) pairs as 32 bit ints """
with open(hashfilename, 'wb') as f:
f.write(HASH_MAGIC)
for time_, hash_ in hashes:
f.write(struct.pack(HASH_FMT, time_, hash_))
def hashes_load(hashfilename):
""" Read back a set of hashes written by hashes_save """
hashes = []
fmtsize = struct.calcsize(HASH_FMT)
with open(hashfilename, 'rb') as f:
magic = f.read(len(HASH_MAGIC))
if magic != HASH_MAGIC:
raise IOError('%s is not a hash file (magic %s)'
% (hashfilename, magic))
data = f.read(fmtsize)
while data is not None and len(data) == fmtsize:
hashes.append(struct.unpack(HASH_FMT, data))
data = f.read(fmtsize)
return hashes
def peaks_save(peakfilename, peaks):
""" Write out a list of (time, bin) pairs as 32 bit ints """
with open(peakfilename, 'wb') as f:
f.write(PEAK_MAGIC)
for time_, bin_ in peaks:
f.write(struct.pack(PEAK_FMT, time_, bin_))
def peaks_load(peakfilename):
""" Read back a set of (time, bin) pairs written by peaks_save """
peaks = []
fmtsize = struct.calcsize(PEAK_FMT)
with open(peakfilename, 'rb') as f:
magic = f.read(len(PEAK_MAGIC))
if magic != PEAK_MAGIC:
raise IOError('%s is not a peak file (magic %s)'
% (peakfilename, magic))
data = f.read(fmtsize)
while data is not None and len(data) == fmtsize:
peaks.append(struct.unpack(PEAK_FMT, data))
data = f.read(fmtsize)
return peaks
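# On-disk layout written by hashes_save()/peaks_save(): a 16-byte magic string
# (HASH_MAGIC or PEAK_MAGIC) followed by one little-endian pair of 32-bit ints
# ('<2i', 8 bytes) per (time, hash) or (time, bin) entry.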
######## function signature for Gordon feature extraction
######## which stores the precalculated hashes for each track separately
extract_features_analyzer = None
def extract_features(track_obj, *args, **kwargs):
""" Extract the audfprint fingerprint hashes for one file.
:params:
track_obj : object
Gordon's internal structure defining a track; we use
track_obj.fn_audio to find the actual audio file.
:returns:
hashes : list of (int, int)
The times (in frames) and hashes analyzed from the audio file.
"""
global extract_features_analyzer
if extract_features_analyzer == None:
extract_features_analyzer = Analyzer()
density = None
n_fft = None
n_hop = None
sr = None
if "density" in kwargs:
density = kwargs["density"]
if "n_fft" in kwargs:
n_fft = kwargs["n_fft"]
if "n_hop" in kwargs:
n_hop = kwargs["n_hop"]
if "sr" in kwargs:
sr = kwargs["sr"]
extract_features_analyzer.density = density
extract_features_analyzer.n_fft = n_fft
extract_features_analyzer.n_hop = n_hop
extract_features_analyzer.target_sr = sr
return extract_features_analyzer.wavfile2hashes(track_obj.fn_audio, {'interrupted':False})[0]
# Handy function to build a new hash table from a file glob pattern
g2h_analyzer = None
def glob2hashtable(pattern, density=20.0):
""" Build a hash table from the files matching a glob pattern """
global g2h_analyzer
if g2h_analyzer == None:
g2h_analyzer = Analyzer(density=density)
ht = hash_table.HashTable()
filelist = glob.glob(pattern)
initticks = time.clock()
totdur = 0.0
tothashes = 0
for ix, file_ in enumerate(filelist):
print(time.ctime(), "ingesting #", ix, ":", file_, "...")
dur, nhash = g2h_analyzer.ingest(ht, file_)
totdur += dur
tothashes += nhash
elapsedtime = time.clock() - initticks
print("Added", tothashes, "(", tothashes/float(totdur), "hashes/sec) at ",
elapsedtime/totdur, "x RT")
return ht
def local_tester():
test_fn = '/Users/dpwe/Downloads/carol11k.wav'
test_ht = hash_table.HashTable()
test_analyzer = Analyzer()
test_analyzer.ingest(test_ht, test_fn)
test_ht.save('httest.pklz')
# Run the test function if called from the command line
if __name__ == "__main__":
local_tester()
|
piotrwicijowski/whistler
|
audfprint_analyze.py
|
Python
|
mit
| 22,382
|
[
"Gaussian"
] |
5fdd7dec0c190142e9159bdc1baa29cc2e2539019f7553bf07e24bc5f8fc8587
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
******************************
**espresso.FixedPairDistList**
******************************
"""
from espresso import pmi
import _espresso
import espresso
from espresso.esutil import cxxinit
class FixedPairDistListLocal(_espresso.FixedPairDistList):
'The (local) fixed pair list.'
def __init__(self, storage):
'Local construction of a fixed pair list'
if pmi.workerIsActive():
cxxinit(self, _espresso.FixedPairDistList, storage)
def add(self, pid1, pid2):
'add pair to fixed pair list'
if pmi.workerIsActive():
return self.cxxclass.add(self, pid1, pid2)
def size(self):
'count number of bonds in GlobalPairList, involves global reduction'
if pmi.workerIsActive():
return self.cxxclass.size(self)
def addPairs(self, bondlist):
"""
Each processor takes the broadcasted bondlist and
adds those pairs whose first particle is owned by
this processor.
"""
if pmi.workerIsActive():
for bond in bondlist:
pid1, pid2 = bond
self.cxxclass.add(self, pid1, pid2)
def getPairs(self):
'return the bonds of the GlobalPairList'
if pmi.workerIsActive():
bonds=self.cxxclass.getPairs(self)
return bonds
def getPairsDist(self):
'return the bonds of the GlobalPairList together with their distances'
if pmi.workerIsActive():
bonds=self.cxxclass.getPairsDist(self)
return bonds
def getDist(self, pid1, pid2):
if pmi.workerIsActive():
return self.cxxclass.getDist(self, pid1, pid2)
if pmi.isController:
class FixedPairDistList(object):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.FixedPairDistListLocal',
localcall = [ "add" ],
pmicall = [ "addPairs" ],
pmiinvoke = ['getPairs', 'getPairsDist', 'size']
)
def getDist(self, pid1, pid2):
pairs = pmi.invoke(self.pmiobject, 'getDist', pid1, pid2)
for i in pairs:
if( i != -1 ):
return i
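# Hypothetical usage sketch (the 'system.storage' handle and the particle ids are
# assumptions for illustration; the methods match those defined above):
#   fpl = espresso.FixedPairDistList(system.storage)
#   fpl.addPairs([(1, 2), (3, 4)])
#   print fpl.size()
#   print fpl.getPairsDist()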
|
BackupTheBerlios/espressopp
|
src/FixedPairDistList.py
|
Python
|
gpl-3.0
| 2,997
|
[
"ESPResSo"
] |
97d2f01c3ce22e403f3855acf9343c9bf576bbcca193e6de047e49482a199c0d
|
import os
import numpy as np
import mdtraj as md
from unittest import skipIf
import logging
from mdtraj.testing import eq
from openmoltools import utils
from openmoltools import amber
import simtk.unit as u
from simtk.openmm import app
import simtk.openmm as mm
import simtk.openmm.openmm as mmmm
from distutils.spawn import find_executable
import parmed
HAVE_RDKIT = True
try:
from rdkit import Chem
from rdkit.Chem import AllChem
except ImportError:
HAVE_RDKIT = False
OBABEL_PATH = find_executable("obabel")
SKIP_SMILES = not ((HAVE_RDKIT) & (OBABEL_PATH is not None))
CHECKMOL_PATH = find_executable("checkmol")
SKIP_CHECKMOL = (CHECKMOL_PATH is None)
logging.basicConfig(level=logging.DEBUG, format="LOG: %(message)s")
def test_temp_dir_context():
"""Test the context temporary_directory()."""
with utils.temporary_directory() as tmp_dir:
assert os.path.isdir(tmp_dir)
assert not os.path.exists(tmp_dir)
def test_temp_cd_context():
"""Test the context temporary_cd()."""
with utils.temporary_directory() as tmp_dir:
with utils.temporary_cd(tmp_dir):
assert os.getcwd() == os.path.realpath(tmp_dir)
assert os.getcwd() != os.path.realpath(tmp_dir)
def test_parse_ligand_filename():
molecule_name = "sustiva"
input_filename = utils.get_data_filename("chemicals/sustiva/sustiva.mol2")
name, ext = utils.parse_ligand_filename(input_filename)
eq(name, "sustiva")
eq(ext, ".mol2")
def test_run_test_molecule():
molecule_name = "sustiva"
input_filename = utils.get_data_filename("chemicals/sustiva/sustiva.mol2")
with utils.enter_temp_directory(): # Prevents creating tons of GAFF files everywhere.
utils.test_molecule(molecule_name, input_filename)
def test_acpype_conversion():
molecule_name = 'sustiva'
input_filename = utils.get_data_filename("chemicals/sustiva/sustiva.mol2")
with utils.enter_temp_directory(): # Prevents creating tons of GAFF files everywhere.
gaff_mol2_filename, frcmod_filename = amber.run_antechamber(molecule_name, input_filename, charge_method=None)
prmtop, inpcrd = amber.run_tleap(molecule_name, gaff_mol2_filename, frcmod_filename)
out_top, out_gro = utils.convert_via_acpype( molecule_name, prmtop, inpcrd )
def test_parmed_conversion():
molecule_name = 'sustiva'
input_filename = utils.get_data_filename("chemicals/sustiva/sustiva.mol2")
with utils.enter_temp_directory(): # Prevents creating tons of GAFF files everywhere.
#Make sure conversion runs
gaff_mol2_filename, frcmod_filename = amber.run_antechamber(molecule_name, input_filename, charge_method=None)
prmtop, inpcrd = amber.run_tleap(molecule_name, gaff_mol2_filename, frcmod_filename)
out_top, out_gro = utils.amber_to_gromacs( molecule_name, prmtop, inpcrd, precision = 8 )
#Test energies before and after conversion
#Set up amber system
a = parmed.amber.AmberParm( prmtop, inpcrd )
ambersys = a.createSystem()
ambercon = mmmm.Context( ambersys, mm.VerletIntegrator(0.001))
ambercon.setPositions( a.positions )
#Set up GROMACS system
g = parmed.load_file( out_top )
gro = parmed.gromacs.GromacsGroFile.parse( out_gro )
g.box = gro.box
g.positions = gro.positions
gromacssys = g.createSystem()
gromacscon = mmmm.Context( gromacssys, mm.VerletIntegrator(0.001))
gromacscon.setPositions( g.positions )
#Check energies
a_energies = parmed.openmm.utils.energy_decomposition( a, ambercon )
g_energies = parmed.openmm.utils.energy_decomposition( g, gromacscon )
#Check components
tolerance = 1e-5
ok = True
for key in a_energies.keys():
diff = np.abs(a_energies[key] - g_energies[key] )
if diff/np.abs(a_energies[key]) > tolerance:
ok = False
print("In testing AMBER to GROMACS conversion, %s energy differs by %.5g, which is more than a fraction %.2g of the total, so conversion appears not to be working properly." % ( key, diff, tolerance) )
if not ok:
raise(ValueError("AMBER to GROMACS conversion yields energies which are too different."))
@skipIf(SKIP_CHECKMOL, "Skipping testing of checkmol descriptors since checkmol is not found (under that name)." )
def test_checkmol_descriptors():
input_filename = utils.get_data_filename("chemicals/sustiva/sustiva.mol2")
utils.get_checkmol_descriptors( input_filename )
@skipIf(SKIP_SMILES, "Skipping testing of smiles conversion because openbabel or rdkit not found.")
def test_smiles_conversion():
pdb_filename = utils.get_data_filename("chemicals/proteins/1vii.pdb")
smiles = 'Cc1ccccc1' # Also known as toluene.
temperature = 300 * u.kelvin
friction = 0.3 / u.picosecond
timestep = 0.01 * u.femtosecond
protein_traj = md.load(pdb_filename)
protein_traj.center_coordinates()
protein_top = protein_traj.top.to_openmm()
protein_xyz = protein_traj.openmm_positions(0)
ligand_trajectories, ffxml = utils.smiles_to_mdtraj_ffxml([smiles])
ligand_traj = ligand_trajectories[0]
ligand_traj.center_coordinates()
eq(ligand_traj.n_atoms, 15)
eq(ligand_traj.n_frames, 1)
#Move the pre-centered ligand sufficiently far away from the protein to avoid a clash.
min_atom_pair_distance = ((ligand_traj.xyz[0] ** 2.).sum(1) ** 0.5).max() + ((protein_traj.xyz[0] ** 2.).sum(1) ** 0.5).max() + 0.3
ligand_traj.xyz += np.array([1.0, 0.0, 0.0]) * min_atom_pair_distance
ligand_xyz = ligand_traj.openmm_positions(0)
ligand_top = ligand_traj.top.to_openmm()
forcefield = app.ForceField("amber10.xml", ffxml, "tip3p.xml")
model = app.modeller.Modeller(protein_top, protein_xyz)
model.add(ligand_top, ligand_xyz)
model.addSolvent(forcefield, padding=0.4 * u.nanometer)
system = forcefield.createSystem(model.topology, nonbondedMethod=app.PME, nonbondedCutoff=1.0 * u.nanometers, constraints=app.HAngles)
integrator = mm.LangevinIntegrator(temperature, friction, timestep)
simulation = app.Simulation(model.topology, system, integrator)
simulation.context.setPositions(model.positions)
print("running")
simulation.step(1)
|
andrrizzi/openmoltools
|
openmoltools/tests/test_utils.py
|
Python
|
gpl-2.0
| 6,346
|
[
"Amber",
"Gromacs",
"MDTraj",
"OpenMM",
"RDKit"
] |
159992f792f98cd1d14a9a620050332fa73cc12a8007cb73dfc4c71151673b9b
|
## Copyright (c) 2012-2015 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
import os
import py
import qilinguist.worktree
from qisys.test.conftest import *
from qibuild.test.conftest import *
class QiLinguistAction(TestAction):
def __init__(self, worktree_root=None):
super(QiLinguistAction, self).__init__("qilinguist.actions")
self.build_worktree = TestBuildWorkTree()
self.trad = self.build_worktree.add_test_project("translateme/gettext")
def create_po(self, proj):
fr_FR_po_file = os.path.join(proj.path, "po", "fr_FR.po")
en_US_po_file = os.path.join(proj.path, "po", "en_US.po")
fr_file = open(fr_FR_po_file, 'wb')
en_file = open(en_US_po_file, 'wb')
fr_file.write("""
# French translations for qi package
# Traductions fran\xc3\xa7aises du paquet qi.
# Copyright (C) 2012 THE qi'S COPYRIGHT HOLDER
# This file is distributed under the same license as the qi package.
# Automatically generated, 2012.
#
msgid ""
msgstr ""
"Project-Id-Version: qi 1.16\\n"
"Report-Msgid-Bugs-To: \\n"
"POT-Creation-Date: 2012-10-09 15:15+0200\\n"
"PO-Revision-Date: 2012-10-09 15:15+0200\\n"
"Last-Translator: Automatically generated\\n"
"Language-Team: none\\n"
"Language: fr\\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=UTF-8\\n"
"Content-Transfer-Encoding: 8bit\\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\\n"
"X-Language: fr_FR\\n"
#: main.cpp:15
msgid "Brian is in the kitchen."
msgstr "Brian est dans la cuisine."
#: main.cpp:13
msgid "Hi, my name is NAO."
msgstr "Bonjour, mon nom est NAO."
#: main.cpp:14
msgid "Where is Brian?"
msgstr "O\xc3\xb9 est Brian ?"
""")
en_file.write("""
# English translations for qi package.
# Copyright (C) 2012 THE qi'S COPYRIGHT HOLDER
# This file is distributed under the same license as the qi package.
# Automatically generated, 2012.
#
msgid ""
msgstr ""
"Project-Id-Version: qi 1.16\\n"
"Report-Msgid-Bugs-To: \\n"
"POT-Creation-Date: 2012-10-09 15:15+0200\\n"
"PO-Revision-Date: 2012-10-09 15:15+0200\\n"
"Last-Translator: Automatically generated\\n"
"Language-Team: none\\n"
"Language: en_US\\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=UTF-8\\n"
"Content-Transfer-Encoding: 8bit\\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\\n"
"X-Language: en_US\\n"
#: main.cpp:15
msgid "Brian is in the kitchen."
msgstr "Brian is in the kitchen."
#: main.cpp:13
msgid "Hi, my name is NAO."
msgstr "Hi, my name is NAO."
#: main.cpp:14
msgid "Where is Brian?"
msgstr "Where is Brian?"
""")
fr_file.close()
en_file.close()
class TestLinguistWorktree(qilinguist.worktree.LinguistWorkTree):
def __init__(self, worktree=None):
if not worktree:
worktree = TestWorkTree()
super(TestLinguistWorktree, self).__init__(worktree)
self.tmpdir = py.path.local(self.root)
def create_gettext_project(self, name):
proj_path = os.path.join(self.root, name)
qisys.sh.mkdir(proj_path, recursive=True)
qiproject_xml = os.path.join(proj_path, "qiproject.xml")
with open(qiproject_xml, "w") as fp:
fp.write("""
<project version="3">
<qilinguist name="{name}" tr="gettext" linguas="fr_FR en_US" />
</project>
""".format(name=name))
self.worktree.add_project(name)
return self.get_linguist_project(name, raises=True)
@pytest.fixture
def qilinguist_action(cd_to_tmpdir):
res = QiLinguistAction()
return res
@pytest.fixture
def linguist_worktree(cd_to_tmpdir):
return TestLinguistWorktree()
|
dmerejkowsky/qibuild
|
python/qilinguist/test/conftest.py
|
Python
|
bsd-3-clause
| 3,893
|
[
"Brian"
] |
2628f2273d9e6115eaa1e0e24700a799ad2298c733cf7f4dd390d7dd690ef141
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.exceptions import DataDimensionalityWarning
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import GaussianRandomProjection
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non-zero entries and
# Gaussian-distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
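# For reference, the target dimension required by the lemma grows quickly as eps
# shrinks: johnson_lindenstrauss_min_dim(1000, eps=0.1) == 5920, the value that the
# error-message test further down relies on.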
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
# Every random matrix should produce a transformation matrix
# with zero mean and unit norm for each column
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
# Check basic properties of random matrix generation
for random_matrix in all_random_matrix:
yield check_input_size_random_matrix, random_matrix
yield check_size_generated, random_matrix
yield check_zero_mean_and_unit_norm, random_matrix
for random_matrix in all_sparse_random_matrix:
yield check_input_with_sparse_random_matrix, random_matrix
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
yield check_zero_mean_and_unit_norm, random_matrix_dense
def test_gaussian_random_matrix():
# Check some statistical properties of the Gaussian random matrix
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
# Check some statistical properties of the sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [[0, 1, 2]])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
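# A minimal usage sketch of the estimators exercised above (illustrative only;
# `X` stands for any dense array of shape (n_samples, n_features)):
#
#   from sklearn.random_projection import SparseRandomProjection
#   X_new = SparseRandomProjection(n_components='auto', eps=0.5,
#                                  random_state=0).fit_transform(X)
#   # X_new.shape[1] is chosen automatically from n_samples and eps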
|
DailyActie/Surrogate-Model
|
01-codes/scikit-learn-master/sklearn/tests/test_random_projection.py
|
Python
|
mit
| 14,036
|
[
"Gaussian"
] |
bda4f1af983568ff09bba8ccb546e5828e2a8e335dbf5e592f7be1924a1d8073
|
"""
Clustering Interface For Galaxy
Date: January 2013
Author: James Boocock
"""
#Python Imports
import os
import sys
import logging
#Clustering Module Imports
import util
from grid import Grid
from tool_run import ToolRun
from ui_reader import UiReader
from elementtree import ElementTree
log = logging.getLogger(__name__)
DEFAULT_CLUSTERING_FAIL_MESSAGE = " Unable to run job due to the misconfiguration of the clustering interface "
class ClusteringInterface(object):
""" Clustering Interface class contains everything the clustering inteface needs"""
def __init__(self,app,job_runners,config_file):
self.app =app
self.avaliable_runners= job_runners
log.debug(job_runners)
self.grids_by_id = {}
log.debug(config_file)
try:
self.init_grids(config_file)
except:
log.exception("Error loading grids specifed in the config file {0}".format(config_file))
log.debug(self.generate_avaliable_grids())
# Do some ui reading
#HARD CODED FOR NOW
self.ui_reader = UiReader(self.app,self.grids_by_id,"lib/galaxy/jobs/clustering/tool_parralelism.ini")
def init_grids(self,config_file):
""" Initalise all the grids specfied in the grid config file"""
tree=util.parse_xml(config_file)
root=tree.getroot()
for _, elem in enumerate(root):
if elem.tag == "grid":
grid = Grid(elem,self.app,self.avaliable_runners)
self.grids_by_id[grid.id] = grid
def get_grid(self, job_wrapper):
return self.grids_by_id['nesi0']
def put(self, job_wrapper):
try:
tool_run = ToolRun(self.app, job_wrapper,self.grids_by_id,self.ui_reader)
runner_name = tool_run.get_runner_name()
# If the grid is local or LWR we won't have a Grid
# object, so the tool should continue to
# run as if nothing has changed.
# Get a new runner URL for each job.
self.avaliable_runners[runner_name].put(job_wrapper)
except KeyError:
log.exception("put(): (%s) Invalid Job Runner:")
job_wrapper.fail(DEFAULT_CLUSTERING_FAIL_MESSAGE)
def stop(self, job):
return 1
def recover(self, job, job_wrapper):
return 1
def print_grids(self):
""" Prints out all the avaliable grids """
for grid in self.grids_by_id:
print grid.name
def get_grids(self):
return self.grids_by_id
def generate_avaliable_grids(self):
""" Gets all the avaliabe grids """
grid_names=[]
for grid in self.grids_by_id:
grid_names.append(grid)
return grid_names
def get_ui_reader(self):
return self.ui_reader
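# Illustrative construction sketch (not part of the original module); `app`,
# `job_runners` and the config path below are placeholders for the objects the
# Galaxy job manager would normally supply:
#
#   interface = ClusteringInterface(app, job_runners, "clustering_conf.xml")
#   interface.put(job_wrapper)  # dispatches the job to the selected runner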
|
smilefreak/NesiGridSelection
|
grid_selection/clustering/interface.py
|
Python
|
mit
| 2,833
|
[
"Galaxy"
] |
631571ffebe239538a8a4fd502df240b5b3ced5228484409a183555fdf87071d
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
##################################################################################################
# module for the symmetric eigenvalue problem
# Copyright 2013 Timo Hartmann (thartmann15 at gmail.com)
#
# todo:
# - implement balancing
#
##################################################################################################
"""
The symmetric eigenvalue problem.
---------------------------------
This file contains routines for the symmetric eigenvalue problem.
high level routines:
eigsy : real symmetric (ordinary) eigenvalue problem
eighe : complex hermitian (ordinary) eigenvalue problem
eigh : unified interface for eigsy and eighe
svd_r : singular value decomposition for real matrices
svd_c : singular value decomposition for complex matrices
svd : unified interface for svd_r and svd_c
low level routines:
r_sy_tridiag : reduction of real symmetric matrix to real symmetric tridiagonal matrix
c_he_tridiag_0 : reduction of complex hermitian matrix to real symmetric tridiagonal matrix
c_he_tridiag_1 : auxiliary routine to c_he_tridiag_0
c_he_tridiag_2 : auxiliary routine to c_he_tridiag_0
tridiag_eigen : solves the real symmetric tridiagonal matrix eigenvalue problem
svd_r_raw : raw singular value decomposition for real matrices
svd_c_raw : raw singular value decomposition for complex matrices
"""
from ..libmp.backend import xrange
from .eigen import defun
def r_sy_tridiag(ctx, A, D, E, calc_ev = True):
"""
This routine transforms a real symmetric matrix A to a real symmetric
tridiagonal matrix T using an orthogonal similarity transformation:
Q' * A * Q = T (here ' denotes the matrix transpose).
The orthogonal matrix Q is built up from Householder reflectors.
parameters:
A (input/output) On input, A contains the real symmetric matrix of
dimension (n,n). On output, if calc_ev is true, A contains the
orthogonal matrix Q, otherwise A is destroyed.
D (output) real array of length n, contains the diagonal elements
of the tridiagonal matrix
E (output) real array of length n, contains the offdiagonal elements
of the tridiagonal matrix in E[0:(n-1)] where n is the dimension of
the matrix A. E[n-1] is undefined.
calc_ev (input) If calc_ev is true, this routine explicitly calculates the
orthogonal matrix Q which is then returned in A. If calc_ev is
false, Q is not explicitly calculated resulting in a shorter run time.
This routine is a python translation of the fortran routine tred2.f in the
software library EISPACK (see netlib.org) which itself is based on the algol
procedure tred2 described in:
- Num. Math. 11, p.181-195 (1968) by Martin, Reinsch and Wilkinson
- Handbook for auto. comp., Vol II, Linear Algebra, p.212-226 (1971)
For a good introduction to Householder reflections, see also
Stoer, Bulirsch - Introduction to Numerical Analysis.
"""
# note : the vector v of the i-th householder reflector is stored in a[(i+1):,i]
# whereas v/<v,v> is stored in a[i,(i+1):]
n = A.rows
for i in xrange(n - 1, 0, -1):
# scale the vector
scale = 0
for k in xrange(0, i):
scale += abs(A[k,i])
scale_inv = 0
if scale != 0:
scale_inv = 1/scale
# sadly there are floating point numbers not equal to zero whose reciprocal is infinity
if i == 1 or scale == 0 or ctx.isinf(scale_inv):
E[i] = A[i-1,i] # nothing to do
D[i] = 0
continue
# calculate parameters for householder transformation
H = 0
for k in xrange(0, i):
A[k,i] *= scale_inv
H += A[k,i] * A[k,i]
F = A[i-1,i]
G = ctx.sqrt(H)
if F > 0:
G = -G
E[i] = scale * G
H -= F * G
A[i-1,i] = F - G
F = 0
# apply householder transformation
for j in xrange(0, i):
if calc_ev:
A[i,j] = A[j,i] / H
G = 0 # calculate A*U
for k in xrange(0, j + 1):
G += A[k,j] * A[k,i]
for k in xrange(j + 1, i):
G += A[j,k] * A[k,i]
E[j] = G / H # calculate P
F += E[j] * A[j,i]
HH = F / (2 * H)
for j in xrange(0, i): # calculate reduced A
F = A[j,i]
G = E[j] - HH * F # calculate Q
E[j] = G
for k in xrange(0, j + 1):
A[k,j] -= F * E[k] + G * A[k,i]
D[i] = H
for i in xrange(1, n): # better for compatibility
E[i-1] = E[i]
E[n-1] = 0
if calc_ev:
D[0] = 0
for i in xrange(0, n):
if D[i] != 0:
for j in xrange(0, i): # accumulate transformation matrices
G = 0
for k in xrange(0, i):
G += A[i,k] * A[k,j]
for k in xrange(0, i):
A[k,j] -= G * A[k,i]
D[i] = A[i,i]
A[i,i] = 1
for j in xrange(0, i):
A[j,i] = A[i,j] = 0
else:
for i in xrange(0, n):
D[i] = A[i,i]
def c_he_tridiag_0(ctx, A, D, E, T):
"""
This routine transforms a complex hermitian matrix A to a real symmetric
tridiagonal matrix T using a unitary similarity transformation:
Q' * A * Q = T (here ' denotes the hermitian matrix transpose,
i.e. transposition and conjugation).
The unitary matrix Q is built up from Householder reflectors and
a unitary diagonal matrix.
parameters:
A (input/output) On input, A contains the complex hermitian matrix
of dimension (n,n). On output, A contains the unitary matrix Q
in compressed form.
D (output) real array of length n, contains the diagonal elements
of the tridiagonal matrix.
E (output) real array of length n, contains the offdiagonal elements
of the tridiagonal matrix in E[0:(n-1)] where n is the dimension of
the matrix A. E[n-1] is undefined.
T (output) complex array of length n, contains a unitary diagonal
matrix.
This routine is a python translation (in slightly modified form) of the fortran
routine htridi.f in the software library EISPACK (see netlib.org) which itself
is a complex version of the algol procedure tred1 described in:
- Num. Math. 11, p.181-195 (1968) by Martin, Reinsch and Wilkinson
- Handbook for auto. comp., Vol II, Linear Algebra, p.212-226 (1971)
For a good introduction to Householder reflections, see also
Stoer, Bulirsch - Introduction to Numerical Analysis.
"""
n = A.rows
T[n-1] = 1
for i in xrange(n - 1, 0, -1):
# scale the vector
scale = 0
for k in xrange(0, i):
scale += abs(ctx.re(A[k,i])) + abs(ctx.im(A[k,i]))
scale_inv = 0
if scale != 0:
scale_inv = 1 / scale
# sadly there are floating point numbers not equal to zero whose reciprocal is infinity
if scale == 0 or ctx.isinf(scale_inv):
E[i] = 0
D[i] = 0
T[i-1] = 1
continue
if i == 1:
F = A[i-1,i]
f = abs(F)
E[i] = f
D[i] = 0
if f != 0:
T[i-1] = T[i] * F / f
else:
T[i-1] = T[i]
continue
# calculate parameters for householder transformation
H = 0
for k in xrange(0, i):
A[k,i] *= scale_inv
rr = ctx.re(A[k,i])
ii = ctx.im(A[k,i])
H += rr * rr + ii * ii
F = A[i-1,i]
f = abs(F)
G = ctx.sqrt(H)
H += G * f
E[i] = scale * G
if f != 0:
F = F / f
TZ = - T[i] * F # T[i-1]=-T[i]*F, but we need T[i-1] as temporary storage
G *= F
else:
TZ = -T[i] # T[i-1]=-T[i]
A[i-1,i] += G
F = 0
# apply householder transformation
for j in xrange(0, i):
A[i,j] = A[j,i] / H
G = 0 # calculate A*U
for k in xrange(0, j + 1):
G += ctx.conj(A[k,j]) * A[k,i]
for k in xrange(j + 1, i):
G += A[j,k] * A[k,i]
T[j] = G / H # calculate P
F += ctx.conj(T[j]) * A[j,i]
HH = F / (2 * H)
for j in xrange(0, i): # calculate reduced A
F = A[j,i]
G = T[j] - HH * F # calculate Q
T[j] = G
for k in xrange(0, j + 1):
A[k,j] -= ctx.conj(F) * T[k] + ctx.conj(G) * A[k,i]
# as we use the lower left part for storage
# we have to use the transpose of the normal formula
T[i-1] = TZ
D[i] = H
for i in xrange(1, n): # better for compatibility
E[i-1] = E[i]
E[n-1] = 0
D[0] = 0
for i in xrange(0, n):
zw = D[i]
D[i] = ctx.re(A[i,i])
A[i,i] = zw
def c_he_tridiag_1(ctx, A, T):
"""
This routine forms the unitary matrix Q described in c_he_tridiag_0.
parameters:
A (input/output) On input, A is the same matrix as delivered by
c_he_tridiag_0. On output, A is set to Q.
T (input) On input, T is the same array as delivered by c_he_tridiag_0.
"""
n = A.rows
for i in xrange(0, n):
if A[i,i] != 0:
for j in xrange(0, i):
G = 0
for k in xrange(0, i):
G += ctx.conj(A[i,k]) * A[k,j]
for k in xrange(0, i):
A[k,j] -= G * A[k,i]
A[i,i] = 1
for j in xrange(0, i):
A[j,i] = A[i,j] = 0
for i in xrange(0, n):
for k in xrange(0, n):
A[i,k] *= T[k]
def c_he_tridiag_2(ctx, A, T, B):
"""
This routine applies the unitary matrix Q described in c_he_tridiag_0
onto the matrix B, i.e. it forms Q*B.
parameters:
A (input) On input, A is the same matrix as delivered by c_he_tridiag_0.
T (input) On input, T is the same array as delivered by c_he_tridiag_0.
B (input/output) On input, B is a complex matrix. On output B is replaced
by Q*B.
This routine is a python translation of the fortran routine htribk.f in the
software library EISPACK (see netlib.org). See c_he_tridiag_0 for more
references.
"""
n = A.rows
for i in xrange(0, n):
for k in xrange(0, n):
B[k,i] *= T[k]
for i in xrange(0, n):
if A[i,i] != 0:
for j in xrange(0, n):
G = 0
for k in xrange(0, i):
G += ctx.conj(A[i,k]) * B[k,j]
for k in xrange(0, i):
B[k,j] -= G * A[k,i]
def tridiag_eigen(ctx, d, e, z = False):
"""
This subroutine finds the eigenvalues and the first components of the
eigenvectors of a real symmetric tridiagonal matrix using the implicit
QL method.
parameters:
d (input/output) real array of length n. on input, d contains the diagonal
elements of the input matrix. on output, d contains the eigenvalues in
ascending order.
e (input) real array of length n. on input, e contains the offdiagonal
elements of the input matrix in e[0:(n-1)]. On output, e has been
destroyed.
z (input/output) If z is equal to False, no eigenvectors will be computed.
Otherwise on input z should have the format z[0:m,0:n] (i.e. a real or
complex matrix of dimension (m,n) ). On output this matrix will be
multiplied by the matrix of the eigenvectors (i.e. the columns of this
matrix are the eigenvectors): z --> z*EV
That means if z[i,j]={1 if i==j; 0 otherwise} on input, then on output
z will contain the first m components of the eigenvectors. That means
if m is equal to n, the i-th eigenvector will be z[:,i].
This routine is a python translation (in slightly modified form) of the
fortran routine imtql2.f in the software library EISPACK (see netlib.org)
which itself is based on the algol procedure imtql2 described in:
- num. math. 12, p. 377-383(1968) by martin and wilkinson
- modified in num. math. 15, p. 450(1970) by dubrulle
- handbook for auto. comp., vol. II-linear algebra, p. 241-248 (1971)
See also the routine gaussq.f in netlib.org or acm algorithm 726.
"""
n = len(d)
e[n-1] = 0
iterlim = 2 * ctx.dps
for l in xrange(n):
j = 0
while 1:
m = l
while 1:
# look for a small subdiagonal element
if m + 1 == n:
break
if abs(e[m]) <= ctx.eps * (abs(d[m]) + abs(d[m + 1])):
break
m = m + 1
if m == l:
break
if j >= iterlim:
raise RuntimeError("tridiag_eigen: no convergence to an eigenvalue after %d iterations" % iterlim)
j += 1
# form shift
p = d[l]
g = (d[l + 1] - p) / (2 * e[l])
r = ctx.hypot(g, 1)
if g < 0:
s = g - r
else:
s = g + r
g = d[m] - p + e[l] / s
s, c, p = 1, 1, 0
for i in xrange(m - 1, l - 1, -1):
f = s * e[i]
b = c * e[i]
if abs(f) > abs(g): # this is a slight improvement also used in gaussq.f or acm algorithm 726.
c = g / f
r = ctx.hypot(c, 1)
e[i + 1] = f * r
s = 1 / r
c = c * s
else:
s = f / g
r = ctx.hypot(s, 1)
e[i + 1] = g * r
c = 1 / r
s = s * c
g = d[i + 1] - p
r = (d[i] - g) * s + 2 * c * b
p = s * r
d[i + 1] = g + p
g = c * r - b
if not isinstance(z, bool):
# calculate eigenvectors
for w in xrange(z.rows):
f = z[w,i+1]
z[w,i+1] = s * z[w,i] + c * f
z[w,i ] = c * z[w,i] - s * f
d[l] = d[l] - p
e[l] = g
e[m] = 0
for ii in xrange(1, n):
# sort eigenvalues and eigenvectors (bubble-sort)
i = ii - 1
k = i
p = d[i]
for j in xrange(ii, n):
if d[j] >= p:
continue
k = j
p = d[k]
if k == i:
continue
d[k] = d[i]
d[i] = p
if not isinstance(z, bool):
for w in xrange(z.rows):
p = z[w,i]
z[w,i] = z[w,k]
z[w,k] = p
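# Illustrative low-level usage of tridiag_eigen (assumed; this mirrors what
# eigsy does internally). The tridiagonal matrix [[2, 1], [1, 2]] has
# eigenvalues 1 and 3:
#
#   >>> from mpmath import mp
#   >>> d = mp.matrix([2, 2])   # diagonal entries
#   >>> e = mp.matrix([1, 0])   # off-diagonal entries; e[n-1] is ignored
#   >>> tridiag_eigen(mp, d, e, False)
#   >>> print(d)                # eigenvalues in ascending order
#   [1.0]
#   [3.0]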
########################################################################################
@defun
def eigsy(ctx, A, eigvals_only = False, overwrite_a = False):
"""
This routine solves the (ordinary) eigenvalue problem for a real symmetric
square matrix A. Given A, an orthogonal matrix Q is calculated which
diagonalizes A:
Q' A Q = diag(E) and Q Q' = Q' Q = 1
Here diag(E) is a diagonal matrix whose diagonal is E.
' denotes the transpose.
The columns of Q are the eigenvectors of A and E contains the eigenvalues:
A Q[:,i] = E[i] Q[:,i]
input:
A: real matrix of format (n,n) which is symmetric
(i.e. A=A' or A[i,j]=A[j,i])
eigvals_only: if true, calculates only the eigenvalues E.
if false, calculates both eigenvectors and eigenvalues.
overwrite_a: if true, allows modification of A which may improve
performance. if false, A is not modified.
output:
E: vector of format (n). contains the eigenvalues of A in ascending order.
Q: orthogonal matrix of format (n,n). contains the eigenvectors
of A as columns.
return value:
E if eigvals_only is true
(E, Q) if eigvals_only is false
example:
>>> from mpmath import mp
>>> A = mp.matrix([[3, 2], [2, 0]])
>>> E = mp.eigsy(A, eigvals_only = True)
>>> print(E)
[-1.0]
[ 4.0]
>>> A = mp.matrix([[1, 2], [2, 3]])
>>> E, Q = mp.eigsy(A)
>>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0]))
[0.0]
[0.0]
see also: eighe, eigh, eig
"""
if not overwrite_a:
A = A.copy()
d = ctx.zeros(A.rows, 1)
e = ctx.zeros(A.rows, 1)
if eigvals_only:
r_sy_tridiag(ctx, A, d, e, calc_ev = False)
tridiag_eigen(ctx, d, e, False)
return d
else:
r_sy_tridiag(ctx, A, d, e, calc_ev = True)
tridiag_eigen(ctx, d, e, A)
return (d, A)
@defun
def eighe(ctx, A, eigvals_only = False, overwrite_a = False):
"""
This routine solves the (ordinary) eigenvalue problem for a complex
hermitian square matrix A. Given A, a unitary matrix Q is calculated which
diagonalizes A:
Q' A Q = diag(E) and Q Q' = Q' Q = 1
Here diag(E) is a diagonal matrix whose diagonal is E.
' denotes the hermitian transpose (i.e. ordinary transposition and
complex conjugation).
The columns of Q are the eigenvectors of A and E contains the eigenvalues:
A Q[:,i] = E[i] Q[:,i]
input:
A: complex matrix of format (n,n) which is hermitian
(i.e. A=A' or A[i,j]=conj(A[j,i]))
eigvals_only: if true, calculates only the eigenvalues E.
if false, calculates both eigenvectors and eigenvalues.
overwrite_a: if true, allows modification of A which may improve
performance. if false, A is not modified.
output:
E: vector of format (n). contains the eigenvalues of A in ascending order.
Q: unitary matrix of format (n,n). contains the eigenvectors
of A as columns.
return value:
E if eigvals_only is true
(E, Q) if eigvals_only is false
example:
>>> from mpmath import mp
>>> A = mp.matrix([[1, -3 - 1j], [-3 + 1j, -2]])
>>> E = mp.eighe(A, eigvals_only = True)
>>> print(E)
[-4.0]
[ 3.0]
>>> A = mp.matrix([[1, 2 + 5j], [2 - 5j, 3]])
>>> E, Q = mp.eighe(A)
>>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0]))
[0.0]
[0.0]
see also: eigsy, eigh, eig
"""
if not overwrite_a:
A = A.copy()
d = ctx.zeros(A.rows, 1)
e = ctx.zeros(A.rows, 1)
t = ctx.zeros(A.rows, 1)
if eigvals_only:
c_he_tridiag_0(ctx, A, d, e, t)
tridiag_eigen(ctx, d, e, False)
return d
else:
c_he_tridiag_0(ctx, A, d, e, t)
B = ctx.eye(A.rows)
tridiag_eigen(ctx, d, e, B)
c_he_tridiag_2(ctx, A, t, B)
return (d, B)
@defun
def eigh(ctx, A, eigvals_only = False, overwrite_a = False):
"""
"eigh" is a unified interface for "eigsy" and "eighe". Depending on
whether A is real or complex the appropriate function is called.
This routine solves the (ordinary) eigenvalue problem for a real symmetric
or complex hermitian square matrix A. Given A, an orthogonal (A real) or
unitary (A complex) matrix Q is calculated which diagonalizes A:
Q' A Q = diag(E) and Q Q' = Q' Q = 1
Here diag(E) is a diagonal matrix whose diagonal is E.
' denotes the hermitian transpose (i.e. ordinary transposition and
complex conjugation).
The columns of Q are the eigenvectors of A and E contains the eigenvalues:
A Q[:,i] = E[i] Q[:,i]
input:
A: a real or complex square matrix of format (n,n) which is symmetric
(i.e. A[i,j]=A[j,i]) or hermitian (i.e. A[i,j]=conj(A[j,i])).
eigvals_only: if true, calculates only the eigenvalues E.
if false, calculates both eigenvectors and eigenvalues.
overwrite_a: if true, allows modification of A which may improve
performance. if false, A is not modified.
output:
E: vector of format (n). contains the eigenvalues of A in ascending order.
Q: an orthogonal or unitary matrix of format (n,n). contains the
eigenvectors of A as columns.
return value:
E if eigvals_only is true
(E, Q) if eigvals_only is false
example:
>>> from mpmath import mp
>>> A = mp.matrix([[3, 2], [2, 0]])
>>> E = mp.eigh(A, eigvals_only = True)
>>> print(E)
[-1.0]
[ 4.0]
>>> A = mp.matrix([[1, 2], [2, 3]])
>>> E, Q = mp.eigh(A)
>>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0]))
[0.0]
[0.0]
>>> A = mp.matrix([[1, 2 + 5j], [2 - 5j, 3]])
>>> E, Q = mp.eigh(A)
>>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0]))
[0.0]
[0.0]
see also: eigsy, eighe, eig
"""
iscomplex = any(type(x) is ctx.mpc for x in A)
if iscomplex:
return ctx.eighe(A, eigvals_only = eigvals_only, overwrite_a = overwrite_a)
else:
return ctx.eigsy(A, eigvals_only = eigvals_only, overwrite_a = overwrite_a)
@defun
def gauss_quadrature(ctx, n, qtype = "legendre", alpha = 0, beta = 0):
"""
This routine calculates gaussian quadrature rules for different
families of orthogonal polynomials. Let (a, b) be an interval,
W(x) a positive weight function and n a positive integer.
Then the purpose of this routine is to calculate pairs (x_k, w_k)
for k=0, 1, 2, ... (n-1) which give
int(W(x) * F(x), x = a..b) = sum(w_k * F(x_k),k = 0..(n-1))
exactly for all polynomials F(x) of degree (strictly) less than 2*n. For all
integrable functions F(x) the sum is a (more or less) good approximation to
the integral. The x_k are called nodes (which are the zeros of the
related orthogonal polynomials) and the w_k are called the weights.
parameters
n (input) The degree of the quadrature rule, i.e. its number of
nodes.
qtype (input) The family of orthogonal polynomials for which to
compute the quadrature rule. See the list below.
alpha (input) real number, used as parameter for some orthogonal
polynomials
beta (input) real number, used as parameter for some orthogonal
polynomials.
return value
(X, W) a pair of two real arrays where x_k = X[k] and w_k = W[k].
orthogonal polynomials:
qtype polynomial
----- ----------
"legendre" Legendre polynomials, W(x)=1 on the interval (-1, +1)
"legendre01" shifted Legendre polynomials, W(x)=1 on the interval (0, +1)
"hermite" Hermite polynomials, W(x)=exp(-x*x) on (-infinity,+infinity)
"laguerre" Laguerre polynomials, W(x)=exp(-x) on (0,+infinity)
"glaguerre" generalized Laguerre polynomials, W(x)=exp(-x)*x**alpha
on (0, +infinity)
"chebyshev1" Chebyshev polynomials of the first kind, W(x)=1/sqrt(1-x*x)
on (-1, +1)
"chebyshev2" Chebyshev polynomials of the second kind, W(x)=sqrt(1-x*x)
on (-1, +1)
"jacobi" Jacobi polynomials, W(x)=(1-x)**alpha * (1+x)**beta on (-1, +1)
with alpha>-1 and beta>-1
examples:
>>> from mpmath import mp
>>> f = lambda x: x**8 + 2 * x**6 - 3 * x**4 + 5 * x**2 - 7
>>> X, W = mp.gauss_quadrature(5, "hermite")
>>> A = mp.fdot([(f(x), w) for x, w in zip(X, W)])
>>> B = mp.sqrt(mp.pi) * 57 / 16
>>> C = mp.quad(lambda x: mp.exp(- x * x) * f(x), [-mp.inf, +mp.inf])
>>> mp.nprint((mp.chop(A-B, tol = 1e-10), mp.chop(A-C, tol = 1e-10)))
(0.0, 0.0)
>>> f = lambda x: x**5 - 2 * x**4 + 3 * x**3 - 5 * x**2 + 7 * x - 11
>>> X, W = mp.gauss_quadrature(3, "laguerre")
>>> A = mp.fdot([(f(x), w) for x, w in zip(X, W)])
>>> B = 76
>>> C = mp.quad(lambda x: mp.exp(-x) * f(x), [0, +mp.inf])
>>> mp.nprint(mp.chop(A-B, tol = 1e-10), mp.chop(A-C, tol = 1e-10))
.0
# orthogonality of the chebyshev polynomials:
>>> f = lambda x: mp.chebyt(3, x) * mp.chebyt(2, x)
>>> X, W = mp.gauss_quadrature(3, "chebyshev1")
>>> A = mp.fdot([(f(x), w) for x, w in zip(X, W)])
>>> print(mp.chop(A, tol = 1e-10))
0.0
references:
- golub and welsch, "calculations of gaussian quadrature rules", mathematics of
computation 23, p. 221-230 (1969)
- golub, "some modified matrix eigenvalue problems", siam review 15, p. 318-334 (1973)
- stroud and secrest, "gaussian quadrature formulas", prentice-hall (1966)
See also the routine gaussq.f in netlib.org or ACM Transactions on
Mathematical Software algorithm 726.
"""
d = ctx.zeros(n, 1)
e = ctx.zeros(n, 1)
z = ctx.zeros(1, n)
z[0,0] = 1
if qtype == "legendre":
# legendre on the range -1 +1 , abramowitz, table 25.4, p.916
w = 2
for i in xrange(n):
j = i + 1
e[i] = ctx.sqrt(j * j / (4 * j * j - ctx.mpf(1)))
elif qtype == "legendre01":
# legendre shifted to 0 1 , abramowitz, table 25.8, p.921
w = 1
for i in xrange(n):
d[i] = 1 / ctx.mpf(2)
j = i + 1
e[i] = ctx.sqrt(j * j / (16 * j * j - ctx.mpf(4)))
elif qtype == "hermite":
# hermite on the range -inf +inf , abramowitz, table 25.10,p.924
w = ctx.sqrt(ctx.pi)
for i in xrange(n):
j = i + 1
e[i] = ctx.sqrt(j / ctx.mpf(2))
elif qtype == "laguerre":
# laguerre on the range 0 +inf , abramowitz, table 25.9, p. 923
w = 1
for i in xrange(n):
j = i + 1
d[i] = 2 * j - 1
e[i] = j
elif qtype=="chebyshev1":
# chebyshev polynomials of the first kind
w = ctx.pi
for i in xrange(n):
e[i] = 1 / ctx.mpf(2)
e[0] = ctx.sqrt(1 / ctx.mpf(2))
elif qtype == "chebyshev2":
# chebyshev polynomials of the second kind
w = ctx.pi / 2
for i in xrange(n):
e[i] = 1 / ctx.mpf(2)
elif qtype == "glaguerre":
# generalized laguerre on the range 0 +inf
w = ctx.gamma(1 + alpha)
for i in xrange(n):
j = i + 1
d[i] = 2 * j - 1 + alpha
e[i] = ctx.sqrt(j * (j + alpha))
elif qtype == "jacobi":
# jacobi polynomials
alpha = ctx.mpf(alpha)
beta = ctx.mpf(beta)
ab = alpha + beta
abi = ab + 2
w = (2**(ab+1)) * ctx.gamma(alpha + 1) * ctx.gamma(beta + 1) / ctx.gamma(abi)
d[0] = (beta - alpha) / abi
e[0] = ctx.sqrt(4 * (1 + alpha) * (1 + beta) / ((abi + 1) * (abi * abi)))
a2b2 = beta * beta - alpha * alpha
for i in xrange(1, n):
j = i + 1
abi = 2 * j + ab
d[i] = a2b2 / ((abi - 2) * abi)
e[i] = ctx.sqrt(4 * j * (j + alpha) * (j + beta) * (j + ab) / ((abi * abi - 1) * abi * abi))
elif isinstance(qtype, str):
raise ValueError("unknown quadrature rule \"%s\"" % qtype)
elif not isinstance(qtype, str):
w = qtype(d, e)
else:
assert 0
tridiag_eigen(ctx, d, e, z)
for i in xrange(len(z)):
z[i] *= z[i]
z = z.transpose()
return (d, w * z)
##################################################################################################
##################################################################################################
##################################################################################################
def svd_r_raw(ctx, A, V = False, calc_u = False):
"""
This routine computes the singular value decomposition of a matrix A.
Given A, two orthogonal matrices U and V are calculated such that
A = U S V
where S is a suitably shaped matrix whose off-diagonal elements are zero.
The diagonal elements of S are the singular values of A, i.e. the
square roots of the eigenvalues of A' A or A A'. Here ' denotes the transpose.
Householder bidiagonalization and a variant of the QR algorithm is used.
overview of the matrices :
A : m*n A gets replaced by U
U : m*n U replaces A. If n>m then only the first m*m block of U is
non-zero. column-orthogonal: U' U = B
here B is a n*n matrix whose first min(m,n) diagonal
elements are 1 and all other elements are zero.
S : n*n diagonal matrix, only the diagonal elements are stored in
the array S. only the first min(m,n) diagonal elements are non-zero.
V : n*n orthogonal: V V' = V' V = 1
parameters:
A (input/output) On input, A contains a real matrix of shape m*n.
On output, if calc_u is true A contains the column-orthogonal
matrix U; otherwise A is simply used as workspace and thus destroyed.
V (input/output) if false, the matrix V is not calculated. otherwise
V must be a matrix of shape n*n.
calc_u (input) If true, the matrix U is calculated and replaces A.
if false, U is not calculated and A is simply destroyed
return value:
S an array of length n containing the singular values of A sorted by
decreasing magnitude. only the first min(m,n) elements are non-zero.
This routine is a python translation of the fortran routine svd.f in the
software library EISPACK (see netlib.org) which itself is based on the
algol procedure svd described in:
- num. math. 14, 403-420(1970) by golub and reinsch.
- wilkinson/reinsch: handbook for auto. comp., vol ii-linear algebra, 134-151(1971).
"""
m, n = A.rows, A.cols
S = ctx.zeros(n, 1)
# work is a temporary array of size n
work = ctx.zeros(n, 1)
g = scale = anorm = 0
maxits = 3 * ctx.dps
for i in xrange(n): # householder reduction to bidiagonal form
work[i] = scale*g
g = s = scale = 0
if i < m:
for k in xrange(i, m):
scale += ctx.fabs(A[k,i])
if scale != 0:
for k in xrange(i, m):
A[k,i] /= scale
s += A[k,i] * A[k,i]
f = A[i,i]
g = -ctx.sqrt(s)
if f < 0:
g = -g
h = f * g - s
A[i,i] = f - g
for j in xrange(i+1, n):
s = 0
for k in xrange(i, m):
s += A[k,i] * A[k,j]
f = s / h
for k in xrange(i, m):
A[k,j] += f * A[k,i]
for k in xrange(i,m):
A[k,i] *= scale
S[i] = scale * g
g = s = scale = 0
if i < m and i != n - 1:
for k in xrange(i+1, n):
scale += ctx.fabs(A[i,k])
if scale:
for k in xrange(i+1, n):
A[i,k] /= scale
s += A[i,k] * A[i,k]
f = A[i,i+1]
g = -ctx.sqrt(s)
if f < 0:
g = -g
h = f * g - s
A[i,i+1] = f - g
for k in xrange(i+1, n):
work[k] = A[i,k] / h
for j in xrange(i+1, m):
s = 0
for k in xrange(i+1, n):
s += A[j,k] * A[i,k]
for k in xrange(i+1, n):
A[j,k] += s * work[k]
for k in xrange(i+1, n):
A[i,k] *= scale
anorm = max(anorm, ctx.fabs(S[i]) + ctx.fabs(work[i]))
if not isinstance(V, bool):
for i in xrange(n-2, -1, -1): # accumulation of right hand transformations
V[i+1,i+1] = 1
if work[i+1] != 0:
for j in xrange(i+1, n):
V[i,j] = (A[i,j] / A[i,i+1]) / work[i+1]
for j in xrange(i+1, n):
s = 0
for k in xrange(i+1, n):
s += A[i,k] * V[j,k]
for k in xrange(i+1, n):
V[j,k] += s * V[i,k]
for j in xrange(i+1, n):
V[j,i] = V[i,j] = 0
V[0,0] = 1
if m<n : minnm = m
else : minnm = n
if calc_u:
for i in xrange(minnm-1, -1, -1): # accumulation of left hand transformations
g = S[i]
for j in xrange(i+1, n):
A[i,j] = 0
if g != 0:
g = 1 / g
for j in xrange(i+1, n):
s = 0
for k in xrange(i+1, m):
s += A[k,i] * A[k,j]
f = (s / A[i,i]) * g
for k in xrange(i, m):
A[k,j] += f * A[k,i]
for j in xrange(i, m):
A[j,i] *= g
else:
for j in xrange(i, m):
A[j,i] = 0
A[i,i] += 1
for k in xrange(n - 1, -1, -1):
# diagonalization of the bidiagonal form:
# loop over singular values, and over allowed iterations
its = 0
while 1:
its += 1
flag = True
for l in xrange(k, -1, -1):
nm = l-1
if ctx.fabs(work[l]) + anorm == anorm:
flag = False
break
if ctx.fabs(S[nm]) + anorm == anorm:
break
if flag:
c = 0
s = 1
for i in xrange(l, k + 1):
f = s * work[i]
work[i] *= c
if ctx.fabs(f) + anorm == anorm:
break
g = S[i]
h = ctx.hypot(f, g)
S[i] = h
h = 1 / h
c = g * h
s = - f * h
if calc_u:
for j in xrange(m):
y = A[j,nm]
z = A[j,i]
A[j,nm] = y * c + z * s
A[j,i] = z * c - y * s
z = S[k]
if l == k: # convergence
if z < 0: # singular value is made nonnegative
S[k] = -z
if not isinstance(V, bool):
for j in xrange(n):
V[k,j] = -V[k,j]
break
if its >= maxits:
raise RuntimeError("svd: no convergence to an eigenvalue after %d iterations" % its)
x = S[l] # shift from bottom 2 by 2 minor
nm = k-1
y = S[nm]
g = work[nm]
h = work[k]
f = ((y - z) * (y + z) + (g - h) * (g + h))/(2 * h * y)
g = ctx.hypot(f, 1)
if f >= 0: f = ((x - z) * (x + z) + h * ((y / (f + g)) - h)) / x
else: f = ((x - z) * (x + z) + h * ((y / (f - g)) - h)) / x
c = s = 1 # next QR transformation
for j in xrange(l, nm + 1):
g = work[j+1]
y = S[j+1]
h = s * g
g = c * g
z = ctx.hypot(f, h)
work[j] = z
c = f / z
s = h / z
f = x * c + g * s
g = g * c - x * s
h = y * s
y *= c
if not isinstance(V, bool):
for jj in xrange(n):
x = V[j ,jj]
z = V[j+1,jj]
V[j ,jj]= x * c + z * s
V[j+1 ,jj]= z * c - x * s
z = ctx.hypot(f, h)
S[j] = z
if z != 0: # rotation can be arbitrary if z=0
z = 1 / z
c = f * z
s = h * z
f = c * g + s * y
x = c * y - s * g
if calc_u:
for jj in xrange(m):
y = A[jj,j ]
z = A[jj,j+1]
A[jj,j ] = y * c + z * s
A[jj,j+1 ] = z * c - y * s
work[l] = 0
work[k] = f
S[k] = x
##########################
# Sort singular values into decreasing order (bubble-sort)
for i in xrange(n):
imax = i
s = ctx.fabs(S[i]) # s is the current maximal element
for j in xrange(i + 1, n):
c = ctx.fabs(S[j])
if c > s:
s = c
imax = j
if imax != i:
# swap singular values
z = S[i]
S[i] = S[imax]
S[imax] = z
if calc_u:
for j in xrange(m):
z = A[j,i]
A[j,i] = A[j,imax]
A[j,imax] = z
if not isinstance(V, bool):
for j in xrange(n):
z = V[i,j]
V[i,j] = V[imax,j]
V[imax,j] = z
return S
#######################
def svd_c_raw(ctx, A, V = False, calc_u = False):
"""
This routine computes the singular value decomposition of a matrix A.
Given A, two unitary matrices U and V are calculated such that
A = U S V
where S is a suitably shaped matrix whose off-diagonal elements are zero.
The diagonal elements of S are the singular values of A, i.e. the
square roots of the eigenvalues of A' A or A A'. Here ' denotes the hermitian
transpose (i.e. transposition and conjugation). Householder bidiagonalization
and a variant of the QR algorithm is used.
overview of the matrices :
A : m*n A gets replaced by U
U : m*n U replaces A. If n>m then only the first m*m block of U is
non-zero. column-unitary: U' U = B
here B is a n*n matrix whose first min(m,n) diagonal
elements are 1 and all other elements are zero.
S : n*n diagonal matrix, only the diagonal elements are stored in
the array S. only the first min(m,n) diagonal elements are non-zero.
V : n*n unitary: V V' = V' V = 1
parameters:
A (input/output) On input, A contains a complex matrix of shape m*n.
On output, if calc_u is true A contains the column-unitary
matrix U; otherwise A is simply used as workspace and thus destroyed.
V (input/output) if false, the matrix V is not calculated. otherwise
V must be a matrix of shape n*n.
calc_u (input) If true, the matrix U is calculated and replaces A.
if false, U is not calculated and A is simply destroyed
return value:
S an array of length n containing the singular values of A sorted by
decreasing magnitude. only the first min(m,n) elements are non-zero.
This routine is a python translation of the fortran routine svd.f in the
software library EISPACK (see netlib.org) which itself is based on the
algol procedure svd described in:
- num. math. 14, 403-420(1970) by golub and reinsch.
- wilkinson/reinsch: handbook for auto. comp., vol ii-linear algebra, 134-151(1971).
"""
m, n = A.rows, A.cols
S = ctx.zeros(n, 1)
# work is a temporary array of size n
work = ctx.zeros(n, 1)
lbeta = ctx.zeros(n, 1)
rbeta = ctx.zeros(n, 1)
dwork = ctx.zeros(n, 1)
g = scale = anorm = 0
maxits = 3 * ctx.dps
for i in xrange(n): # householder reduction to bidiagonal form
dwork[i] = scale * g # dwork are the side-diagonal elements
g = s = scale = 0
if i < m:
for k in xrange(i, m):
scale += ctx.fabs(ctx.re(A[k,i])) + ctx.fabs(ctx.im(A[k,i]))
if scale != 0:
for k in xrange(i, m):
A[k,i] /= scale
ar = ctx.re(A[k,i])
ai = ctx.im(A[k,i])
s += ar * ar + ai * ai
f = A[i,i]
g = -ctx.sqrt(s)
if ctx.re(f) < 0:
beta = -g - ctx.conj(f)
g = -g
else:
beta = -g + ctx.conj(f)
beta /= ctx.conj(beta)
beta += 1
h = 2 * (ctx.re(f) * g - s)
A[i,i] = f - g
beta /= h
lbeta[i] = (beta / scale) / scale
for j in xrange(i+1, n):
s = 0
for k in xrange(i, m):
s += ctx.conj(A[k,i]) * A[k,j]
f = beta * s
for k in xrange(i, m):
A[k,j] += f * A[k,i]
for k in xrange(i, m):
A[k,i] *= scale
S[i] = scale * g # S are the diagonal elements
g = s = scale = 0
if i < m and i != n - 1:
for k in xrange(i+1, n):
scale += ctx.fabs(ctx.re(A[i,k])) + ctx.fabs(ctx.im(A[i,k]))
if scale:
for k in xrange(i+1, n):
A[i,k] /= scale
ar = ctx.re(A[i,k])
ai = ctx.im(A[i,k])
s += ar * ar + ai * ai
f = A[i,i+1]
g = -ctx.sqrt(s)
if ctx.re(f) < 0:
beta = -g - ctx.conj(f)
g = -g
else:
beta = -g + ctx.conj(f)
beta /= ctx.conj(beta)
beta += 1
h = 2 * (ctx.re(f) * g - s)
A[i,i+1] = f - g
beta /= h
rbeta[i] = (beta / scale) / scale
for k in xrange(i+1, n):
work[k] = A[i, k]
for j in xrange(i+1, m):
s = 0
for k in xrange(i+1, n):
s += ctx.conj(A[i,k]) * A[j,k]
f = s * beta
for k in xrange(i+1,n):
A[j,k] += f * work[k]
for k in xrange(i+1, n):
A[i,k] *= scale
anorm = max(anorm,ctx.fabs(S[i]) + ctx.fabs(dwork[i]))
if not isinstance(V, bool):
for i in xrange(n-2, -1, -1): # accumulation of right hand transformations
V[i+1,i+1] = 1
if dwork[i+1] != 0:
f = ctx.conj(rbeta[i])
for j in xrange(i+1, n):
V[i,j] = A[i,j] * f
for j in xrange(i+1, n):
s = 0
for k in xrange(i+1, n):
s += ctx.conj(A[i,k]) * V[j,k]
for k in xrange(i+1, n):
V[j,k] += s * V[i,k]
for j in xrange(i+1,n):
V[j,i] = V[i,j] = 0
V[0,0] = 1
if m < n : minnm = m
else : minnm = n
if calc_u:
for i in xrange(minnm-1, -1, -1): # accumulation of left hand transformations
g = S[i]
for j in xrange(i+1, n):
A[i,j] = 0
if g != 0:
g = 1 / g
for j in xrange(i+1, n):
s = 0
for k in xrange(i+1, m):
s += ctx.conj(A[k,i]) * A[k,j]
f = s * ctx.conj(lbeta[i])
for k in xrange(i, m):
A[k,j] += f * A[k,i]
for j in xrange(i, m):
A[j,i] *= g
else:
for j in xrange(i, m):
A[j,i] = 0
A[i,i] += 1
for k in xrange(n-1, -1, -1):
# diagonalization of the bidiagonal form:
# loop over singular values, and over allowed iterations
its = 0
while 1:
its += 1
flag = True
for l in xrange(k, -1, -1):
nm = l - 1
if ctx.fabs(dwork[l]) + anorm == anorm:
flag = False
break
if ctx.fabs(S[nm]) + anorm == anorm:
break
if flag:
c = 0
s = 1
for i in xrange(l, k+1):
f = s * dwork[i]
dwork[i] *= c
if ctx.fabs(f) + anorm == anorm:
break
g = S[i]
h = ctx.hypot(f, g)
S[i] = h
h = 1 / h
c = g * h
s = -f * h
if calc_u:
for j in xrange(m):
y = A[j,nm]
z = A[j,i]
A[j,nm]= y * c + z * s
A[j,i] = z * c - y * s
z = S[k]
if l == k: # convergence
if z < 0: # singular value is made nonnegative
S[k] = -z
if not isinstance(V, bool):
for j in xrange(n):
V[k,j] = -V[k,j]
break
if its >= maxits:
raise RuntimeError("svd: no convergence to an eigenvalue after %d iterations" % its)
x = S[l] # shift from bottom 2 by 2 minor
nm = k-1
y = S[nm]
g = dwork[nm]
h = dwork[k]
f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2 * h * y)
g = ctx.hypot(f, 1)
if f >=0: f = (( x - z) *( x + z) + h *((y / (f + g)) - h)) / x
else: f = (( x - z) *( x + z) + h *((y / (f - g)) - h)) / x
c = s = 1 # next QR transformation
for j in xrange(l, nm + 1):
g = dwork[j+1]
y = S[j+1]
h = s * g
g = c * g
z = ctx.hypot(f, h)
dwork[j] = z
c = f / z
s = h / z
f = x * c + g * s
g = g * c - x * s
h = y * s
y *= c
if not isinstance(V, bool):
for jj in xrange(n):
x = V[j ,jj]
z = V[j+1,jj]
V[j ,jj]= x * c + z * s
V[j+1,jj ]= z * c - x * s
z = ctx.hypot(f, h)
S[j] = z
if z != 0: # rotation can be arbitrary if z=0
z = 1 / z
c = f * z
s = h * z
f = c * g + s * y
x = c * y - s * g
if calc_u:
for jj in xrange(m):
y = A[jj,j ]
z = A[jj,j+1]
A[jj,j ]= y * c + z * s
A[jj,j+1 ]= z * c - y * s
dwork[l] = 0
dwork[k] = f
S[k] = x
##########################
# Sort singular values into decreasing order (bubble-sort)
for i in xrange(n):
imax = i
s = ctx.fabs(S[i]) # s is the current maximal element
for j in xrange(i + 1, n):
c = ctx.fabs(S[j])
if c > s:
s = c
imax = j
if imax != i:
# swap singular values
z = S[i]
S[i] = S[imax]
S[imax] = z
if calc_u:
for j in xrange(m):
z = A[j,i]
A[j,i] = A[j,imax]
A[j,imax] = z
if not isinstance(V, bool):
for j in xrange(n):
z = V[i,j]
V[i,j] = V[imax,j]
V[imax,j] = z
return S
##################################################################################################
@defun
def svd_r(ctx, A, full_matrices = False, compute_uv = True, overwrite_a = False):
"""
This routine computes the singular value decomposition of a matrix A.
Given A, two orthogonal matrices U and V are calculated such that
A = U S V and U' U = 1 and V V' = 1
where S is a suitably shaped matrix whose off-diagonal elements are zero.
Here ' denotes the transpose. The diagonal elements of S are the singular
values of A, i.e. the square roots of the eigenvalues of A' A or A A'.
input:
A : a real matrix of shape (m, n)
full_matrices : if true, U and V are of shape (m, m) and (n, n).
if false, U and V are of shape (m, min(m, n)) and (min(m, n), n).
compute_uv : if true, U and V are calculated. if false, only S is calculated.
overwrite_a : if true, allows modification of A which may improve
performance. if false, A is not modified.
output:
U : an orthogonal matrix: U' U = 1. if full_matrices is true, U is of
shape (m, m). otherwise it is of shape (m, min(m, n)).
S : an array of length min(m, n) containing the singular values of A sorted by
decreasing magnitude.
V : an orthogonal matrix: V V' = 1. if full_matrices is true, V is of
shape (n, n). otherwise it is of shape (min(m, n), n).
return value:
S if compute_uv is false
(U, S, V) if compute_uv is true
overview of the matrices:
full_matrices true:
A : m*n
U : m*m U' U = 1
S as matrix : m*n
V : n*n V V' = 1
full_matrices false:
A : m*n
U : m*min(n,m) U' U = 1
S as matrix : min(m,n)*min(m,n)
V : min(m,n)*n V V' = 1
examples:
>>> from mpmath import mp
>>> A = mp.matrix([[2, -2, -1], [3, 4, -2], [-2, -2, 0]])
>>> S = mp.svd_r(A, compute_uv = False)
>>> print(S)
[6.0]
[3.0]
[1.0]
>>> U, S, V = mp.svd_r(A)
>>> print(mp.chop(A - U * mp.diag(S) * V))
[0.0 0.0 0.0]
[0.0 0.0 0.0]
[0.0 0.0 0.0]
see also: svd, svd_c
"""
m, n = A.rows, A.cols
if not compute_uv:
if not overwrite_a:
A = A.copy()
S = svd_r_raw(ctx, A, V = False, calc_u = False)
S = S[:min(m,n)]
return S
if full_matrices and n < m:
V = ctx.zeros(m, m)
A0 = ctx.zeros(m, m)
A0[:,:n] = A
S = svd_r_raw(ctx, A0, V, calc_u = True)
S = S[:n]
V = V[:n,:n]
return (A0, S, V)
else:
if not overwrite_a:
A = A.copy()
V = ctx.zeros(n, n)
S = svd_r_raw(ctx, A, V, calc_u = True)
if n > m:
if full_matrices == False:
V = V[:m,:]
S = S[:m]
A = A[:,:m]
return (A, S, V)
##############################
@defun
def svd_c(ctx, A, full_matrices = False, compute_uv = True, overwrite_a = False):
"""
This routine computes the singular value decomposition of a matrix A.
Given A, two unitary matrices U and V are calculated such that
A = U S V and U' U = 1 and V V' = 1
where S is a suitably shaped matrix whose off-diagonal elements are zero.
Here ' denotes the hermitian transpose (i.e. transposition and complex
conjugation). The diagonal elements of S are the singular values of A,
i.e. the square roots of the eigenvalues of A' A or A A'.
input:
A : a complex matrix of shape (m, n)
full_matrices : if true, U and V are of shape (m, m) and (n, n).
if false, U and V are of shape (m, min(m, n)) and (min(m, n), n).
compute_uv : if true, U and V are calculated. if false, only S is calculated.
overwrite_a : if true, allows modification of A which may improve
performance. if false, A is not modified.
output:
U : a unitary matrix: U' U = 1. if full_matrices is true, U is of
shape (m, m). otherwise it is of shape (m, min(m, n)).
S : an array of length min(m, n) containing the singular values of A sorted by
decreasing magnitude.
V : a unitary matrix: V V' = 1. if full_matrices is true, V is of
shape (n, n). otherwise it is of shape (min(m, n), n).
return value:
S if compute_uv is false
(U, S, V) if compute_uv is true
overview of the matrices:
full_matrices true:
A : m*n
U : m*m U' U = 1
S as matrix : m*n
V : n*n V V' = 1
full_matrices false:
A : m*n
U : m*min(n,m) U' U = 1
S as matrix : min(m,n)*min(m,n)
V : min(m,n)*n V V' = 1
example:
>>> from mpmath import mp
>>> A = mp.matrix([[-2j, -1-3j, -2+2j], [2-2j, -1-3j, 1], [-3+1j,-2j,0]])
>>> S = mp.svd_c(A, compute_uv = False)
>>> print(mp.chop(S - mp.matrix([mp.sqrt(34), mp.sqrt(15), mp.sqrt(6)])))
[0.0]
[0.0]
[0.0]
>>> U, S, V = mp.svd_c(A)
>>> print(mp.chop(A - U * mp.diag(S) * V))
[0.0 0.0 0.0]
[0.0 0.0 0.0]
[0.0 0.0 0.0]
see also: svd, svd_r
"""
m, n = A.rows, A.cols
if not compute_uv:
if not overwrite_a:
A = A.copy()
S = svd_c_raw(ctx, A, V = False, calc_u = False)
S = S[:min(m,n)]
return S
if full_matrices and n < m:
V = ctx.zeros(m, m)
A0 = ctx.zeros(m, m)
A0[:,:n] = A
S = svd_c_raw(ctx, A0, V, calc_u = True)
S = S[:n]
V = V[:n,:n]
return (A0, S, V)
else:
if not overwrite_a:
A = A.copy()
V = ctx.zeros(n, n)
S = svd_c_raw(ctx, A, V, calc_u = True)
if n > m:
if full_matrices == False:
V = V[:m,:]
S = S[:m]
A = A[:,:m]
return (A, S, V)
@defun
def svd(ctx, A, full_matrices = False, compute_uv = True, overwrite_a = False):
"""
"svd" is a unified interface for "svd_r" and "svd_c". Depending on
whether A is real or complex the appropriate function is called.
This routine computes the singular value decomposition of a matrix A.
Given A, two orthogonal (A real) or unitary (A complex) matrices U and V
are calculated such that
A = U S V and U' U = 1 and V V' = 1
where S is a suitably shaped matrix whose off-diagonal elements are zero.
Here ' denotes the hermitian transpose (i.e. transposition and complex
conjugation). The diagonal elements of S are the singular values of A,
i.e. the square roots of the eigenvalues of A' A or A A'.
input:
A : a real or complex matrix of shape (m, n)
full_matrices : if true, U and V are of shape (m, m) and (n, n).
if false, U and V are of shape (m, min(m, n)) and (min(m, n), n).
compute_uv : if true, U and V are calculated. if false, only S is calculated.
overwrite_a : if true, allows modification of A which may improve
performance. if false, A is not modified.
output:
U : an orthogonal or unitary matrix: U' U = 1. if full_matrices is true, U is of
shape (m, m). otherwise it is of shape (m, min(m, n)).
S : an array of length min(m, n) containing the singular values of A sorted by
decreasing magnitude.
V : an orthogonal or unitary matrix: V V' = 1. if full_matrices is true, V is of
shape (n, n). otherwise it is of shape (min(m, n), n).
return value:
S if compute_uv is false
(U, S, V) if compute_uv is true
overview of the matrices:
full_matrices true:
A : m*n
U : m*m U' U = 1
S as matrix : m*n
V : n*n V V' = 1
full_matrices false:
A : m*n
U : m*min(n,m) U' U = 1
S as matrix : min(m,n)*min(m,n)
V : min(m,n)*n V V' = 1
examples:
>>> from mpmath import mp
>>> A = mp.matrix([[2, -2, -1], [3, 4, -2], [-2, -2, 0]])
>>> S = mp.svd(A, compute_uv = False)
>>> print(S)
[6.0]
[3.0]
[1.0]
>>> U, S, V = mp.svd(A)
>>> print(mp.chop(A - U * mp.diag(S) * V))
[0.0 0.0 0.0]
[0.0 0.0 0.0]
[0.0 0.0 0.0]
see also: svd_r, svd_c
"""
iscomplex = any(type(x) is ctx.mpc for x in A)
if iscomplex:
return ctx.svd_c(A, full_matrices = full_matrices, compute_uv = compute_uv, overwrite_a = overwrite_a)
else:
return ctx.svd_r(A, full_matrices = full_matrices, compute_uv = compute_uv, overwrite_a = overwrite_a)
|
JensGrabner/mpmath
|
mpmath/matrices/eigen_symmetric.py
|
Python
|
bsd-3-clause
| 58,534
|
[
"Gaussian"
] |
14f28f790af5706630e98e9e6b7d9ad587445900cb3fa265407100d967cd2d88
|
'''
Simulation data model for the EI network: in general a network of two
populations, E and I cells; more specifically, a grid cell network.
'''
import numpy as np
from ...analysis import spikes
def getNetParam(data, p):
'''Extract a network parameter (p) from the data dictionary'''
return data['net_attr'][p]
def getOption(data, o):
return data['options'][o]
def extractSpikes(mon):
'''
Extract spikes from a spike monitor (a dict-like object) that contains the
relevant fields.
Return a tuple (senders, spikeTimes).
'''
e = mon['events']
return (e['senders'], e['times'])
def sliceSignal(t, sig, tStart, tEnd):
idx = np.logical_and(t >= tStart, t <= tEnd)
return t[idx], sig[idx], idx
def extractStateVariable(mon, nIdx, varStr):
'''Extract state variable from a monitor.
Parameters
----------
mon : list of dicts
A list of (NEST) monitors, each monitoring one neuron.
nIdx : int
Neuron index
varStr : str
Name of the variable
output
A tuple (data, dt), for the signal
'''
n = mon[nIdx]
return n['events'][varStr], n['interval']
def sumAllVariables(mon, nIdx, varList):
'''
Extract all variables from the list of monitors and sum them. The variables
must implement the + operator.
Parameters
----------
mon : a list of dicts
A list that contains dictionaries of monitors. The list should be
compatible with the extractStateVariable function.
nIdx : int
Neuron index
varList : list of strings
Contains the list of variables that should be extracted from the
monitor and summed up.
output
A tuple (sum, dt) that contains the sum of all the variables 'sum' and
the sampling rate of the signals ('dt').
'''
sigSum = None
dtCheck = None
for idx in range(len(varList)):
sig, dt = extractStateVariable(mon, nIdx, varList[idx])
if (idx == 0):
sigSum = sig
dtCheck = dt
else:
assert(dtCheck == dt)
sigSum += sig
return sigSum, dt
def extractSummedSignals(monList, varName, tStart, tEnd, monIdx=0):
'''
Extract state variables from a pair of monitors. One in the centre, the
other one at the edge of the neural sheet.
'''
t, dt = extractStateVariable(monList, monIdx, 'times')
sig, dt = sumAllVariables(monList, monIdx, varName)
t, sig, idx = sliceSignal(t, sig, tStart, tEnd)
return t, sig
class MonitoredSpikes(spikes.PopulationSpikes):
'''
This class extracts data from a DataStorage object (a dictionary).
'''
def __init__(self, data, monName, NName):
'''
Return the senders and spike times from a monitor in the data
dictionary
Parameters
----------
data : dict
A data dictionary containing the monitor monName.
monName : string
The spike monitor dictionary name.
NName : string
Name of the network parameter that specifies the total number of
neurons.
'''
N = data['net_attr'][NName]
senders, times = extractSpikes(data[monName])
spikes.PopulationSpikes.__init__(self, N, senders, times)
class MonitoredTorusSpikes(spikes.TorusPopulationSpikes):
'''Spikes that extract torus data from a NEST spike monitor.'''
def __init__(self, data, monName, NXName, NYName):
'''
Return the senders and spike times from a monitor in the data
dictionary
Parameters
----------
data : dict
A data dictionary containing the monitor monName.
monName : string
The spike monitor dictionary name.
NXName, NYName : string
Names of the network parameters that specify the horizontal (X) and
vertical (Y) sizes of the torus population.
'''
Nx = data['net_attr'][NXName]
Ny = data['net_attr'][NYName]
senders, times = extractSpikes(data[monName])
super(MonitoredTorusSpikes, self).__init__(senders, times, (Nx, Ny))
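# Illustrative usage sketch (assumed; the monitor and parameter names below are
# placeholders for whatever keys the simulation stored in the data dictionary):
#
#   e_spikes = MonitoredSpikes(data, monName='spikeMon_e', NName='net_Ne')
#   # e_spikes now behaves like a spikes.PopulationSpikes object built from the
#   # senders and spike times recorded by the NEST spike monitor.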
|
MattNolanLab/ei-attractor
|
grid_cell_model/data_storage/sim_models/ei.py
|
Python
|
gpl-3.0
| 4,127
|
[
"NEURON"
] |
0758de1452feb05f89aba4a1c59ebfb02317c83ac39a8aeeb59e259fdf2dddd0
|
import numpy as np
import sys, os
import nrrd
def sphere(shape, radius, position):
# assume shape and position are both a 3-tuple of int or float
# the units are pixels / voxels (px for short)
# radius is an int or float in px
semisizes = (radius,) * 3
#ignore divide by zero
np.seterr(divide='ignore', invalid='ignore')
# generate the grid for the support points
# centered at the position indicated by position
grid = [slice(-x0, dim - x0) for x0, dim in zip(position, shape)]
position = np.ogrid[grid]
# calculate the distance of all points from `position` center
# scaled by the radius
arr = np.zeros(shape, dtype=float)
for x_i, semisize in zip(position, semisizes):
arr += (np.abs(x_i / semisize) ** 2)
# the inner part of the sphere will have distance below 1
return arr <= 1.0
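# Illustrative example of the helper above (not part of the original script):
# a 64**3 volume with a radius-5 sphere of True voxels centred at (32, 32, 32).
#
#   mask = sphere((64, 64, 64), 5.0, (32, 32, 32))
#   assert mask.shape == (64, 64, 64) and mask.dtype == bool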
scale=1
if (len(sys.argv) < 4):
print('Error: missing arguments!')
print('e.g. python swc2nrrd.py template.nrrd neuron.swc Image.nrrd [width] [scale] [Xoffset,Yoffset,Zoffset]')
else:
Itemp = str(sys.argv[1])
Iswc = str(sys.argv[2])
Iout = str(sys.argv[3])
offset = [0.0,0.0,0.0]
bounded = True
w = 0
if (len(sys.argv) > 4):
w=np.int32(sys.argv[4])
if (len(sys.argv) > 5):
scale=np.float(sys.argv[5])
bounded = False
if (len(sys.argv) > 6):
offset=[np.float(x) for x in sys.argv[6].split(',')]
print('Loading %s...'% (Itemp))
tempData1, tempHeader1 = nrrd.read(Itemp)
print('Loading %s...'% (Iswc))
with open(Iswc) as fI:
swcIn = fI.readlines()
lineDict = {}
for thisLine in swcIn:
if thisLine[0]!='#':
splitLine = thisLine.split(" ")
lineDict[int(splitLine[0])] = {'position':np.array([np.float(splitLine[2])+offset[0],np.float(splitLine[3])+offset[1],np.float(splitLine[4])+offset[2]],dtype=np.float),
'radius':splitLine[5],
'parent':int(splitLine[6])}
extent=tempHeader1['sizes']
print(extent)
outputImg = np.zeros(extent,dtype=np.uint8)
r=0
for thisDict in lineDict.values():
r=w
p = np.clip(np.floor(np.divide(np.divide(thisDict['position'],[tempHeader1['space directions'][0][0],tempHeader1['space directions'][1][1],tempHeader1['space directions'][2][2]]),scale)),[0,0,0],np.subtract(extent,1)).astype(np.int)
if thisDict['radius'] != "NA" and np.divide(float(thisDict['radius']),scale)>r:
r=np.divide(float(thisDict['radius']),scale)
if r<1:
outputImg[p[0],p[1],p[2]]=np.uint8(255)
else:
point = np.multiply(sphere(extent, r, p),np.uint8(255)).astype(np.uint8)
outputImg = np.maximum(outputImg, point).astype(np.uint8)
nrrd.write(Iout, np.uint8(outputImg), header=tempHeader1)
print('saved to ' + Iout)
|
Robbie1977/NRRDtools
|
swc2nrrd.py
|
Python
|
mit
| 2,954
|
[
"NEURON"
] |
1694ee444f781f48d8c8d7fea8c73fe2835de8ada8ffc8255ad27f7179113967
|
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import os
import numpy as np
from pyemma.coordinates.data import MDFeaturizer
from logging import getLogger
import pyemma.coordinates.api as api
import pyemma.util.types as types
import pkg_resources
from pyemma.util.files import TemporaryDirectory
logger = getLogger('pyemma.'+'TestReaderUtils')
def convert_traj(file, format, top, dir=None):
import subprocess
from mdtraj.scripts import mdconvert
if dir is not None:
outname = os.path.basename(file)
else:
outname = file
out = '{dir}{name}.{format}'.format(format=format, name=outname, dir=dir if dir is not None else '')
import sys
subprocess.check_call([sys.executable, '-m', 'mdtraj.scripts.mdconvert', file, '-o', out, '-t', top])
#subprocess.check_call(['env'], env=os.environ)
return out
class TestSource(unittest.TestCase):
def setUp(self):
path = pkg_resources.resource_filename('pyemma.coordinates.tests', 'data') + os.path.sep
self.pdb_file = os.path.join(path, 'bpti_ca.pdb')
self.traj_files = [
os.path.join(path, 'bpti_001-033.xtc'),
os.path.join(path, 'bpti_067-100.xtc')
]
def test_read_multiple_files_topology_file(self):
reader = api.source(self.traj_files, top=self.pdb_file)
self.assertIsNotNone(reader, "The reader should not be none.")
self.assertEqual(reader.topfile, self.pdb_file, "Reader topology file and input topology file should coincide.")
self.assertListEqual(reader.filenames, self.traj_files, "Reader trajectories and input"
" trajectories should coincide.")
self.assertEqual(reader.featurizer.topologyfile, self.pdb_file, "Featurizers topology file and input "
"topology file should coincide.")
def test_read_multiple_files_featurizer(self):
featurizer = MDFeaturizer(self.pdb_file)
reader = api.source(self.traj_files, features=featurizer)
self.assertIsNotNone(reader, "The reader should not be none.")
self.assertEqual(reader.topfile, self.pdb_file, "Reader topology file and input topology file should coincide.")
self.assertListEqual(reader.filenames, self.traj_files, "Reader trajectories and input"
" trajectories should coincide.")
self.assertEqual(reader.featurizer.topologyfile, self.pdb_file, "Featurizers topology file and input "
"topology file should coincide.")
def test_read_single_file_topology_file(self):
reader = api.source(self.traj_files[0], top=self.pdb_file)
self.assertIsNotNone(reader, "The reader should not be none.")
self.assertEqual(reader.topfile, self.pdb_file, "Reader topology file and input topology file should coincide.")
self.assertListEqual(reader.filenames, [self.traj_files[0]], "Reader trajectories and input"
" trajectories should coincide.")
self.assertEqual(reader.featurizer.topologyfile, self.pdb_file, "Featurizers topology file and input "
"topology file should coincide.")
def test_read_single_file_featurizer(self):
featurizer = MDFeaturizer(self.pdb_file)
reader = api.source(self.traj_files[0], features=featurizer)
self.assertIsNotNone(reader, "The reader should not be none.")
self.assertEqual(reader.topfile, self.pdb_file, "Reader topology file and input topology file should coincide.")
self.assertListEqual(reader.filenames, [self.traj_files[0]], "Reader trajectories and input"
" trajectories should coincide.")
self.assertEqual(reader.featurizer.topologyfile, self.pdb_file, "Featurizers topology file and input "
"topology file should coincide.")
def test_invalid_input(self):
# neither featurizer nor topology file given
self.assertRaises(ValueError, api.source, self.traj_files, None, None)
# no input files but a topology file
self.assertRaises(ValueError, api.source, None, None, self.pdb_file)
featurizer = MDFeaturizer(self.pdb_file)
# no input files but a featurizer
self.assertRaises(ValueError, api.source, None, featurizer, None)
# empty list of input files
self.assertRaises(ValueError, api.source, [], None, self.pdb_file)
# empty tuple of input files
self.assertRaises(ValueError, api.source, (), None, self.pdb_file)
def test_invalid_files(self):
# files do not have the same extension
self.assertRaises(ValueError, api.source, self.traj_files + [self.pdb_file], None, self.pdb_file)
# files list contains something other than strings
self.assertRaises(ValueError, api.source, self.traj_files + [[2]], None, self.pdb_file)
# input file is directory
root_dir = os.path.abspath(os.sep)
self.assertRaises(ValueError, api.source, root_dir, None, self.pdb_file)
def test_h5_mdtraj_vs_plain(self):
with TemporaryDirectory() as td:
f = convert_traj(self.traj_files[0], format='h5', dir=td, top=self.pdb_file)
r = api.source(f, top=self.pdb_file)
from pyemma.coordinates.data import FeatureReader
self.assertIsInstance(r, FeatureReader)
import h5py
from pyemma.coordinates.data.h5_reader import H5Reader
plain_h5_file = os.path.join(td, 'f.h5')
with h5py.File(plain_h5_file, mode='a') as fh:
fh.create_dataset('test', data=np.random.random((100, 3)))
r = api.source(plain_h5_file)
self.assertIsInstance(r, H5Reader)
class TestSourceCallAll(unittest.TestCase):
@classmethod
def setUpClass(cls):
path = pkg_resources.resource_filename(__name__, 'data') + os.path.sep
cls.pdb_file = os.path.join(path, 'bpti_ca.pdb')
cls.xtc_file = os.path.join(path, 'bpti_mini.xtc')
cls.inp = api.source(cls.xtc_file, top=cls.pdb_file)
def setUp(self):
pass
def test_chunksize(self):
assert types.is_int(self.inp.chunksize)
def test_describe(self):
desc = self.inp.describe()
assert types.is_string(desc) or types.is_list_of_string(desc)
def test_dimension(self):
assert types.is_int(self.inp.dimension())
def test_featurizer(self):
# must have a featurizer
assert self.inp.featurizer is not None
def test_get_output(self):
O = self.inp.get_output()
assert types.is_list(O)
assert len(O) == 1
assert types.is_float_matrix(O[0])
assert O[0].shape[0] == 100
assert O[0].shape[1] == self.inp.dimension()
def test_in_memory(self):
assert isinstance(self.inp.in_memory, bool)
def test_iterator(self):
self.inp.chunksize = 100
assert self.inp.chunksize == 100
for itraj, chunk in self.inp:
assert types.is_int(itraj)
assert types.is_float_matrix(chunk)
assert chunk.shape[0] == self.inp.chunksize
assert chunk.shape[1] == self.inp.dimension()
def test_n_frames_total(self):
# map not defined for source
assert self.inp.n_frames_total() == 100
def test_number_of_trajectories(self):
# map not defined for source
assert self.inp.number_of_trajectories() == 1
def test_output_type(self):
assert self.inp.output_type() == np.float32()
def test_topfile(self):
assert types.is_string(self.inp.topfile)
def test_trajectory_length(self):
assert self.inp.trajectory_length(0) == 100
with self.assertRaises(IndexError):
self.inp.trajectory_length(1)
def test_trajectory_lengths(self):
assert len(self.inp.trajectory_lengths()) == 1
assert self.inp.trajectory_lengths()[0] == self.inp.trajectory_length(0)
def test_trajfiles(self):
assert types.is_list_of_string(self.inp.filenames)
if __name__ == "__main__":
unittest.main()
|
markovmodel/PyEMMA
|
pyemma/coordinates/tests/test_source.py
|
Python
|
lgpl-3.0
| 9,204
|
[
"MDTraj"
] |
53241c2291c0081d9da75107e0d090c08f04b7a51d354b134e4423316f2e857b
|
#-*- coding:utf-8 -*-
"""
PyHdust *poltools* module: polarimetry tools
History:
-grafpol working for *_WP1110....log files!
-grafpol working for log/out files with more than a single star
:co-author: Daniel Bednarski
:license: GNU GPL v3.0 (https://github.com/danmoser/pyhdust/blob/master/LICENSE)
"""
from __future__ import print_function
from builtins import input
from sys import exit, stderr
import os as _os
import re as _re
import pwd as _pwd
import time as _time
from glob import glob as _glob
import numpy as _np
import datetime as _dt
import shutil as _shutil
# from itertools import product as _product
from inspect import getouterframes as _getouterframes
from inspect import currentframe as _currentframe
import pyhdust.phc as _phc
import pyhdust.jdcal as _jdcal
from pyhdust import hdtpath as _hdtpath
from scipy.optimize import curve_fit as _curve_fit
from scipy.integrate import simps as _simps
from scipy.interpolate import interp1d as _interp1d
import matplotlib as _mpl
# from sys import _argv
# from matplotlib import rc as _rc
try:
import matplotlib.pyplot as _plt
from matplotlib.transforms import offset_copy as _offset_copy
import pyfits as _pyfits
except:
print('# Warning! matplotlib and/or pyfits module not installed!!!')
__author__ = "Daniel Moser"
__email__ = "dmfaes@gmail.com"
_mpl.rcParams['pdf.fonttype'] = 42
filters = ['u', 'b', 'v', 'r', 'i']
fonts = [20, 17, 17, 14, 13] # Font sizes for titles, axes labels, axes values, key label of graphs, subplot labels
# Setting an "initial value" for ccd
ccd = '---'
# Dictionary for the tags entered by the user
dictags = {0: ['bad modulation', 'bad-mod'],
1: ['very bad modulation', 'very-bad-mod'],
2: ['the pol values are incompatible with each other', 'incomp-mods'],
3: ['some observational problem/error', 'obs-prob'],
4: ['polarimeter problem suspected', 'iagpol-prob'],
5: ['another relevant problem', 'other-prob'],
}
# Dictionary for the tags assigned automatically
# If you want to add another value, add it inside the verout routine as well.
dictests = {0: ['std incompatible with the published', 'obs!=pub', 'W'],
1: ['sig >> theoretical_sig', 's>>theor_s', 'OK'],
2: ['no standard in the night', 'no-std', 'W'],
3: ['standard from another day', 'oth-day-std', 'W'],
4: ['delta theta estimated from another filter', 'oth-dth', 'W'],
}
# Dictionary for pre-defined vfilters
vfil = { 'comp' : ['no-std','iagpol-prob','oth-day-std'],
'prob' : ['no-std','iagpol-prob','incomp-mods','obs-prob','other-prob','oth-day-std'],
'full' : ['no-std','iagpol-prob','incomp-mods','obs-prob','other-prob','oth-day-std','bad-mod','very-bad-mod'],
}
#################################################
#################################################
#################################################
def eprint(*args, **kwargs):
print(*args, file=stderr, **kwargs)
#################################################
#################################################
#################################################
def stdchk(stdname):
"""
Check if the standard star name contains a known name, and return
its position in `padroes.txt`.
"""
lstds = list(_np.loadtxt('{0}/refs/pol_padroes.txt'.format(_hdtpath()), dtype=str,\
usecols=[0]))
chk = False
i = -1
for std in lstds:
if stdname.find(std) > -1:
chk = True
i = lstds.index(std)
return chk, i
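# Illustrative sketch (not part of the original module): how the two return
# values of stdchk() might be consumed. The name 'hd110984' is only an assumed
# example; it must appear in refs/pol_padroes.txt for chk to be True.
def _example_stdchk():
    chk, i = stdchk('hd110984')
    if chk:
        print('Standard found at row {0} of pol_padroes.txt'.format(i))
    else:
        print('Not a known standard star')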
#################################################
#################################################
#################################################
def countStars(objdir, f):
"""
Count how many stars there are inside outfiles in 'objdir'
and filter f. Return 0 if there are no outfiles
"""
louts = _glob('{0}/*_{1}_*.out'.format(objdir,f))
if len(louts) == 0:
counts = 0
else:
file0 = _np.loadtxt(louts[0], dtype=str, delimiter='\n', comments=None)
counts = len(file0)-1 # -1 because of the header line
return counts
#################################################
#################################################
#################################################
def thtFactor(MJD):
"""
Return the factor for polarization angle 'theta'. This factor
indicates when theta must be taken as 180-theta (factor==-1)
or +theta (factor==+1).
It is based on the engine of the IAGPOL polarimeter.
If MJD < 57082.5, return -1 (including MJD=-1, for no assigned
value); otherwise, return +1.
Theta out from polrap is correct for a WP rotating in
'counter-clockwise' direction, then:
factor = -1 when WP rotating in clockwise
factor = +1 when WP rotating in counter-clockwise
"""
if float(MJD) < 54101.5: # I suspect this is before 2007. Confirm the exact date
factor=1.
elif float(MJD) < 57082.5: # before 2015, March 1st
factor=-1.
else:
factor=1.
return factor
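# Illustrative sketch (not part of the original module): the sign convention
# returned by thtFactor() for a few assumed MJD values (before ~2007, between
# 2007 and 2015 March 1st, and afterwards).
def _example_thtFactor():
    for mjd in (54000.0, 56000.0, 57100.0):
        print(mjd, thtFactor(mjd))   # expected: 1.0, -1.0, 1.0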
#################################################
#################################################
#################################################
def readout(out, nstar=1):
"""
Read the *.out file from IRAF reduction and return a float array
Q U SIGMA P THETA SIGMAtheor. APERTURE STAR
'nstar' == star number inside the 'out' file (useful when there is
more than a single star inside the .out)
"""
f0 = open(out)
data = f0.readlines()
f0.close()
data = data[nstar].split()
return [float(x) for x in data]
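# Illustrative sketch (not part of the original module): unpacking the eight
# columns documented above. The outfile name is only an assumed example.
def _example_readout():
    Q, U, sig, P, th, sigT, ap, star = readout('someobj_v_16001.1.out', nstar=1)
    print('P = {0:.4f} +/- {1:.4f} % (theta = {2:.1f})'.format(
        P*100, sig*100, th))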
#################################################
#################################################
#################################################
def readoutMJD(out, nstar=1):
"""
Read the 'out' file from IRAF reduction in a float array (fout),
appending the MJD date and the angle of the beams from
calcite.
'nstar' == star number inside the 'out' file. PS: the calcite angle
is always evaluated using the first star's coordinates.
"""
path = _phc.trimpathname(out)[0]
outn = _phc.trimpathname(out)[1]
try:
data = readout(out, nstar=nstar)
except:
eprint('# ERROR: Can\'t open/read file {0}. Verify and run again.\n'.format(out))
exit(1)
WP = False
if '_WP' in outn:
outn = outn[:outn.rfind('_')]
WP = True
i = outn.rfind('_')+1
seq = int(outn[i:i+2])
npos = int(outn[i+2:i+5])
f = outn[outn.find('_')+1:outn.rfind('_')]
JD = _glob('{0}/JD_*_{1}'.format(path,f))
try:
f0 = open(JD[0])
date = f0.readlines()
f0.close()
datei = float(date[npos-1].split()[-1])-2400000.5
datef = float(date[npos-1+seq-1].split()[-1])-2400000.5
except:
eprint(('# ERROR: Found *_{0}_*.out files, but none JD file found as {1}/JD_*_{2}. '+\
'Verify and run again.\n').format(f,path,f))
exit(1)
if WP:
ver = outn[-1]
else:
i = outn[:-4].rfind('.') + 1
ver = outn[i:i + 1]
coords = _glob('{0}/coord_*_{1}.{2}.ord'.format(path,f,ver))
if len(coords) == 0 and ccd not in ('301','654'):
coords = _glob('{0}/coord_*_{1}.ord'.format(path,f))
if len(coords) == 0:
coords = _glob('{0}/coord_*_{1}_[0-9]*.ord'.format(path,f))
if len(coords) == 0:
eprint(('# ERROR: Found *_{0}_*.out files, but none COORD file found as '+\
'{1}/coord_*_{2}_*.ord. Verify and run again.\n').format(f,path,f))
exit(1)
try:
if ccd not in ('301', '654'):
coords = _np.loadtxt(coords[0])
ang = _np.arctan((coords[1, 1] - coords[0, 1]) / (coords[1, 0] - coords[0, 0])) * 180 / _np.pi
else:
coords = _np.array([[0., 0.], [0., 0.]])
ang = 0.
while ang < 0:
ang += 180.
except:
eprint('# ERROR: Can\'t open coords file {0}/coord_*_{1}_*.ord. Verify and run again.\n'.format(path, f))
exit(1)
if date != -1:
if datei == datef:
print('# Strange JD file for '+out)
date = (datef+datei)/2
return [float(x) for x in data] + [date] + [ang]
#################################################
#################################################
#################################################
def chooseout(objdir, obj, f, nstar=1, sigtol=lambda sig: 1.4*sig):
"""
Look within the night for the *.out file(s) of a filter that have the
smallest error. Return one or more values for the whole sequence
(i.e., folder). The criteria are defined in the polarimetry annex.
minerror == True: take the out with the smallest error in any case.
sigtol: tolerance condition used to decide against the group of smallest error.
If the sigma of the group with all N positions is smaller than the
function sigtol applied to the sigma of the group with the smallest
error, then the group with all N positions is used; otherwise the
group with the smallest error is used.
Example with 16 positions: error of the 16-position group == 0.000230;
smallest error == 0.000200. With sigtol(sig) = 1.4*sig, since
sigtol(0.000200) == 0.000280 > 0.000230, the 16-position group is used.
The number of positions is based on the number of *.fits files of that
filter.
'nstar' == star number inside the 'out' file (useful when there is
more than a single star inside the .out)
"""
def minErrBlk16(serie='16001'):
"""
Find the out with the best (smallest) error among files of type
*_f_*serie.?.out for star number 'nstar' (where 'f' is the filter,
'serie' is the five-digit code for the WP positions, like 16001,
and ? is some char).
Return err, out. If none is found, return 1000., ''.
"""
err = 1000.
out = ''
ls = [objdir+'/'+fl for fl in _os.listdir('{0}'.format(objdir)) if _re.search(r'_{0}'. \
format(f) + r'_.*_?{0}\..\.out'.format(serie), fl)]
if len(ls) > 0:
err = float(readout(ls[0],nstar=nstar)[2])
out = ls[0]
for outi in ls:
if float(readout(outi,nstar=nstar)[2]) < err:
# print float(readout(outi,nstar=nstar)[2])
err = float(readout(outi,nstar=nstar)[2])
out = outi
return err, out
npos = len(_glob('{0}/*_{1}_*.fits'.format(objdir,f)))
if npos == 0:
npos = len(_glob('{0}/{1}/p??0'.format(objdir,f)))
louts = _glob('{0}/*_{1}_*.out'.format(objdir,f))
# Check reduction
if len(louts) == 0 and npos != 0:
eprint(('# ERROR: There are observations not reduced for {0}/{1}_{2}_*.fits. ' +\
'Verify and run again.\n').format(objdir,obj,f))
exit(1)
# Calculate the number of outfiles to be returned.
n=int(npos/16) # integer division
rest = npos%16
if n!=0:
if rest == 0:
nlast = 16
elif rest >= 8:
n += 1
nlast = rest
elif rest < 8:
nlast = 16+rest
elif rest > 0:
n = 1
nlast = rest
# print(n, rest, nlast)
err = [1000.0]*n
outs = ['']*n
# n holds the number of outs to be obtained
# nlast holds the number of positions for the last out
# Loop to get the n outfiles
for i in range(n):
# Get the best outfile with all WP positions.
if i+1 < n or (i+1==n and nlast >= 16):
serie='{0:02d}{1:03d}'.format(16,i*16+1)
else:
serie='{0:02d}{1:03d}'.format(nlast,i*16+1)
err[i], outs[i] = minErrBlk16(serie)
errtmp = err[i] # errtmp==1000 if no outfiles were found by minErrBlk16
# Tests if there is some better group, with smaller error
for outi in louts:
if outi.find('_WP') == -1:
indx = outi.rfind('_')+1
else:
indx = outi[:outi.find('_WP')].rfind('_')+1
combi = outi[indx:indx+5]
n1= int(combi[:2]) # First part of '16001', etc
n2= int(combi[2:]) # Last part of '16001', etc
# if i+1==n:
# print n1, n2
# Default case
if i+1 != n or (i+1 == n and nlast == 16):
# Get only the groups with independent data
if n2 >= 16*i+1 and n2 <= 16*i+1 + (16-n1):
if float(readout(outi,nstar=nstar)[2]) < errtmp:
errtmp = float(readout(outi,nstar=nstar)[2])
outtmp = outi
# Case i==n (and nlast!=16)
else:
# print 'entrou1'
# print n1,n2,16*i+1
if n2 >= 16*i+1:
if float(readout(outi,nstar=nstar)[2]) < errtmp:
errtmp = float(readout(outi,nstar=nstar)[2])
outtmp = outi
if errtmp != err[i] and err[i] > sigtol(errtmp):
outs[i] = outtmp
# if some element of outs is '', chooseout has failed to find the best out in that 16-position series.
# But don't panic. It can happen due to some spurious .fits file
# print [out for out in outs if out != '']
return [out for out in outs if out != '']
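# Illustrative sketch (not part of the original module) of the sigtol criterion
# described in the docstring above, using the assumed numbers from its example.
def _example_sigtol_rule(sig_all=0.000230, sig_best=0.000200,
                         sigtol=lambda sig: 1.4*sig):
    # keep the group with all 16 WP positions whenever its error is within
    # the tolerance computed from the smallest error found
    if sig_all <= sigtol(sig_best):
        return 'use the 16-position group'
    return 'use the group with the smallest error'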
#################################################
#################################################
#################################################
def verout(out, obj, f, nstar=1, verbose=True, delta=3.5):
"""
Function to run tests on outfile 'out' concerning
star number 'nstar' and object name 'obj' in filter 'f'.
Tests: (1) test whether P_pub for standards is compatible with
the P_obs value within 10 sigmas (10 because the
standard pol can change with time).
(2) test whether sig >> theoretical sig (flagged when sig/sigT > 6).
(3) test whether there is some standard star (only if
'obj' is a target star)
- If verbose==True, show warnings on screen
- If objdir==None, the outfile is assumed to be in the current dir
Return a boolean list with three components concerning
the tests (1)-(3) above + a log string. If some test has failed,
the corresponding value is assigned \"True\"; otherwise,
\"False\".
"""
tests = [False]*len(dictests)
loglines = ''
# The lines below extract 'path' from 'out', also fixing consecutive '//'
if out[0] == '/':
path = '/'+'/'.join(s for s in [s for s in out.split('/') if s][:-2])
else:
path = '/'.join(s for s in [s for s in out.split('/') if s][:-2])
[Q,U,sig,P,th,sigT,ap,star,MJD,calc] = readoutMJD(out, nstar=nstar)
sig_ratio = float(sig)/float(sigT)
ztest = verStdPol(obj, f, float(P)*100, float(sig*100))
# Some tests.
if ztest > 10.0: # Case the object is not a standard, ztest==-1 and tests[0] remains False.
tests[0] = True
if sig_ratio > 6.:
tests[1] = True
if not stdchk(obj)[0]: # Only if object is not a standard star, tests if there exists some standard star for it
tests[2] = not chkStdLog(f, calc, path=path, delta=delta, verbose=False)
# Print tests
if tests[0]:
loglines += ('# WARNING: {0}_{1}: The standard has polarization only compatible '+\
'within {2:.1f} sigma with the published value.\n').format(obj, f, ztest)
if tests[1]:
loglines += ('# WARNING: {0}_{1}: Polarization has sig >> theoretical_sig ' +\
'({2:.4f} >> {3:.4f}).\n').format(obj, f, sig*100, sigT*100)
if tests[2]:
loglines += ('# WARNING: {0}_{1}: Standard star not found yet '+\
'(calc. {2:.1f})\n').format(obj, f, calc)
if verbose and loglines != '':
print('\n'+loglines)
return tests, loglines
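# Illustrative sketch (not part of the original module): consuming the verout()
# results for an assumed outfile and object name, and logging any warnings.
def _example_verout():
    tests, loglines = verout('someobj_v_16001.1.out', 'someobj', 'v',
                             nstar=1, verbose=False)
    # 'tests' follows the order of the dictests dictionary defined above
    if any(tests):
        writeLog('.', loglines)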
#################################################
#################################################
#################################################
def queryout(objdir, obj, f, nstar=1, sigtol=lambda sig: 1.4*sig):
"""
Call chooseout for 'obj' at filter 'f' (in 'objdir'),
print the graphs and ask the user whether the selected
outs should be used. If not, the user must answer
what to use instead.
'nstar' == star number inside the 'out' file (useful when there is
more than a single star inside the .out)
"""
_plt.close('all')
outs = chooseout(objdir, obj, f, nstar=nstar, sigtol=sigtol)
if outs == ['']:
return outs, None, None
# Initialize the components for each outfile
tags = [[]]*len(outs)
flag = ['']*len(outs)
for i in range(len(outs)):
sortout = []
while True:
_plt.close('all')
# Only in the first iteration stack the values in sortout
if sortout == []:
sortout = grafall(objdir, f, n=i+1 ,nstar=nstar, bestouts=[outs[i]], shortmode=True)
else:
lixo = grafall(objdir, f, n=i+1 ,nstar=nstar, bestouts=[outs[i]], shortmode=True)
print('\n'+'_'*80)
print(' {0:<10s} {1:<5s} {2:<6s} {3:<7s} {4:<10s} {5:<7s} {6:<s}'.format('Obj', 'Filt', \
'Pol (%)', '', 'sig/ThSig', 'ztest', 'out/num'))
try:
[Q,U,sig,P,th,sigT,ap,star,MJD,calc] = readoutMJD(outs[i], nstar=nstar)
sig_ratio = float(sig)/float(sigT)
z = verStdPol(obj, f, float(P)*100, float(sig*100))
numout = '(#{0})'.format(sortout.index(outs[i]))
except:
eprint('# ERROR: It shouldn\'t enter here in queryout!\n')
exit(1)
# Reassigns ztest value for printing
if z == -1:
zstr = '-----'
else:
zstr = '{0:.1f}'.format(z)
# Prints the values
print(' {0:<10s} {1:<5s} {2:<6.4f}+-{3:<7.4f} {4:<10.1f} {5:<7s} {6:<s} {7:<s}'.\
format(obj.upper(), f.upper(), float(P)*100, float(sig)*100, \
sig_ratio, zstr, _phc.trimpathname(outs[i])[1], numout))
print('_'*80+'\n')
# Test the out file to print tests
testout, logout = verout(outs[i], obj, f, nstar=nstar, verbose=True)
while True:
verif = input('Use this out? (y/n): ')
if verif not in ('y','Y','n','N'):
print('Invalid choice!')
else:
break
if verif in ('y', 'Y'):
break
elif verif in ('n', 'N'):
opt='' # for the first iteration
while True:
opt = input('Type the out number: ')
# If opt is a valid value, assign the input number with the concerning out file
if opt in [str(j) for j in range(1,len(sortout)) if sortout[j] != '']:
outs[i] = sortout[int(opt)]
break
else:
print('Wrong value!')
opt=''
# Request what tags to assign (flexible through the dictags global dictionary)
print('\n# TAGS LIST\n 0: none')
for j in dictags.keys():
print(' {0}: {1}'.format(j+1, dictags[j][0]))
print('')
while True:
verif = True
tags[i] = [False for j in dictags.keys()]
strin = input('Select all tags that apply separated by commas (\'0\' for none): ')
if strin == '0':
flag[i]='OK'
break
opts = strin.split(',')
for opt in opts:
if opt in [str(j+1) for j in dictags.keys()]:
opt = int(opt)-1
tags[i][opt] = True
else:
print('Invalid choice!')
verif = False
break
# If some tag was selected, request a flag below
if verif:
verif2=''
while verif2 not in ('y','Y','n','N'):
verif2 = input('For you, this data should appear as usable? (y/n): ')
if verif2 in ('y','Y'):
flag[i] = 'W'
else:
flag[i] = 'E'
break
_plt.close('all')
return outs, tags, flag
#################################################
#################################################
#################################################
# Still needs updating for the new column indices of the std.dat and obj.dat files
# after the changes I made. Bednarski.
def plotfrompollog(path, star, filters=None, colors=None):
""" Plot default including civil dates
"""
tab = _np.genfromtxt('{0}/{1}.log'.format(path,star), dtype=str, autostrip=True)
MJD = tab[:,0].astype(float)
nights = tab[:,1]
filt = tab[:,2]
calc = tab[:,3]
ang_ref = tab[:,4].astype(float)
dth = tab[:,5].astype(float)
P = tab[:,7].astype(float)
Q = tab[:,8].astype(float)
U = tab[:,9].astype(float)
th = tab[:,10].astype(float)
sigP = tab[:,11].astype(float)
sigth = tab[:,12].astype(float)
if colors == None:
colors = _phc.colors
if filters == None:
filters = ['b','v','r','i']
colors = ['b','y','r','brown']
leg = ()
fig, ax = _plt.subplots()
for f in filters:
i = [i for i,x in enumerate(filters) if x == f][0]
leg += (f.upper()+' band',)
ind = _np.where(filt == f)
x = MJD[ind]
y = P[ind]
yerr = sigP[ind]
ax.errorbar(x, y, yerr, marker='o', color=colors[i], fmt='--')
ax.legend(leg,'upper left')#, fontsize='small')
#ax.legend(leg,'lower left', fontsize='small')
ax.set_ylabel('Polarization (%)')
ax.plot(ax.get_xlim(),[0,0],'k--')
ylim = ax.get_ylim()
ax.set_xlabel('MJD')
xlim = ax.get_xlim()
ticks = _phc.gentkdates(xlim[0], xlim[1], 3, 'm',\
dtstart=_dt.datetime(2012,7,1).date())
mjdticks = [_jdcal.gcal2jd(date.year,date.month,date.day)[1] for date in \
ticks]
ax2 = ax.twiny()
ax2.set_xlabel('Civil date')
ax2.set_xlim(xlim)
ax2.set_xticks(mjdticks)
ax2.set_xticklabels([date.strftime("%d %b %y") for date in ticks])
_plt.setp( ax2.xaxis.get_majorticklabels(), rotation=45 )
_plt.subplots_adjust(left=0.1, right=0.95, top=0.84, bottom=0.1)
_plt.savefig('{0}/{1}.png'.format(path,star))
_plt.savefig('{0}/{1}.eps'.format(path,star))
_plt.close()
bres = 20
_plt.clf()
leg = ()
fig, ax = _plt.subplots()
for f in filters:
ind = _np.where(filt == f)
x, y, yerr = _phc.bindata(MJD[ind], P[ind], sigP[ind], bres)
leg += (f.upper()+' band',)
ax.errorbar(x, y, yerr, marker='o', color=colors[filters.index(f)], fmt='-')
ax.legend(leg,'upper left', fontsize='small')
ax.set_ylabel('Polarization (%) (binned)')
ax.plot(ax.get_xlim(),[0,0],'k--')
ax.set_ylim(ylim)
ax.set_xlabel('MJD')
xlim = ax.get_xlim()
ticks = _phc.gentkdates(xlim[0], xlim[1], 3, 'm',\
dtstart=_dt.datetime(2012,7,1).date())
mjdticks = [_jdcal.gcal2jd(date.year,date.month,date.day)[1] for date in \
ticks]
ax2 = ax.twiny()
ax2.set_xlabel('Civil date')
ax2.set_xlim(xlim)
ax2.set_xticks(mjdticks)
ax2.set_xticklabels([date.strftime("%d %b %y") for date in ticks])
_plt.setp( ax2.xaxis.get_majorticklabels(), rotation=45 )
_plt.subplots_adjust(left=0.1, right=0.95, top=0.84, bottom=0.1)
_plt.savefig('{0}/{1}_binned.png'.format(path,star))
_plt.savefig('{0}/{1}_binned.eps'.format(path,star))
_plt.close()
for f in filters:
ind = _np.where(filt == f)
avg,sigm = _phc.wg_avg_and_std(P[ind], sigP[ind])
print('# Averaged {} band is {:.3f} +/- {:.3f} %'.format(f.upper(),avg,\
sigm))
return
#################################################
#################################################
#################################################
# Bednarski: This function generates the graphs for all filter "filt" logfiles found inside "objdir"
def grafall(objdir, filt, nstar=1, n=1, bestouts=[], shortmode=False):
"""
Multiple plot modulations for the object inside 'objdir'
in filter 'filt'.
Return a list 'sortout' with the log files sorted by plot
number. Always sortout[0]=='' and sortout[i] is the plot #i.
Optional:
bestouts - list of outs which have smaller error to
highlight in figures
shortmode - case True, plot only the groups 16001, 08001,
08009 (ver .1 and .2) and an eventual 7th group.
n - refers to the n-th 16-position sequence to use.
For example, if there are 40 WP positions, n=1 is to plot
the graphs for positions 1-16; n=2 is for 17-32;
n=3 is for 33-40.
"""
# Receive a list of logfiles and return lists for each group of WP/version:
# sublogs == list of sorted logfiles.
# groups == list with informations about lamina groups.
# ver == list with the reduction versions of files in sublogs.
#
# Ex, sublogs == [[*16001.1.log], [*16001.2.log], [*0800[1-9].1.log], [*0800[1-9].2.log]]
# groups == [ [16, 1, .1], [16, 1, .2], [8, 9, .1], [8, 9, .2]]
# ver == [ [.1], [best], [.1 ...], [.2, ...]]
def combineout(logs, n):
logsplit, groups, sublogs, ver = [], [], [[]], [[]]
# Split the file names at the last "_" character in each .log name. Works for _WP1111110110 ... files
if len(logs) > 0:
direc = _phc.trimpathname(logs[0])[0]
else:
eprint("ERROR: no log files found to plot.")
return
for log in [_phc.trimpathname(li)[1] for li in logs]:
if log.find('_WP') == -1:
indx = log.rfind('_')+1
else:
indx = log[:log.find('_WP')].rfind('_')+1
combi = log[indx:indx+5]
n1= int(combi[:2])
n2= int(combi[2:])
if n2 >= 16*(n-1)+1 and n2 <= 16*(n-1)+1 + (16-n1):
logsplit += [[log[:indx], log[indx:]]]
# Sort by lamina (high to low) -> version (including versions with _WP111100 ...)
logsplit.sort(key=lambda x: [x[1][6:8],x[1][:]])
logsplit.sort(key=lambda x: x[1][:1], reverse=True)
j=0
# Separate the lamina groups
for i in range(len(logsplit)):
sublogs[j] += [direc+logsplit[i][0]+logsplit[i][1]]
if sublogs[j][-1][:-4]+'.out' in bestouts:
ver[j] += ['best']
else:
ver[j] += [logsplit[i][1][5:-4]]
if (i != len(logsplit)-1 and (logsplit[i][1][0:2] != logsplit[i+1][1][0:2] or \
logsplit[i][1][6:8] != logsplit[i+1][1][6:8])) or i == len(logsplit)-1:
groups += [[int(logsplit[i][1][0:2]), len(sublogs[j]), \
logsplit[i][1][5:-4]]]
if i != len(logsplit)-1:
j+=1
sublogs[:] += [[]]
ver[:] += [[]]
return groups, sublogs, ver
# Generate the background color for the graphs, depending on the reduction version
def gencolour(ver):
if ver == 'best': bkg = '#d3dcf9'
elif ver == '.1' or ver[:4] == '.1_WP': bkg = '#f0ffe0'
elif ver == '.2' or ver[:4] == '.2_WP': bkg = '#f0fff0'
else: bkg = '#f5f5f5'
return bkg
# Plot graphs for 'shortmode' and return the 'sortout' list. Shortmode consists of
# processing the groups nn001.1, nn001.2 (nn is the number of WP), 08001.1, 08001.2,
# 08009.1, 08009.2 and an eventual 7th element given in the 'bestouts' variable.
# The returned variable is a list with the displayed outfiles, sorted in the same
# order as shown.
# The input variables are exactly the outputs "sublogs" and "groups" from the combineout subroutine.
def gengraphshort (sublogs, groups):
# Variables below are to mark the positions in lists
pos8ver1, pos8ver2, posnver1, posnver2 = -1,-1,-1,-1
maxver1, maxver2 = 0,0
# Find index for 16 and 08 groups positions
for i in range(len(groups)):
# Only shows the groups with 8 positions if there are 08001 to 08009 files
if groups[i][0] == 8 and groups[i][1] == 9 and groups[i][2] == '.1':
pos8ver1 = i
if maxver1 < 8:
maxver1 = 8
elif groups[i][0] > maxver1 and groups[i][2] == '.1':
maxver1 = groups[i][0]
posnver1 = i
if groups[i][0] == 8 and groups[i][1] == 9 and groups[i][2] == '.2':
pos8ver2 = i
if maxver2 < 8:
maxver2 = 8
elif groups[i][0] > maxver2 and groups[i][2] == '.2':
maxver2 = groups[i][0]
posnver2 = i
# Set the logfiles in a first time
tlogs = ['']*6
tver = ['.1','.2']*3
if posnver1 != -1:
tlogs[0] = sublogs[posnver1][0]
if posnver2 != -1:
tlogs[1] = sublogs[posnver2][0]
if pos8ver1 not in (-1,posnver1):
tlogs[2], tlogs[4] = sublogs[pos8ver1][0], sublogs[pos8ver1][8]
if pos8ver2 not in (-1,posnver2):
tlogs[3], tlogs[5] = sublogs[pos8ver2][0], sublogs[pos8ver2][8]
# Set the bestout
if bestouts != []:
if len(bestouts) > 1:
print('# WARNING: grafall: more than one value of best .out passed as' + \
' parameter in short mode. Only using the first one...\n')
if bestouts[0][:-4]+'.log' not in tlogs:
tlogs += [bestouts[0][:-4]+'.log']
tver += ['best']
else:
tver[tlogs.index(bestouts[0][:-4]+'.log')] = 'best'
# Set the logfiles once, erasing the void components
if (posnver1 == -1 and pos8ver1 == -1) or (posnver2 == -1 and pos8ver2 == -1):
logs = [tlogs[i] for i in range(len(tlogs)) if tlogs[i] != '']
ver = [tver[i] for i in range(len(tlogs)) if tlogs[i] != '']
mode='lin'
else:
mode='col'
logs = []
ver = []
if posnver1 != -1 or posnver2 != -1:
logs += tlogs[:2]
ver += tver[:2]
if pos8ver1 != -1 or pos8ver2 != -1:
logs += tlogs[2:6]
ver += tver[2:6]
if len(tlogs) == 7:
logs += tlogs[6:]
ver += tver[6:]
# Run gengraphl4!
gengraphl4(logs,ver,1,align=mode)
# Return the 'sortlog' file (sorted outfiles with extension '.log')
return [''] + [log[:-4]+'.out' for log in logs if log != '']
# Generate graphs for the cases nlog <= 8
# align: align by lines or columns? 1234//5678 ('lin') or 1357//2468 ('col')?
def gengraphl4(logs, ver, count, align='lin'):
if align=='lin':
if len(logs) < 4:
nlin, ncol = 1, len(logs)
else:
nlin, ncol = 2, 4
elif align=='col':
if len(logs) == 1:
nlin, ncol = 1, 1
else:
nlin, ncol = 2, int(len(logs)/2)
if len(logs)%2 != 0:
ncol += 1
else:
eprint('# ERROR: align mode {0} is not valid in grafall! Graphs not displayed!'.format(align))
return
if ncol > 4:
eprint('# ERROR: {0} figure(s) was(were) not displayed by grafall'.format(len(logs)-8))
return
if ncol == 1: linit=0.15
elif ncol == 2: linit=0.08
else: linit=0.05
if nlin == 1: binit=0.12; tinit=0.88
elif nlin == 2: binit=0.19; tinit=0.94
fig = _plt.figure(figsize=(4*ncol,3.4*nlin))
# Set up the grids and create all the axes
grids = [ _plt.GridSpec(2*nlin, ncol, hspace=0, wspace=0.35, \
top=tinit, bottom=binit, left=linit, right=0.95) ]
if nlin == 2:
grids += [ _plt.GridSpec(2*nlin, ncol, hspace=0, wspace=0.35, \
top=0.81, bottom=0.06, left=linit, right=0.95) ]
ax = []
if align == 'lin':
for j in range(ncol):
if logs[j] != '':
ax += [ fig.add_subplot(grids[0][0,j]),\
fig.add_subplot(grids[0][1,j]) ]
else:
ax += [None,None]
for j in range(len(logs)-ncol):
if logs[j+ncol] != '':
ax += [ fig.add_subplot(grids[1][2,j]),\
fig.add_subplot(grids[1][3,j]) ]
else:
ax += [None,None]
elif align == 'col':
for j in range(ncol):
if logs[2*j] != '':
ax += [ fig.add_subplot(grids[0][0,j]),\
fig.add_subplot(grids[0][1,j]) ]
else:
ax += [None,None]
if 2*j+1 < len(logs) and logs[2*j+1] != '':
ax += [ fig.add_subplot(grids[1][2,j]),\
fig.add_subplot(grids[1][3,j]) ]
else:
ax += [None,None]
k=0
for j in range(len(logs)):
# Case of even logs, breaks after the last one
# print logs[j]
if len(logs) <= j:
break
elif logs[j] != '':
grafpol(logs[j], nstar, fig, ax[2*j], ax[2*j+1])
ax[2*j].text(0.85, 0.85, '#{0:<2d}'.format(count+k), \
horizontalalignment='left', verticalalignment='center', style='italic', \
transform=ax[2*j].transAxes, fontsize=20, color='red')
ax[2*j].set_facecolor(gencolour(ver[j]))
ax[2*j+1].set_facecolor(gencolour(ver[j]))
k += 1
_plt.show(block=False)
_plt.pause(0.1)
# Generate graphs for the cases nlog > 8
def gengraphm4(logs, ver, count):
nwin = len(logs)/12 + 1
for i in range(nwin):
# set nlin/ncol values
nlog = len(logs)-i*12
ncol = 4
if nlog <= 4: nlin=1; ncol=nlog
elif nlog <= 8: nlin=2
elif nlog <= 12: nlin=3
else: nlin=3; nlog=12
# set left/right parameters
if nlog == 1: linit=0.15
elif nlog == 2: linit=0.08
else: linit=0.05
# set top/bottom parameters
if nlog <= 4: delt=0.10; tinit=0.86; binit=0.00
elif nlog <= 8: delt=0.13; tinit=0.90; binit=0.00
else: delt=0.10; tinit=0.95; binit=0.05
fig = _plt.figure(figsize=(4*ncol,3*nlin))
# Creates the axes of first row
grids = [ _plt.GridSpec(2*nlin, ncol, hspace=0, wspace=0.35, \
top=tinit, bottom=binit+2*delt, left=linit, right=0.95) ]
ax = []
for j in range(0,ncol):
ax += [ fig.add_subplot(grids[0][0,j]),\
fig.add_subplot(grids[0][1,j]) ]
# Creates the axes of second row
if nlin > 1:
grids += [ _plt.GridSpec(2*nlin, ncol, hspace=0, wspace=0.35, \
top=tinit-delt, bottom=binit+delt, left=linit, right=0.95) ]
for j in range(0,ncol):
if j+ncol >= nlog:
break
ax += [ fig.add_subplot(grids[1][2,j]),\
fig.add_subplot(grids[1][3,j]) ]
# Creates the axes of third row
if nlin > 2:
grids += [ _plt.GridSpec(2*nlin, ncol, hspace=0, wspace=0.35, \
top=tinit-2*delt, bottom=binit, left=linit, right=0.95) ]
for j in range(0,ncol):
if j+2*ncol >= nlog:
break
ax += [ fig.add_subplot(grids[2][4,j]),\
fig.add_subplot(grids[2][5,j]) ]
# Generates the plots on the axes
for j in range(0,nlog):
grafpol(logs[j+12*i], nstar, fig, ax[2*j], ax[2*j+1])
ax[2*j].set_facecolor(gencolour(ver[j]))
ax[2*j+1].set_facecolor(gencolour(ver[j]))
ax[2*j].text(0.85, 0.85, '#{0:<2d}'.format(count+j+i*12), \
horizontalalignment='left', verticalalignment='center', \
style='italic', transform=ax[2*j].transAxes, fontsize=20, color='red')
_plt.show(block=False)
_plt.pause(0.1)
logs = _glob('{0}/*_{1}_*.log'.format(objdir, filt))
if logs == []:
eprint('# ERROR: log files not found to plot. Maybe the file names \
{0}/*_{1}_*.log are wrong!'.format(objdir, filt))
return 1
gps, sublogs, ver = combineout(logs, n)
# 1) Case short mode
if shortmode:
sortout = gengraphshort(sublogs, gps)
# 2) Case long mode
else:
nlog = sum([len(subb) for subb in sublogs])
# If there are only a few logfiles, try to use a single window
if nlog <= 8:
test=True
# Test if all groups have two reduction versions
if len(gps)%2 == 0:
for i in range(0,len(gps),2):
# [:2] and not [:1] because gps is a list of lists
if gps[i][:2] != gps[i+1][:2]:
test=False
break
else:
test=False
if test:
tver = []
tlogs = []
for i in range(len(sublogs)):
tlogs += sublogs[i]
tver += ver[i]
gengraphl4(tlogs, tver, 1)
# Otherwise, loop on lamina groups
else:
i = 0
count = 1 # variable to print the graph number
while i < len(gps):
nout = 0
iver = []
ilogs = []
# print i
for j in range(i,len(gps)):
if nout <= 8 and gps[j][:2] == gps[i][:2]:
nout += gps[j][1]
iver += ver[j]
ilogs += sublogs[j]
else:
break
# if isn't last gps element, nout<=8 and number of versions is 2, 4, 6, etc
if j != len(gps)-1 and nout <= 8 and (j-i)%2 == 0:
gengraphl4(ilogs,iver,count)
count += nout
i = j
# print('entrou 1')
else:
gengraphm4(sublogs[i],ver[i],count)
count += len(sublogs[i])
# print('entrou 2')
i += 1
sortout = ['']
for ilogs in sublogs:
sortout += [log[:-4]+'.out' for log in ilogs]
# returns the sorted logs/outs, with '.log' changed to '.out'
return sortout
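# Illustrative sketch (not part of the original module): a typical call to
# grafall() with an assumed object directory and filter.
def _example_grafall():
    sortout = grafall('some_night/someobj_a0', 'v', nstar=1, shortmode=True)
    # sortout[0] is always '' and sortout[i] is the outfile plotted as #i
    print(sortout[1:])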
#################################################
#################################################
#################################################
def grafpol(filename, nstar=1, fig=None, ax1=None, ax2=None, save=False, extens='png'):
"""
Program to plot the best fit and its residuals from the IRAF reduction.
'filename' is the path to the .log output file from reduction.
nstar is the star number inside out/log files to be plotted.
NEW: Working for *_WP1110....log files!
NEW (2): Working for logfiles with more than a single star!
Two working modes:
1) If only filename is given: displays the graph or, case save=True,
save it into the logfile directory.
'extens' parameter changes the extension.
2) If filename, one figure and two axes are given: changes these axes,
adding plots to them. It neither displays nor saves anything at the end;
it only adds the plots to the axes. Useful for subplots in the same figure.
Authors: Moser and Bednarski
Current version: May, 2015
"""
def readlog(filename):
try:
# CAUTION! BLANK LINES WILL BE SKIPPED!
file0 = _np.loadtxt(filename, dtype=str, delimiter='\n', comments=None)
except:
eprint('# ERROR: File {0} not found!\n'.format(filename))
exit(1)
[lixo,lixo,lixo,lixo,lixo,lixo,lixo,lixo,MJD,lixo] = \
readoutMJD(filename.replace('.log','.out'), nstar=nstar)
MJD = float(MJD)
# npts: number of WP valid to be plotted.
# totpts: used for the total number of WP (for the *.2_WP11110...1.out)
nstars = int(file0[6].split()[-1])
npts = int(file0[9].split()[-1]) # Blank lines were SKIPPED!
tnpts = int(file0[8].split()[-1])
delta = float(file0[14].split()[-1])
sigma = 1.
isinstar=False
if nstars < nstar:
eprint('# ERROR: File {0} has {1} stars (you have selected star #{2}).'.\
format(filename, nstars, nstar))
exit(1)
# Bednarski: corrected (25 -> 19, because the blank lines had been ignored by
# the np.loadtxt function)
for i in range(19, len(file0)):
if 'STAR # {0:d}'.format(nstar) in file0[i]:
isinstar=True
elif 'STAR # {0:d}'.format(nstar+1) in file0[i]:
break
if isinstar and 'APERTURE' in file0[i]:
sig = float(file0[i+2].split()[2])
if sig < sigma:
sigma = sig
fator = thtFactor(MJD)
thet = fator * float(file0[i+2].split()[4])
while thet >= 180:
thet -= 180
while thet < 0:
thet += 180
'''
# Bed: Q and U are the ones below, as copied from the graf.cl routine
if float(file0[i+2].split()[4]) < 0:
thet = - float(file0[i+2].split()[4])
else:
thet = 180. - float(file0[i+2].split()[4])
'''
# Recalculating the new QU parameters
Q = float(file0[i+2].split()[3])*_np.cos(2.*thet*_np.pi/180.)
U = float(file0[i+2].split()[3])*_np.sin(2.*thet*_np.pi/180.)
# print Q, U, thet, float(file0[i+2].split()[3])
n = int(npts/4)
if npts%4 != 0:
n = n+1
P_pts = []
for j in range(n):
P_pts += file0[i+4+j].split()
# I think the P values are in reverse order inside Pereyra's .log files:
# Uncomment the two lines below if you want to show the ascending x-axes
# (and not descending x-axes) and comment the next two lines.
# P_pts = _np.array(P_pts, dtype=float)[::-1]
# th_pts = fator*(22.5*_np.arange(1,tnpts+1)+delta/2.)
P_pts = _np.array(P_pts, dtype=float)
th_pts = -fator*(22.5*_np.arange(tnpts)-delta/2.)
j = filename.find('.')
delta2 = int(filename[-2+j:j])-1
# Bed: Working for nlam >= 10 for correct printing
# str_pts = map(str, _np.arange(1,tnpts+1)+delta2)[::-1]
str_pts = list(map(str, _np.arange(1,tnpts+1)+delta2))
# Case _WP11110...1.log file
if npts != tnpts:
refs = file0[9].split()[3:-2]
rm = [j for j in range(tnpts) if refs[j] == '0']
th_pts = _np.delete(th_pts, rm)
str_pts = _np.delete(str_pts, rm)
if sigma == 1.:
print('# ERROR reading the file %s !' % filename)
Q = U = 0
P_pts = th_pts = _np.arange(1)
str_pts = ['0','0']
return(Q, U, sigma, P_pts, th_pts, str_pts, nstars, fator)
def plotlog(ax1, ax2, Q,U,sigma,P_pts,th_pts,str_pts,filename,fator):
# extract the group number
WPpos = filename.find('_WP')
if WPpos == -1:
suff = filename[filename.rfind('_')+1:]
else:
suff = filename[filename.rfind('_', 0, WPpos)+1:]
ax1.set_title(r'Q={0:.3f}, U={1:.3f}, $\sigma$={2:.3f}'.format(Q*100,U*100,sigma*100),
fontsize=14, verticalalignment='bottom')
ax1.text(0.98, 0.01, '{0}'.format(suff), horizontalalignment='right', \
verticalalignment='bottom', transform=ax1.transAxes, fontsize=9)
ax1.set_ylabel('p (%)', size=9)
ysigma = _np.zeros(len(th_pts))+sigma
ax1.errorbar(th_pts,P_pts*100,yerr=ysigma*100)
th_det = _np.linspace(th_pts[0]*.98,th_pts[-1]*1.02,100)
P_det = Q*_np.cos(4*th_det*_np.pi/180)+U*_np.sin(4*th_det*_np.pi/180)
ax1.plot(th_det, P_det*100)
ax1.plot([th_det[0],th_det[-1]], [0,0], 'k--')
ax1.set_xlim([th_pts[0]+fator*4,th_pts[-1]*1.02-fator*1.5])
# ax1.set_xlim([th_pts[0]-4,th_pts[-1]*1.02+3][::-1])
# ax1.set_ylim([min(P_pts*100)*1.1, max(P_pts*100)*1.1])
ax2.set_xlabel('WP position', size=9)
ax2.set_ylabel('Residuals', size=9)
ax2.set_xlim([th_pts[0]+fator*4,th_pts[-1]*1.02-fator*1.5])
# ax2.set_xlim([th_pts[0]-4,th_pts[-1]*1.02+3][::-1])
ax2.plot([th_det[0],th_det[-1]], [0,0], 'k--')
_plt.setp(ax1.get_xticklabels(), visible=False)
ax1.set_yticks(ax1.get_yticks()[1:])
transOffset = _offset_copy(ax2.transData, fig=fig, x=0.00, y=0.10, units='inches')
P_fit = Q*_np.cos(4*th_pts*_np.pi/180)+U*_np.sin(4*th_pts*_np.pi/180)
# Bed: Now plot the relative residuals (residuals divided by sigma)
ax2.errorbar(th_pts, (P_pts-P_fit)/sigma, yerr=1.)
for i in range(len(th_pts)):
ax2.text(th_pts[i], (P_pts-P_fit)[i]/sigma, str_pts[i], transform=transOffset)
if int(ax2.get_yticks()[0])-int(ax2.get_yticks()[-1]) > 5:
passo = 1
else:
passo = 2
ax2.set_yticks(range(int(ax2.get_yticks()[0]),int(ax2.get_yticks()[-1]+1), passo))
ax1.set_xticklabels(ax1.get_xticks(), size=7)
ax1.set_yticklabels(_np.round(ax1.get_yticks(),5), size=7)
ax2.set_xticklabels([int(ax2.get_xticks()[i]) for i in range(len(ax2.get_xticks()))], size=7)
ax2.set_yticklabels(ax2.get_yticks(), size=7)
return
if fig == None or ax1 == None or ax2 == None:
_plt.close('all')
fig = _plt.figure(1)
ax1 = _plt.subplot(2, 1, 1)
ax2 = _plt.subplot(2, 1, 2, sharex=ax1)
_plt.subplots_adjust(hspace = 0)
Q, U, sigma, P_pts, th_pts, str_pts, nstars, fator = readlog(filename)
plotlog(ax1,ax2, Q,U,sigma,P_pts,th_pts,str_pts,filename,fator)
if save:
if nstars == 1:
_plt.savefig(filename.replace('.log','.'+extens))
else:
_plt.savefig(filename.replace('.log','_star{0}.{1}'.format(nstar,extens)))
else:
_plt.show()
else:
Q, U, sigma, P_pts, th_pts, str_pts, nstars, fator = readlog(filename)
plotlog(ax1,ax2, Q,U,sigma,P_pts,th_pts,str_pts,filename,fator)
return
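# Illustrative sketch (not part of the original module) of the two working
# modes described in the docstring; the logfile name is an assumed example.
def _example_grafpol():
    # mode 1: standalone figure, saved next to the logfile
    grafpol('someobj_v_16001.1.log', save=True, extens='png')
    # mode 2: draw into externally created axes (useful for composite figures)
    fig = _plt.figure()
    ax1 = _plt.subplot(2, 1, 1)
    ax2 = _plt.subplot(2, 1, 2, sharex=ax1)
    grafpol('someobj_v_16001.1.log', fig=fig, ax1=ax1, ax2=ax2)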
#################################################
#################################################
#################################################
def verStdPol(std, filt, p, sig):
"""
Calculate z test for standard 'std', filter 'filt', comparing
the observed polarization 'p' +- 'sig' and the published value.
Return z = abs(ppub-p)/sqrt(sigpub^2+sig^2) or -1 if there is
no such object or filter.
"""
lstds = _np.loadtxt('{0}/refs/pol_padroes.txt'.format(_hdtpath()), dtype=str, usecols=range(0,22))
# Get P_pub value
i = stdchk(std)[1]
if i == -1:
return -1
else:
j = filters.index(filt)+11 # +11 due to the columns that must be skipped to
# reach the polarization columns
ppub = float(lstds[i,j])
sppub = float(lstds[i,j+5])
if ppub==0. or (sppub==0. and sig==0.):
return -1
# if ztest > 2.5
# loglines += ('# CAUTION! Standard {0}, {1}, has polarization only compatible '+\
# 'within {2:.1f} sigma.\n').format(std, f, ztest)
return abs(ppub-p)/_np.sqrt(sppub**2+sig**2)
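# Illustrative sketch (not part of the original module): the z statistic used
# above, z = |p_pub - p_obs| / sqrt(sig_pub**2 + sig_obs**2), for assumed
# polarization values given in per cent.
def _example_ztest(ppub=1.20, sppub=0.03, pobs=1.10, sobs=0.04):
    return abs(ppub - pobs) / _np.sqrt(sppub**2 + sobs**2)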
#################################################
#################################################
#################################################
def readTests(tests, tags=None, flag=None):
"""
Read boolean list 'tests' concerning to dictests
dictionary and return a string list with tags and
the flag value ('OK','E','W')
'tags' and 'flag' are optional and refer to
other tags already assigned and the current flag. The flag
returned is the worst flag found among them
(e.g., if the input 'flag' is 'W' and the tests result in flag
'OK', return flag 'W'; if the tests result in flag 'E',
return 'E'). Also, if they are given as input, return
the input 'tags' plus the tags concerning the 'tests' list.
"""
tagstr = ''
# Generate a string for such tests tags
for i in dictests.keys():
if tests[i]:
if tagstr != '':
tagstr += ','
tagstr += dictests[i][1]
# Generate a string for such tags
if tags != None:
for i in dictags.keys():
if tags[i]:
if tagstr != '':
tagstr += ','
tagstr += dictags[i][1]
if flag == None:
flag = 'OK'
# Get the worst case for the flag
if 'E' in [dictests[j][2] for j in range(len(tests)) if tests[j]]+[flag]:
flag2 = 'E'
elif 'W' in [dictests[j][2] for j in range(len(tests)) if tests[j]]+[flag]:
flag2 = 'W'
else:
flag2 = 'OK'
if tagstr == '':
tagstr = '---'
return tagstr, flag2
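# Illustrative sketch (not part of the original module): combining automatic
# test results and user tags into the final tag string and flag.
def _example_readTests():
    tests = [False]*len(dictests)
    tests[2] = True                    # 'no standard in the night' -> flag 'W'
    tags = [False]*len(dictags)
    tags[0] = True                     # user tag 'bad modulation'
    tagstr, flag = readTests(tests, tags=tags, flag='OK')
    print(tagstr, flag)                # 'no-std,bad-mod' and 'W'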
#################################################
#################################################
#################################################
# Bednarski: I added delta variable to (calc-calcst) tolerance
def chkStdLog(f, calc, path=None, delta=3.5, verbose=True):
"""
Verify if there are standards for filter `f` and
calcite `calc` inside path/std.dat. Return True if
successful, unless the standard has been reduced
and marked with `E` flag.
delta is the allowed variation for the angles between the two
beams for one same calcite.
"""
loglines = ''
if path == None or path == '.':
path = _os.getcwd()
# Read `obj.dat` and `std.dat`. If there are errors, assigns [''] to get inside
# ifs below and print error messages
try:
std = _np.loadtxt('{0}/std.dat'.format(path), dtype=str)
except:
std = _np.array([], dtype=str)
# Verify whether std.dat has more than one line. If not, reshape (transform
# list type [] into [[]] for further compatibility)
if _np.size(std) != 0:
# If std is of type [] -- one line with 9 elements:
if type(std[0]) != _np.ndarray and _np.size(std) == 9:
std = std.reshape(-1,9)
elif (type(std[0]) == _np.ndarray and _np.size(std[0]) != 9) \
or (type(std[0]) != _np.ndarray and _np.size(std) != 8):
# Save loglines only if this function was called by genLog
if _getouterframes(_currentframe(), 2)[1][3] == 'genLog':
writeLog(path, '# ERROR: polt.chkStdLog() not run! Incompatible number '+ \
'of columns in `std.dat`.\n')
else:
eprint('# ERROR: Incompatible number of columns in `std.dat`.\n')
foundstd = False
for stdi in std:
# Skip if stdi is not to use ('Error' flag)
if stdi[7] == 'E':
continue
fst = stdi[3]
calcst = float(stdi[4])
if f == fst and abs(calc-calcst) < delta:
foundstd = True
break
if not foundstd and verbose:
print(('# WARNING: Standard star not found for filt. {0} and '+\
'calc. {1:.1f}\n').format(f, calc))
return foundstd
#################################################
#################################################
#################################################
def writeLog(path, strin):
"""
Append 'strin' string into 'path'/polt.log file
"""
f0 = open('{0}/polt.log'.format(path), 'a')
f0.writelines(strin)
f0.close()
return
#################################################
#################################################
#################################################
def genLog(path, subdirs, tgts, fileout, sigtol=lambda sigm: 1.4*sigm, \
autochoose=False, delta=3.5):
"""
Generate the .dat file with data of objects 'tgts[:]' inside
'path'/'subdirs[:]' directories
Save the results in 'path'/'fileout'
Usable to generate target and standard lists (obj.dat and std.dat)
delta: tolerance for the angle between the two beams of calcite.
If abs(angle1 - angle2) < delta, both observations 1 and 2
are assigned as the same calcite.
sigtol: tolerance to use the outfiles with all WP instead of the
out with the best error. Must be a function that receives
a pol sigma value (in decimal form and NOT in per cent,
i.e., a value from 0. to 1., where 1 is a 100% polarized
source) and returns the maximum sigma for which to ignore
the best out. It must be given as a Python lambda function.
The default value is sigtol=lambda sigm: 1.4*sigm, while
the old value was sigtol=lambda sigm: 1.1*sigm + 0.00005. If
you want to take just the groups with all WP, and none other,
you can specify sigtol=lambda sigm: 1000.*sigm, for example.
autochoose: choose best outfiles automatically, without
interaction?
"""
if fileout.split('.')[0] == 'std':
typ = 'standards'
elif fileout.split('.')[0] == 'obj':
typ = 'targets'
else:
typ = fileout
# print(subdirs)
# print(tgts)
# if mode not in ('std','obj'):
# print('\n# ERROR: mode \'{0}\' not valid (it\'s only valid \'std\' and \'obj\')'.format(mode))
# return 1
if len(tgts) != len(subdirs):
eprint('\n# ERROR: polt.genLog() NOT RUN for {0} (len(tgts) != len(subdirs))'.format(typ))
writeLog(path, '# ERROR: polt.genLog() NOT RUN for {0}! (len(tgts) != len(subdirs))\n'.format(typ))
return 1
continuerun = False
# Check whether a previous run exists and whether it generated at least one line.
if _os.path.exists('{0}/{1}.tmp'.format(path,fileout)) and len(_np.loadtxt('{0}/{1}.tmp'.format(path,fileout), dtype=str)) != 0:
opt = ''
while opt not in ('y','Y','n','N'):
opt = input(('There is a file from an uncompleted previous run for {0}. ' +\
'Do you want to continue from where it stopped? (y/n): ').format(typ))
if opt in ('y','Y'):
continuerun = True
if not continuerun:
f0 = open('{0}/{1}.tmp'.format(path,fileout), 'w')
f0.writelines('{:12s} {:>7s} {:>10s} {:4s} {:>5s} {:<s} {:>4s} {:>5s} {:<s}\n'.format('#MJD','ccd',\
'target','filt','calc',':::outfile:::','star','flag','tags'))
f0.close()
# Case continuing a previous run, identify the stars already runned
else:
ftemp = _np.loadtxt('{0}/{1}.tmp'.format(path,fileout), dtype=str)
odone=[] # odone and fdone are lists that contain the subdirectories, star number and the
fdone=[] # filters already done by the previous run
# If there is just one line, transform np array type [] for [[]]
if len(ftemp) > 0 and len(ftemp[-1]) != 9:
ftemp = ftemp.reshape(-1,9)
for line in ftemp:
# [3:] because the firsts characters are ':::'
objct = [line[5].split('/')[0][3:], line[6]]
if objct in odone:
indx = odone.index(objct)
fdone[indx] += [line[3]]
else:
odone += [objct]
fdone += [[line[3]]]
# print odone
# print fdone
# Loop on list of objects
for i in range(len(tgts)):
obj = tgts[i]
objdir = subdirs[i]
if obj == '':
continue
# Loop on filters
for f in filters:
nstars = countStars('{0}/{1}'.format(path,objdir), f)
# Check if there exist fits files for object/filter, but not .out files (target not reduced)
if nstars == 0 and (len(_glob('{0}/{1}/*_{2}_*.fits'.format(path,objdir,f))) > 0 \
or len(_glob('{0}/{1}/{2}/p??0'.format(path,objdir,f))) > 0):
eprint(('\n# ERROR: {0}_{1}: Fits files found, but the object was not reduced! ' +\
'Reduce and run again...\n\n - HINT: if these fits files compose some ' +\
'non-valid series but need to be kept, move them to a subdir {2}/tmp, ' +\
'so that the path will not be swept by the routine.\n').format(objdir,f,objdir))
exit(1)
# Check if there exist some .out file for such object/filter, but not the fits files
elif nstars != 0 and (len(_glob('{0}/{1}/*_{2}_*.fits'.format(path,objdir,f))) == 0 \
and len(_glob('{0}/{1}/{2}/p??0'.format(path,objdir,f))) == 0):
eprint(('\n# ERROR: {0}_{1}: Fits files not found, but *_{2}_* files were found. ' +\
'This can happen for three reasons:\n'+\
' 1) Fits files are missing (in this case, search for them and add them to that directory);\n' +\
' 2) The found *_{3}_* files may be \'spurious files\' (in this case, delete them);\n' +\
' 3) The prefix of the *_{4}_*.fits files may differ from that of the rest of the *_{5}_* ' +\
'files (in this case, rename them).\n').format(objdir,f,f,f,f,f))
exit(1)
# Working for more than a single star inside .out files
for nstar in range(1,nstars+1):
loglines = ''
# Skip if the object/filter was done already in a previous run
if continuerun and [objdir,str(nstar)] in odone and \
f in fdone[odone.index([objdir,str(nstar)])]:
continue
elif nstars == 1:
obj = tgts[i] # This line is needed here again
else:
while True:
obj = input(('Type a name for star #{0:d} (of {1:d}) ' +\
'inside {2} dir, filter {3}: '). format(nstar, nstars, objdir, f))
if obj != '' and ' ' not in obj and '#' not in obj and ':' not in obj:
loglines += '# WARNING: {0}_{1}: There are more than one star inside .out files.'\
.format(objdir, f)+' Star #{0:d} was named as {1}.\n'.format(nstar, obj)
break
if autochoose:
outs = chooseout('{0}/{1}'.format(path,objdir), obj, f, nstar=nstar, sigtol=sigtol)
tags=None
flag=None
else:
outs, tags, flag = queryout('{0}/{1}'.format(path,objdir), obj, f, nstar=nstar, sigtol=sigtol)
print('')
# Loop on outfiles
lines = ''
for j in range(len(outs)):
if outs[j] != '':
[Q,U,sig,P,th,sigT,ap,star,MJD,calc] = readoutMJD(outs[j], nstar=nstar)
tests, logs = verout(outs[j], obj, f, nstar=nstar, verbose=False, delta=delta)
loglines += logs
if tags!=None and flag!=None:
tagstr, flagout = readTests(tests, tags=tags[j], flag=flag[j])
else:
tagstr, flagout = readTests(tests, tags=None, flag=None)
# It is needed to open and close in each object
lines += ('{0:12.6f} {1:>7s} {2:>10s} {3:>4s} {4:>5.1f} {5:<s} {6:>4d} ' \
'{7:>5s} {8:<s}\n').format(MJD, ccd, obj, f, float(calc), \
':::'+_os.path.relpath(outs[j], path)+':::', nstar, flagout, tagstr)
# Write lines after process all outfiles for object in one filter
if lines != '':
writeLog(path, loglines)
f0 = open('{0}/{1}.tmp'.format(path,fileout), 'a')
f0.writelines(lines)
f0.close()
# Read fileout+'.tmp', realign columns to fileout and delete fileout+'.tmp'
fin = open('{0}/{1}.tmp'.format(path,fileout), 'r')
lines = [li.split(':::') for li in fin.readlines()]
fin.close()
if len(lines) == 1:
writeLog(path, '# WARNING: No valid {0} were found by polt.genLog().\n'.format(typ))
else:
maxsize = max([len(li[1]) for li in lines])
linesout = []
for i in range(len(lines)):
if lines[i] in ('','\n'):
continue
linesout += [lines[i][0]+lines[i][1].rjust(maxsize+2)+lines[i][2]]
fout = open('{0}/{1}'.format(path,fileout), 'w')
fout.writelines(linesout)
fout.close()
try:
_os.unlink('{0}/{1}.tmp'.format(path,fileout))
except:
pass
return 0
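# Illustrative sketch (not part of the original module): the sigtol choices
# mentioned in the docstring above, for an assumed night directory and target.
def _example_genLog_sigtol():
    # current default tolerance
    genLog('some_night', ['someobj_a0'], ['someobj'], 'obj.dat',
           sigtol=lambda sigm: 1.4*sigm, autochoose=True)
    # former default:            sigtol=lambda sigm: 1.1*sigm + 0.00005
    # only full-WP groups taken: sigtol=lambda sigm: 1000.*sigm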
#################################################
#################################################
#################################################
def genAllLog(path=None, sigtol=lambda sigm: 1.4*sigm, autochoose=False, delta=3.5):
"""
Generate the std.dat/obj.dat for one reduced night
path: path of night
delta: tolerance for the angle between the two beams of calcite.
If abs(angle1 - angle2) < delta, both observations 1 and 2
are assigned as the same calcite.
sigtol: tolerance to use the outfiles with all WP instead of the
out with the best error. Must be a function that receives
a pol sigma value (in decimal form and NOT in per cent,
i.e., a value from 0. to 1., where 1 is a 100% polarized
source) and returns the maximum sigma for which to ignore
the best out. It must be given as a Python lambda function.
The default value is sigtol=lambda sigm: 1.4*sigm, while
the old value was sigtol=lambda sigm: 1.1*sigm + 0.00005. If
you want to take just the groups with all WP, and none other,
you can specify sigtol=lambda sigm: 1000.*sigm, for example.
autochoose: choose best outfiles automatically, without
interaction?
The routine will sweep through the 'path' directory, except for the subdirectories
called "calib", "dark", "flat", "bias" and "tmp". You can use the "tmp" subdirectory
to place all dummy subdirectories into.
If there are standard stars to be used from another night, please create
a plain text file called std.link within the night directory:
- Its content must have one or two lines with an average angle for
the missing calcite and the night of the same mission whose standard
is to be used.
- If there is no standard star in ANY night of the mission, use the
`s` token (which means `skip`) instead of the night indicator.
- An example of std.link content (inside 12set09/std.link, see it for
more details):
140.0 12set08
172.0 s
"""
if path == None or path == '.':
path = _os.getcwd()
# Verify whether std.dat and obj.dat files exist. If so, ask before deleting them.
# polt.log is not deleted because the aim is to keep all the information about previous runs
if _os.path.exists('{0}/std.dat'.format(path)):
if _os.path.exists('{0}/obj.dat'.format(path)):
while True:
verif = input('Caution: obj.dat and std.dat already exist. Are you sure you want to overwrite them? (y/n): ')
if verif in ('n','N'):
print('Aborted!')
return
elif verif in ('y','Y'):
break
for arq in (path+'/obj.dat', path+'/std.dat'): #, path+'/polt.log'):
try:
_os.unlink(arq)
except:
pass
elif not _os.path.exists('{0}/obj.dat'.format(path)) and _os.path.exists('{0}/obj.dat.tmp'.format(path)):
print('# WARNING: keeping file std.dat and processing only obj.dat...\n')
# Generates lists
try:
ltgts = _np.loadtxt('{0}/refs/pol_alvos.txt'.format(_hdtpath()), dtype=str)
if _os.path.exists('{0}/refs/pol_hip.txt'.format(_hdtpath())):
try:
ltgts = _np.concatenate((ltgts,_np.loadtxt('{0}/refs/pol_hip.txt'.\
format(_hdtpath()), dtype=str)))
except:
pass
if _os.path.exists('{0}/refs/pol_unpol.txt'.format(_hdtpath())):
try:
ltgts = _np.concatenate((ltgts,_np.loadtxt('{0}/refs/pol_unpol.txt'.\
format(_hdtpath()), dtype=str)))
except:
pass
lstds = _np.loadtxt('{0}/refs/pol_padroes.txt'.format(_hdtpath()), \
dtype=str, usecols=[0])
except:
eprint('# ERROR: Can\'t read files pyhdust/refs/pol_alvos.txt and/or pyhdust/refs/pol_padroes.txt.\n')
exit(1)
subdirs = [fld for fld in _os.listdir('{0}'.format(path)) if \
_os.path.isdir(_os.path.join('{0}'.format(path), fld))]
tgts=[] # variable for real target names, not the subdirectory names
stds=[] # variable for real standard names, not the subdirectory names
lines = ''
# set ccd name from first .fits file
try:
setCCD(_glob('{0}/{1}/*.fits'.format(path, next(k for j,k in enumerate(subdirs)\
if k!='')))[0])
except:
setCCD('') # set manually inside the function
# Verifies if object is a standard star or target
# (Works on directories with suffix also (like 'dsco_a0'))
for obj in [elem.split('_')[0] for elem in subdirs]:
obj_curr = obj
while obj not in _np.hstack((ltgts,lstds,_np.array(['calib','flat','dark','bias','tmp']))):
if obj_curr == obj:
print('\nObject {0} is not a known target or standard!!'.format(obj_curr))
else:
print('\nObject {0} (and {1}) is not a known target or standard!!'.format(obj_curr, obj))
obj = input('Type the common name (you can add a new target inside pyhdust/ref/pol_*.txt files, but be careful!): ')
if obj in lstds:
tgts.append('')
# Only assigns standard's name if there is no std.dat file. Otherwise,
# it's because the actual run will process only the targets, not standards.
if not _os.path.exists('{0}/std.dat'.format(path)):
stds.append(obj)
elif obj in ltgts:
tgts.append(obj)
stds.append('')
elif obj in ('calib','flat','dark','bias','tmp'):
tgts.append('')
stds.append('')
print('')
writeLog(path, '#### BEGIN\n')
if not _os.path.exists('{0}/std.dat'.format(path)):
genLog(path, subdirs, stds, fileout='std.dat', delta=delta, sigtol=sigtol, autochoose=autochoose)
genLog(path, subdirs, tgts, fileout='obj.dat', delta=delta, sigtol=sigtol, autochoose=autochoose)
# Write user name and date+time
username = _pwd.getpwuid(_os.getuid())[4]
if username.find(',') != -1:
username = username[:username.find(',')]
loglines = _time.strftime("\nGenerated at: %Y/%m/%d - %I:%M %p\n")
loglines += ' by: ' + _pwd.getpwuid(_os.getuid())[0] + ' (' + username + ')\n\n'
writeLog(path, loglines)
with open('{0}/polt.log'.format(path), 'r') as fl:
print('\n{0}\nPOLTOOLS LOG (content of {1}/polt.log)\n\n{2}'.format('='*40, path, fl.read()))
return
#################################################
#################################################
#################################################
def corObjStd(night, f, calc, path=None, delta=3.5, verbose=True):
"""
Find the correction factor delta theta for filter 'f'
inside night 'night', for calcite 'calc' (the last variable
must be the angle between the ord. and extraord. beams).
Returns the values for matching standard stars inside
'night'/std.dat file, except when marked with an 'E' flag.
    NEW: In case of missing data in filter 'f', this routine
         tries to compute delta theta by using the data from
         another filter, making use of the relations among the
         values of delta theta for each filter. Only the
         estimate presenting the smallest error is taken.
    verbose: auxiliary variable, set to False when called from
             the polt.genTarget routine. Keep it as True
             otherwise.
Formulas:
* dth = fact*th^std_measured - th^std_published
* fact = -1 for observations before 2015 March 1st
+1 otherwise
delta: tolerance, in degree, of angle of two beams for
one same calcite (default: +/-3.5 degree)
Output, in order: stdnames, mdth, smdth, flag, tags
- stdnames: string with standard star names separated
by commas (,)
- mdth: mean delta theta (corrected by the output factor
+-1 of routine polt.thtFactor())
- smdth: the error of the mean delta theta
      - flag: flag concerning the standards.
      - tags: string with the tags concerning the standards,
              separated by commas (,), or '---' if none.
"""
########################
def computeDth():
"""
Main routine
"""
try:
dthref = _np.loadtxt('{0}/refs/dths.txt'.format(_hdtpath()), dtype=str)
except:
eprint('# ERROR: Can\'t read files pyhdust/refs/dths.txt')
exit(1)
# Try to use the standard star observation at filter f
stdnames, dth, sdth, flag, tags = readFilter(f)
# print('STD REPORT: filter {0} runned...\t stdnames={1}\t dth={2}\t sdth={3}\t flag={4}\t tags={5}'.format(f, stdnames, dth, sdth, flag, tags))
        # If a standard was found
if stdnames != '---':
mdth = sum(dth)/len(dth) # Mean dth
devth = _np.std(dth)/_np.sqrt(len(dth)) # Std dev of the mean
smdth = _np.sqrt(devth**2 + _np.dot(sdth,sdth)/(len(sdth)**2)) # Combined final error
# print('STD REPORT: filter {0} has...\t dth={1}\t mdth=mean(dth)={2}\t smdth={3}'.format(f, dth, mdth, smdth))
# Otherwise
else:
# print('STD REPORT: filter {0} has not standard... trying compute dth from another filter...'.format(f))
# print('{0:<10s} Trying compute delta_theta from another filter...'.format(f))
mdth = 0.
smdth = 10000.
            # The calcite is identified with the test below, because this is how the
            # beams seem to behave within the CCD.
# if calc == 0:
# calcite = ''
# print('{0:<12s} WARNING! Calcite name not identified for an angle of 0 degrees.'.format(night+', '+f+':'))
# while calcite not in ('a0','a2'):
# calcite = input(' Type the calcite name (a0/a2): ')
if (calc < 12 and calc >= 0) or (calc > 78 and calc < 102) or (calc > 168 and calc < 180):
calcite = 'a2'
elif calc >= 0 and calc < 180:
calcite = 'a0'
# Useful because sometimes I set the calc value manually inside std.dat/obj.dat for special
# cases, putting a negative value.
else:
calcite = ''
print('{0:<12s} WARNING! Calcite name not identified for an angle of {1} degrees.'.format(night+', '+f+':', calc))
while calcite not in ('a0','a2'):
calcite = input(' Type the calcite name (a0/a2): ')
for filt in filters:
if filt == f[0]:
continue
ddth = 0.
sddth = 0.
stdnamesaux, dth, sdth, flagaux, tagsaux = readFilter(filt)
# print('STD REPORT: filter {0} runned to correction of filter {1}...\t stdnames={2}\t dth={3}\t sdth={4}\t flag={5}\t tags={6}'.format(filt, f, stdnamesaux, dth, sdth, flagaux, tagsaux))
if stdnamesaux == '---':
# print('STD REPORT: filter {0} (runned to correction of filter {1}) has not standard... trying compute dth from another filter...'.format(filt, f))
continue
# Try to find the 'delta delta theta'
for dthi in dthref:
if dthi[0] == '{0}-{1}'.format(f[0],filt) and dthi[1] == calcite:
ddth = float(dthi[2])
sddth = float(dthi[3])
dth = [di+ddth for di in dth] # Refresh the new dth list
mdthaux = sum(dth)/len(dth) # Mean dth
devthaux = _np.std(dth)/_np.sqrt(len(dth)) # Std dev of the mean
smdthaux = _np.sqrt(devthaux**2 + _np.dot(sdth,sdth)/(len(sdth)**2) + sddth**2) # Combined final error
# print('STD REPORT: filter {0} (runned to correction of filter {1}) has the identified {2}-{3} value for calcite {4} ({5}): ddth={6}\t sddth={7}'.format(filt, f, f, filt, calc, calcite, ddth, sddth))
                        # If there is an observation in filter filt, as well as a delta theta
                        # relation inside the dths.txt file, check whether it gives a smaller error
if smdthaux < smdth:
stdnames = stdnamesaux
flag = flagaux
tags = tagsaux
mdth = mdthaux
devth = devthaux
# print('STD REPORT: filter {0} (runned to correction of filter {1}) has a best error in mdth...\t dth={2}\t ddth={3}\t mdth=mean(dth)+ddth={4}\t smdth={5}<{6}. Refreshing...'.format(filt, f, [di-ddth for di in dth], ddth, mdth, smdthaux, smdth))
smdth = smdthaux
# else:
# print('STD REPORT: filter {0} (runned to correction of filter {1}) has NOT a best error in mdth: smdth={2}>={3}. Skipping this filter...'.format(filt, f, smdthaux, smdth))
break
# if ddth == 0 and sddth == 0:
# print('STD REPORT: filter {0} (runned to correction of filter {1}) doesn\'t have {2}-{3} ddth value found for calcite {4} ({5}). Skipping this filter...'.format(filt, f, f, filt, calc, calcite))
if stdnames == '---':
if verbose:
                print(('{0:<12s} WARNING! No standard found in `std.dat` for filter {1}.').format(night+', '+f+':', f))
smdth = 0.
else:
if flag == 'OK':
flag = 'W'
if tags == '---':
tags = 'oth-dth'
else:
tags += ',oth-dth'
            if verbose:
                print(('{0:<12s} WARNING! Delta theta for filter {1} was computed using a delta theta from another filter.').format(night+', '+f+':', f))
            else:
                print(('{0:<12s} WARNING! Delta theta for filter {1} was computed using a delta theta from another filter.').format('', f))
# print('STD REPORT: filter {0} - final values...\t stdnames={1}\t mdth={2}\t smdth={3}\t flag={4}\t tags={5}\n'.format(f, stdnames, mdth, smdth, flag, tags))
return stdnames, mdth, smdth, flag, tags
########################
########################
def readFilter(filt):
"""
Receive a filter 'filt' and return two lists with the delta theta values
and the errors. Return also a string with the names of standards and
the flag and tags string.
"""
try:
stdref = _np.loadtxt('{0}/refs/pol_padroes.txt'.format(_hdtpath()), dtype=str, usecols=range(0,22))
except:
eprint('# ERROR: Can\'t read files pyhdust/refs/pol_padroes.txt')
exit(1)
dth = []
sdth = []
stdnames = '---'
tags = '---'
flag = 'OK'
if _os.path.exists('{0}/{1}/std.dat'.format(path,night)):
stds = _np.loadtxt('{0}/{1}/std.dat'.format(path,night), dtype=str)
if len(stds) > 0 and len(stds[-1]) != 9:
stds = stds.reshape(-1,9)
for stdinf in stds:
if stdinf[7] == 'E' and stdchk(stdinf[2])[0] and stdinf[3] == filt and \
abs(float(stdinf[4])-calc) <= delta:
if f == filt and verbose:
print(('{0:<12s} WARNING! Standard `{1}` ({2}) wasn\'t used because it ' +\
'had `E` flag. Skipping this standard data...').format(night+', '+f+':',stdinf[2],f))
# else:
# print(('{0:<12s} WARNING! Standard `{1}` ({2}) wasn\'t used because it ' +\
# 'had `E` flag. Skipping this obs serie...').format('',stdinf[2],f))
continue
elif stdinf[7] != 'E' and stdchk(stdinf[2])[0] and stdinf[3] == filt and \
abs(float(stdinf[4])-calc) <= delta:
# Bednarski: Show error message now
try:
nstar = int(stdinf[6])
Q, U, sig, P, th, sigT, tmp, tmp2 = readout('{0}/{1}'.\
format(path+'/'+night,stdinf[5]), nstar=nstar)
except:
if f == filt and verbose:
print(('{0:<12s} WARNING! Standard `{1}` ({2}) wasn\'t used because' +\
' can\'t open/read {3}. Skipping this standard data...').\
format(night+', '+f+':', stdinf[2], filt, stdinf[5]))
# else:
# print(('{0:<12s} WARNING! Standard `{1}` ({2}) wasn\'t used because' +\
# ' can\'t open/read {3}. Skipping this obs serie...').\
# format('', stdinf[2], filt, stdinf[5]))
continue
if stdref[stdchk(stdinf[2])[1],filters.index(filt[0])+1] == '0':
if f == filt and verbose:
print(('{0:<12s} WARNING! Standard `{1}` ({2}) wasn\'t used because' +\
' there is no published value in such filter. Skipping this standard data...').\
format(night+', '+f+':', stdinf[2], filt))
continue
# Refresh string for std names
if stdnames == '---':
stdnames = stdinf[2]
elif stdinf[2] not in stdnames:
stdnames += ','+stdinf[2]
# Receive the published theta and its error
# (Let filt[0] because sometimes filter can be 'v2' for example)
i = stdchk(stdinf[2])[1]
angref = float(stdref[i,filters.index(filt[0])+1])
sangref = float(stdref[i,filters.index(filt[0])+6])
# Calculate dth and its error
dth += [thtFactor(float(stdinf[0]))*float(th)-angref]
while dth[-1] >= 180:
dth[-1] -= 180
while dth[-1] < 0:
dth[-1] += 180
sdth += [_np.sqrt((28.65*float(sig)/float(P))**2 + sangref**2)]
# print('STD: dth({0}) = factor*th_obs - th_pub = {1} * {2:.2f} - {3:.2f} = {4:.3f}'.format(len(dth),thtFactor(float(stdinf[0])),float(th), angref, dth[-1]))
# print('STD: s_th_pub={0:.3f}'.format(sangref))
# Receive the flag
if flag == 'OK' and stdinf[7] == 'W':
flag = 'W'
# Refresh the tag list
for tagi in stdinf[8].split(','):
if tagi not in tags+',---':
if tags == '---':
tags = ''
else:
tags += ','
tags += tagi
        # Fix the cases where dth is close to 0 (for instance, if dth[0]=0.3,
        # dth[1] must be -1.0 and not 179.0)
for i in range(len(dth)):
if min(dth) < 10 and dth[i] > 170:
dth[i] -= 180
if stdnames == '---':
flag = 'W'
tags = 'no-std'
return stdnames, dth, sdth, flag, tags
########################
if path == None or path == '.':
path = _os.getcwd()
calc = float(calc)
if not _os.path.exists('{0}/{1}/std.dat'.format(path,night)):
# print('{0:<12s} WARNING! `std.dat` file not found.'.format(night+', '+f+':'))
return '---', 0., 0., 'W', 'no-std'
else:
return computeDth()
#################################################
#################################################
#################################################
def genTarget(target, path=None, path2=None, ispol=None, skipdth=False, delta=3.5, epssig=2.0):
""" Gen. target
Generate a table with all observations found for 'target',
    except those which have the `E` flag.
    The error in delta theta (the factor from the observation of the standard
    star) IS NOT propagated to the theta of the Be star.
path: path to `red` directory. If None, it is supposed
that the user is already inside `red` directory.
path2: path where to save the data file. If None, it is
supposed as '.' directory.
    skipdth: skip the observations without estimates for delta
             theta (no standard star in any filter and no
             standard star in the previous and next nights)? If
             True, the only exception is when the data are
             'unpolarized' (defined by the 'epssig' variable).
    epssig: maximum sigP/P for an unpolarized target (sigP/P up to
            epssig doesn't need a standard star when skipdth=True)
    ispol: the Serkowski parameters [P_max, lambda_max, theta_IS]
           to correct IS polarization (P_max in % and lambda_max in
           Angstrom). If ispol==None, the ISP correction is not
           applied.
    Syntax of output tags: tags1:tags2:tags3, where tags1 concerns
                        the method used to calculate the delta_theta
                        correction factor, tags2 is the tag list of
                        the object observation and tags3 holds the
                        tags of the standard star that has been used.
    If no standard star was found in some night for one filter, this
    routine tries to use the standard from another filter to compute
    the delta theta in the missing filter. If there is no standard star
    in the other filters either, the routine tries to read a file named
    std.link, whose lines must contain [calc night]. It points to
    a standard from another night `night` in calcite `calc` (an example
    of syntax: [calc night] = [136.1 15out22]). If this file is
    not found, the routine asks directly which night's standard
    stars to use.
Formulas:
* th_eq = fact*th_measured - fact*th^std_measured + th^std_published
* th_eq = fact*th_measured - dth
* dth = fact*th^std_measured - th^std_published
* fact = -1 for observations before 2015 March 1st
+1 otherwise
* Q_eq = P*cos(2*th_eq*pi/180)
* U_eq = P*sin(2*th_eq*pi/180)
* sigth = 28.65*sig/P
"""
verbose = ''
nlines = 0
if path == None or path == '.':
path = _os.getcwd()
if path2 == None or path2 == '.':
path2 = _os.getcwd()
# print target, path
# Read lists and verify if target is a valid target
try:
obj = _np.loadtxt('{0}/refs/pol_alvos.txt'.format(_hdtpath()), dtype=str)
        if _os.path.exists('{0}/refs/pol_hip.txt'.format(_hdtpath())):
obj = _np.concatenate((obj,_np.loadtxt('{0}/refs/pol_hip.txt'.format(_hdtpath()), dtype=str)))
        if _os.path.exists('{0}/refs/pol_unpol.txt'.format(_hdtpath())):
obj = _np.concatenate((obj,_np.loadtxt('{0}/refs/pol_unpol.txt'.format(_hdtpath()), dtype=str)))
std = _np.loadtxt('{0}/refs/pol_padroes.txt'.format(_hdtpath()), dtype=str, usecols=range(0,22))
except:
eprint('# ERROR: Can\'t read files pyhdust/refs/pol_alvos.txt and/or pyhdust/refs/pol_padroes.txt.')
exit(1)
if target in std[:,0]:
ftype='std'
else:
ftype='obj'
if target not in _np.hstack((std[:,0],obj)) and 'field' not in target:
print('\nWARNING: Target {0} is not a default target or standard!'.\
format(target))
print('\n'+'='*30+'\n')
nights = [fld for fld in _os.listdir(path) if _os.path.isdir(_os.path.join(path, fld))]
# Copy data from literature, if they exist
if _os.path.exists('{0}/literature/{1}_lit.log'.format(path,target)):
        with open('{0}/literature/{1}_lit.log'.format(path,target), 'r') as myfile:
lines = myfile.read()
lines = lines[lines.find('\n')+1:]
nlineslit = lines.count('\n') + 1
else:
lines = ''
nlineslit = 0
for night in nights:
# Check obj.dat/std.dat for the night
if _os.path.exists('{0}/{1}/{2}.dat'.format(path,night,ftype)):
try:
objs = _np.loadtxt('{0}/{1}/{2}.dat'.format(path,night,ftype), dtype=str)
except:
eprint('{0:<12s} WARNING! Can\'t read {1}.dat file. Ignoring this night...\n'.format(night+':',ftype))
continue
            # Verify whether the file has more than one line; if not, reshape the array
if _np.size(objs) == 9:
objs = objs.reshape(-1,9)
elif _np.size(objs) % 9 != 0:
eprint('{0:<12s} ERROR! Wrong column type in {1}.dat file. Ignoring this night...\n'.format(night+':',ftype))
exit(1)
valc = True
# Loop on found nights
for objinf in objs:
if objinf[2] == target or ('field' in objinf[2] and target == 'field'):
tags = ['---','---','---']
MJD, ccd, obj, f, calc, out, nstar, flag, tags[1] = objinf
if flag == 'E':
print(('{0:<12s} WARNING! Star found ({1}), but with `E` flag ' +\
'and tags `{2}`. Ignoring this data...').format(night+', '+f+':',f,tags[1]))
continue
try:
                        # fator indicates whether the polarization angle must be taken as 180-theta or +theta
fator = thtFactor(float(MJD))
Q, U, sig, P, th, sigT, tmp, tmp2 = readout('{0}/{1}'.\
format(path+'/'+night,out), nstar=int(nstar))
except:
eprint('{0:<12s} ERROR! Can\'t open/read out file {1}. Ignoring this data...\n'.format(night+', '+f+':',out))
exit(1)
P = float(P)*100
th = float(th)
sig = float(sig)*100
sigth = 28.65*sig/P
# print objinf, objinf[2], target, tags[1]
# Try to get the night's standard
if ftype == 'obj':
                        # Print the warning message below, only once per night
if valc and not _os.path.exists('{0}/{1}/std.dat'.format(path,night)):
print('{0:<12s} WARNING! `std.dat` file not found.'.format(night+':'))
valc = False
stdnames, mdth, smdth, flagstd, tags[2] = corObjStd(night, f, calc, path=path, delta=delta)
else:
stdnames = '---'
mdth, smdth = 0, 0
flagstd, tags[2] = 'OK', '---'
# if flagstd == 'E':
# mdth = 0.
# smdth = 0.
# stdnames = '---'
# flagstd = 'W'
# tags[2] = 'no-std'
# vald=True
# APPLY ALTERNATIVE METHOD TO COMPUTE DTHETA IN CASES WHERE THERE IS NO NIGHT'S STANDARD
while stdnames == '---' and ftype == 'obj':
night_alt=''
# print '{0}/{1}/std.link'.format(path,night)
# print _os.path.exists('{0}/{1}/std.link'.format(path,night))
if _os.path.exists('{0}/{1}/std.link'.format(path,night)):
# print 'entrou'
try:
file0 = _np.loadtxt('{0}/{1}/std.link'.format(path,night), dtype=str)
if type(file0[0]) != _np.ndarray and _np.size(file0) == 2:
file0 = file0.reshape(-1,2)
for line0 in file0:
if abs(float(line0[0])-float(calc)) <= delta:
night_alt = line0[1]
break
except:
eprint(('\n{0:<12s} ERROR! Bad format for the file {0}/std.link. Check and run again.').format(night))
exit(1)
# if temporary for me.
if night_alt == 's' or _os.path.exists('{0}/{1}/skipstd'.format(path,night)):
print(('{0:<12s} WARNING! No standard correction as specified inside std.link.\n').format(night+', '+f+':', night_alt))
break
if not _os.path.exists('{0}/{1}/std.link'.format(path,night)):
eprint(('\n{0:<12s} ERROR! There is no standard star for calcite {1} (filter {2}) and neither\n'+\
'a std.link file pointing to the night whose standard must be used:\n' +\
' 1) Check if {1} value is covered by the +-{3} deg tolerance for the angle\n'+\
' of the calcite beams (values in 5th column of obj.dat/std.dat).\n'+\
' 2) If there is no standard star indeed, create a plain text file\n'+\
' {0}/std.link.\n' +\
' 3) Its content must have one or two lines with an average angle for\n' +\
' the missing calcite and the night of the same mission whose standard\n'+\
' is to be used.\n' +\
' 4) Remember, this average angle needs cover all values of the angle of individual\n'+\
' observations at the such calcite within +-{3} deg.\n' +\
' 5) If there are no standard star in NONE night of the mission, use the\n'+\
' `s` token (which means `skip`) instead of the night indicator.\n\n'+\
' An example of std.link content (inside 12set09/std.link, see it for more details):\n'+\
' 140.0 12set08\n'+\
' 172.0 s'+\
'').format(night, calc, f, delta))
exit(1)
# Case there exists a std.link file, but not a line for the missing calcite, the procedure will
# enter inside elif below
elif night_alt=='':
eprint(('\n{0:<12s} ERROR! There is no standard star for calcite {1} and neither\n'+\
'a line inside std.link file pointing to another night.\n'+\
' 1) Check if {1} value is covered by the +-{2} tolerance for the angle\n'+\
' of the calcite.\n'+\
' 2) Case there is no standard star for such calcite in none night,\n' +\
' use the `s` token to `skip` the equatorial correction, adding a line\n'+\
' in std.link like `{3:.1f} s`.'
'').format(night, calc, delta, float(calc)))
exit(1)
# if night_alt=='':
# night_alt = input('\n{0:<12s} Do you want to select some standard from another day?\n{0:<12s} #Type the date or `s` to skip: '.format('','#'))
# print('')
# if night_alt in ('s','S'):
# break
if _os.path.exists('{0}/{1}'.format(path,night_alt)):
stdnames, mdth, smdth, flagstd, tags[2] = corObjStd(night_alt, f, calc, path=path, delta=delta, verbose=False)
valc = False
if stdnames != '---':
if flagstd == 'OK':
flagstd = 'W'
if tags[2] == '---':
tags[2] = 'oth-day-std'
else:
tags[2] += ',oth-day-std'
print(('{0:<12s} WARNING! Using standard from another night ({1}) as specified inside std.link.\n').format(night+', '+f+':', night_alt))
else:
print(('\n{0:<12s} ERROR! Standard not found inside the alternative night {1} pointed by std.link file (calcite {2})!').format(night, night_alt,calc))
exit(1)
else:
eprint(('\n{0:<12s} ERROR! Missing night named as {1} pointed by std.link file.').format(night, night_alt))
exit(1)
# print stdname, thstd, angref, flagstd, tags[2]
# Set the tags concerning to the standard
if stdnames == '---' and ftype == 'obj':
tags[0] = 'no-std'
elif ftype == 'obj':
if 'oth-day-std' in tags[2]:
tags[0] = 'oth-day-std'
if 'oth-dth' in tags[2] and tags[0] == '---':
tags[0] = 'oth-dth'
elif 'oth-dth' in tags[2]:
tags[0] += ',oth-dth'
# Refresh tags and flags
for specialtag in ('no-std','oth-day-std','oth-dth'):
for i in (1,2):
if tags[i] == specialtag:
tags[i] = '---'
if i == 1:
flag = 'OK'
elif tags[i][0:7] == specialtag+',':
tags[i] = tags[i].replace(specialtag+',','')
else:
tags[i] = tags[i].replace(','+specialtag,'')
# Set the "global" flag (for object+standard)
if flag == 'E' or flagstd == 'E':
flag = 'E'
elif flag == 'W' or flagstd == 'W':
flag = 'W'
else:
flag = 'OK'
# Applying the correction of standard star
th = fator*th-mdth
# Fixing the angle value and computing QU parameters
while th >= 180:
th-= 180
while th < 0:
th+= 180
Q = P*_np.cos(2*th*_np.pi/180)
U = P*_np.sin(2*th*_np.pi/180)
# Correction of IS polarization
# if ispol != None:
# QIS, UIS = serkowski(ispol[0], ispol[1], str(f), mode=1, pa=ispol[2])
# Q = Q - QIS
# U = U - UIS
# P = _np.sqrt(Q**2 + U**2)
# th = _np.arctan(Q/U)*90/_np.pi
# sigth = 28.65*sig/P
# Fix the angle to the correct in QU diagram
# if Q < 0:
# th += 90
# elif Q >= 0 and U < 0:
# th += 180
# Write the line
if stdnames != '---' or (not skipdth) or P/sig <= epssig or ftype == 'std':
if out.find('_WP') == -1:
outn = out[-11:]
else:
outn = out[out.find('_WP')-7:]
lines += ('{:12s} {:>7s} {:>7s} {:>4s} {:>5s} {:>12s} {:>6.1f} {:>6.1f}'+
' {:>8.4f} {:>8.4f} {:>8.4f} {:>7.2f} {:>7.4f} '+
'{:>6.2f} {:>13s} {:>4s} {:>5s} {:>s}').format(MJD, night, ccd, f, \
calc, stdnames, mdth, smdth, P, Q, U, th, sig, sigth, outn, nstar, \
flag, ';'.join(tags))
if target == 'field':
lines += ' {0}\n'.format(obj)
else:
lines += '\n'
nlines += 1
else:
print(('{0:<12s} ERROR! No valid delta_theta value estimated in filter {1}.' +\
' Ignoring this data...\n').format(night+', '+f+':', f))
else:
if verbose != '':
verbose += ', '
verbose += night
# Print "no obj/std.dat found" message
if verbose != '':
print('{0:<12s} WARNING! No `{1}.dat` file found for the following nights: {2}. Ignoring these nights...'.format('-------',ftype,verbose))
print('\n'+'='*30+'\n')
# Write the output
if lines != '':
# if ispol==None:
f0 = open('{0}/{1}.log'.format(path2,target),'w')
# else:
# f0 = open('{0}/{1}_iscor.log'.format(path2,target),'w')
#print(lines)
slines = sorted(lines[:-2].split('\n'))#, key=lambda x: [x[0],x[3]])
lines = '\n'.join(slines)
if target == 'field':
lines = ('#{:>11s} {:>7s} {:>7s} {:>4s} {:>5s} {:>12s} {:>6s} {:>6s}' +\
' {:>8s} {:>8s} {:>8s} {:>7s} {:>7s} {:>6s} {:>13s}' +\
' {:>4s} {:>5s} {:>s} {:s}\n').format('MJD', 'night',\
'ccd', 'filt', 'calc', 'stdstars', 'dth', 'sigdth', 'P', 'Q', 'U',\
'th', 'sigP', 'sigth', 'outfile', 'star', 'flag', 'tags', 'obj_name')+lines
else:
lines = ('#{:>11s} {:>7s} {:>7s} {:>4s} {:>5s} {:>12s} {:>6s} {:>6s}' +\
' {:>8s} {:>8s} {:>8s} {:>7s} {:>7s} {:>6s} {:>13s}' +\
' {:>4s} {:>5s} {:>s}\n').format('MJD', 'night',\
'ccd', 'filt', 'calc', 'stdstars', 'dth', 'sigdth', 'P', 'Q', 'U',\
'th', 'sigP', 'sigth', 'outfile', 'star', 'flag', 'tags')+lines
# if ispol==None:
# ispol = [0,0,0]
lines = ('# ISP parameters used:\n#\n# Pmax (%) lmax (A) PA\n# {0:>8.4f} {1:>9.2f} {2:>7.2f}\n#\n')\
.format(0.,0.,0.) + lines
f0.writelines(lines)
f0.close()
print('DONE! {0} lines (+{1} from literature) written in {2}/{3}.log.'.format(nlines,nlineslit,path2,target))
# Fix ISP if ispol is not null
if ispol!=None and ispol!=[0,0,0]:
fixISP('{0}/{1}.log'.format(path2,target),ispol=ispol)
# else:
# print('DONE! {0} lines written in {1}/{2}_iscor.log.'.format(nlines,path2,target))
else:
eprint('NOT DONE! No valid observation was found for target `{0}`.'.format(target))
# exit(1)
return
#################################################
#################################################
#################################################
def fixISP(logfile, ispol, path2=None):
"""
    Correct the interstellar polarization in the file 'logfile'.
    logfile: output file from genTarget.
    ispol: the Serkowski parameters [P_max, lambda_max, theta_IS]
           to correct IS polarization (P_max in % and lambda_max in
           Angstrom). If ispol==None, the ISP correction is not
           applied.
    path2: path where to save the data file. If None, the current
           directory is used.
"""
star = _phc.trimpathname(logfile)[1].split('.')[0].split('_')[0]
if star in _phc.bes:
be = _phc.bes[star]
else:
be = star
if path2 == None or path2 == '.':
path2 = _os.getcwd()
# if path2 == None or path2 == '.':
# path2 = _phc.trimpathname(logfile)[0]
# if path2=='':
# path2 = _os.getcwd()
try:
lines = _np.loadtxt(logfile, dtype=str)
except:
eprint('# ERROR: Can\'t read file {0}.'.format(logfile))
exit(1)
if type(lines[0]) != _np.ndarray and _np.size(lines) == 18:
lines = lines.reshape(-1,18)
linesout=''
for i,line in enumerate(lines):
# read filter
f = line[3][0]
# Correction of IS polarization
QIS, UIS = serkowski(ispol[0], ispol[1], str(f), mode=1, pa=ispol[2])
Q = float(line[9]) - QIS
U = float(line[10]) - UIS
P = _np.sqrt(Q**2 + U**2)
        th = _np.arctan(U/Q)*90/_np.pi
# sigth = 28.65*sig/P
# Fix the angle to the correct in QU diagram
if Q < 0:
th += 90
elif Q >= 0 and U < 0:
th += 180
linesout += ('{:12s} {:>7s} {:>7s} {:>4s} {:>5s} {:>12s} {:>6.1f} {:>6.1f}'+
' {:>8.4f} {:>8.4f} {:>8.4f} {:>7.2f} {:>7.4f} '+
'{:>6.2f} {:>13s} {:>4s} {:>5s} {:>s}\n').format(line[0], line[1], \
line[2], line[3], line[4], line[5], float(line[6]), float(line[7]), \
P, Q, U, th, float(line[12]), float(line[13]), line[14], line[15], \
line[16], line[17])
# lines[i][8] = P
# lines[i][9] = Q
# lines[i][10] = U
# lines[i][11] = th
if linesout != '':
f0 = open('{0}/{1}_iscor.log'.format(path2,star),'w')
linesout = ('# ISP parameters used:\n#\n# Pmax (%) lmax (A) PA\n# {0:>8.4f}'+\
' {1:>9.2f} {2:>7.2f}\n#\n').format(ispol[0],ispol[1],ispol[2]) + \
('#{:>11s} {:>7s} {:>7s} {:>4s} {:>5s} {:>12s} {:>6s} {:>6s}' +\
' {:>8s} {:>8s} {:>8s} {:>7s} {:>7s} {:>6s} {:>13s}' +\
' {:>4s} {:>5s} {:>s}\n').format('MJD', 'night',\
'ccd', 'filt', 'calc', 'stdstars', 'dth', 'sigdth', 'P', 'Q', 'U',\
'th', 'sigP', 'sigth', 'outfile', 'star', 'flag', 'tags')+linesout
f0.writelines(linesout)
f0.close()
print('DONE! File written in {0}/{1}_iscor.log.'.format(path2,star))
else:
eprint('NOT DONE! No observation for target `{0}`.'.format(star))
exit(1)
return
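# Minimal usage sketch of fixISP() (hypothetical file name and ISP parameters):
#   >>> fixISP('dsco.log', ispol=[0.30, 5500., 147.])
# writes '<cwd>/dsco_iscor.log' with the ISP-subtracted Q, U, P and theta values.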
#################################################
#################################################
#################################################
def serkowski(pmax, lmax, wlen, mode, pa=None, law='w82'):
"""
    Mode==1
        Receive ISP parameters 'pmax', 'lmax' and 'pa' and return
        the Stokes QU parameters corresponding to the value
        of Serkowski's law at wavelength 'wlen'.
    Mode==2
        Receive ISP parameters 'pmax' and 'lmax' and return
        the P value computed by Serkowski's law at
        wavelength 'wlen'.
    'wlen' and 'lmax' must be in Angstrom.
    'wlen' can also be 'u'/'b'/'v'/'r'/'i'.
    'law' defines which value to use for the K parameter:
w82 - Wilking (1982)
K = 1.86*lmax - 0.1
w80 - Wilking (1980)
K = 1.68*lmax - 0.002
serk - Serkowski
K = 1.15
Serkowski's Law:
P = pmax*np.exp(-K*np.log(lmax/wlen)**2)
"""
if law=='w82':
K = 1.86*lmax/10000 - 0.1 # Wilking (1982)
elif law=='w80':
K = 1.68*lmax/10000 - 0.002 # Wilking (1980)
elif law=='serk':
K = 1.15 # Serkowski
if pmax==0 and lmax==0:
P = 0.
elif mode==1 and wlen in _phc.lbds:
P = pmax*_np.exp(-K*_np.log(lmax/_phc.lbds[wlen])**2)
else:
P = pmax*_np.exp(-K*_np.log(lmax/wlen)**2)
if mode == 1:
# print wlen, P, pa
Q = P*_np.cos(2*pa*_np.pi/180)
U = P*_np.sin(2*pa*_np.pi/180)
return Q, U
elif mode == 2:
return P
else:
return
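# Illustrative use of serkowski() (hypothetical ISP parameters, not real data):
#   >>> Q, U = serkowski(1.0, 5500., 'v', mode=1, pa=45.)  # Stokes QU of the ISP in V
#   >>> P = serkowski(1.0, 5500., 6000., mode=2)           # P at 6000 Angstrom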
#################################################
def propQU(p, th, sp, sdth, estim='wk'):
"""
Propagate the delta theta error over the polarization
angle, computing the new errors for theta, Q and U.
Input:
- p, th: lists holding the P and theta values.
- sp, sdth: lists holding the P and delta theta errors.
    Return lists containing the new errors for theta,
    Q and U: sth, sq, su.
    Formulas:
       sth = sqrt( sth0^2 + sdth^2 )
       sq = sqrt( (cos(2*th)*sp)^2 + (p*sin(2*th)*sth)^2 )
       su = sqrt( (sin(2*th)*sp)^2 + (p*cos(2*th)*sth)^2 )
Unbias theta error using 'estim' estimator:
if p/sp <= K, sth0 = psi
otherwise, sth0 = propagated error
where K is given by the estimator related to the
'estim' variable:
a) 'ml' : Maximum Likelihood (K=1.41, psi=51.96)
b) 'wk' : Wardle & Kronberg (K=1.0, psi=51.96)
c) '' : None (K=0, psi=51.96)
d) 'mts': Maier, Tenzer & Santangelo (estimates
from Bayesian analysis, psi=61.14)
"""
if estim=='wk':
k=1.
elif estim=='ml':
k=1.41
elif estim=='':
k=0.
elif estim!='mts':
        eprint('# ERROR: estimation type `{0}` not valid!'.format(estim))
exit(1)
sth,sq,su = [],[],[]
sth0=0
for i in range(len(p)):
if p[i] != 0:
if estim!='mts':
if sp[i]!=0 and p[i]/sp[i] > k:
sth0 = 28.65*sp[i]/p[i]
else:
sth0 = 51.96
else:
if sp[i]!=0 and p[i]/sp[i] > 6:
sth0 = 28.65*sp[i]/p[i]
elif sp[i]!=0:
a=32.50
b=1.350
c=0.739
d=0.801
e=1.154
sth0 = a*(b+_np.tanh(c*(d-p[i]/sp[i]))) - e*p[i]/sp[i]
else:
sth0 = 61.14
sth += [_np.sqrt(sth0**2 + sdth[i]**2)]
else:
if estim!='mts':
sth += [51.96]
else:
sth += [61.14]
sq += [_np.sqrt( (_np.cos(th[i]*_np.pi/90)*sp[i])**2 + (p[i]*_np.sin(th[i]*_np.pi/90)*sth[i]*_np.pi/90)**2 )]
su += [_np.sqrt( (_np.sin(th[i]*_np.pi/90)*sp[i])**2 + (p[i]*_np.cos(th[i]*_np.pi/90)*sth[i]*_np.pi/90)**2 )]
return sth, sq, su
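# Minimal sketch of propQU() usage (made-up values; 'wk' estimator by default):
#   >>> sth, sq, su = propQU([1.2], [45.], [0.05], [0.5])
# returns the theta, Q and U errors with the 0.5 deg delta-theta error included.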
#################################################
#################################################
#################################################
def genJD(path=None):
"""Generate de JD file for the fits inside the folder
"""
if path == None or path == '.':
path = _os.getcwd()
for f in filters:
lfits = _glob('*_{0}_*.fits'.format(f))
if len(lfits) > 0:
lfits.sort()
i = lfits[0].find('_{0}_'.format(f))
pref = lfits[0][:i+2]
lfits = _glob('{0}_*.fits'.format(pref))
lfits.sort()
if len(lfits)%8 != 0:
print('# Warning! Strange number of fits files!')
print(lfits)
JDout = ''
i = 0
for fits in lfits:
i += 1
imfits = _pyfits.open(fits)
dtobs = imfits[0].header['DATE']
if 'T' in dtobs:
dtobs, tobs = dtobs.split('T')
dtobs = dtobs.split('-')
tobs = tobs.split(':')
tobs = float(tobs[0])*3600+float(tobs[1])*60+float(tobs[2])
tobs /= (24*3600)
else:
eprint('# ERROR! Wrong DATE-OBS in header! {0}'.format(fits))
exit(1)
JD = _np.sum(_jdcal.gcal2jd(*dtobs))+tobs
JDout += 'WP {0} {1:.7f}\n'.format(i,JD)
f0 = open('JD_{0}'.format(pref),'w')
f0.writelines(JDout)
f0.close()
return
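# Note: _jdcal.gcal2jd returns the pair (2400000.5, MJD), so summing it and adding
# the day fraction yields the full Julian Date. E.g., 2015-03-01 at 12:00 UT:
#   >>> _np.sum(_jdcal.gcal2jd(2015, 3, 1)) + 0.5
#   2457083.0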
#################################################
#################################################
#################################################
def listNights(path, tgt):
"""
List Nights
"""
ltgts = _np.loadtxt('{0}/refs/pol_alvos.txt'.format(_hdtpath()), dtype=str)
lstds = _np.loadtxt('{0}/refs/pol_padroes.txt'.format(_hdtpath()), dtype=str,\
usecols=[0])
if tgt not in _np.hstack((ltgts,lstds)):
print('# Warning! Target {0} is not a default target or standard!!'.\
format(tgt))
lnights = []
nights = [fld for fld in _os.listdir(path) if \
_os.path.isdir(_os.path.join(path, fld))]
for night in nights:
tgts = [fld for fld in _os.listdir('{0}/{1}'.format(path,night)) if \
_os.path.isdir(_os.path.join('{0}/{1}'.format(path,night), fld))]
if tgt in tgts:
lnights += [night]
else:
out = [obj for obj in tgts if obj.find(tgt) > -1]
if len(out) > 0:
lnights += [night]
return lnights
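# Example call (hypothetical path and target):
#   >>> listNights('red', 'dsco')
# returns the list of night directories under 'red/' that contain 'dsco' data.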
#################################################
#################################################
#################################################
# Still needs to be updated for the new column indices of the std.dat and obj.dat
# files after the changes I made. Bednarski.
def plotMagStar(tgt, path=None):
""" Function doc
@param PARAM: DESCRIPTION
@return RETURN: DESCRIPTION
"""
if path == None or path == '.':
path = _os.getcwd()
lmags = _np.loadtxt('{0}/refs/pol_mags.txt'.format(_hdtpath()), dtype=str)
if tgt not in lmags[:,0]:
eprint('# ERROR! {0} is not a valid mag. star!'.format(tgt))
return
data = _np.loadtxt('{0}/{1}.log'.format(path,tgt), dtype=str)
data = _np.core.records.fromarrays(data.transpose(), names='MJD,night,filt,\
calc,ang.ref,dth,P,Q,U,th,sigP,sigth', formats='f8,a7,a1,f8,f8,f8,f8,\
f8,f8,f8,f8,f8')
if False:
fig = _plt.figure()#figsize=(5.6,8))
ax0 = fig.add_subplot(311)
ax0.errorbar(data['MJD'], data['P'], data['sigP'], color='black')
ax1 = fig.add_subplot(312)
ax1.errorbar(data['MJD'], data['Q'], data['sigP'], color='blue')
ax2 = fig.add_subplot(313)
ax2.errorbar(data['MJD'], data['U'], data['sigP'], color='red')
idx = _np.where(lmags[:,0] == tgt)
P, ph0 = lmags[idx][0][1:]
ph0 = float(ph0) - _jdcal.MJD_0
phase = data['MJD']-ph0
phase /= float(P)
phase = _np.modf(phase)[0]
idx = _np.where(phase < 0)
phase[idx] = phase[idx]+1
fig2, (ax0, ax1, ax2, ax3) = _plt.subplots(4,1, sharex=True)
ax0.errorbar(phase, data['P'], yerr=data['sigP'], fmt='ok')
ax1.errorbar(phase, data['Q'], yerr=data['sigP'], fmt='or')
ax2.errorbar(phase, data['U'], yerr=data['sigP'], fmt='ob')
ax3.errorbar(phase, data['th'], yerr=data['sigth'], fmt='ok')
ax3.set_xlabel('Phase')
ax0.set_ylabel(u'P (%)')
ax1.set_ylabel(u'Q (%)')
ax2.set_ylabel(u'U (%)')
ax3.set_ylabel(r'$\theta$ (deg.)')
ax0.set_title('{0} ; P={1} days, ph0={2:.3f}'.format(tgt,P,ph0+_jdcal.MJD_0))
_plt.savefig('{0}/{1}.png'.format(path,tgt))
bphase, bP, bsigP = _phc.bindata(phase, data['P'], data['sigP'], 30)
bphase, bQ, bsigP = _phc.bindata(phase, data['Q'], data['sigP'], 30)
bphase, bU, bsigP = _phc.bindata(phase, data['U'], data['sigP'], 30)
bphase, bth, bsigth = _phc.bindata(phase, data['th'], data['sigth'], 30)
fig3, (ax0, ax1, ax2, ax3) = _plt.subplots(4,1, sharex=True)
ax0.errorbar(bphase, bP, yerr=bsigP, fmt='ok')
ax1.errorbar(bphase, bQ, yerr=bsigP, fmt='or')
ax2.errorbar(bphase, bU, yerr=bsigP, fmt='ob')
ax3.errorbar(bphase, bth, yerr=bsigth, fmt='ok')
ax3.set_xlabel('Phase')
ax0.set_ylabel(u'P (%)')
ax1.set_ylabel(u'Q (%)')
ax2.set_ylabel(u'U (%)')
ax3.set_ylabel(r'$\theta$ (deg.)')
ax0.set_title('{0} (binned); P={1} days, ph0={2:.3f}'.format(tgt,P,ph0+_jdcal.MJD_0))
_plt.savefig('{0}/{1}_bin.png'.format(path,tgt))
#_plt.show()
return
#################################################
#################################################
#################################################
def sortLog(filename):
""" Sort the *.out file """
f0 = open(filename)
lines = f0.readlines()
f0.close()
log = _np.loadtxt(filename, dtype=str)
log = log[log[:,0].argsort()]
fmt = '%12s %7s %1s %5s %5s %6s %5s %6s %6s %6s %5s %5s'
_np.savetxt(filename.replace('.log','.txt'), log, fmt=fmt, header=lines[0])
return
#################################################
#################################################
#################################################
def filtra_obs(n,obs):
""" ### FILTER OBSERV. ### """
nobs = [ ]
for i in range(len(obs)):
if obs[i][5]/obs[i][3] > n:
nobs = nobs+[obs[i]]
return _np.array(nobs)
#################################################
#################################################
#################################################
def filtraobs(data, r=20):
""" filtro! """
idx = data['P']/data['sigP'] > r
return data[idx]
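# Usage sketch (assuming `data` is a record array with 'P' and 'sigP' fields, as
# loaded in plotMagStar):
#   >>> data20 = filtraobs(data, r=20)  # keep only points with P/sigP > 20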
#################################################
#################################################
#################################################
def setCCD(fitsfile):
"""
Set CCD name in global variable 'ccd'.
The CCD name can be: 'ikon', 'ixon', '301' or
'ikon-14912' (the last one is the Ikon CCD with
Deep Depletion).
"""
global ccd
ccd = ''
if fitsfile != '':
try:
fits = _pyfits.open(fitsfile)
instrume = '{0}'.format(fits[0].header['SERNO'])
if instrume.find('4335') != -1:
ccd = 'ixon'
elif instrume.find('4269') != -1:
ccd = 'ixon'
elif instrume.lower().find('10127') != -1:
ccd = 'ikon'
elif instrume.lower().find('9867') != -1:
ccd = 'ikon'
elif instrume.lower().find('14912') != -1:
ccd = 'ikon-14912'
else:
ccd = ''
except:
pass
while ccd not in ('ikon', 'ixon', '301', '654', 'ikon-14912'):
ccd = input('Type the CCD name (301/654/ikon/ikon-14912/ixon): ')
# SEE ABOVE (2019)
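# Example calls (hypothetical file path; the serial numbers map to the CCDs above):
#   >>> setCCD('red/15out22/dsco/v/dsco_v_01.fits')  # reads SERNO from the header
#   >>> setCCD('')                                   # asks interactively for the CCD name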
#################################################
#################################################
#################################################
def splitData(night, path_raw='raw', path_red='red'):
"""
Split the raw files and reduced files for a night.
Parameters:
night: path to the night (this directory will be fully preserved)
path_raw: directory with the raw data of the nights
path_red: directory with the output files of reduction
"""
print('')
if not _os.path.exists(path_raw) or not _os.path.exists(path_red):
eprint('Error: Directory \'{0}\' and/or \'{1}\' doesn\'t exist!\n'.format(path_raw, path_red))
return 1
elif not _os.path.exists(night):
eprint('Error: Directory \'{0}\' doesn\'t exist!\n'.format(night))
return 1
if night[len(night)-1] == '/':
night = night[:-1]
if night.find('/') == -1:
path = '.'
else:
path = _phc.trimpathname(night)[0]
night = _phc.trimpathname(night)[1]
#print path, night
    # Verify if the split directories already exist
if _os.path.exists('{0}/{1}'.format(path_raw, night)) or \
_os.path.exists('{0}/{1}'.format(path_red, night)):
while True:
verif = input('CAUTION: Directory \'{0}/{1}\' and/or \'{2}/{1}\' already exists! '.\
                      format(path_raw, night, path_red) + 'Are you sure you want to continue, ' + \
'overwriting all data inside these directories? (y/n): ')
print('')
if verif in ['y', 'Y']:
if _os.path.exists('{0}/{1}'.format(path_raw, night)):
try:
_shutil.rmtree('{0}/{1}'.format(path_raw, night))
except:
eprint('ERROR when deleting directory \'{0}/{1}\'. Check the permissions.\n'.format(path_raw, night))
return 2
if _os.path.exists('{0}/{1}'.format(path_red, night)):
try:
_shutil.rmtree('{0}/{1}'.format(path_red, night))
except:
eprint('ERROR when deleting directory \'{0}/{1}\'. Check the permissions.\n'.format(path_red, night))
return 2
break
elif verif in ['n', 'N']:
eprint('Aborted!\n')
return 1
else:
print('Value not valid!\n')
# Loop for splitting
for (direc, _, files) in _os.walk('{0}/{1}'.format(path, night)):
for f in files:
file_old = _os.path.join(direc, f)
            # If it is a raw file
if _re.search(r'[0-9][0-9].fits$', f) or _re.search(r'.doc$', f):
file_new = path_raw + '/' + _os.path.relpath(file_old, path)
            # If it is a file from the reduction
else:
file_new = path_red + '/' + _os.path.relpath(file_old, path)
# Create all subdirectories
if not _os.path.exists(_phc.trimpathname(file_new)[0]):
try:
_os.makedirs(_phc.trimpathname(file_new)[0])
except:
eprint('ERROR when copying files. Check the permissions to directories' + \
' \'{0}\' and \'{1}\'\n'.format(path_raw, path_red))
return 2
_shutil.copy2(file_old, file_new)
print('Done!\n')
return 0
#################################################
#################################################
#################################################
def splitData301(night, path_raw='raw', path_red='red'):
"""
    Split the raw files and reduced files for a night, renaming them according
    to the CCD iXon nomenclature.
Parameters:
night: path to the night (this directory will be fully preserved)
path_raw: directory with the raw data of the nights
path_red: directory with the output files of reduction
"""
print('')
# Verify if raw and red directories exist
if not _os.path.exists(path_raw) or not _os.path.exists(path_red):
eprint('Error: Directory \'{0}\' and/or \'{1}\' doesn\'t exist!\n'.format(path_raw, path_red))
return 1
elif not _os.path.exists(night):
eprint('Error: Directory \'{0}\' doesn\'t exist!\n'.format(night))
return 1
if night[len(night)-1] == '/':
night = night[:-1]
if night.find('/') == -1:
path = '.'
else:
path = _phc.trimpathname(night)[0]
night = _phc.trimpathname(night)[1]
    # Verify if the split directories already exist
if _os.path.exists('{0}/{1}'.format(path_raw, night)) or \
_os.path.exists('{0}/{1}'.format(path_red, night)):
while True:
verif = input('CAUTION: Directory \'{0}/{1}\' and/or \'{2}/{1}\' already exists!\n'.\
                      format(path_raw, night, path_red) + 'Are you sure you want to continue, ' + \
'overwriting all data inside these directories? (y/n): ')
print('')
if verif in ['y', 'Y']:
if _os.path.exists('{0}/{1}'.format(path_raw, night)):
try:
_shutil.rmtree('{0}/{1}'.format(path_raw, night))
except:
eprint('ERROR when deleting directory \'{0}/{1}\'. Check the permissions.\n'.format(path_raw, night))
return 2
if _os.path.exists('{0}/{1}'.format(path_red, night)):
try:
_shutil.rmtree('{0}/{1}'.format(path_red, night))
except:
eprint('ERROR when deleting directory \'{0}/{1}\'. Check the permissions.\n'.format(path_red, night))
return 2
break
elif verif in ['n', 'N']:
eprint('Aborted!\n')
return 1
else:
print('Value not valid!\n')
# Loop for splitting
for (direc, _, files) in _os.walk('{0}/{1}'.format(path, night)):
jds = [f for f in files if _re.search(r'^jd[0-9]*', f)]
for f in files:
file_old = _os.path.join(direc, f)
# If filename is 'logfile'
if f == 'logfile':
file_new = path_red + '/' + _os.path.relpath(file_old, path)
# If file is inside dark/flat/bias path
elif file_old.find('/dark/') != -1 or file_old.find('/flat/') != -1 or \
file_old.find('/bias/') != -1:
if _re.search(r'.fits$', f):
file_new = path_red + '/' + _os.path.relpath(file_old, path)
else:
file_new = path_raw + '/' + _os.path.relpath(file_old, path)
# If filename is type p010001, fb01001, s001001, d001001, b01001
elif _re.search(r'p[0-9]*$', f):
file_new = path_raw + '/' + _os.path.relpath(file_old, path)
# The other cases
else:
                # Try to get the object and filter names (the file may not belong to any object)
                elem = list(filter(None, file_old.split('/{0}/'.format(night))[1].split('/')))
if len(elem) > 0:
obj = elem[0]
if len(elem) > 1 and elem[1] in ('u','b','v','r','i'):
filt = elem[1]
else:
filt = ''
                # For sum*.dat files
if _re.search(r'^sum', f):
# print f
if filt == '':
                        eprint(('\nERROR: No filter was identified for file {0}.\nPut these files' +\
                            ' inside a subdir named with the filter name and run again!').format(file_old))
return 3
file_new = path_red + '/' + night + '/' + obj + '/sum_' + obj + '_' +\
filt + '_0' + f[-8:]
                # For w*.dat, w*.log, w*.out files
elif _re.search(r'\.dat|\.log|\.out$', f):
if filt == '':
                        eprint(('# ERROR: No filter was identified for file {0}.\nPut these files' +\
                            ' inside a subdir named with the filter letter and run again!').format(file_old))
return 3
file_new = path_red + '/' + night + '/' + obj + '/w' + obj + '_' +\
filt + '_' + f[1:]
                # For coord.*.ord files
elif _re.search(r'^coord', f):
if filt == '':
                        eprint(('# ERROR: No filter was identified for file {0}.\nPut these files' +\
                            ' inside a subdir named with the filter letter and run again!').format(file_old))
return 3
file_new = path_red + '/' + night + '/' + obj + '/coord_' + obj + '_' +\
filt + f[-6:]
                # For JD* files: skip here and concatenate them only at the end of this loop
elif _re.search(r'^jd[0-9]*', f):
continue
else:
file_new = path_red + '/' + _os.path.relpath(file_old, path)
# Create all subdirectories
if not _os.path.exists(_phc.trimpathname(file_new)[0]):
try:
_os.makedirs(_phc.trimpathname(file_new)[0])
except:
eprint('ERROR when copying files. Check the permissions to directories' + \
' \'{0}\' and \'{1}\'\n'.format(path_raw, path_red))
return 2
print('OLD:' + file_old)
print('NEW:' + file_new + '\n')
_shutil.copy2(file_old, file_new)
# Concatenate all JD files now
if len(jds) > 0:
jds.sort()
with open('{0}/{1}/{2}/JD_{3}_{4}'.format(path_red,night,obj,obj,filt), 'a') as f_out:
for f in jds:
with open('{0}/{1}'.format(direc,f), 'r') as f_in:
f_out.write(f_in.readline())
print('Done!\n')
return 0
#################################################
#################################################
#################################################
def graf_t(logfile, path2=None, vfilter=['no-std'], save=False, extens='pdf', grafs=['pv','pb/pi']):
"""
    Plot P_V x t, theta_V x t and P_B/P_I x t graphs for the
    Be star in the 'logfile' .log file (the output file from
    polt.genTarget). Propagates the error of the standard star.
    'extens' can be a list whose elements are all the formats
    to be saved.
    'grafs' is a list with the graphs to be plotted (top to bottom).
    Each element can be 'pu','pb','pv','pr','pi' for % of polarization,
    'thu','thb','thv','thr','thi' for polarization angle or
    'pb/pi' for the inclination (color) of the polarization (P_b/P_i).
    If 'no-std' is in vfilter, no data with the 'no-std' tag will be
    displayed, but the other filtered data will be shown
    with an 'x' symbol.
    If 'no-std' is not in vfilter, the data with 'no-std' will
    be displayed normally and the other filtered data will be
    shown with an 'x' symbol.
"""
###########
## FUNC
def polColor(dayp, jdp, p, s, dayp_filt, jdp_filt, p_filt, s_filt):
"""
Return pb/pi from input lists
"""
jd, pbpi, spbpi = [],[],[]
        pi = p[4][:] # A copy is needed here inside polColor!
for i in range(len(dayp[1])):
for j in range(len(dayp[4])):
# p[4][j] != 0 is to prevent division by 0.
if dayp[1][i] == dayp[4][j] and pi[j] != 0:
# print dayp[1][i], dayp[4][j]
jd += [(jdp[1][i] + jdp[4][j])/2]
pbpi += [p[1][i]/pi[j]]
spbpi += [pbpi[-1]*_np.sqrt((s[1][i]/p[1][i])**2 + (s[4][j]/pi[j])**2)]
# print pbpi[-1], p[1][i], p[4][j]
# print ''
                    # The line below prevents using the same point more than once
pi[j] = 0.
break
return jd, pbpi, spbpi
###########
###########
## FUNC
def plot(fig, axs):
"""
        Receive the figure and an axes list (one axes object per graph) and do the
        plots WITHOUT showing or saving the image.
"""
cm = _plt.cm.gist_rainbow # Setting the color map
factor=0.7 # Factor to fix the font sizes
try:
lines = _np.loadtxt(logfile, dtype=str)
except:
eprint('# ERROR: Can\'t read file {0}.'.format(logfile))
exit(1)
if type(lines[0]) != _np.ndarray and _np.size(lines) == 18:
lines = lines.reshape(-1,18)
# ax.set_title('{0} filter'.format(filt.upper()), fontsize=fonts[0]*factor, verticalalignment='bottom')
# ax.text(0.98, 0.9, '{0} filter'.format(filt.upper()), horizontalalignment='right', \
# verticalalignment='bottom', transform=ax.transAxes, fontsize=fonts[1]*factor)
dayp, jdp, p, s = [[],[],[],[],[]], [[],[],[],[],[]], [[],[],[],[],[]], [[],[],[],[],[]]
daythet, jdthet, thet, sthet = [[],[],[],[],[]], [[],[],[],[],[]], [[],[],[],[],[]], [[],[],[],[],[]]
dayp_filt, jdp_filt, p_filt, s_filt = [[],[],[],[],[]], [[],[],[],[],[]], [[],[],[],[],[]], [[],[],[],[],[]]
daythet_filt, jdthet_filt, thet_filt, sthet_filt = [[],[],[],[],[]], [[],[],[],[],[]], [[],[],[],[],[]], [[],[],[],[],[]]
image = []
limJD=[10000000000.,0]
limP=[100.,0.]
limTh=[180.,0.]
# 1) RECEIVING THE VALUES OF ALL POLARIZATIONS AND PB/PI
for line in lines:
if line[16] != 'E':
idx = filters.index(line[3][0])
# print('idx in: {}'.format(idx))
# Refresh the limits
if float(line[0]) < limJD[0]:
limJD[0] = float(line[0])
if float(line[0]) > limJD[1]:
limJD[1] = float(line[0])
# Not filtered data
if not any(sub in line[17] for sub in vfilter): # if sub != 'no-std'):
jdp[idx] += [float(line[0])]
dayp[idx] += [line[1]]
p[idx] += [float(line[8])]
# if idx==4:
# print(p[idx])
s[idx] += [float(line[12])]
if 'no-std' not in line[17]:
jdthet[idx] += [float(line[0])]
daythet[idx] += [line[1]]
thet[idx] += [float(line[11])]
sthet[idx] += [_np.sqrt(float(line[7])**2 + float(line[13])**2)]
# Filtered data
elif 'no-std' not in line[17]:
jdp_filt[idx] += [float(line[0])]
dayp_filt[idx] += [line[1]]
p_filt[idx] += [float(line[8])]
s_filt[idx] += [float(line[12])]
jdthet_filt[idx] += [float(line[0])]
daythet_filt[idx] += [line[1]]
thet_filt[idx] += [float(line[11])]
sthet_filt[idx] += [_np.sqrt(float(line[7])**2 + float(line[13])**2)]
# print(p[filters.index(filt)])
jdpbpi, pbpi, spbpi = polColor(dayp, jdp, p, s, dayp_filt, jdp_filt, p_filt, s_filt)
# print(p[filters.index(filt)])
# print jdp, p, s
# print jdp_filt, p_filt, s_filt
# 2) MAIN LOOP TO PLOT ALL SUBGRAPHS
for j,grafj in enumerate(grafs):
f2=''
if grafj == 'pb/pi':
o = 'p'
f1 = grafj[1]
f2 = grafj[4]
xxx = jdpbpi
yyy = pbpi
syyy = spbpi
xxx_filt, yyy_filt, syyy_filt = [],[],[]
# cor = jdpbpi
label = r'$P_{0}/P_{1}$'.format(f1.upper(),f2.upper())
elif _re.match('^p[ubvri]$', grafj) is not None:
o = 'p'
f1 = grafj[1]
xxx = jdp[filters.index(f1)]
yyy = p[filters.index(f1)]
syyy = s[filters.index(f1)]
xxx_filt = jdp_filt[filters.index(f1)]
yyy_filt = p_filt[filters.index(f1)]
syyy_filt = s_filt[filters.index(f1)]
# cor = jdp[filters.index(f1)]
label = r'$P_{0}$ (%)'.format(f1.upper())
if yyy+yyy_filt != []:
limm = [float(min(yyy+yyy_filt)),float(max(yyy+yyy_filt))]
if limm[0] < limP[0]:
limP[0] = limm[0]
if limm[1] > limP[1]:
limP[1] = limm[1]
elif _re.match('^th[ubvri]$', grafj) is not None:
o = 'th'
f1 = grafj[2]
xxx = jdthet[filters.index(f1)]
yyy = thet[filters.index(f1)]
syyy = sthet[filters.index(f1)]
xxx_filt = jdthet_filt[filters.index(f1)]
yyy_filt = thet_filt[filters.index(f1)]
syyy_filt = sthet_filt[filters.index(f1)]
# cor = jdthet[filters.index(f1)]
label = r'$\theta_{0}$ (%)'.format(f1.upper())
if yyy+yyy_filt != []:
limm = [float(min(yyy+yyy_filt)),float(max(yyy+yyy_filt))]
if limm[0] < limTh[0]:
limTh[0] = limm[0]
if limm[1] > limTh[1]:
limTh[1] = limm[1]
else:
eprint('# ERROR: parameter `grafs` is not valid!')
exit(1)
# print(grafj)
# print(f1, f2)
# print(filters.index(f1))
# print('???')
# print(xxx)
# print(yyy)
# print(syyy)
# print('???_FILT')
# print(xxx_filt)
# print(yyy_filt)
# print(syyy_filt)
# Plot data
if xxx != []:
if limJD[0] != limJD[1]:
col = _plt.cm.gist_rainbow([(jdi-limJD[0])/(limJD[1]-limJD[0]) for jdi in xxx])
image += [axs[j].scatter(xxx, yyy, marker='o', c=xxx, vmin=limJD[0], \
vmax=limJD[1], edgecolors='white', s=60, cmap=cm)]
else:
col = _plt.cm.gist_rainbow([0.5])
lixo = [axs[j].scatter(xxx, yyy, marker='o', c=col, \
edgecolors='white', s=60, cmap=cm)]
for i in range(len(xxx)):
axs[j].errorbar(xxx[i], yyy[i], yerr=syyy[i], linestyle='', \
elinewidth=0.6, marker='', c=col[i], alpha=0.7)
# Plot ignored data
if xxx_filt != []:
if limJD[0] != limJD[1]:
col = _plt.cm.gist_rainbow([(jdi-limJD[0])/(limJD[1]-limJD[0]) for jdi in xxx_filt])
image += [axs[j].scatter(xxx_filt, yyy_filt, marker='x', c=xxx_filt, vmin=limJD[0], \
vmax=limJD[1], s=50, linewidths=1.8, cmap=cm)]
else:
col = _plt.cm.gist_rainbow([0.5])
lixo = [axs[j].scatter(xxx_filt, yyy_filt, marker='x', c=col, \
s=50, linewidths=1.8, cmap=cm)]
for i in range(len(xxx_filt)):
axs[j].errorbar(xxx_filt[i], yyy_filt[i], yerr=syyy_filt[i], linestyle='', \
elinewidth=0.6, marker='', color=col[i], alpha=0.7)
# Setting the label
axs[j].set_ylabel(label, size=fonts[1]*factor)
axs[j].yaxis.label.set_fontsize(fonts[1]*factor)
for item in axs[j].get_yticklabels():
item.set_fontsize(fonts[2]*factor)
# Fix limits
# ax.autoscale(False)
# ax.plot(ax.get_xlim(), [0,0], 'k--')
# ax.plot([0,0], ax.get_ylim(), 'k--')
# Setting the xlabel
axs[-1].set_xlabel(r'MJD', size=fonts[1]*factor)
axs[-1].xaxis.label.set_fontsize(fonts[1]*factor)
for item in axs[-1].get_xticklabels():
item.set_fontsize(fonts[2]*factor)
        # Expand the ranges to avoid data points falling on the border of the graph
if limTh != []:
if limTh[0] < 2.:
limTh=[0,limTh[1]+2]
else:
limTh=[limTh[0]-2,limTh[1]+2]
if limP != []:
if limP[0] < .03:
limP=[0,limP[1]+.03]
else:
limP=[limP[0]-.03,limP[1]+.03]
if limJD != []:
            # fattor is a variable which depends on the range of MJD values.
if limJD[1]-limJD[0] < 2:
fattor = 2
else:
fattor = (limJD[1]-limJD[0])/25
if limJD[0] < 100.:
limJD=[0,limJD[1]+fattor]
else:
limJD=[limJD[0]-fattor,limJD[1]+fattor]
# print(image)
return [image], limJD, limP, limTh
###########
_plt.close('all')
nome = _phc.trimpathname(logfile)[1].split('.')[0].split('_')
star = nome[0]
if len(nome) > 1:
suffix = '_'+nome[1]
else:
suffix = ''
if star in _phc.bes:
be = _phc.bes[star]
else:
be = star
images, limJD, limP, limTh = [], [], [], []
if path2 == None or path2 == '.':
path2 = _os.getcwd()
# Verify if vfilter is a special filter
if vfilter in vfil.keys():
vfilter = vfil[vfilter]
elif type(vfilter) != list:
vfilter = []
# Generate the four axes (sorted as BVRI)
ngrafs = len(grafs)
if ngrafs == 0 or ngrafs > 10:
        eprint('ERROR: number of graphs in `grafs` variable must be between 1 and 10.')
exit(1)
# figuresize is proportional to the number of graphs
fig = _plt.figure(1,figsize=(9,len(grafs)*2))
fig.suptitle(be,fontsize=fonts[0])
axs = [_plt.subplot(ngrafs, 1, 1)]
p0, th0 = -1, -1
if _re.match('^p[ubvri]$', grafs[0]) is not None:
p0 = 0
elif _re.match('^th[ubvri]$', grafs[0]) is not None:
th0 = 0
elif grafs[0] != 'pb/pi':
eprint('# ERROR: parameter `grafs` is not valid!')
exit(1)
# Creating and sharing the axes
for i in range(1,ngrafs):
if grafs[i] == 'pb/pi':
axs += [_plt.subplot(ngrafs, 1, i+1, sharex=axs[0])]
elif _re.match('^p[ubvri]$', grafs[i]) is not None:
if p0 == -1:
axs += [_plt.subplot(ngrafs, 1, i+1, sharex=axs[0])]
p0 = i
else:
axs += [_plt.subplot(ngrafs, 1, i+1, sharex=axs[0], sharey=axs[p0])]
elif _re.match('^th[ubvri]$', grafs[i]) is not None:
if th0 == -1:
axs += [_plt.subplot(ngrafs, 1, i+1, sharex=axs[0])]
th0 = i
else:
axs += [_plt.subplot(ngrafs, 1, i+1, sharex=axs[0], sharey=axs[th0])]
else:
eprint('# ERROR: parameter `grafs` is not valid!')
exit(1)
# Fix the spacing among the subgraphs
_plt.subplots_adjust(hspace=0.08, wspace=0.06)
# Do the graphs
images, limJD, limP, limTh = plot(fig, axs)
# Setting the ranges
if limJD != []:
axs[0].set_xlim(limJD)
if p0 != -1 and limP != []:
axs[p0].set_ylim(limP)
if th0 != -1 and limTh != []:
axs[th0].set_ylim(limTh)
ndias = limJD[1]-limJD[0]
nanos = ndias/365
nmeses = nanos*12
print(limJD)
print(nanos)
print(nmeses)
print(ndias)
try:
if nanos >= 7:
axs[0] = _phc.civil_ticks(axs[0],civcfg=[int(nanos/10)+1,'y'],\
civdt=[_jdcal.jd2gcal(2400000.5,limJD[0])[0],1,1],label='%Y')
elif nanos >= 1:
axs[0] = _phc.civil_ticks(axs[0],civcfg=[int(nmeses/6),'m'],label='%Y/%m')
elif nmeses >= 8:
axs[0] = _phc.civil_ticks(axs[0],civcfg=[2,'m'],label='%Y/%m')
elif nmeses >= 4:
axs[0] = _phc.civil_ticks(axs[0],civcfg=[1,'m'],label='%Y/%m')
else:
axs[0] = _phc.civil_ticks(axs[0],civcfg=[int(ndias/4),'d'],label='%Y/%m/%d')
# elif nmeses < 1:
# print(ndias/4)
# axs[0] = _phc.civil_ticks(axs[0],civcfg=[int(ndias/4),'d'],label='%Y/%m/%d')
except:
        print('# WARNING: can\'t generate civil dates on the x axes.')
# Unset the ticklabels
for axi in axs[:-1]:
_plt.setp(axi.get_xticklabels(), visible=False)
axi.set_xlabel('')
fig.subplots_adjust(right=0.8)
# Plot colormap
# if images != [[]]:
# cax = fig.add_axes([0.85, 0.3, 0.02, 0.5])
# cb = _plt.colorbar(images[0][0], cax=cax, orientation='vertical')
# cb.set_label('MJD')
# cb.ColorbarBase(cax, orientation='vertical', cmap=_plt.cm.gist_rainbow)
# cb.set_ticklabels(range(int(limJD[0]),int(limJD[1]),50))
if save:
if type(extens) in (list, _np.ndarray):
for exi in extens:
_plt.savefig('{0}/{1}{2}_{3}.{4}'.format(path2,star,suffix,grafs,exi), bbox_inches='tight')
else:
_plt.savefig('{0}/{1}{2}_{3}.{4}'.format(path2,star,suffix,grafs,extens), bbox_inches='tight')
else:
_plt.show(block=False)
return
#################################################
#################################################
#################################################
def graf_qu(logfile, path2=None, mode=1, thetfile=None, isp=[], odr=True, mcmc=False, \
nn=[120, 200, 600], thet_ran=[0., 180.], b_ran=[-1., 1.], Pb_ran=[0., 1.], \
Yb_ran=[-1., 1.], Vb_ran=[0., 1.], clip=True, sclip=4.5, nmax=5, \
vfilter=['no-std'], save=False, extens='pdf', limQ=None, limU=None, limJD=None):
"""
Plot a QU diagram for the Be star in the logfile .log
file (the outfile from polt.genTarget) and fit a line
if specified. Propagates error of standard star.
ODR: dot-line
MCMC: continuous line
mode=1 plot BVRI graphs in the same figure + U in another;
mode=2 plot UBVRI graphs in separated figures.
INPUT
logfile: Logfile with QU data
path2: Path to save the output graphs
    mode: 1) Plot one figure for filter U and another for the
             BVRI filters; 2) Plot one figure per filter
    thetfile: thet_int.csv file (output from fs.genInt) used
              to plot the lines with the values inside.
              In this case, the mcmc variable does not matter
              in the run.
isp: interstellar polarization to plot direction
in QU diagram
odr: Run phc.fit_linear to fit a line?
mcmc: Run fitMCMCline to fit a line?
nn: fitMCMCline: [n_walkers, n_burnin, n_mcmc]
thet_ran: fitMCMCline: [thet_min, thet_max]
b_ran: fitMCMCline: [b_min, b_max]
Pb_ran: fitMCMCline: [Pb_min, Pb_max], with Pb_min >= 0
Yb_ran: fitMCMCline: [Yb_min, Yb_max]
Vb_ran: fitMCMCline: [Vb_min, Vb_max], with Vb_min >= 0
clip: phc.fit_linear: apply sigma clipping?
sclip: phc.fit_linear: sigma value to clip
nmax: phc.fit_linear: sigma clipping max number of
iterations
    vfilter: list of flags to filter (these will be marked with
             an 'x' symbol and won't be considered in the odr
             fitting). The observations with the 'no-std' flag
             are never shown. The mcmc fitting uses the filtered
             observations, except those with the 'no-std' flag.
save: Save the graphs? If False, just shows
extens: Extension for the graphs
OUTPUT
1) None if odr and mcmc are False
2) When mcmc==True:
[[[param_u], [sparam_+_u], [sparam_-_u], n_u, obj_u],
...
[[param_i], [sparam_+_i], [sparam_-_i], n_i, obj_i]],
where param, sparam_+ and sparam_- are arrays with
the five parameters for MCMC fitting (thet, b, Pb,
Yb and Vb) and its errors (at right and left), n is
the number of points and obj is one graphical dummy object.
3) When odr==True AND mcmc==False:
Idem, but param, sparam_+ and sparam_- are arrays with the
two parameters for ODR fitting (a, b) and its errors.
CAUTION:
       - The angle returned IS NOT the PA angle obtained from the slope,
         but the inclination angle of the fitted line itself! PA is this
         angle divided by 2.
       - The PA angle is undefined by a factor of +-90 degrees
       - This routine NEVER shows the data with the 'no-std' tag,
         regardless of the vfilter variable!
"""
import csv
def plotQU(filt, fig, ax, vfilter, odr_fit, mcmc_fit, limq=None, limu=None, limjd=None):
"""
Receive figure and axes objects and do the plot for filter
'filt' WITHOUT showing or saving the image.
limq=[qmin, qmax] and limu=[umin, umax] are lists with the min
and max limits for the axes. If not specified, they are
chosen automatically.
Return [param, sparam_+, sparam_-, n, image]
param, sparam_+ and sparam_- are lists when two peaks are selected
from the chi2 distribution.
"""
# Setting the color map
cm = _plt.cm.gist_rainbow
# Factor to fix the font sizes
if mode==1:
factor=0.7
else:
factor=1.
try:
lines = _np.loadtxt(logfile, dtype=str)
except:
eprint('# ERROR: Can\'t read file {0}.'.format(logfile))
exit(1)
# ax.set_title('{0} filter'.format(filt.upper()), fontsize=fonts[0]*factor, verticalalignment='bottom')
ax.text(0.98, 0.9, '{0} filter'.format(filt.upper()), horizontalalignment='right', \
verticalalignment='bottom', transform=ax.transAxes, fontsize=fonts[1]*factor)
ax.set_xlabel(r'Q (%)', size=fonts[1]*factor)
ax.set_ylabel(r'U (%)', size=fonts[1]*factor)
if type(lines[0]) != _np.ndarray and _np.size(lines) == 18:
lines = lines.reshape(-1,18)
JD, p, q, u, s, thet, sdth = [],[],[],[],[],[],[]
JD_filt, p_filt, q_filt, u_filt, s_filt, thet_filt, sdth_filt = [],[],[],[],[],[],[]
sq, su, sq_filt, su_filt = [],[],[],[]
image = []
# Getting the values of the points and filtered points
for line in lines:
if line[3][0] == filt and line[16] != 'E' and not any(sub in line[17] for sub in vfilter):
JD += [float(line[0])]
p += [float(line[8])]
q += [float(line[9])]
u += [float(line[10])]
thet += [float(line[11])]
s += [float(line[12])]
sdth += [float(line[7])]
elif line[3][0] == filt and line[16] != 'E' and 'no-std' not in line[17]:
JD_filt += [float(line[0])]
p_filt += [float(line[8])]
q_filt += [float(line[9])]
u_filt += [float(line[10])]
thet_filt += [float(line[11])]
s_filt += [float(line[12])]
sdth_filt += [float(line[7])]
# Propagate errors
lixo, sq, su = propQU(p, thet, s, sdth)
lixo, sq_filt, su_filt = propQU(p_filt, thet_filt, s_filt, sdth_filt)
# print 'original:'
# print q
# print q_filt
# If some valid data was found
if [q, u] != [[],[]]:
# Fitting and plotting by least squares (at least 2 points must be found)
if odr_fit and len(q) > 1:
print('='*60)
print('='*6 + ' FILTER ' + filt.upper())
print('='*60)
print('')
tht, stht, param, sparam1, num = fitodr(q,u,sq,su,JD,q_filt,u_filt,sq_filt,su_filt,JD_filt,filt)
sparam2 = sparam1
delt = (max(q+q_filt)-min(q+q_filt))/8
xadj = _np.linspace(min(q+q_filt)-delt,max(q+q_filt)+delt,3)
yadj = param[0]*xadj+param[1]
ax.plot(xadj, yadj, ':', color='dimgray', linewidth=1.7*factor, label='odr')
# Fitting and plotting by MCMC
if mcmc_fit or thetfile != None:
if thetfile != None:
param, sparam1, sparam2, n = [],[],[], 0
if _os.path.exists(thetfile):
fr = open(thetfile, 'r')
csvread = csv.reader(fr, delimiter=';')#, quoting=csv.QUOTE_NONE, quotechar='')
for i, line in enumerate(csvread):
if line[0] == star:
# The variables below contain information about the column numbers where
# the filter 'filt' begins inside the thetfile
posfilt = 2 + filters.index(filt)*4
posfilt2 = 26 + filters.index(filt)*3
param = [[float(line[posfilt])*2,float(line[posfilt2]),0,0,0]]
sparam1 = [[float(line[posfilt+1])*2,float(line[posfilt2+1]),0,0,0]]
sparam2 = [[float(line[posfilt+2])*2,float(line[posfilt2+2]),0,0,0]]
# If a second solution exists
# print line[41:]
if filt in line[41:]:
posfilt = 41 + line[41:].index(filt)
param = [param[0]] + [[float(line[posfilt+1])*2,float(line[posfilt+4]),0,0,0]]
sparam1 = [sparam1[0]] + [[float(line[posfilt+2])*2,float(line[posfilt+5]),0,0,0]]
sparam2 = [sparam2[0]] + [[float(line[posfilt+3])*2,float(line[posfilt+6]),0,0,0]]
if param == []:
print('# WARNING: No star named {0} found inside the thetfile. The lines will not be plotted.'.format(be))
else:
print('# WARNING: No thetfile found. The lines will not be plotted.')
else:
if not odr_fit or len(q) <= 1:
print('='*60)
print('='*6 + ' FILTER ' + filt.upper())
print('='*60)
print('')
param, sparam1, sparam2 = fitmcmc(q+q_filt,u+u_filt,sq+sq_filt,su+su_filt,filt)
num=len(q+q_filt)
# print q, q_filt
# print q+q_filt
# print param
# Only plot the curve when the number of points is > 1
if len(q+q_filt) > 1:
# print 'new', param
delt = (max(q+q_filt)-min(q+q_filt))/8
xadj = _np.linspace(min(q+q_filt)-delt,max(q+q_filt)+delt,3)
for i,parami in enumerate(param):
# print parami
b0 = parami[1]/_np.cos(parami[0]*_np.pi/180)
a0 = _np.tan(parami[0]*_np.pi/180)
yadj = a0*xadj+b0
# print xadj, yadj
if i==0:
ax.plot(xadj, yadj, '-', color='dimgray', linewidth=1.7*factor, label='mcmc')
else:
ax.plot(xadj, yadj, '-.', color='dimgray', linewidth=1.7*factor, label='mcmc')
# Reshape the lists when there is just one peak selected from mcmc.
if len(param) == 1:
param = param[0]
sparam1 = sparam1[0]
sparam2 = sparam2[0]
elif not odr or len(q) == 1:
param, sparam1, sparam2 = [], [], []
num = 0
# Specify the colors according to the JD if limjd is None
if limjd == None or limjd == []:
limjd = [min(JD+JD_filt), max(JD+JD_filt)]
# Plot data
if len(q) > 0:
# image += [ax.scatter(pts[0], pts[1], marker='o', edgecolors=colors, facecolors=colors, s=50)]
if limjd[0] != limjd[1]:
col = _plt.cm.gist_rainbow([(jdi-limjd[0])/(limjd[1]-limjd[0]) for jdi in JD])
image += [ax.scatter(q, u, marker='o', c=JD, vmin=limjd[0], \
vmax=limjd[1], edgecolors='white', s=72*factor, cmap=cm)]
else:
col = _plt.cm.gist_rainbow([0.5])
lixo = ax.scatter(q, u, marker='o', c=col, \
edgecolors='white', s=72*factor, cmap=cm)
for i in range(len(q)):
ax.errorbar(q[i], u[i], xerr=sq[i], yerr=su[i], linestyle='', \
elinewidth=1.05*factor, capsize=4*factor, marker='', c=col[i], alpha=0.7)
# Plot ignored data
if len(q_filt) > 0:
# image += [ax.scatter(pts_filt[0], pts_filt[1], marker='x', edgecolors=colors_filt, facecolors=colors_filt, s=60, linewidths=2)]
# print "Entrou IF"
if limjd[0] != limjd[1]:
col = _plt.cm.gist_rainbow([(jdi-limjd[0])/(limjd[1]-limjd[0]) for jdi in JD_filt])
image += [ax.scatter(q_filt, u_filt, marker='x', c=JD_filt, \
vmin=limjd[0], vmax=limjd[1], s=72*factor, linewidths=1.4, cmap=cm)]
else:
col = _plt.cm.gist_rainbow([0.5])
lixo = ax.scatter(q_filt, u_filt, marker='x', c=col, \
s=72*factor, linewidths=1.4, cmap=cm)
for i in range(len(q_filt)):
ax.errorbar(q_filt[i], u_filt[i], xerr=sq_filt[i], yerr=su_filt[i], \
linestyle='', elinewidth=1.05*factor, capsize=4*factor, marker='', c=col[i], alpha=0.7)
# If mode==2, print the colorbar here
if mode == 2:
if image != []:
fig.subplots_adjust(right=0.8)
cax = fig.add_axes([0.85, 0.3, 0.02, 0.5])
cb = _plt.colorbar(image[0], cax=cax, orientation='vertical')
cb.set_label('MJD')
else:
param, sparam1, sparam2 = [], [], []
num = 0
image = []
# Fix limits
ax.autoscale(False)
if limq != None and limq != []:
ax.set_xlim(limq)
if limu != None and limu != []:
ax.set_ylim(limu)
ax.plot(ax.get_xlim(), [0,0], 'k--')
ax.plot([0,0], ax.get_ylim(), 'k--')
# Plot the directions of the ISP
for ispi in isp:
# delt = (max(q+q_filt)-min(q+q_filt))/8
if ((2*ispi > 270 and 2*ispi <= 360) or (2*ispi< 90 and 2*ispi>= 0)):
if ax.get_xlim()[1]>0:
xisp = _np.array([0,ax.get_xlim()[1]])
else:
print('# WARNING: Angle {0} degrees is not displayed inside the graph area.'.format(ispi))
continue
elif (2*ispi > 90 and 2*ispi < 270):
if ax.get_xlim()[0]<0:
xisp = _np.array([ax.get_xlim()[0],0])
else:
print('# WARNING: Angle {0} degrees is not displayed inside the graph area.'.format(ispi))
continue
else:
print('# WARNING: This routine cannot plot the ISP at an angle of {0} degrees.'.format(ispi))
continue
yisp = _np.tan(2*ispi*_np.pi/180)*xisp
ax.plot(xisp, yisp, ':', color='red', linewidth=1.7*factor)
# Setting sizes
ax.xaxis.label.set_fontsize(fonts[1]*factor)
ax.yaxis.label.set_fontsize(fonts[1]*factor)
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(fonts[2]*factor)
return [param, sparam1, sparam2, num, image]
def fitodr(q,u,sq,su,JD,q_filt,u_filt,sq_filt,su_filt,JD_filt,filt):
"""
Fit ODR
"""
jd, jd_filt, pts, spts, pts_filt, spts_filt = [],[],[[],[]],[[],[]],[[],[]],[[],[]]
jd_filt = JD_filt[:]
pts_filt[0] = q_filt[:]
pts_filt[1] = u_filt[:]
spts_filt[0] = sq_filt[:]
spts_filt[1] = su_filt[:]
# Fit the simple least squares (only considering y errors) to find
# initial parameters for the next fit
param0, cov = _curve_fit(lambda x,a,b: a*x + b, q, u, sigma=su)
sparam0 = [_np.sqrt(cov[0][0]), _np.sqrt(cov[1][1])]
tht0 = _np.arctan(param0[0])*180/_np.pi
stht0 = (180*sparam0[0])/(_np.pi*(param0[0]**2+1))
# Fit by the total least squares method (orthogonal distance regression) with clipping
param, sparam, cov, chi2, niter,bolfilt = _phc.fit_linear(q, u, sq, su, param0=param0,
clip=clip, sclip=sclip, nmax=nmax)
tht = _np.arctan(param[0])*180/_np.pi
stht = (180*sparam[0])/(_np.pi*(param[0]**2+1))
# Splitting the data into the selected data and the filtered by the clipping
for i in range(len(q)):
if bolfilt[i] == 1:
jd += [JD[i]]
pts[0] += [q[i]]
pts[1] += [u[i]]
spts[0] += [sq[i]]
spts[1] += [su[i]]
else:
jd_filt += [JD[i]]
pts_filt[0] += [q[i]]
pts_filt[1] += [u[i]]
spts_filt[0] += [sq[i]]
spts_filt[1] += [su[i]]
# Calculate the reduced chi-squared
if len(pts[0]) > 2:
rchi2 = chi2[0]/(len(pts[0])-2)
else:
rchi2 = 0
# Print information only if mcmc==False (to avoid printing it twice)
if not mcmc:
print(55*'-')
print(' Total least squares fit (y = a*x+b):')
print(55*'-')
print(' a = {0:.3f} +- {1:.3f}'.format(param[0], sparam[0]))
print(' b = {0:.3f} +- {1:.3f}'.format(param[1], sparam[1]))
print(' theta = {0:.2f} +- {1:.2f} (+- n*90)'.format(tht, stht))
print(' N = {0:d}'.format(len(pts[0])))
print('')
print(' red chi^2 = {0:2f}'.format(rchi2))
print(55*'-')
print('')
return tht, stht, param, sparam, len(pts[0])
def fitmcmc(q, u, sq, su, filt):
"""
Fit MCMC.
The returned variables are lists.
"""
dictvar = ['theta','b', 'P_b', 'Y_b', 'V_b']
ranges = [thet_ran, b_ran, Pb_ran, Yb_ran, Vb_ran]
opt2 = ''
while True:
thet_mcmc, b_mcmc, Pb_mcmc, Yb_mcmc, Vb_mcmc, fig1, fig2 = fitMCMCline(q, u, sq, su, \
star=star+'_'+filt, plot_adj=True, margin=True, n_burnin=nn[1], \
n_mcmc=nn[2], n_walkers=nn[0], thet_ran=ranges[0], \
b_ran=ranges[1], Pb_ran=ranges[2], Yb_ran=ranges[3], \
Vb_ran=ranges[4], extens=extens)
if opt2 in ('y','Y'):
param = [param[0]] + [[thet_mcmc[0], b_mcmc[0], Pb_mcmc[0], Yb_mcmc[0], Vb_mcmc[0]]]
sparam1 = [sparam1[0]] + [[thet_mcmc[1], b_mcmc[1], Pb_mcmc[1], Yb_mcmc[1], Vb_mcmc[1]]]
sparam2 = [sparam2[0]] + [[thet_mcmc[2], b_mcmc[2], Pb_mcmc[2], Yb_mcmc[2], Vb_mcmc[2]]]
else:
param = [[thet_mcmc[0], b_mcmc[0], Pb_mcmc[0], Yb_mcmc[0], Vb_mcmc[0]]]
sparam1 = [[thet_mcmc[1], b_mcmc[1], Pb_mcmc[1], Yb_mcmc[1], Vb_mcmc[1]]]
sparam2 = [[thet_mcmc[2], b_mcmc[2], Pb_mcmc[2], Yb_mcmc[2], Vb_mcmc[2]]]
# NEW: Ask whether the user wants to use another interval
opt = ''
while opt not in ('y','Y','n','N'):
print('Do you want to prune the limits and run MCMC again?')
opt = input('(y/n): ')
if opt in ('y','Y'):
while True:
ranges = [[],[],[],[],[]]
print('')
for i, var in enumerate(dictvar):
while True:
try:
etr = input('{0}: specify in format `{0}_min,{0}_max`: '.format(var))
# p_int = [float(ei)-params_fit[0] for ei in petr.split(',')]
ranges[i] = [float(ei) for ei in etr.split(',')]
if len(ranges[i]) == 2:
if ranges[i][1] > ranges[i][0]:
break
else:
print('Error: {0}_max must be greater than {0}_min!'.format(var))
else:
print('Invalid input!')
except:
print('Invalid input!')
opt = ''
while opt not in ('y','Y','n','N'):
print('\nIs it correct?')
for i, var in enumerate(dictvar):
print(' {0}_min,{0}_max: {1},{2}'.format(var, ranges[i][0], ranges[i][1]))
opt = input('(y/n): ' )
if opt in ('y','Y'):
_plt.close(fig1)
_plt.close(fig2)
break
else:
# To prevent a 'third' peak
if opt2 in ('y','Y'):
opt2 = 'N'
while opt2 not in ('y','Y','n','N'):
print('Do you want to select a second peak?')
opt2 = input('(y/n): ')
_plt.close(fig1)
_plt.close(fig2)
if opt2 in ('n','N'):
break
return param, sparam1, sparam2
def fixLimits():
"""
Find the min and max Q, U and JD values to set the limits of the plot
(excluding the observations in the U filter).
Return 3 lists: [qmin, qmax], [umin, umax], [JDmin, JDmax]
Return [],[],[] if there are no observations.
"""
try:
lines = _np.loadtxt(logfile, dtype=str)
except:
eprint('# ERROR: Can\'t read file {0}.'.format(logfile))
exit(1)
if type(lines[0]) != _np.ndarray and _np.size(lines) == 18:
lines = lines.reshape(-1,18)
q = [float(line[9]) for line in lines if line[3][0] != 'u' and line[16] != 'E' and 'no-std' not in line[17]]
# not any(sub in line[17] for sub in vfilter)]
u = [float(line[10]) for line in lines if line[3][0] != 'u' and line[16] != 'E' and 'no-std' not in line[17]]
# not any(sub in line[17] for sub in vfilter)]
JD = [float(line[0]) for line in lines if line[3][0] != 'u' and line[16] != 'E' and 'no-std' not in line[17]]
# not any(sub in line[17] for sub in vfilter)]
if q==[]:
return [],[],[]
# A scaled value to shift
deltq = (max(q)-min(q))/8
deltu = (max(u)-min(u))/8
return [min(q)-deltq, max(q)+deltq], [min(u)-deltu, max(u)+deltu], [min(JD), max(JD)]
if mcmc and type(extens) in [list, _np.ndarray]:
eprint('ERROR: the `extens` parameter CANNOT be a list if the `mcmc` parameter is set to True.')
exit(1)
_plt.close('all')
nome = _phc.trimpathname(logfile)[1].split('.')[0].split('_')
star = nome[0]
if len(nome) > 1:
suffix = '_'+nome[1]
else:
suffix = ''
if star in _phc.bes:
be = _phc.bes[star]
else:
be = star
arr, images = [],[]
if path2 == None or path2 == '.':
path2 = _os.getcwd()
# Check whether vfilter is the name of a special (predefined) filter set
if not isinstance(vfilter, list) and vfilter in vfil:
vfilter = vfil[vfilter]
elif not isinstance(vfilter, list):
vfilter = []
######
## 1) Mode 1 plots QU diagram for BVRI filters in the same image and for U in another
if mode==1:
### 1.1 Do the graph for U filter
fig = _plt.figure()
fig.suptitle(be,fontsize=fonts[0])
ax = _plt.subplot(1, 1, 1)
arr += [plotQU('u', fig, ax, vfilter, odr, mcmc)]
# _plt.close(fig_aux)
if save:
if type(extens) in (list, _np.ndarray):
for exi in extens:
fig.savefig('{0}/{1}_qu_u{2}.{3}'.format(path2,star,suffix,exi), bbox_inches='tight')
else:
fig.savefig('{0}/{1}_qu_u{2}.{3}'.format(path2,star,suffix,extens), bbox_inches='tight')
# _plt.close(fig)
else:
fig.show()
if odr:
print('\n')
# Generate the four axes (sorted as BVRI)
fig = _plt.figure()
axs = [_plt.subplot(2, 2, 1)]
axs += [_plt.subplot(2, 2, 2, sharey=axs[0])]
axs += [_plt.subplot(2, 2, 3, sharex=axs[0])]
axs += [_plt.subplot(2, 2, 4, sharex=axs[1], sharey=axs[2])]
for ax in axs:
# ax.locator_params(axis='x', nbins=6)
xloc = _plt.MaxNLocator(6)
ax.xaxis.set_major_locator(xloc)
# Fix the spacing among the subgraphs and set the QU limits
_plt.subplots_adjust(hspace=0.05, wspace=0.05)
limq, limu, limjd = fixLimits()
if limQ != None: limq=limQ
if limU != None: limu=limU
if limJD != None: limjd=limJD
### 1.2 Do the graphs for BVRI
nax = 0
for filt in ('b','v','r','i'):
arr += [plotQU(filt, fig, axs[nax], vfilter, odr, mcmc, limq=limq, limu=limu, limjd=limjd)]
nax += 1
if arr[-1][-1] != []:
images += [arr[-1][-1]]
if odr:
print('\n')
# Unset the ticklabels
_plt.setp(axs[0].get_xticklabels(), visible=False)
_plt.setp(axs[1].get_xticklabels(), visible=False)
_plt.setp(axs[1].get_yticklabels(), visible=False)
_plt.setp(axs[3].get_yticklabels(), visible=False)
axs[0].set_xlabel('')
axs[1].set_xlabel('')
axs[1].set_ylabel('')
axs[3].set_ylabel('')
fig.subplots_adjust(right=0.8)
# Plot colormap
if images != []:
cax = fig.add_axes([0.85, 0.3, 0.02, 0.5])
cb = _plt.colorbar(images[0][0], cax=cax, orientation='vertical')
cb.set_label('MJD')
# cb.ColorbarBase(cax, orientation='vertical', cmap=_plt.cm.gist_rainbow)
# cb.set_ticklabels(range(int(limjd[0]),int(limjd[1]),50))
if save:
if type(extens) in (list, _np.ndarray):
for exi in extens:
fig.savefig('{0}/{1}_qu{2}.{3}'.format(path2,star,suffix,exi), bbox_inches='tight')
else:
fig.savefig('{0}/{1}_qu{2}.{3}'.format(path2,star,suffix,extens), bbox_inches='tight')
# _plt.close(fig)
else:
fig.show()
######
## 2) Mode 2 plots QU diagram for UBVRI filters in different images
elif mode==2:
for filt in ('u','b','v','r','i'):
fig = _plt.figure()
ax = _plt.subplot(1, 1, 1)
arr += [plotQU(filt, fig, ax, vfilter, odr, mcmc, limq=limQ, limu=limU, limjd=limJD)]
# _plt.close(fig_aux)
if save:
if type(extens) in (list, _np.ndarray):
for exi in extens:
fig.savefig('{0}/{1}_qu_{2}{3}.{4}'.format(path2,star,filt,suffix,exi), bbox_inches='tight')
else:
fig.savefig('{0}/{1}_qu_{2}{3}.{4}'.format(path2,star,filt,suffix,extens), bbox_inches='tight')
# _plt.close(fig)
else:
_plt.show()
if odr or mcmc or thetfile != None:
return arr
else:
return
def sintLeff(ccdn='ixon', step=5., save=True, extens='pdf'):
"""
Synthesizes the response curve, considering the
CCD Quantum Efficiency (QE) and filter transmittance
from OPD and the stellar models from Pickles (1998).
Interpolations are made using cubic splines.
This code does NOT use any curve for sky transmittance!
Creates two data files:
leff_stars_[ccdn].dat : table with l_eff calculated
for each star type
leff_[ccdn].dat : table with the parameters for the
adjusted cubic function for l_eff(b-v)
(or l_eff(u-b) for the U filter). The
M-type stars were excluded from the
fits because the molecular lines
were affecting these curves.
'ccdn': CCD to use in QE curve
ixon: CON, Frame Transfer
ikon: High Sensitive, Frame Transfer
ikon-14912: High Sensitive, Frame Transfer
301: not available
Any new CCD file must sample the QE at least
inside the range [2800, 11000] Angstrom.
'step': step, in angstrom, used for the integration
(Simpson's method) to calculate lambda_eff.
Allowed values are 5, 10, 15, ..., 500.
FORMULAS:
The lbd_eff is computed as **the flux-weighted mean
wavelength in terms of photons**:
lbd_eff = \int(lbd*phi(lbd) * d_lbd) / \int(phi(lbd) * d_lbd)
where phi(lbd) = lbd * F_lbd * QE(lbd) * T(lbd)
F_lbd: stellar flux in erg cm-2 s-1 A-1
QE(lbd): curve for quantum efficiency
T(lbd): curve for filter transmittance
d_lbd: infinitesimal element of wavelength
"""
# List the files for standard stars models
stars = _glob('{0}/stars/uk*.dat'.format(_hdtpath()))
# Open file with informations about the standard stars models
dstars = _np.loadtxt('{0}/stars/synphot.dat'.format(_hdtpath()),usecols=[4,6,7], dtype=str)
lbds = _np.arange(2800.,11000.001,step)
# Open file with CCD Quantum Efficience (QE)
try:
fqe = _np.loadtxt('{0}/refs/QE_{1}.dat'.format(_hdtpath(),ccdn),skiprows=1, dtype=float, unpack=True) # unpack is to get the transposed array
except:
eprint('ERROR: CCD name \'{0}\' not identified!'.format(ccdn))
exit(1)
if step not in range(5, 505, 5):
eprint('ERROR: step value not valid! Put some value among 5, 10, 15, ..., 500')
exit(1)
# Interpolate QE
qe = _interp1d(fqe[0], fqe[1], kind='cubic')
# Delete the old .dat files
if _os.path.exists('leff_stars_{0}.dat'.format(ccdn)):
_os.unlink('leff_stars_{0}.dat'.format(ccdn))
if _os.path.exists('leff_{0}.dat'.format(ccdn)):
_os.unlink('leff_{0}.dat'.format(ccdn))
with open('leff_stars_{0}.dat'.format(ccdn), 'w') as f0:
f0.write('{0:5s} {1:>6s} {2:>7s} {3:>7s} {4:>10s}\n'.format('#filt','stype','u-b','b-v','leff'))
with open('leff_{0}.dat'.format(ccdn), 'w') as f0:
f0.write('# For U filter: leff = l0 + k1*(u-b) + k2*(u-b)^2 + k3*(u-b)^3\n')
f0.write('# For BVRI filters: leff = l0 + k1*(b-v) + k2*(b-v)^2 + k3*(b-v)^3\n#\n')
f0.write(('{0:5s} {1:>8s} {2:>10s} {3:>8s} {4:>8s} {5:>7s} {6:>7s} {7:>7s}' +\
'{8:>7s} {9:>7s}\n').format('#filt','adj_col','l0','k1','k2','k3',\
'sl0','sk1','sk2','sk3'))
for filt in filters:
# Open file with Filter Transmittance
ftr = _np.loadtxt('{0}/filters/T{1}_POL.dat'.format(_hdtpath(),filt.upper()),skiprows=1)
# Interpolate Filter Transmittance
if filt=='u':
tr = _interp1d(_np.arange(2800., 11000.001, 50.), ftr, kind='cubic')
else:
tr = _interp1d(_np.arange(2800., 11000.001, 100.), ftr, kind='cubic')
for star in stars:
# Open file with flux for star model (genfromtxt allows skipping lines in both header and footer)
fspec = _np.genfromtxt(star, usecols=[0,1], unpack=True, skip_header=330, skip_footer=2800)
# Mount the array for star spectrum
spec = _np.array([], dtype=float)
for i in range(len(fspec[0])):
if fspec[0][i] in lbds:
spec = _np.append(spec, [fspec[1][i]])
# Interpolate the star spectrum
# spec = _interp1d(fspec[0], fspec[1], kind='cubic')
stype = star.split('/')[-1][2:-4].upper()
# read the color index
for di in dstars:
if di[0] == stype:
bv = float(di[2])
ub = float(di[1])-float(di[2])
break
# Convolve all the curves into the response function
resp = _np.array([], dtype=float)
for i in range(len(lbds)):
resp = _np.append(resp, [lbds[i]*spec[i]*tr(lbds[i])*qe(lbds[i])])
# Integrate response*lambda and response to compute lambda_eff
l_on = _simps(lbds*lbds*resp, lbds)
l_under = _simps(lbds*resp, lbds)
leff = l_on/l_under
line = '{0:5s} {1:>6s} {2:>7.3f} {3:>7.3f} {4:>10.3f}'.format(filt,stype,ub,bv,leff)
print(line)
with open('leff_stars_{0}.dat'.format(ccdn), 'a') as f0:
f0.write(line+'\n')
if False:
_plt.figure()
ax = _plt.axes()
_plt.xlabel(r'$\lambda\ (\AA)$', size=fonts[1])
_plt.ylabel(r'Curves', size=fonts[1])
_plt.plot(lbds,resp/max(resp), '-', c='black', label='Combined')
_plt.plot(lbds,spec/max(spec), 'r-.', label='{0} spec'.format(stype))
tr2 = [tr(lbd) for lbd in lbds]
_plt.plot(lbds,tr2/max(tr2), 'g--', label='Transm')
qe2 = [qe(lbd) for lbd in lbds]
_plt.plot(lbds,qe2/max(qe2), 'b--', label='QE')
_plt.autoscale(False)
_plt.ylim([-0.1,1.1])
_plt.plot([leff,leff], [-0.1,1.1], 'k--', label=r'$\lambda_{eff}$')
_plt.legend(loc='best', prop={'size':fonts[3]})
# Setting sizes
ax.xaxis.label.set_fontsize(fonts[1])
ax.yaxis.label.set_fontsize(fonts[1])
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(fonts[2])
_plt.savefig('{0}_{1}_{2}.{3}'.format(stype, ccdn, filt, extens), bbox_inches='tight')
# Once concluded, we need to compute lambda_eff as a function of u-b and b-v
print('\n\n\n# GENERAL FITTING\n')
leffs = _np.loadtxt('leff_stars_{0}.dat'.format(ccdn), dtype=str, unpack=True)
for filt in filters:
ub, bv, leff = _np.array([], dtype=float), _np.array([], dtype=float), _np.array([], dtype=float)
for i in range(len(leffs[0])):
# Excluding 'M' type because it is problematic, due to the molecular lines
if leffs[0][i] == filt and leffs[1][i][0] != 'M':
ub = _np.append(ub, [float(leffs[2][i])])
bv = _np.append(bv, [float(leffs[3][i])])
leff = _np.append(leff, [float(leffs[4][i])])
_plt.figure()
ax = _plt.axes()
_plt.xlabel(r'Color', size=fonts[1])
_plt.ylabel(r'$\lambda_{eff}\ (\AA)$', size=fonts[1])
_plt.plot(bv, leff, 'o', c='grey', label='B-V')
_plt.plot(ub, leff, 's', c='blue', label='U-B')
if filt == 'u':
color = ub
colorstr = 'U-B'
else:
color = bv
colorstr = 'B-V'
# Fit the lambdas/colors
param, cov = _curve_fit(lambda x,l0,k1,k2,k3: l0 + k1*x + k2*(x**2) + k3*(x**3), color, leff)
sparam = _np.array([_np.sqrt(cov[0][0]), _np.sqrt(cov[1][1]),_np.sqrt(cov[2][2]), _np.sqrt(cov[3][3])])
x = _np.linspace(-1,2,100)
y = param[0] + param[1]*x + param[2]*(x**2) + param[3]*(x**3)
_plt.plot(x, y, '--', c='black', label='{0} fit'.format(colorstr))
_plt.legend(loc='best', prop={'size':fonts[3]})
# Setting sizes
ax.xaxis.label.set_fontsize(fonts[1])
ax.yaxis.label.set_fontsize(fonts[1])
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(fonts[2])
line = ('{0:5s} {1:>8s} {2:>10.2f} {3:>8.2f} {4:>8.2f} {5:>7.2f} {6:>7.2f} {7:>7.2f}' +\
'{8:>7.2f} {9:>7.2f}').format(filt,colorstr,param[0],param[1],param[2],param[3],\
sparam[0],sparam[1],sparam[2],sparam[3])
print(line)
with open('leff_{0}.dat'.format(ccdn), 'a') as f0:
f0.write(line+'\n')
if save:
_plt.savefig('leff_{0}_{1}.{2}'.format(ccdn, filt, extens), bbox_inches='tight')
else:
_plt.show()
return
def lbds(color, filt, ccdn, airmass=1.3, skiperror=False):
"""
Return the lambda_eff in Angstrom for a star with
color index 'color', in filter 'filt', CCD 'ccdn'
and airmass 'airmass'. The default airmass of 1.3
is an average value.
If skiperror==True, try to use lambda_eff as the value
from phc.lbds[] in case of missing information for
'filt' or 'ccdn'.
CAUTION:
If filt=='u', the color must be U-B
Otherwise, the color must be B-V
FORMULAS:
- Atm. reddening: redn = 2.5*_np.log10(_np.e)*
*airmass*(taub-tauv)
- Redd. color: color_av = color + redn
- lambda_eff: l_eff = l0 + k1*color_av +
+ k2*(color_av**2) +
+ k3*(color_av**3)
where tauu, taub, tauv are the optical depths of the
atmosphere (values from Kepler de Oliveira
et al., Astronomia e Astrofisica).
"""
data = _np.loadtxt('{0}/filters/leff.dat'.format(_hdtpath()), dtype=str)
# Optical depth according to Kepler de Oliveira et al (Astronomia
# e Astrofisica), for altitudes above 2000 m
tauu=1.36
taub=0.52
tauv=0.37
if filt=='u':
redn = 2.5*_np.log10(_np.e)*airmass*(tauu-taub)
else:
redn = 2.5*_np.log10(_np.e)*airmass*(taub-tauv)
l0=0
for line in data:
if line[0] == filt and line[2] == ccdn:
l0 = float(line[3])
k1 = float(line[4])
k2 = float(line[5])
k3 = float(line[6])
break
if l0==0:
if skiperror:
leff = _phc.lbds[filt]
else:
eprint('# ERROR: parameters to calculate lambda_eff in filter {0} and CCD {1} not found.'.format(filt,ccdn))
exit(1)
else:
color = color + redn
leff = l0 + k1*color + k2*(color**2) + k3*(color**3)
return leff
#################################################
#################################################
#################################################
### MAIN ###
if __name__ == "__main__":
pass
def fitMCMCline(x, y, sx, sy, star='', margin=False, plot_adj=True, fig=None, ax=None, \
n_burnin=350, n_mcmc=600, \
n_walkers=120, thet_ran=[0., 180.], \
b_ran=[-1., 1.], Pb_ran=[0., 1.], \
Yb_ran=[-1., 1.], Vb_ran=[0., 1.], extens='pdf'):
"""
Fit a line, using Markov Chain Monte Carlo (through the
emcee code), to data with both x and y errors and with
bad points.
The model is the sum of two components:
a) a line model for the good points, y = ax + b, with
the data displaced orthogonally by a Gaussian displacement
(the covariance is assumed to be null!);
b) a Gaussian distribution orthogonal to the line for
the bad points, with amplitude, mean and variance equal
to Pb, Yb and Vb (see Hogg, Bovy and Lang,
``Data analysis recipes: Fitting a model to data'').
The MCMC does a ``mixture'' of both models for each
point, so it is not necessary to know beforehand which
points are good and which are bad. The five parameters to be
found are theta=arctan(a), b, Pb, Yb and Vb.
INPUT:
x/y/sx/sy: array/list with the data
star: star name to be printed in the graph and
its filename. If it is an empty string '', this
routine uses a random number to prevent
overwriting data.
margin: marginalize the corner graphs over Pb,
Yb, Vb (generating the graph only for
theta and b)?
plot_adj: show a plot of data+fit?
fig: Figure to append the plots for data+fit.
If None, a new figure is generated.
ax: Axes, as like fig above.
n_burnin: number of iterations for burning-in
n_mcmc: number of iterations to run emcee
n_walkers: number of walkers to map the posterior
probabilities.
thet_ran: [thet_min, thet_max]
b_ran: [b_min, b_max]
Pb_ran: [Pb_min, Pb_max], with Pb_min >= 0
Yb_ran: [Yb_min, Yb_max]
Vb_ran: [Vb_min, Vb_max], with Vb_min >= 0
extens: extension for the graph file
OUTPUT:
theta_fit: [theta, theta_+, theta_-], the theta value
and its errors (to the right and left of it).
theta is the median of the probability
distribution and theta_+, theta_- delimit the
range within which 68.3% of the points of
such distribution lie.
b*cos(theta): Idem, for b*cos(theta).
Pb: Idem, for Pb.
Yb: Idem, for Yb.
Vb: Idem, for Vb.
fig1: Figure pointer to the corner graph ([]
if show==False).
fig2: Figure pointer to the data+fit graph ([]
if plot_adj==False).
FORMULAS:
Supposing i as each data point, the log of Likelihood
function (L) takes form:
log(L) = sum(log(p_good_i+p_bad_i))
with
p_good_i = (1-Pb)/sqrt(2*pi*var_i)*
exp(-0.5*(disp_i**2/var_i)),
p_bad_i = Pb/sqrt(2*pi*(Vb+var_i))*
exp(-0.5*((disp_i-Yb)**2)/(Vb+var_i))
where disp_i is the total projection of the (x_i,y_i)
values over the line and var_i, the projected variance:
disp_i = v*Z_i -b cos(thet)
var_i = v*S_i*v
with
v = (-sin(theta), cos(theta)) (versor orthogonal
to the line)
Z_i = (x_i, y_i) (data point)
S_i = | sx_i^2 sxy_i^2| = |sx_i^2 0 | = (covariance
|syx_i^2 sy_i^2| | 0 sy_i^2| matrix)
"""
import emcee
import triangle.nov
from matplotlib.ticker import MaxNLocator
def lnprob(params, xx, yy, sxx, syy):
"""
Return the log of the posterior probability (p_pos) in
Bayesian statistics for the parameters 'params' and the
data points xx, yy, sxx and syy.
p_pos = L*p_prior (unless by a normalization constant),
where L is the likelihood function and p_prior is the
prior probability function.
a) Likelihood
In our case, for Gaussian and independent uncertainties
in both the x and y axes and with bad points:
log(L) = sum(log(p_good_i+p_bad_i))
with
p_good_i = (1-Pb)/sqrt(2*pi*var_i)*exp(-0.5*(disp_i**2/var_i)),
p_bad_i = Pb/sqrt(2*pi*(Vb+var_i))*exp(-0.5*((disp_i-Yb)**2)/(Vb+var_i))
where disp_i is the total projection of the (x_i,y_i) values
over the line and var_i, the projected variance; Pb, Yb, Vb are
the Gaussian model for the bad points - the amplitude, mean and
variance (see Hogg, Bovy and Lang, ``Data analysis recipes:
Fitting a model to data'')
Taking the model for the line y = ax + b, where a = tan(thet),
let
v = (-sin(thet), cos(thet)) (versor orthogonal to the line)
Z_i = (x_i, y_i)
S_i = | sx_i^2 sxy_i^2| (covariance matrix)
|syx_i^2 sy_i^2|
The formulas for disp_i and var_i are:
disp_i = v*Z_i -b cos(thet)
var_i = v*S_i*v
b) p_prior
Now, p_prior = constant for 'params' values inside the
range defined by 'intervalos'; otherwise, it is 0.
That is the only constraint we can impose.
So, p_pos = log(L), or -inf in case 'params' falls
outside the allowed range.
"""
thet, b, Pb, Yb, Vb = params
b0 = b/_np.cos(thet*_np.pi/180)
# Set prior ln prob
lnprior = 0
for i, interv in enumerate(intervalos):
if params[i] < interv[0] or params[i] > interv[1]:
lnprior = -_np.inf
# Return posterior prob
if not _np.isfinite(lnprior):
return -_np.inf
else:
sin = _np.sin(thet*_np.pi/180)
cos = _np.cos(thet*_np.pi/180)
# cov = sin*cos*(b0**2)
disp = -x*sin + y*cos - b
# var = (1-cov)*(sin*sxx)**2 + (1-1/cov)*(cos*syy)**2
# Projected variance WITHOUT covariance terms
var = (sin*sxx)**2 + (cos*syy)**2
prob_good = (1-Pb)/(_np.sqrt(2*_np.pi*var))*_np.exp(-0.5*(disp**2/var))
prob_bad = Pb/_np.sqrt(2*_np.pi*(Vb+var))*_np.exp(-0.5*((disp-Yb)**2)/(Vb+var))
# print '-'*40
# print prob_good, prob_bad, disp, var#, cov
for i in range(len(prob_good)):
if prob_good[i]+prob_bad[i] <= 0:
return -_np.inf
return lnprior + sum(_np.log(prob_good + prob_bad))
def run_emcee(sampler, p0):
"""
Run emcee.
p0 is the initial positions for the walkers
"""
print("Burning-in ...")
pos, prob, state = sampler.run_mcmc(p0, n_burnin)
sampler.reset()
print("Running MCMC ...")
pos, prob, state = sampler.run_mcmc(pos, n_mcmc, rstate0=state)
#~ Print out the mean acceptance fraction.
af = sampler.acceptance_fraction
print("Mean acceptance fraction:", _np.mean(af))
# Compute the results using all interval
fig1, thet_mcmc, b_mcmc, Pb_mcmc, Yb_mcmc, Vb_mcmc = gen_results(sampler, intervalos, save=True, show=True)
# Plot convergence map
plot_conv(sampler, [thet_mcmc[0], b_mcmc[0], Pb_mcmc[0], Yb_mcmc[0], Vb_mcmc[0]])
if plot_adj:
ax2.errorbar(x,y,xerr=sx,yerr=sy, linestyle='', marker='o')
if len(x) > 1:
xadj = _np.linspace(min(x),max(x),2)
yadj = podr[0]*xadj+podr[1]
ax2.plot(xadj, yadj, '-.', color='dimgray', label='odr')
xadj = _np.linspace(min(x),max(x),2)
b0 = b_mcmc[0]/_np.cos(thet_mcmc[0]*_np.pi/180)
a0 = _np.tan(thet_mcmc[0]*_np.pi/180)
yadj = a0*xadj+b0
ax2.plot(xadj, yadj, '-',color='dimgray', label='mcmc')
ax2.legend(loc='best')
fig2.show()
# NEW: Ask whether the user wants to use a specific interval
opt = ''
while opt not in ('y','Y','n','N'):
print('Do you want to select specific ranges to compute the values?')
opt = input('(y/n): ')
if opt in ('y','Y'):
while True:
ranges = [[],[],[],[],[]]
print('')
for i, var in enumerate(dictvar[0]):
while True:
try:
etr = input('{0}: specify in format `{0}_min,{0}_max`: '.format(var))
# p_int = [float(ei)-params_fit[0] for ei in petr.split(',')]
ranges[i] = [float(ei) for ei in etr.split(',')]
if len(ranges[i]) == 2:
if ranges[i][1] > ranges[i][0]:
break
else:
print('Error: {0}_max must be greater than {0}_min!'.format(var))
else:
print('Invalid input!')
except:
print('Invalid input!')
opt = ''
while opt not in ('y','Y','n','N'):
print('\nIs it correct?')
for i, var in enumerate(dictvar[0]):
print(' {0}_min,{0}_max: {1},{2}'.format(var, ranges[i][0], ranges[i][1]))
opt = input('(y/n): ' )
if opt in ('y','Y'):
print('')
break
_plt.close(fig1)
fig1, thet_mcmc, b_mcmc, Pb_mcmc, Yb_mcmc, Vb_mcmc = gen_results(sampler, ranges, save=True, show=True)
# else:
# _plt.close(fig2)
return fig1, thet_mcmc, b_mcmc, Pb_mcmc, Yb_mcmc, Vb_mcmc, opt
def gen_results(sampler, ranges, show=True, save=True):
# Read the sampler
samples = sampler.chain[:, :, :].reshape((-1, ndim))
samples_new = _np.empty(shape=[0, ndim])
# Filtering 'samples' array
print('Please wait, computing values from samples...')
new=False
for i, rang in enumerate(ranges):
if intervalos[i][0] != rang[0] or intervalos[i][1] != rang[1]:
new = True
break
if new:
for elem in samples:
ver = True
for i in range(ndim):
if elem[i] > ranges[i][1] or elem[i] < ranges[i][0]:
ver = False
break
if ver:
samples_new = _np.vstack([samples_new, elem])
else:
samples_new = samples
# Plotting corner graph
fig1 = triangle.nov.corner(samples_new, title=star, \
# truths=[p_mcmc[0], l_mcmc[0]], \
# extents=[(p_range[0],l_range[0]),(p_range[1],l_range[1])], \
quantiles=[0.16075, 0.50, 0.83925], \
labels=dictvar[1], \
verbose=False)
if save:
fig1.savefig('{0}_correl.{1}'.format(star,extens))
if show:
fig1.show()
else:
fig1 = []
if margin:
# Plotting corner graph
fig3 = triangle.nov.corner(samples_new[:,0:2], title=star, \
# truths=[p_mcmc[0], l_mcmc[0]], \
# extents=[(p_range[0],l_range[0]),(p_range[1],l_range[1])], \
quantiles=[0.16075, 0.50, 0.83925], \
labels=dictvar[1], \
verbose=False)
fig3.savefig('{0}_correl_m.{1}'.format(star,extens))
# Compute the medians and the errors according to the range around the median
# inside which 68.3% of the data lie
thet_mcmc, b_mcmc, Pb_mcmc, Yb_mcmc, Vb_mcmc = list(map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*_np.percentile(samples_new, [16.075, 50, 83.925], axis=0))))
#~ Print the output
""" TBD """
print('')
print(55*'-')
print(' 2) MCMC best values (y = tan(theta)*x + b*cos(theta)):')
print(55*'-')
print(' theta = {0:9.4f} +{1:.4f} -{2:.4f}'.format(thet_mcmc[0],thet_mcmc[1],thet_mcmc[2]))
print(' b*cos(theta) = {0:9.4f} +{1:.4f} -{2:.4f}'.format(b_mcmc[0],b_mcmc[1],b_mcmc[2]))
print(' Pb = {0:9.4f} +{1:.4f} -{2:.4f}'.format(Pb_mcmc[0],Pb_mcmc[1],Pb_mcmc[2]))
print(' Yb = {0:9.4f} +{1:.4f} -{2:.4f}'.format(Yb_mcmc[0],Yb_mcmc[1],Yb_mcmc[2]))
print(' Vb = {0:9.4f} +{1:.4f} -{2:.4f}'.format(Vb_mcmc[0],Vb_mcmc[1],Vb_mcmc[2]))
# print 'reduced chi2 = {0:.4f}'.format(chi)
print(55*'-')
print('')
return fig1, thet_mcmc, b_mcmc, Pb_mcmc, Yb_mcmc, Vb_mcmc
def plot_conv(sampler, param):
"""
Plot convergence map. 'param' are the values to be highlighted
"""
fig4, axes = _plt.subplots(ndim, 1, sharex=True, figsize=(8, 15))
for i in range(ndim):
axes[i].plot(sampler.chain[:, :, i].T, color="k", alpha=0.4)
axes[i].yaxis.set_major_locator(MaxNLocator(5))
axes[i].axhline(param[i], color="#888888", lw=2)
axes[i].set_ylabel(dictvar[1][i])
axes[4].set_xlabel("Step number")
fig4.tight_layout(h_pad=0.0)
fig4.savefig('{0}_conv.{1}'.format(star,extens))
return
def fitodr():
"""
Fit by Least Squares
"""
# Fit the simple least squares (only considering y errors) to find
# initial parameters for the next fit
param0, cov = _curve_fit(lambda t,a,b: a*t + b, x, y, sigma=sy)
# sparam0 = [_np.sqrt(cov[0][0]), _np.sqrt(cov[1][1])]
# tht0 = _np.arctan(param0[0])*180/_np.pi
# stht0 = (180*sparam0[0])/(_np.pi*(param0[0]**2+1))
# Fit by the total least squares method (orthogonal distance regression) with clipping
param, sparam, cov, chi2, niter,bolfilt = _phc.fit_linear(x, y, sx, sy, param0=param0,
clip=False)
tht = _np.arctan(param[0])*180/_np.pi
if tht < 0:
tht += 180.
elif tht >= 180:
tht -= 180
stht = (180*sparam[0])/(_np.pi*(param[0]**2+1))
nn = sum(bolfilt)
# Calculate the reduced chi-squared
if nn > 2:
rchi2 = chi2[0]/(nn-2)
else:
rchi2 = 0
# Print information
print(55*'-')
print(' 1) Total least squares fit (y = a*x+b):')
print(55*'-')
print(' a = {0:.3f} +- {1:.3f}'.format(param[0], sparam[0]))
print(' b = {0:.3f} +- {1:.3f}'.format(param[1], sparam[1]))
print(' theta = {0:.2f} +- {1:.2f}'.format(tht, stht))
print(' N = {0:d}'.format(nn))
print('')
print(' red chi^2 = {0:2f}'.format(rchi2))
print(55*'-')
print('')
return param, sparam
# Dictionary for the graph labels
dictvar = [['theta',
'b*cos(theta)',
'P_b',
'Y_b',
'V_b'],
[r'$\theta$',
r'$b\,\cos \theta $',
r'$P_b$',
r'$Y_b$',
r'$V_b$']]
# try:
# _plt.close(fig2)
# except:
# pass
# x = _np.array([201., 244., 47., 287., 203., 58., 210., 202., 198., 158., 165., 201., 157., 131., 166., 160., 186., 125., 218., 146.])
# y = _np.array([592., 401., 583., 402., 495., 173., 479., 504., 510., 416., 393., 442., 317., 311., 400., 337., 423., 334., 533., 344.])
# sx = _np.array([9.,4.,11.,7.,5.,9.,4.,4.,11.,7.,5.,5.,5.,6.,6.,5.,9.,8.,6.,5.])
# sy = _np.array([61.,25.,38.,15.,21.,15.,27.,14.,30.,16.,14.,25.,52.,16.,34.,31.,42.,26.,16.,22.])
# thet, b, Pb, Yb, Vb
# Setting parameters and limits
intervalos = _np.array([thet_ran, b_ran, Pb_ran, Yb_ran, Vb_ran])
ndim = 5
# Converting lists to np.array
if type(x) == list:
x = _np.array(x)
if type(y) == list:
y = _np.array(y)
if type(sx) == list:
sx = _np.array(sx)
if type(sy) == list:
sy = _np.array(sy)
# Fit by Least Squares
if len(x) > 1:
podr, spodr = fitodr()
if plot_adj:
if ax==None or fig==None:
fig2 = _plt.figure()
ax2 = _plt.subplot(1, 1, 1)
else:
fig2 = fig
ax2 = ax
# If 'star' was not specified, generate a random number to append to the graph name to be saved
if star == '':
star = 'rand' + str(int(_np.random.rand(1)[0]*10000))
# Define random values to be used as prior starting positions within the interval
p0 = _np.array( [_np.random.rand(ndim) for n in range(n_walkers)] )
for k in range(ndim):
p0[:,k] = intervalos[k][0]+p0[:,k]*(intervalos[k][1]-intervalos[k][0])
# Initialize the sampler and run mcmc
sampler = emcee.EnsembleSampler(n_walkers, ndim, lnprob, args=[x, y, sx, sy], a=3)#, threads=2)
fig1, thet_mcmc, b_mcmc, Pb_mcmc, Yb_mcmc, Vb_mcmc, opt = run_emcee(sampler, p0)
# Plot only if plot_adj==True and a new computation was done
if plot_adj and opt in ('y','Y'):
xadj = _np.linspace(min(x),max(x),2)
b0 = b_mcmc[0]/_np.cos(thet_mcmc[0]*_np.pi/180)
a0 = _np.tan(thet_mcmc[0]*_np.pi/180)
yadj = a0*xadj+b0
ax2.plot(xadj, yadj, '--',color='dimgray', label='mcmc_new')
ax2.legend(loc='best')
fig2.show()
elif not plot_adj:
fig2 = []
return thet_mcmc, b_mcmc, Pb_mcmc, Yb_mcmc, Vb_mcmc, fig1, fig2
|
dbednarski/pyhdust
|
pyhdust/poltools.py
|
Python
|
gpl-3.0
| 201,725
|
[
"Gaussian"
] |
1f70c6e4ca4bd07db633e9947133f70c5664d956bc92be63e113811814fb6857
|
"""
Note: This is a third-party package that was included
and adapted to my needs. I'm not the author.
For the original source, please visit:
https://django-channels-presence.readthedocs.io/en/latest/
"""
|
kkmsc17/smes
|
backend/channels_presence/__init__.py
|
Python
|
agpl-3.0
| 207
|
[
"VisIt"
] |
44e6e1444e37843673742e8c763fb4d010823c35fc8f468f8e9c14f5e07a42a3
|
"""gro.py: Used for loading Gromacs GRO files.
"""
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2015 Stanford University and the Authors
#
# Authors: Robert McGibbon, Lee-Ping Wang, Peter Eastman
# Contributors: Jason Swails
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
#
# Portions of this code originate from the OpenMM molecular simulation
# toolkit, copyright (c) 2012 Stanford University and the Authors. Those
# portions are distributed under the following terms:
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
##############################################################################
# Imports
##############################################################################
import os
import sys
import itertools
from re import sub, match, findall
# import element as elem
import numpy as np
import mdtraj as md
from mdtraj.utils import in_units_of, cast_indices, ensure_type
from mdtraj.formats import pdb
from mdtraj.core import element as elem
from mdtraj.formats.registry import FormatRegistry
##############################################################################
# Code
##############################################################################
@FormatRegistry.register_loader('.gro')
def load_gro(filename, stride=None, atom_indices=None, frame=None):
"""Load a GROMACS GRO file.
Parameters
----------
filename : str
Path to the GRO file on disk.
stride : int, default=None
Only read every stride-th model from the file
atom_indices : array_like, optional
If not none, then read only a subset of the atoms coordinates from the
file. These indices are zero-based.
frame : int, optional
Use this option to load only a single frame from a trajectory on disk.
If frame is None, the default, the entire trajectory will be loaded.
If supplied, ``stride`` will be ignored.
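Examples
--------
A minimal, illustrative call (the file name below is hypothetical):
>>> import mdtraj as md
>>> traj = md.load_gro('conf.gro')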
"""
from mdtraj.core.trajectory import _parse_topology, Trajectory
with GroTrajectoryFile(filename, 'r') as f:
topology = f.topology
if frame is not None:
f.seek(frame)
n_frames = 1
else:
n_frames = None
return f.read_as_traj(n_frames=n_frames, stride=stride,
atom_indices=atom_indices)
@FormatRegistry.register_fileobject('.gro')
class GroTrajectoryFile(object):
"""Interface for reading and writing to GROMACS GRO files.
Parameters
----------
filename : str
The filename to open. A path to a file on disk.
mode : {'r', 'w'}
The mode in which to open the file, either 'r' for read or 'w' for write.
force_overwrite : bool
If opened in write mode, and a file by the name of `filename` already
exists on disk, should we overwrite it?
Attributes
----------
n_atoms : int
The number of atoms in the file
topology : md.Topology
The topology. TODO(rmcgibbo) note about chain
See Also
--------
load_gro : High-level wrapper that returns a ``md.Trajectory``
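Examples
--------
A minimal, illustrative read (the file name below is hypothetical):
>>> with GroTrajectoryFile('conf.gro') as f:
...     xyz, time, box = f.read()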
"""
distance_unit = 'nanometers'
def __init__(self, filename, mode='r', force_overwrite=True):
self._open = False
self._file = None
self._mode = mode
if mode == 'r':
self._open = True
self._frame_index = 0
self._file = open(filename, 'r')
try:
self.n_atoms, self.topology = self._read_topology()
finally:
self._file.seek(0)
elif mode == 'w':
self._open = True
if os.path.exists(filename) and not force_overwrite:
raise IOError('"%s" already exists' % filename)
self._frame_index = 0
self._file = open(filename, 'w')
else:
raise ValueError("invalid mode: %s" % mode)
def write(self, coordinates, topology, time=None, unitcell_vectors=None,
precision=3):
"""Write one or more frames of a molecular dynamics trajectory to disk
in the GROMACS GRO format.
Parameters
----------
coordinates : np.ndarray, dtype=np.float32, shape=(n_frames, n_atoms, 3)
The cartesian coordinates of each atom, in units of nanometers.
topology : mdtraj.Topology
The Topology defining the model to write.
time : np.ndarray, dtype=float32, shape=(n_frames), optional
The simulation time corresponding to each frame, in picoseconds.
If not supplied, the numbers 0..n_frames will be written.
unitcell_vectors : np.ndarray, dtype=float32, shape=(n_frames, 3, 3), optional
The periodic box vectors of the simulation in each frame, in nanometers.
precision : int, optional
The number of decimal places to print for coordinates. Default is 3
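Examples
--------
An illustrative write (the file name is hypothetical; ``xyz`` and ``top`` are
assumed to be an existing coordinate array and Topology):
>>> with GroTrajectoryFile('out.gro', 'w') as f:
...     f.write(xyz, top)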
"""
if not self._open:
raise ValueError('I/O operation on closed file')
if not self._mode == 'w':
raise ValueError('file not opened for writing')
coordinates = ensure_type(coordinates, dtype=np.float32, ndim=3, name='coordinates', can_be_none=False, warn_on_cast=False)
time = ensure_type(time, dtype=float, ndim=1, name='time', can_be_none=True, shape=(len(coordinates),), warn_on_cast=False)
unitcell_vectors = ensure_type(unitcell_vectors, dtype=float, ndim=3, name='unitcell_vectors',
can_be_none=True, shape=(len(coordinates), 3, 3), warn_on_cast=False)
for i in range(coordinates.shape[0]):
frame_time = None if time is None else time[i]
frame_box = None if unitcell_vectors is None else unitcell_vectors[i]
self._write_frame(coordinates[i], topology, frame_time, frame_box, precision)
def read_as_traj(self, n_frames=None, stride=None, atom_indices=None):
"""Read a trajectory from a gro file
Parameters
----------
n_frames : int, optional
If positive, then read only the next `n_frames` frames. Otherwise read all
of the frames in the file.
stride : np.ndarray, optional
Read only every stride-th frame.
atom_indices : array_like, optional
If not none, then read only a subset of the atoms coordinates from the
file. This may be slightly slower than the standard read because it requires
an extra copy, but will save memory.
Returns
-------
trajectory : Trajectory
A trajectory object containing the loaded portion of the file.
"""
from mdtraj.core.trajectory import Trajectory
topology = self.topology
if atom_indices is not None:
topology = topology.subset(atom_indices)
coordinates, time, unitcell_vectors = self.read(stride=stride, atom_indices=atom_indices)
if len(coordinates) == 0:
return Trajectory(xyz=np.zeros((0, topology.n_atoms, 3)), topology=topology)
coordinates = in_units_of(coordinates, self.distance_unit, Trajectory._distance_unit, inplace=True)
unitcell_vectors = in_units_of(unitcell_vectors, self.distance_unit, Trajectory._distance_unit, inplace=True)
traj = Trajectory(xyz=coordinates, topology=topology, time=time)
traj.unitcell_vectors = unitcell_vectors
return traj
def read(self, n_frames=None, stride=None, atom_indices=None):
"""Read data from a molecular dynamics trajectory in the GROMACS GRO
format.
Parameters
----------
n_frames : int, optional
If n_frames is not None, the next n_frames of data from the file
will be read. Otherwise, all of the frames in the file will be read.
stride : int, optional
If stride is not None, read only every stride-th frame from disk.
atom_indices : np.ndarray, dtype=int, optional
The specific indices of the atoms you'd like to retrieve. If not
supplied, all of the atoms will be retrieved.
Returns
-------
coordinates : np.ndarray, shape=(n_frames, n_atoms, 3)
The cartesian coordinates of the atoms, in units of nanometers.
time : np.ndarray, None
The time corresponding to each frame, in units of picoseconds, or
None if no time information is present in the trajectory.
unitcell_vectors : np.ndarray, shape=(n_frames, 3, 3)
The box vectors in each frame, in units of nanometers
"""
if not self._open:
raise ValueError('I/O operation on closed file')
if not self._mode == 'r':
raise ValueError('file not opened for reading')
coordinates = []
unitcell_vectors = []
time = []
contains_time = True
atom_indices = cast_indices(atom_indices)
atom_slice = slice(None) if atom_indices is None else atom_indices
if n_frames is None:
frameiter = itertools.count()
else:
frameiter = range(n_frames)
for i in frameiter:
try:
frame_xyz, frame_box, frame_time = self._read_frame()
contains_time = contains_time and (frame_time is not None)
coordinates.append(frame_xyz[atom_slice])
unitcell_vectors.append(frame_box)
time.append(frame_time)
except StopIteration:
break
coordinates, unitcell_vectors, time = map(np.array, (coordinates, unitcell_vectors, time))
if not contains_time:
time = None
else:
time = time[::stride]
return coordinates[::stride], time, unitcell_vectors[::stride]
def _read_topology(self):
if not self._open:
raise ValueError('I/O operation on closed file')
if not self._mode == 'r':
raise ValueError('file not opened for reading')
pdb.PDBTrajectoryFile._loadNameReplacementTables()
n_atoms = None
topology = md.Topology()
chain = topology.add_chain()
residue = None
atomReplacements = {}
for ln, line in enumerate(self._file):
if ln == 1:
n_atoms = int(line.strip())
elif ln > 1 and ln < n_atoms + 2:
(thisresnum, thisresname, thisatomname, thisatomnum) = \
[line[i*5:i*5+5].strip() for i in range(4)]
thisresnum, thisatomnum = map(int, (thisresnum, thisatomnum))
if residue is None or residue.resSeq != thisresnum:
if thisresname in pdb.PDBTrajectoryFile._residueNameReplacements:
thisresname = pdb.PDBTrajectoryFile._residueNameReplacements[thisresname]
residue = topology.add_residue(thisresname, chain, resSeq=thisresnum)
if thisresname in pdb.PDBTrajectoryFile._atomNameReplacements:
atomReplacements = pdb.PDBTrajectoryFile._atomNameReplacements[thisresname]
else:
atomReplacements = {}
thiselem = thisatomname
if len(thiselem) > 1:
thiselem = thiselem[0] + sub('[A-Z0-9]','',thiselem[1:])
try:
element = elem.get_by_symbol(thiselem)
except KeyError:
element = elem.virtual
if thisatomname in atomReplacements:
thisatomname = atomReplacements[thisatomname]
topology.add_atom(thisatomname, element=element, residue=residue,
serial=thisatomnum)
topology.create_standard_bonds()
return n_atoms, topology
def _read_frame(self):
if not self._open:
raise ValueError('I/O operation on closed file')
if not self._mode == 'r':
raise ValueError('file not opened for reading')
atomcounter = itertools.count()
comment = None
boxvectors = None
topology = None
xyz = np.zeros((self.n_atoms, 3), dtype=np.float32)
got_line = False
firstDecimalPos = None
atomindex = -1
for ln, line in enumerate(self._file):
got_line = True
if ln == 0:
comment = line.strip()
continue
elif ln == 1:
assert self.n_atoms == int(line.strip())
continue
if firstDecimalPos is None:
try:
firstDecimalPos = line.index('.', 20)
secondDecimalPos = line.index('.', firstDecimalPos+1)
except ValueError:
firstDecimalPos = secondDecimalPos = None
crd = _parse_gro_coord(line, firstDecimalPos, secondDecimalPos)
if crd is not None and atomindex < self.n_atoms - 1:
atomindex = next(atomcounter)
xyz[atomindex, :] = (crd[0], crd[1], crd[2])
elif _is_gro_box(line) and ln == self.n_atoms + 2:
sline = line.split()
boxvectors = tuple([float(i) for i in sline])
# the gro_box line comes at the end of the record
break
else:
raise Exception("Unexpected line in .gro file: "+line)
if not got_line:
raise StopIteration()
time = None
if 't=' in comment:
# title string (free format string, optional time in ps after 't=')
time = float(findall('t= *(\d+\.\d+)',comment)[-1])
# box vectors (free format, space separated reals), values: v1(x) v2(y)
# v3(z) v1(y) v1(z) v2(x) v2(z) v3(x) v3(y), the last 6 values may be
# omitted (they will be set to zero).
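# Illustrative example (hypothetical values): a box line '5.0 5.0 5.0' yields
# box = [5,5,5,0,0,0,0,0,0] and hence a diagonal 5 nm unit cell matrix.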
box = [boxvectors[i] if i < len(boxvectors) else 0 for i in range(9)]
unitcell_vectors = np.array([
[box[0], box[3], box[4]],
[box[5], box[1], box[6]],
[box[7], box[8], box[2]]])
return xyz, unitcell_vectors, time
def _write_frame(self, coordinates, topology, time, box, precision):
comment = 'Generated with MDTraj'
if time is not None:
comment += ', t= %s' % time
varwidth = precision + 5
fmt = '%%5d%%-5s%%5s%%5d%%%d.%df%%%d.%df%%%d.%df' % (
varwidth, precision, varwidth, precision, varwidth, precision)
assert topology.n_atoms == coordinates.shape[0]
lines = [comment, ' %d' % topology.n_atoms]
if box is None:
box = np.zeros((3,3))
for i in range(topology.n_atoms):
atom = topology.atom(i)
residue = atom.residue
serial = atom.serial
if serial is None:
serial = atom.index
if serial >= 100000:
serial -= 100000
lines.append(fmt % (residue.resSeq, residue.name, atom.name, serial,
coordinates[i, 0], coordinates[i, 1], coordinates[i, 2]))
lines.append('%10.5f%10.5f%10.5f%10.5f%10.5f%10.5f%10.5f%10.5f%10.5f' % (
box[0,0], box[1,1], box[2,2],
box[0,1], box[0,2], box[1,0],
box[1,2], box[2,0], box[2,1]))
self._file.write('\n'.join(lines))
self._file.write('\n')
def seek(self, offset, whence=0):
"""Move to a new file position
Parameters
----------
offset : int
A number of frames.
whence : {0, 1, 2}
0: offset from start of file, offset should be >=0.
1: move relative to the current position, positive or negative
2: move relative to the end of file, offset should be <= 0.
Seeking beyond the end of a file is not supported
"""
raise NotImplementedError()
def tell(self):
"""Current file position
Returns
-------
offset : int
The current frame in the file.
"""
return self._frame_index
def close(self):
"Close the file"
if self._open:
self._file.close()
self._open = False
def __enter__(self):
"Support the context manager protocol"
return self
def __exit__(self, *exc_info):
"Support the context manager protocol"
self.close()
##############################################################################
# Utilities
##############################################################################
def _isint(word):
"""ONLY matches integers! If you have a decimal point? None shall pass!
@param[in] word String (for instance, '123', '153.0', '2.', '-354')
@return answer Boolean which specifies whether the string is an integer (only +/- sign followed by digits)
"""
return match('^[-+]?[0-9]+$',word)
def _isfloat(word):
"""Matches ANY number; it can be a decimal, scientific notation, what have you
CAUTION - this will also match an integer.
@param[in] word String (for instance, '123', '153.0', '2.', '-354')
@return answer Boolean which specifies whether the string is any number
"""
return match('^[-+]?[0-9]*\.?[0-9]*([eEdD][-+]?[0-9]+)?$',word)
def _parse_gro_coord(line, firstDecimal, secondDecimal):
""" Determines whether a line contains GROMACS data or not
@param[in] line The line to be tested
"""
if firstDecimal is None or secondDecimal is None:
return None
digits = secondDecimal - firstDecimal
try:
return tuple(float(line[20+i*digits:20+(i+1)*digits]) for i in range(3))
except ValueError:
return None
def _is_gro_box(line):
""" Determines whether a line contains a GROMACS box vector or not
@param[in] line The line to be tested
"""
sline = line.split()
if len(sline) == 9 and all([_isfloat(i) for i in sline]):
return 1
elif len(sline) == 3 and all([_isfloat(i) for i in sline]):
return 1
else:
return 0
|
mattwthompson/mdtraj
|
mdtraj/formats/gro.py
|
Python
|
lgpl-2.1
| 19,964
|
[
"Gromacs",
"MDTraj",
"OpenMM"
] |
1ed8eb26f53e52e2b27d50105000c94cc7a399889b6449bb61541c681d21b928
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for manipulating qualified names.
A qualified name is a uniform way to refer to simple (e.g. 'foo') and composite
(e.g. 'foo.bar') syntactic symbols.
This is *not* related to the __qualname__ attribute used by inspect, which
refers to scopes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
class CallerMustSetThis(object):
pass
class Symbol(collections.namedtuple('Symbol', ['name'])):
"""Represents a Python symbol."""
class Literal(collections.namedtuple('Literal', ['value'])):
"""Represents a Python numeric literal."""
def __str__(self):
if isinstance(self.value, str):
return "'{}'".format(self.value)
return str(self.value)
def __repr__(self):
return str(self)
# TODO(mdan): Use subclasses to remove the has_attr has_subscript booleans.
class QN(object):
"""Represents a qualified name."""
def __init__(self, base, attr=None, subscript=None):
if attr is not None and subscript is not None:
raise ValueError('A QN can only be either an attr or a subscript, not '
'both: attr={}, subscript={}.'.format(attr, subscript))
self._has_attr = False
self._has_subscript = False
if attr is not None:
if not isinstance(base, QN):
raise ValueError(
'for attribute QNs, base must be a QN; got instead "%s"' % base)
if not isinstance(attr, str):
raise ValueError('attr may only be a string; got instead "%s"' % attr)
self._parent = base
# TODO(mdan): Get rid of the tuple - it can only have 1 or 2 elements now.
self.qn = (base, attr)
self._has_attr = True
elif subscript is not None:
if not isinstance(base, QN):
raise ValueError('For subscript QNs, base must be a QN.')
self._parent = base
self.qn = (base, subscript)
self._has_subscript = True
else:
if not isinstance(base, (str, Literal)):
# TODO(mdan): Require Symbol instead of string.
raise ValueError(
'for simple QNs, base must be a string or a Literal object;'
' got instead "%s"' % type(base))
assert '.' not in base and '[' not in base and ']' not in base
self._parent = None
self.qn = (base,)
def is_symbol(self):
return isinstance(self.qn[0], str)
def is_simple(self):
return len(self.qn) <= 1
def is_composite(self):
return len(self.qn) > 1
def has_subscript(self):
return self._has_subscript
def has_attr(self):
return self._has_attr
@property
def attr(self):
if not self._has_attr:
raise ValueError('Cannot get attr of non-attribute "%s".' % self)
return self.qn[1]
@property
def parent(self):
if self._parent is None:
raise ValueError('Cannot get parent of simple name "%s".' % self.qn[0])
return self._parent
@property
def owner_set(self):
"""Returns all the symbols (simple or composite) that own this QN.
In other words, if this symbol was modified, the symbols in the owner set
may also be affected.
Examples:
'a.b[c.d]' has two owners, 'a' and 'a.b'
"""
owners = set()
if self.has_attr() or self.has_subscript():
owners.add(self.parent)
owners.update(self.parent.owner_set)
return owners
@property
def support_set(self):
"""Returns the set of simple symbols that this QN relies on.
This would be the smallest set of symbols necessary for the QN to
statically resolve (assuming properties and index ranges are verified
at runtime).
Examples:
'a.b' has only one support symbol, 'a'
'a[i]' has two support symbols, 'a' and 'i'
"""
# TODO(mdan): This might be the set of Name nodes in the AST. Track those?
roots = set()
if self.has_attr():
roots.update(self.parent.support_set)
elif self.has_subscript():
roots.update(self.parent.support_set)
roots.update(self.qn[1].support_set)
else:
roots.add(self)
return roots
def __hash__(self):
return hash(self.qn + (self._has_attr, self._has_subscript))
def __eq__(self, other):
return (isinstance(other, QN) and self.qn == other.qn and
self.has_subscript() == other.has_subscript() and
self.has_attr() == other.has_attr())
def __lt__(self, other):
return str(self) < str(other)
def __gt__(self, other):
return str(self) > str(other)
def __str__(self):
root = self.qn[0]
if self.has_subscript():
return '{}[{}]'.format(root, self.qn[1])
if self.has_attr():
return '.'.join(map(str, self.qn))
else:
return str(root)
def __repr__(self):
return str(self)
def ssf(self):
"""Simple symbol form."""
ssfs = [n.ssf() if isinstance(n, QN) else n for n in self.qn]
ssf_string = ''
for i in range(0, len(self.qn) - 1):
if self.has_subscript():
delimiter = '_sub_'
else:
delimiter = '_'
ssf_string += ssfs[i] + delimiter
return ssf_string + ssfs[-1]
def ast(self):
"""AST representation."""
# The caller must adjust the context appropriately.
if self.has_subscript():
return gast.Subscript(
value=self.parent.ast(),
slice=self.qn[-1].ast(),
ctx=CallerMustSetThis)
if self.has_attr():
return gast.Attribute(
value=self.parent.ast(), attr=self.qn[-1], ctx=CallerMustSetThis)
base = self.qn[0]
if isinstance(base, str):
return gast.Name(
base, ctx=CallerMustSetThis, annotation=None, type_comment=None)
elif isinstance(base, Literal):
return gast.Constant(base.value, kind=None)
else:
assert False, ('the constructor should prevent types other than '
'str and Literal')
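# --- Illustrative sketch (not part of the original module) -------------------
# How QN composes attribute and subscript names, and what owner_set /
# support_set return for a composite name like 'a.b[c]'.
def _qn_usage_sketch():
    a = QN('a')
    a_b = QN(a, attr='b')                  # represents 'a.b'
    a_b_c = QN(a_b, subscript=QN('c'))     # represents 'a.b[c]'
    assert str(a_b_c) == 'a.b[c]'
    assert a_b_c.owner_set == {a, a_b}
    assert a_b_c.support_set == {a, QN('c')}
    return a_b_c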
class QnResolver(gast.NodeTransformer):
"""Annotates nodes with QN information.
Note: Not using NodeAnnos to avoid circular dependencies.
"""
def visit_Name(self, node):
node = self.generic_visit(node)
anno.setanno(node, anno.Basic.QN, QN(node.id))
return node
def visit_Attribute(self, node):
node = self.generic_visit(node)
if anno.hasanno(node.value, anno.Basic.QN):
anno.setanno(node, anno.Basic.QN,
QN(anno.getanno(node.value, anno.Basic.QN), attr=node.attr))
return node
def visit_Subscript(self, node):
# TODO(mdan): This may no longer apply if we overload getitem.
node = self.generic_visit(node)
s = node.slice
if isinstance(s, (gast.Tuple, gast.Slice)):
# TODO(mdan): Support range and multi-dimensional indices.
# Continuing silently because some demos use these.
return node
if isinstance(s, gast.Constant) and s.value != Ellipsis:
subscript = QN(Literal(s.value))
else:
# The index may be an expression, case in which a name doesn't make sense.
if anno.hasanno(s, anno.Basic.QN):
subscript = anno.getanno(s, anno.Basic.QN)
else:
return node
if anno.hasanno(node.value, anno.Basic.QN):
anno.setanno(node, anno.Basic.QN,
QN(anno.getanno(node.value, anno.Basic.QN),
subscript=subscript))
return node
def resolve(node):
return QnResolver().visit(node)
def from_str(qn_str):
node = parser.parse_expression(qn_str)
node = resolve(node)
return anno.getanno(node, anno.Basic.QN)
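# --- Illustrative sketch (not part of the original module) -------------------
# Resolving a dotted-name string into a QN via the pyct parser; assumes the
# autograph.pyct package (parser, anno) imported above is available.
def _from_str_usage_sketch():
    qn = from_str('foo.bar')
    assert str(qn) == 'foo.bar'
    assert qn.has_attr() and str(qn.parent) == 'foo'
    return qn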
|
sarvex/tensorflow
|
tensorflow/python/autograph/pyct/qual_names.py
|
Python
|
apache-2.0
| 8,237
|
[
"VisIt"
] |
1094ed10f5374b93f377cd8853d3523086704504289f48f672d0ed2f9322c064
|
import keras
import tensorflow as tf
import numpy as np
import menpo.io as mio
import menpo
from scipy.interpolate import interp1d
import scipy as sp
from keras import backend as K
from matplotlib import pyplot as plt
from pathlib import Path
from scipy.io import loadmat
from menpo.image import Image
from menpo.shape import PointCloud
from menpo.transform import Translation, Scale
from .channel_transform import sample_colours_from_colourmap
ResizeMethod = tf.image.ResizeMethod
# tf functions
def tf_caffe_preprocess(image):
VGG_MEAN = np.array([102.9801, 115.9465, 122.7717])
# RGB -> BGR
image = tf.reverse(image, [False, False, True])
# Subtract VGG training mean across all channels
image = image - VGG_MEAN.reshape([1, 1, 3])
return image
def tf_rotate_points(points, image, angle):
s = tf.shape(image)
image_center = tf.to_float(s[:2]) / 2.
# center coordinates since rotation center is supposed to be in the image center
points_centered = points - image_center
rot_matrix = tf.dynamic_stitch([[0], [1], [2], [3]], [tf.cos(
angle), -tf.sin(angle), tf.sin(angle), tf.cos(angle)])
rot_matrix = tf.reshape(rot_matrix, shape=[2, 2])
points_centered_rot = tf.matmul(rot_matrix, tf.transpose(points_centered))
return tf.transpose(points_centered_rot) + image_center
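# --- Illustrative numpy sketch (not part of the original module) -------------
# The same rotate-about-the-image-centre idea as tf_rotate_points above,
# written with plain numpy; `points` is (n, 2) in (row, col) order and
# `image_shape` is (height, width, ...).
def np_rotate_points(points, image_shape, angle):
    center = np.asarray(image_shape[:2], dtype=np.float64) / 2.
    rot = np.array([[np.cos(angle), -np.sin(angle)],
                    [np.sin(angle), np.cos(angle)]])
    return (points - center) @ rot.T + center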
def tf_lms_to_heatmap(lms, h, w, n_landmarks, marked_index, sigma=5):
xs, ys = tf.meshgrid(tf.range(0., tf.to_float(w)),
tf.range(0., tf.to_float(h)))
gaussian = (1. / (sigma * np.sqrt(2. * np.pi)))
def gaussian_fn(lms):
y, x, idx = tf.unstack(lms)
idx = tf.to_int32(idx)
def run_true():
return tf.exp(-0.5 * (tf.pow(ys - y, 2) + tf.pow(xs - x, 2)) *
tf.pow(1. / sigma, 2.)) * gaussian * 17.
def run_false():
return tf.zeros((h, w))
return tf.cond(tf.reduce_any(tf.equal(marked_index, idx)), run_true, run_false)
img_hm = tf.stack(tf.map_fn(gaussian_fn, tf.concat(
[lms, tf.to_float(tf.range(0, n_landmarks))[..., None]], 1)))
return img_hm
def tf_heatmap_to_lms(heatmap):
hs = tf.argmax(tf.reduce_max(heatmap, 2), 1)
ws = tf.argmax(tf.reduce_max(heatmap, 1), 1)
lms = tf.transpose(tf.to_float(tf.stack([hs, ws])), perm=[1, 2, 0])
return lms
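# --- Illustrative numpy sketch (not part of the original module) -------------
# The same peak-extraction idea as tf_heatmap_to_lms: each landmark is the
# (row, col) position of the per-channel maximum of the heatmap.
def np_heatmap_to_lms(heatmap):
    # heatmap: (batch, height, width, n_landmarks)
    hs = np.argmax(heatmap.max(axis=2), axis=1)   # peak row per landmark
    ws = np.argmax(heatmap.max(axis=1), axis=1)   # peak column per landmark
    return np.stack([hs, ws], axis=-1).astype(np.float32)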
def tf_image_batch_to_grid(images, col_size=4):
image_shape = tf.shape(images)
batch_size = image_shape[0]
image_height = image_shape[1]
image_width = image_shape[2]
image_channels = image_shape[3]
w = col_size
h = batch_size // w
tfimg = images[:w * h]
tfimg = tf.reshape(
tfimg, [w, h * image_height, image_width, image_channels])
tfimg = tf.reshape(
tf.transpose(tfimg, [1, 0, 2, 3]), [h * image_height, w * image_width, image_channels])
return tfimg
def tf_image_patch_around_lms(image, lms, patch_size=32, dtype=tf.float32):
pad_size = patch_size // 2 + 1
lms = tf.to_int32(lms) + tf.constant([pad_size, pad_size])
image = tf.pad(image, [[pad_size, pad_size], [pad_size, pad_size], [0, 0]])
def crop(x):
return tf.image.crop_to_bounding_box(image, x[0] - pad_size, x[1] - pad_size, patch_size, patch_size)
image = tf.concat(tf.unstack(tf.map_fn(crop, lms, dtype=dtype)), axis=-1)
return image
def tf_records_iterator(path, feature=None):
record_iterator = tf.python_io.tf_record_iterator(path=path)
for string_record in record_iterator:
example = tf.train.Example()
example.ParseFromString(string_record)
yield example.features.feature
def tf_logits_to_heatmap(logits, num_classes):
"""Generates a coloured heatmap from the keypoint logits.
Args:
        logits: A `Tensor` of dimensions [num_batch, height, width, num_classes + 1].
        num_classes: Number of keypoint classes (excluding the background class).
"""
keypoint_colours = np.array(
[
plt.cm.spectral(x)
for x in np.linspace(0, 1, num_classes + 1)
])[..., :3].astype(np.float32)
prediction = tf.nn.softmax(logits)
heatmap = tf.matmul(tf.reshape(
prediction, (-1, num_classes + 1)), keypoint_colours)
heatmap = tf.reshape(heatmap, (tf.shape(prediction)[0],
tf.shape(prediction)[1],
tf.shape(prediction)[2], 3))
return heatmap
def tf_logits_to_landmarks(keypoints):
is_background = tf.equal(keypoints, 0)
ones = tf.to_float(tf.ones_like(is_background))
zeros = tf.to_float(tf.zeros_like(is_background))
return tf.where(is_background, zeros, ones) * 255
def tf_keypts_encoding(keypoints, num_classes):
keypoints = tf.to_int32(keypoints)
keypoints = tf.reshape(keypoints, (-1,))
keypoints = tf.layers.one_hot_encoding(
keypoints, num_classes=num_classes + 1)
return keypoints
def tf_get_weight(keypoints, mask=None, ng_w=0.01, ps_w=1.0):
is_background = tf.equal(keypoints, 0)
ones = tf.to_float(tf.ones_like(is_background))
weights = tf.where(is_background, ones * ng_w, ones * ps_w)
# if mask is not None:
# weights *= tf.to_float(mask)
return weights
def tf_atan2(y, x):
angle = tf.where(tf.greater(x, 0.0), tf.atan(y / x), tf.zeros_like(x))
angle = tf.where(tf.greater(y, 0.0), 0.5 * np.pi - tf.atan(x / y), angle)
angle = tf.where(tf.less(y, 0.0), -0.5 * np.pi - tf.atan(x / y), angle)
angle = tf.where(tf.less(x, 0.0), tf.atan(y / x) + np.pi, angle)
angle = tf.where(tf.logical_and(tf.equal(x, 0.0), tf.equal(y, 0.0)),
np.nan * tf.zeros_like(x), angle)
indices = tf.where(tf.less(angle, 0.0))
updated_values = tf.gather_nd(angle, indices) + (2 * np.pi)
update = tf.SparseTensor(indices, updated_values, angle.get_shape())
update_dense = tf.sparse_tensor_to_dense(update)
return angle + update_dense
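# --- Illustrative numpy sketch (not part of the original module) -------------
# tf_atan2 above maps the angle into [0, 2*pi); with numpy the same mapping is
# a one-liner (numpy returns 0 at the origin, where the TF version
# deliberately yields NaN).
def np_atan2_0_2pi(y, x):
    return np.mod(np.arctan2(y, x), 2. * np.pi)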
def tf_n_channel_rgb(inputs, n_feature, colour_set='jet'):
cm = sample_colours_from_colourmap(
n_feature, colour_set).astype(np.float32)
tf_cm = tf.constant(cm)
tf_img = tf.tensordot(inputs, tf_cm, axes=1)
return tf_img
def tf_iuv_rgb(tf_iuv, n_feature=26, colour_set='jet'):
tf_iuv_class = tf_iuv[..., :n_feature]
tf_iuv_class = tf.argmax(tf_iuv_class, axis=-1)
tf_iuv_class = tf.one_hot(tf_iuv_class, n_feature)
tf_u = tf_iuv_class * tf_iuv[..., n_feature:n_feature*2]
tf_v = tf_iuv_class * tf_iuv[..., n_feature*2:]
tf_u = tf_n_channel_rgb(tf_u, n_feature)
tf_v = tf_n_channel_rgb(tf_v, n_feature)
tf_img = (tf_u + tf_v) / 2. / 255.
return tf_img
def tf_ced_accuracy(t, dists):
# Head Shoulder Elbow Wrist Hip Knee Ankle
pts_r = tf.transpose(
tf.gather(tf.transpose(dists), [8, 12, 11, 10, 2, 1, 0]))
pts_l = tf.transpose(
tf.gather(tf.transpose(dists), [9, 13, 14, 15, 3, 4, 5]))
part_pckh = (tf.to_int32(pts_r <= t) + tf.to_int32(pts_l <= t)) / 2
return tf.concat([part_pckh, tf.reduce_sum(tf.to_int32(dists <= t), 1)[..., None] / tf.shape(dists)[1]], 1)
def tf_normalized_point_to_point_error(preds, gts, factor=1):
dists = tf.sqrt(tf.reduce_sum(tf.pow(preds - gts, 2),
reduction_indices=-1)) / factor
return dists
def tf_pckh(preds, gts, scales):
t_range = np.arange(0, 0.51, 0.01)
dists = tf_normalized_point_to_point_error(preds, gts, factor=scales)
return tf_ced_accuracy(0.5, dists)
## Edge Detection --------------
def tf_canny(img_tensor, minRate=0.10, maxRate=0.40, remove_high_val=False, return_raw_edges=False):
""" STEP-0 (Preprocessing):
1. Scale the tensor values to the expected range ([0,1])
2. If 'preserve_size': As TensorFlow will pad by 0s for padding='SAME',
it is better to pad by the same values of the borders.
(This is to avoid considering the borders as edges)
"""
GAUS_KERNEL = img_tensor.get_shape().as_list()[-1]
GAUS_SIGMA = 1.2
def Gaussian_Filter(kernel_size=GAUS_KERNEL, sigma=GAUS_SIGMA): #Default: Filter_shape = [5,5]
# --> Reference: https://en.wikipedia.org/wiki/Canny_edge_detector#Gaussian_filter
k = (kernel_size-1)//2
filters = []
sigma_2 = sigma**2
for i in range(kernel_size):
filter_row = []
for j in range(kernel_size):
Hij = np.exp(-((i+1-(k+1))**2 + (j+1-(k+1))**2)/(2*sigma_2))/(2*np.pi*sigma_2)
filter_row.append(Hij)
filters.append(filter_row)
return np.asarray(filters).reshape(kernel_size,kernel_size,1,1).transpose([2,0,1,3])
"""
    NOTE: All variables are initialized first to reduce processing time.
"""
gaussian_filter = tf.constant(Gaussian_Filter(), tf.float32) #STEP-1
h_filter = tf.reshape(tf.constant([[-1,0,1],[-2,0,2],[-1,0,1]], tf.float32), [3,3,1,1]) #STEP-2
v_filter = tf.reshape(tf.constant([[1,2,1],[0,0,0],[-1,-2,-1]], tf.float32), [3,3,1,1]) #STEP-2
np_filter_0 = np.zeros((3,3,1,2))
np_filter_0[1,0,0,0], np_filter_0[1,2,0,1] = 1,1 ### Left & Right
# print(np_filter_0)
filter_0 = tf.constant(np_filter_0, tf.float32)
np_filter_90 = np.zeros((3,3,1,2))
np_filter_90[0,1,0,0], np_filter_90[2,1,0,1] = 1,1 ### Top & Bottom
filter_90 = tf.constant(np_filter_90, tf.float32)
np_filter_45 = np.zeros((3,3,1,2))
np_filter_45[0,2,0,0], np_filter_45[2,0,0,1] = 1,1 ### Top-Right & Bottom-Left
filter_45 = tf.constant(np_filter_45, tf.float32)
np_filter_135 = np.zeros((3,3,1,2))
np_filter_135[0,0,0,0], np_filter_135[2,2,0,1] = 1,1 ### Top-Left & Bottom-Right
filter_135 = tf.constant(np_filter_135, tf.float32)
np_filter_sure = np.ones([3,3,1,1]); np_filter_sure[1,1,0,0] = 0
filter_sure = tf.constant(np_filter_sure, tf.float32)
border_paddings = tf.constant([[0,0],[1,1],[1,1],[0,0]])
def FourAngles(d):
d0 = tf.to_float(tf.greater_equal(d,157.5))+tf.to_float(tf.less(d,22.5))
d45 = tf.to_float(tf.greater_equal(d,22.5))*tf.to_float(tf.less(d,67.5))
d90 = tf.to_float(tf.greater_equal(d,67.5))*tf.to_float(tf.less(d,112.5))
d135 = tf.to_float(tf.greater_equal(d,112.5))*tf.to_float(tf.less(d,157.5))
# return {'d0':d0, 'd45':d45, 'd90':d90, 'd135':d135}
return (d0,d45,d90,d135)
""" STEP-1: Noise reduction with Gaussian filter """
x_gaussian = tf.nn.convolution(img_tensor, gaussian_filter, padding='SAME')
### Below is a heuristic to remove the intensity gradient inside a cloud ###
# if remove_high_val: x_gaussian = tf.clip_by_value(x_gaussian, 0, MAX/2)
""" STEP-2: Calculation of Horizontal and Vertical derivatives with Sobel operator
--> Reference: https://en.wikipedia.org/wiki/Sobel_operator
"""
Gx = tf.nn.convolution(x_gaussian, h_filter, padding='SAME')
Gy = tf.nn.convolution(x_gaussian, v_filter, padding='SAME')
G = tf.sqrt(tf.square(Gx) + tf.square(Gy))
BIG_PHI = tf.atan2(Gy,Gx)
BIG_PHI = (BIG_PHI*180/np.pi)%180 ### Convert from Radian to Degree
D_0,D_45,D_90,D_135 = FourAngles(BIG_PHI)### Round the directions to 0, 45, 90, 135 (only take the masks)
""" STEP-3: NON-Maximum Suppression
--> Reference: https://stackoverflow.com/questions/46553662/conditional-value-on-tensor-relative-to-element-neighbors
"""
""" 3.1-Selecting Edge-Pixels on the Horizontal direction """
targetPixels_0 = tf.nn.convolution(G, filter_0, padding='SAME')
isGreater_0 = tf.to_float(tf.greater(G*D_0, targetPixels_0))
isMax_0 = isGreater_0[:,:,:,0:1]*isGreater_0[:,:,:,1:2]
### Note: Need to keep 4 dimensions (index [:,:,:,0] is 3 dimensions) ###
""" 3.2-Selecting Edge-Pixels on the Vertical direction """
targetPixels_90 = tf.nn.convolution(G, filter_90, padding='SAME')
isGreater_90 = tf.to_float(tf.greater(G*D_90, targetPixels_90))
isMax_90 = isGreater_90[:,:,:,0:1]*isGreater_90[:,:,:,1:2]
""" 3.3-Selecting Edge-Pixels on the Diag-45 direction """
targetPixels_45 = tf.nn.convolution(G, filter_45, padding='SAME')
isGreater_45 = tf.to_float(tf.greater(G*D_45, targetPixels_45))
isMax_45 = isGreater_45[:,:,:,0:1]*isGreater_45[:,:,:,1:2]
""" 3.4-Selecting Edge-Pixels on the Diag-135 direction """
targetPixels_135 = tf.nn.convolution(G, filter_135, padding='SAME')
isGreater_135 = tf.to_float(tf.greater(G*D_135, targetPixels_135))
isMax_135 = isGreater_135[:,:,:,0:1]*isGreater_135[:,:,:,1:2]
""" 3.5-Merging Edges on Horizontal-Vertical and Diagonal directions """
edges_raw = G*(isMax_0 + isMax_90 + isMax_45 + isMax_135)
edges_raw = tf.clip_by_value(edges_raw, 0, 1)
return edges_raw
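# --- Illustrative numpy sketch (not part of the original module) -------------
# The Gaussian kernel built in STEP-1 of tf_canny, using the same formula as
# the nested Gaussian_Filter helper, but returned as a plain 2-D numpy array.
def np_gaussian_kernel(kernel_size=5, sigma=1.2):
    k = (kernel_size - 1) // 2
    ax = np.arange(kernel_size) - k
    xx, yy = np.meshgrid(ax, ax)
    return np.exp(-(xx ** 2 + yy ** 2) / (2. * sigma ** 2)) / (2. * np.pi * sigma ** 2)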
|
yuxiang-zhou/deepmachine
|
deepmachine/utils/tf.py
|
Python
|
mit
| 12,823
|
[
"Gaussian"
] |
2330d4b843c1557ecd79d31e5573363f6e7f00b46c2e96ade567ab507d3957e7
|
# -*- coding: utf-8 -*-
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,random
from resources.lib.modules import control
from resources.lib.modules import cleantitle
from resources.lib.modules import client
debridstatus = control.setting('debridsources')
class source:
def __init__(self):
self.domains = ['rlsbb.online']
self.base_link = 'http://rlsbb.online'
self.search_link = '/?s=%s+%s&submit=Find'
def movie(self, imdb, title, year):
self.zen_url = []
try:
if not debridstatus == 'true': raise Exception()
self.zen_url = []
title = cleantitle.getsearch(title)
cleanmovie = cleantitle.get(title)
query = self.search_link % (urllib.quote_plus(title),year)
query = urlparse.urljoin(self.base_link, query)
# print ("RLSBBONLINE query", query)
link = client.request(query)
r = client.parseDOM(link, 'h2', attrs = {'class': 'postTitle'})
# print ("RLSBBONLINE r", r)
for item in r:
href = client.parseDOM(item, 'a', ret = 'href')[0]
item_title = client.parseDOM(item, 'a', ret = 'title')[0]
href = href.encode('utf-8')
item_title = item_title.encode('utf-8')
if year in item_title:
if cleanmovie in cleantitle.get(item_title):
self.zen_url.append([href,item_title])
# print "RLSBBONLINE MOVIES %s %s" % (item_title , href)
return self.zen_url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, year):
try:
url = {'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
self.zen_url = []
try:
if not debridstatus == 'true': raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
cleanmovie = cleantitle.get(title)
title = cleantitle.getsearch(title)
data['season'], data['episode'] = season, episode
episodecheck = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
episodecheck = str(episodecheck).lower()
query = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
query = self.search_link % (urllib.quote_plus(title),query)
mylink = urlparse.urljoin(self.base_link, query)
link = client.request(mylink)
r = client.parseDOM(link, 'h2', attrs = {'class': 'postTitle'})
# print ("RLSBBONLINE TV r", r)
for item in r:
href = client.parseDOM(item, 'a', ret = 'href')[0]
item_title = client.parseDOM(item, 'a', ret = 'title')[0]
href = href.encode('utf-8')
item_title = item_title.encode('utf-8')
if cleanmovie in cleantitle.get(item_title):
if episodecheck in cleantitle.get(item_title):
self.zen_url.append([href,item_title])
# print ("RLSBBONLINE TV PASSED", self.zen_url)
return self.zen_url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
for movielink,title in self.zen_url:
mylink = client.request(movielink)
r = client.parseDOM(mylink, 'div', attrs = {'class': 'postContent'})
if "1080" in title: quality = "1080p"
elif "720" in title: quality = "HD"
else: quality = "SD"
info = ''
if "hevc" in title.lower(): info = "HEVC"
for items in r:
match = re.compile('href="([^"]+)').findall(items)
for url in match:
# print ("RLSBBONLINE ULRS >>>", url)
if not any(value in url for value in ['sample','uploadkadeh','wordpress','crazy4tv','imdb.com','youtube','userboard','kumpulbagi','mexashare','myvideolink.xyz', 'myvideolinks.xyz' , 'costaction', 'crazydl','.rar', '.RAR', 'ul.to', 'safelinking','linx.2ddl.ag','upload.so','.zip', 'go4up', 'adf.ly','.jpg','.jpeg']):
if any(value in url for value in hostprDict):
try:host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
except: host = 'Videomega'
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'provider': 'Rlsbbonline', 'url': url, 'info': info,'direct': False, 'debridonly': True})
return sources
except:
return sources
def resolve(self, url):
return url
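# --- Illustrative sketch (not part of the original module) -------------------
# The release-title to quality mapping applied in sources() above.
def _quality_from_title_sketch(title):
    if '1080' in title:
        return '1080p'
    elif '720' in title:
        return 'HD'
    return 'SD'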
|
repotvsupertuga/repo
|
plugin.video.zen/resources/lib/sources/rlsbbonline_mv_tv.py
|
Python
|
gpl-2.0
| 5,095
|
[
"ADF"
] |
c22df191f61e4ea51f5709d45dc99f4ed91e493dd7f9145c27023796dbf6a6ac
|
"""The post-processing module contains classes for image filtering mostly applied after a classification.
Image post-processing aims to alter images such that they depict a desired representation.
"""
import warnings
# import numpy as np
# import pydensecrf.densecrf as crf
# import pydensecrf.utils as crf_util
import pymia.filtering.filter as pymia_fltr
import SimpleITK as sitk
class ImagePostProcessing(pymia_fltr.Filter):
"""Represents a post-processing filter."""
def __init__(self):
"""Initializes a new instance of the ImagePostProcessing class."""
super().__init__()
def execute(self, image: sitk.Image, params: pymia_fltr.FilterParams = None) -> sitk.Image:
"""Registers an image.
Args:
image (sitk.Image): The image.
params (FilterParams): The parameters.
Returns:
sitk.Image: The post-processed image.
"""
# todo: replace this filter by a post-processing - or do we need post-processing at all?
warnings.warn('No post-processing implemented. Can you think about something?')
return image
def __str__(self):
"""Gets a printable string representation.
Returns:
str: String representation.
"""
return 'ImagePostProcessing:\n' \
.format(self=self)
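# --- Illustrative sketch (not part of the original module) -------------------
# Typical use of the filter above on a dummy SimpleITK image; currently it
# only emits the 'no post-processing implemented' warning and returns the
# input unchanged.
def _post_processing_usage_sketch():
    image = sitk.Image(16, 16, 16, sitk.sitkUInt8)
    return ImagePostProcessing().execute(image)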
# class DenseCRFParams(pymia_fltr.FilterParams):
# """Dense CRF parameters."""
# def __init__(self, img_t1: sitk.Image, img_t2: sitk.Image, img_proba: sitk.Image):
# """Initializes a new instance of the DenseCRFParams
#
# Args:
# img_t1 (sitk.Image): The T1-weighted image.
# img_t2 (sitk.Image): The T2-weigthed image.
# img_proba (sitk.Image): The posterior probability image.
# """
# self.img_t1 = img_t1
# self.img_t2 = img_t2
# self.img_proba = img_proba
#
#
# class DenseCRF(pymia_fltr.Filter):
# """A dense conditional random field (dCRF).
#
# Implements the work of Krähenbühl and Koltun, Efficient Inference in Fully Connected CRFs
# with Gaussian Edge Potentials, 2012. The dCRF code is taken from https://github.com/lucasb-eyer/pydensecrf.
# """
#
# def __init__(self):
# """Initializes a new instance of the DenseCRF class."""
# super().__init__()
#
# def execute(self, image: sitk.Image, params: DenseCRFParams = None) -> sitk.Image:
# """Executes the dCRF regularization.
#
# Args:
# image (sitk.Image): The image (unused).
# params (FilterParams): The parameters.
#
# Returns:
# sitk.Image: The filtered image.
# """
#
# if params is None:
# raise ValueError('Parameters are required')
#
# img_t2 = sitk.GetArrayFromImage(params.img_t1)
# img_ir = sitk.GetArrayFromImage(params.img_t2)
# img_proba = sitk.GetArrayFromImage(params.img_proba)
#
# # some variables
# x = img_proba.shape[2]
# y = img_proba.shape[1]
# z = img_proba.shape[0]
# no_labels = img_proba.shape[3]
#
# img_proba = np.rollaxis(img_proba, 3, 0)
#
# d = crf.DenseCRF(x * y * z, no_labels) # width, height, nlabels
# U = crf_util.unary_from_softmax(img_proba)
# d.setUnaryEnergy(U)
#
# stack = np.stack([img_t2, img_ir], axis=3)
#
# # Create the pairwise bilateral term from the above images.
# # The two `s{dims,chan}` parameters are model hyper-parameters defining
# # the strength of the location and image content bilaterals, respectively.
#
# # higher weight equals stronger
# pairwise_energy = crf_util.create_pairwise_bilateral(sdims=(1, 1, 1), schan=(1, 1), img=stack, chdim=3)
#
# # `compat` (Compatibility) is the "strength" of this potential.
# compat = 10
# # compat = np.array([1, 1], np.float32)
# # weight --> lower equals stronger
# # compat = np.array([[0, 10], [10, 1]], np.float32)
#
# d.addPairwiseEnergy(pairwise_energy, compat=compat,
# kernel=crf.DIAG_KERNEL,
# normalization=crf.NORMALIZE_SYMMETRIC)
#
# # add location only
# # pairwise_gaussian = crf_util.create_pairwise_gaussian(sdims=(.5,.5,.5), shape=(x, y, z))
# #
# # d.addPairwiseEnergy(pairwise_gaussian, compat=.3,
# # kernel=dcrf.DIAG_KERNEL,
# # normalization=dcrf.NORMALIZE_SYMMETRIC)
#
# # compatibility, kernel and normalization
# Q_unary = d.inference(10)
# # Q_unary, tmp1, tmp2 = d.startInference()
# #
# # for _ in range(10):
# # d.stepInference(Q_unary, tmp1, tmp2)
# # print(d.klDivergence(Q_unary) / (z* y*x))
# # kl2 = d.klDivergence(Q_unary) / (z* y*x)
#
# # The Q is now the approximate posterior, we can get a MAP estimate using argmax.
# map_soln_unary = np.argmax(Q_unary, axis=0)
# map_soln_unary = map_soln_unary.reshape((z, y, x))
# map_soln_unary = map_soln_unary.astype(np.uint8) # convert to uint8 from int64
# # Saving int64 with SimpleITK corrupts the file for Windows, i.e. opening it raises an ITK error:
# # Unknown component type error: 0
#
# img_out = sitk.GetImageFromArray(map_soln_unary)
# img_out.CopyInformation(params.img_t1)
# return img_out
|
istb-mia/MIALab
|
mialab/filtering/postprocessing.py
|
Python
|
apache-2.0
| 5,501
|
[
"Gaussian"
] |
b050c44d8aac9190f7364a07f5ca6793a38b01d8518eca621cd908ee06183d9d
|
#!/usr/bin/env python
"""Module to create command references for DIRAC."""
from concurrent.futures import ThreadPoolExecutor
import logging
import os
import shutil
import textwrap
from collections import namedtuple
from diracdoctools.Utilities import writeLinesToFile, runCommand, makeLogger
from diracdoctools.Config import Configuration, CLParser as clparser
LOG = makeLogger("CommandReference")
TITLE = "title"
PATTERN = "pattern"
SCRIPTS = "scripts"
EXCLUDE = "exclude"
RST_PATH = "rstPath"
PREFIX = "prefix"
Script = namedtuple("Script", "name system description")
class CLParser(clparser):
"""Extension to CLParser to also parse clean."""
def __init__(self):
self.clean = False
super(CLParser, self).__init__()
self.log = LOG.getChild("CLParser")
self.parser.add_argument("--clean", action="store_true", help="Remove rst files and exit")
def parse(self):
super(CLParser, self).parse()
self.clean = self.parsed.clean
def optionDict(self):
oDict = super(CLParser, self).optionDict()
oDict["clean"] = self.clean
return oDict
class CommandReference(object):
def __init__(self, configFile="docs.conf", debug=False):
self.config = Configuration(configFile, sections=["Commands"])
self.exitcode = 0
self.debug = debug
self.scriptDocs = {} # Scripts docs collection
if not os.path.exists(self.config.sourcePath):
LOG.error("%s does not exist" % self.config.sourcePath)
raise RuntimeError("Package not found")
def createSectionAndIndex(self, sectionDict: dict):
"""Create the index file and folder where the RST files will go.
:param sectionDict: section description
"""
reference = f".. _{sectionDict[PREFIX]}_cmd:" if sectionDict[PREFIX] else ""
title = f"{sectionDict[TITLE]} Command Reference"
# Add description
sectionIndexRST = textwrap.dedent(
f"""
{reference}
{"=" * len(title)}
{title}
{"=" * len(title)}
.. this page automatically is created in {__name__}
In this subsection the {title} commands are collected
"""
)
# Write commands that were not included in the subgroups
for name in sectionDict[SCRIPTS]:
if name in self.scriptDocs:
sectionIndexRST += f"- :ref:`{name}<{name}>`\n"
# Write commands included in the subgroups
for group in sectionDict["subgroups"]:
groupDict = sectionDict[group]
# Add subgroup reference
ref = f".. _{groupDict[PREFIX]}_cmd:" if groupDict[PREFIX] else ""
# Add subgroup header
sectionIndexRST += textwrap.dedent(
f"""
{ref}
{"-" * len(groupDict[TITLE])}
{groupDict[TITLE]}
{"-" * len(groupDict[TITLE])}
"""
)
for name in groupDict[SCRIPTS]:
if name in self.scriptDocs:
sectionIndexRST += f" - :ref:`{name}<{name}>`\n"
writeLinesToFile(os.path.join(self.config.docsPath, sectionDict[RST_PATH]), sectionIndexRST)
def createAllScriptsDocsAndWriteToRST(self):
"""Get all scripts and write it to RST file."""
# Use `:orphan:` in case you do not need a reference to this document in doctree
sectionIndexRST = textwrap.dedent(
f"""
:orphan:
.. this page automatically is created in {__name__}
.. _cmd:
Command Reference
In this subsection all commands are collected:
"""
)
futures = []
# Call all scripts help
with ThreadPoolExecutor() as pool:
for script in self.config.allScripts:
futures.append(pool.submit(self.createScriptDoc, script))
systems = []
# Collect all scripts help messages
for future in futures:
script = future.result()
if script:
self.scriptDocs[script.name] = script
                if script.system not in systems: systems.append(script.system)
# Write all commands in one RST for each system
for system in sorted(systems):
# Write system head
sectionIndexRST += textwrap.dedent(
f"""
.. _{system}_cmd:
{"=" * len(system)}
{system}
{"=" * len(system)}
"""
)
# Write each system command description
for script in sorted(self.scriptDocs):
if self.scriptDocs[script].system == system:
sectionIndexRST += self.scriptDocs[script].description
writeLinesToFile(os.path.join(self.config.docsPath, self.config.com_rst_path), sectionIndexRST)
def createScriptDoc(self, script: str):
"""Create script description.
Folders and indices already exist, just call the scripts and get the help messages. Format the help message.
:return: Script -- system name, script name, parsed help message
"""
executor = "bash"
scriptName = os.path.basename(script)
systemName = script.split("/")[-3].replace("System", "")
if scriptName.endswith(".py"):
executor = "python"
scriptName = scriptName.replace("_", "-")[:-3]
if scriptName in self.config.com_ignore_commands:
return
LOG.info("Creating Doc for %r", scriptName)
helpMessage = runCommand("%s %s -h" % (executor, script))
if not helpMessage:
LOG.warning("NO DOC for %s", scriptName)
helpMessage = "Oops, we couldn't generate a description for this command."
# Script reference
fileContent = textwrap.dedent(
f"""
.. _{scriptName}:
{'-' * len(scriptName)}
{scriptName}
{'-' * len(scriptName)}
"""
)
# Script description payload
rstLines = []
genOptions = False
lineIndented = False
for line in helpMessage.splitlines():
line = line.rstrip()
newLine = "\n" + ":".join(line.rsplit("::", 1)) + ":\n" if line.endswith(":") else line
# ensure dedented lines are separated by newline from previous block
if lineIndented and not newLine.startswith(" "):
newLine = "\n" + newLine
rstLines.append(newLine)
lineIndented = newLine.startswith(" ")
fileContent += "\n\n" + "\n".join(rstLines).strip() + "\n"
# remove the standalone '-' when no short option exists
fileContent = fileContent.replace("- --", " --")
return Script(scriptName, systemName, fileContent)
def cleanDoc(self):
"""Remove the code output files."""
LOG.info("Removing existing commands documentation")
for fPath in [self.config.com_rst_path] + [self.config.scripts[p][RST_PATH] for p in self.config.scripts]:
path = os.path.join(self.config.docsPath, fPath)
if os.path.basename(path) == "index.rst":
path = os.path.dirname(path)
LOG.info("Removing: %r", path)
if os.path.exists(path):
shutil.rmtree(path)
LOG.info("Done")
def run(configFile="docs.conf", logLevel=logging.INFO, debug=False, clean=False):
"""Create the rst files for dirac commands, parsed form the --help message.
:param str configFile: path to the configFile
:param logLevel: logging level to use
:param bool debug: if true even more debug information is printed
:param bool clean: Remove rst files and exit
:returns: return value 1 or 0
"""
logging.getLogger().setLevel(logLevel)
commands = CommandReference(configFile=configFile, debug=debug)
# Clean the generated files
if clean:
return commands.cleanDoc()
# Create a file with a description of all commands
commands.createAllScriptsDocsAndWriteToRST()
    # Create the index files for the individual command sections described in the configuration
for section in commands.config.scripts:
sectionDict = commands.config.scripts[section]
commands.createSectionAndIndex(sectionDict)
LOG.info("Done")
return commands.exitcode
if __name__ == "__main__":
exit(run())
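# --- Illustrative sketch (not part of the original module) -------------------
# The RST anchor and section header that createScriptDoc emits for a single
# command; the command name used here is made up.
def _rst_header_sketch(scriptName="dirac-example-command"):
    return textwrap.dedent(
        f"""
        .. _{scriptName}:

        {'-' * len(scriptName)}
        {scriptName}
        {'-' * len(scriptName)}
        """
    )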
|
DIRACGrid/DIRAC
|
docs/diracdoctools/cmd/commandReference.py
|
Python
|
gpl-3.0
| 8,645
|
[
"DIRAC"
] |
9db3cfee27def5e8070afea3155a08eb5f832b880b4b9b7a4e5210c3e666a3b9
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 9 19:13:58 2019
@author: lyh
"""
import numpy as np
from oceansar import constants as const
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 7 17:09:40 2019
Create the directional swell spectrum by combining a Gaussian frequency
spread with a Gaussian directional distribution
:param dir_swell_dir: the direction of the swell (degrees)
:param sigf: spread in frequency
:param freq_r: related to the wavelength of swell (lmd = g / (2pi*f^2))
:param sigs: spread in direction
:param Hs: significant wave height (Hs = 4std)
"""
def ardhuin_swell_spec(k_axis, theta_axis, dir_swell_dir,
freq_r=0.068, sigf=0.007, sigs=8, Hs=1.45):
# the swell spectrum is given in frequency domain
f_k = 1 / 2 / np.pi * np.sqrt(const.g * k_axis)
# frequency spectrum (gaussian)
fac_f_k = 1 / 4 / np.pi * np.sqrt(const.g / k_axis)
amp = (Hs / 4) ** 2 / sigf / np.sqrt(2 * np.pi)
efs = (amp * np.exp(-(f_k - freq_r) ** 2 / (2 * sigf ** 2)) + 1E-5) * fac_f_k
# directional distribution
ang = np.angle(np.exp(1j * (theta_axis - np.radians(dir_swell_dir))))
dirss = np.exp(-ang ** 2 / (2 * np.radians(sigs) ** 2))
# normalization: the integration of all the theta for each k is 1
factor = np.sqrt(2 * np.pi) * np.radians(sigs)
# final 2D spectrum
swell_spec = efs * dirss / factor
return swell_spec
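# --- Illustrative sketch (not part of the original module) -------------------
# Evaluate the swell spectrum above on a small wavenumber/direction grid; the
# grid bounds used here are arbitrary example values.
def _swell_spec_usage_sketch():
    k = np.linspace(0.01, 0.2, 64)            # wavenumbers [rad/m]
    theta = np.linspace(-np.pi, np.pi, 72)    # directions [rad]
    kk, tt = np.meshgrid(k, theta)
    spec = ardhuin_swell_spec(kk, tt, dir_swell_dir=30.0)
    return spec  # shape (72, 64), peaked near f = 0.068 Hz and 30 degrees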
|
pakodekker/oceansar
|
oceansar/swell_spec/dir_swell_spec.py
|
Python
|
gpl-3.0
| 1,471
|
[
"Gaussian"
] |
49154b1663ff32738ff7b1d35be992cde633888446623d3f67ebce9017cebdbc
|
#!/usr/bin/env python
# Copyright (C) 2014 Swift Navigation Inc.
# Contact: Colin Beighley <colin@swift-nav.com>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
from urllib2 import URLError
from json import load as jsonload
from time import sleep
from intelhex import IntelHex, HexRecordError, HexReaderError
from pkg_resources import parse_version
from sbp.bootload import SBP_MSG_BOOTLOADER_JUMP_TO_APP
from sbp.piksi import SBP_MSG_RESET
from threading import Thread
from traits.api import HasTraits, Event, String, Button, Instance, Int, Bool, \
on_trait_change
from traitsui.api import View, Handler, Action, Item, TextEditor, VGroup, \
UItem, InstanceEditor, VSplit, HSplit, HGroup, \
BooleanEditor
from pyface.api import GUI, FileDialog, OK, ProgressDialog
from piksi_tools.version import VERSION as CONSOLE_VERSION
from piksi_tools import bootload
from piksi_tools import flash
import callback_prompt as prompt
from update_downloader import UpdateDownloader
from output_stream import OutputStream
import sys, os
from pyface.image_resource import ImageResource
if getattr(sys, 'frozen', False):
# we are running in a |PyInstaller| bundle
basedir = sys._MEIPASS
os.chdir(basedir)
else:
# we are running in a normal Python environment
basedir = os.path.dirname(__file__)
icon = ImageResource('icon',
search_path=['images', os.path.join(basedir, 'images')])
INDEX_URL = 'http://downloads.swiftnav.com/index.json'
class IntelHexFileDialog(HasTraits):
file_wildcard = String("Intel HEX File (*.hex)|*.hex|All files|*")
status = String('Please choose a file')
choose_fw = Button(label='Choose Firmware File')
view = View(
UItem('status'),
UItem('choose_fw')
)
def __init__(self, flash_type):
"""
Pop-up file dialog to choose an IntelHex file, with status and button to
display in traitsui window.
Parameters
----------
flash_type : string
Which Piksi flash to interact with ("M25" or "STM").
"""
    if flash_type not in ('M25', 'STM'):
raise ValueError("flash_type must be 'M25' or 'STM'")
self._flash_type = flash_type
self.ihx = None
def clear(self, status):
"""
Set text of status box and clear IntelHex file.
Parameters
----------
status : string
Error text to replace status box text with.
"""
self.ihx = None
self.status = status
def load_ihx(self, filepath):
"""
Load IntelHex file and set status to indicate if file was
successfully loaded.
Parameters
----------
filepath : string
Path to IntelHex file.
"""
try:
self.ihx = IntelHex(filepath)
self.status = os.path.split(filepath)[1]
except HexRecordError:
self.clear('Error: File is not a valid Intel HEX File')
# Check that address ranges are valid for self._flash_type.
ihx_addrs = flash.ihx_ranges(self.ihx)
if self._flash_type == "M25":
try:
sectors = flash.sectors_used(ihx_addrs, flash.m25_addr_sector_map)
except IndexError:
self.clear('Error: HEX File contains restricted address ' + \
'(STM Firmware File Chosen?)')
elif self._flash_type == "STM":
try:
sectors = flash.sectors_used(ihx_addrs, flash.stm_addr_sector_map)
except:
self.clear('Error: HEX File contains restricted address ' + \
'(NAP Firmware File Chosen?)')
def _choose_fw_fired(self):
""" Activate file dialog window to choose IntelHex firmware file. """
dialog = FileDialog(label='Choose Firmware File',
action='open', wildcard=self.file_wildcard)
dialog.open()
if dialog.return_code == OK:
filepath = os.path.join(dialog.directory, dialog.filename)
self.load_ihx(filepath)
else:
self.clear('Error while selecting file')
class PulsableProgressDialog(ProgressDialog):
def __init__(self, max, pulsed=False):
"""
Pop-up window for showing a process's progress.
Parameters
----------
max : int
Maximum value of the progress bar.
pulsed : bool
Show non-partial progress initially.
"""
super(PulsableProgressDialog, self).__init__()
self.min = 0
self.max = 0
self.pulsed = pulsed
self.passed_max = max
def progress(self, count):
"""
Update progress of progress bar. If pulsing initially, wait until count
is at least 12 before changing to discrete progress bar.
Parameters
----------
count : int
Current value of progress.
"""
# Provide user feedback initially via pulse for slow sector erases.
if self.pulsed:
if count > 12:
self.max = 100
GUI.invoke_later(self.update, int(100*float(count)/self.passed_max))
else:
self.max = 100
GUI.invoke_later(self.update, int(100*float(count)/self.passed_max))
def close(self):
""" Close progress bar window. """
GUI.invoke_after(0.1, super(PulsableProgressDialog, self).close)
sleep(0.2)
class UpdateView(HasTraits):
piksi_stm_vers = String('Waiting for Piksi to send settings...')
newest_stm_vers = String('Downloading Newest Firmware info...')
piksi_nap_vers = String('Waiting for Piksi to send settings...')
newest_nap_vers = String('Downloading Newest Firmware info...')
local_console_vers = String(CONSOLE_VERSION)
newest_console_vers = String('Downloading Newest Console info...')
erase_stm = Bool(True)
erase_en = Bool(True)
update_firmware = Button(label='Update Piksi Firmware')
updating = Bool(False)
update_en = Bool(False)
download_firmware = Button(label='Download Newest Firmware Files')
downloading = Bool(False)
download_fw_en = Bool(True)
stm_fw = Instance(IntelHexFileDialog)
nap_fw = Instance(IntelHexFileDialog)
stream = Instance(OutputStream)
view = View(
VGroup(
HGroup(
VGroup(
Item('piksi_stm_vers', label='Piksi STM Firmware Version'),
Item('newest_stm_vers', label='Newest STM Firmware Version'),
Item('piksi_nap_vers', label='Piksi NAP Firmware Version'),
Item('newest_nap_vers', label='Newest NAP Firmware Version'),
Item('local_console_vers', label='Local Piksi Console Version'),
Item('newest_console_vers', label='Newest Piksi Console Version'),
),
VGroup(
Item('stm_fw', style='custom', label='STM Firmware File', \
enabled_when='download_fw_en'),
Item('nap_fw', style='custom', label='NAP Firmware File', \
enabled_when='download_fw_en'),
Item('erase_stm', label='Erase STM flash (recommended)', \
enabled_when='erase_en'),
),
),
UItem('download_firmware', enabled_when='download_fw_en'),
UItem('update_firmware', enabled_when='update_en'),
Item(
'stream',
style='custom',
editor=InstanceEditor(),
label='Update Status',
),
)
)
def __init__(self, link, prompt=True):
"""
Traits tab with UI for updating Piksi firmware.
Parameters
----------
link : sbp.client.handler.Handler
Link for SBP transfer to/from Piksi.
prompt : bool
Prompt user to update console/firmware if out of date.
"""
self.link = link
self.settings = {}
self.prompt = prompt
self.python_console_cmds = {
'update': self
}
self.update_dl = None
self.stm_fw = IntelHexFileDialog('STM')
self.stm_fw.on_trait_change(self._manage_enables, 'status')
self.nap_fw = IntelHexFileDialog('M25')
self.nap_fw.on_trait_change(self._manage_enables, 'status')
self.stream = OutputStream()
self.get_latest_version_info()
def _manage_enables(self):
""" Manages whether traits widgets are enabled in the UI or not. """
if self.updating == True or self.downloading == True:
self.update_en = False
self.download_fw_en = False
else:
self.download_fw_en = True
if self.stm_fw.ihx != None and self.nap_fw.ihx != None:
self.update_en = True
else:
self.update_en = False
if self.updating == True:
self.erase_en = False
else:
self.erase_en = True
def _updating_changed(self):
""" Handles self.updating trait being changed. """
self._manage_enables()
def _downloading_changed(self):
""" Handles self.downloading trait being changed. """
self._manage_enables()
def _write(self, text):
"""
Stream style write function. Allows flashing debugging messages to be
routed to embedded text console.
Parameters
----------
text : string
Text to be written to screen.
"""
self.stream.write(text)
self.stream.write('\n')
self.stream.flush()
def _update_firmware_fired(self):
"""
Handle update_firmware button. Starts thread so as not to block the GUI
thread.
"""
try:
if self._firmware_update_thread.is_alive():
return
except AttributeError:
pass
self._firmware_update_thread = Thread(target=self.manage_firmware_updates)
self._firmware_update_thread.start()
def _download_firmware(self):
""" Download latest firmware from swiftnav.com. """
self._write('')
# Check that we received the index file from the website.
if self.update_dl == None:
self._write("Error: Can't download firmware files")
return
self.downloading = True
status = 'Downloading Newest Firmware...'
self.nap_fw.clear(status)
self.stm_fw.clear(status)
self._write(status)
# Get firmware files from Swift Nav's website, save to disk, and load.
try:
self._write('Downloading Newest NAP firmware')
filepath = self.update_dl.download_nap_firmware()
self._write('Saved file to %s' % filepath)
self.nap_fw.load_ihx(filepath)
except AttributeError:
self.nap_fw.clear("Error downloading firmware")
self._write("Error downloading firmware: index file not downloaded yet")
except KeyError:
self.nap_fw.clear("Error downloading firmware")
self._write("Error downloading firmware: URL not present in index")
except URLError:
self.nap_fw.clear("Error downloading firmware")
self._write("Error: Failed to download latest NAP firmware from Swift Navigation's website")
try:
self._write('Downloading Newest STM firmware')
filepath = self.update_dl.download_stm_firmware()
self._write('Saved file to %s' % filepath)
self.stm_fw.load_ihx(filepath)
except AttributeError:
self.stm_fw.clear("Error downloading firmware")
self._write("Error downloading firmware: index file not downloaded yet")
except KeyError:
self.stm_fw.clear("Error downloading firmware")
self._write("Error downloading firmware: URL not present in index")
except URLError:
self.stm_fw.clear("Error downloading firmware")
self._write("Error: Failed to download latest STM firmware from Swift Navigation's website")
self.downloading = False
def _download_firmware_fired(self):
"""
Handle download_firmware button. Starts thread so as not to block the GUI
thread.
"""
try:
if self._download_firmware_thread.is_alive():
return
except AttributeError:
pass
self._download_firmware_thread = Thread(target=self._download_firmware)
self._download_firmware_thread.start()
def compare_versions(self):
"""
To be called after latest Piksi firmware info has been received from
device, to decide if current firmware on Piksi is out of date. Starts a
thread so as not to block GUI thread.
"""
try:
if self._compare_versions_thread.is_alive():
return
except AttributeError:
pass
self._compare_versions_thread = Thread(target=self._compare_versions)
self._compare_versions_thread.start()
def _compare_versions(self):
"""
Compares version info between received firmware version / current console
and firmware / console info from website to decide if current firmware or
console is out of date. Prompt user to update if so.
"""
# Check that settings received from Piksi contain FW versions.
try:
self.piksi_stm_vers = \
self.settings['system_info']['firmware_version'].value
self.piksi_nap_vers = \
self.settings['system_info']['nap_version'].value
except KeyError:
self._write("\nError: Settings received from Piksi don't contain firmware version keys. Please contact Swift Navigation.\n")
return
# Check that we received the index file from the website.
if self.update_dl == None:
self._write("Error: No website index to use to compare versions with local firmware")
return
# Check if console is out of date and notify user if so.
if self.prompt:
local_console_version = parse_version(CONSOLE_VERSION)
remote_console_version = parse_version(self.newest_console_vers)
self.console_outdated = remote_console_version > local_console_version
if self.console_outdated:
console_outdated_prompt = \
prompt.CallbackPrompt(
title="Piksi Console Outdated",
actions=[prompt.close_button],
)
console_outdated_prompt.text = \
"Your Piksi Console is out of date and may be incompatible\n" + \
"with current firmware. We highly recommend upgrading to\n" + \
"ensure proper behavior.\n\n" + \
"Please visit http://downloads.swiftnav.com to\n" + \
"download the newest version.\n\n" + \
"Local Console Version :\n\t" + \
CONSOLE_VERSION + \
"\nNewest Console Version :\n\t" + \
self.update_dl.index['piksi_v2.3.1']['console']['version'] + "\n"
console_outdated_prompt.run()
# For timing aesthetics between windows popping up.
sleep(0.5)
# Check if firmware is out of date and notify user if so.
if self.prompt:
local_stm_version = parse_version(
self.settings['system_info']['firmware_version'].value)
remote_stm_version = parse_version(self.newest_stm_vers)
local_nap_version = parse_version(
self.settings['system_info']['nap_version'].value)
remote_nap_version = parse_version(self.newest_nap_vers)
self.fw_outdated = remote_nap_version > local_nap_version or \
remote_stm_version > local_stm_version
if self.fw_outdated:
fw_update_prompt = \
prompt.CallbackPrompt(
title='Firmware Update',
actions=[prompt.close_button]
)
fw_update_prompt.text = \
"New Piksi firmware available.\n\n" + \
"Please use the Firmware Update tab to update.\n\n" + \
"Newest STM Version :\n\t%s\n\n" % \
self.update_dl.index['piksi_v2.3.1']['stm_fw']['version'] + \
"Newest SwiftNAP Version :\n\t%s\n\n" % \
self.update_dl.index['piksi_v2.3.1']['nap_fw']['version']
fw_update_prompt.run()
def get_latest_version_info(self):
"""
Get latest firmware / console version from website. Starts thread so as not
to block the GUI thread.
"""
try:
if self._get_latest_version_info_thread.is_alive():
return
except AttributeError:
pass
self._get_latest_version_info_thread = Thread(target=self._get_latest_version_info)
self._get_latest_version_info_thread.start()
def _get_latest_version_info(self):
""" Get latest firmware / console version from website. """
try:
self.update_dl = UpdateDownloader()
except URLError:
self._write("\nError: Failed to download latest file index from Swift Navigation's website. Please visit our website to check that you're running the latest Piksi firmware and Piksi console.\n")
return
# Make sure index contains all keys we are interested in.
try:
self.newest_stm_vers = self.update_dl.index['piksi_v2.3.1']['stm_fw']['version']
self.newest_nap_vers = self.update_dl.index['piksi_v2.3.1']['nap_fw']['version']
self.newest_console_vers = self.update_dl.index['piksi_v2.3.1']['console']['version']
except KeyError:
self._write("\nError: Index downloaded from Swift Navigation's website (%s) doesn't contain all keys. Please contact Swift Navigation.\n" % INDEX_URL)
return
# Executed in GUI thread, called from Handler.
def manage_firmware_updates(self):
"""
Update Piksi firmware. Erase entire STM flash (other than bootloader)
if so directed. Flash NAP only if new firmware is available.
"""
self.updating = True
self._write('')
# Erase all of STM's flash (other than bootloader) if box is checked.
if self.erase_stm:
text = "Erasing STM"
self._write(text)
self.create_flash("STM")
sectors_to_erase = set(range(self.pk_flash.n_sectors)).difference(set(self.pk_flash.restricted_sectors))
progress_dialog = PulsableProgressDialog(len(sectors_to_erase), False)
progress_dialog.title = text
GUI.invoke_later(progress_dialog.open)
erase_count = 0
for s in sorted(sectors_to_erase):
progress_dialog.progress(erase_count)
self._write('Erasing %s sector %d' % (self.pk_flash.flash_type,s))
self.pk_flash.erase_sector(s)
erase_count += 1
self.stop_flash()
self._write("")
progress_dialog.close()
# Flash STM.
text = "Updating STM"
self._write(text)
self.create_flash("STM")
stm_n_ops = self.pk_flash.ihx_n_ops(self.stm_fw.ihx, \
erase = not self.erase_stm)
progress_dialog = PulsableProgressDialog(stm_n_ops, True)
progress_dialog.title = text
GUI.invoke_later(progress_dialog.open)
# Don't erase sectors if we've already done so above.
self.pk_flash.write_ihx(self.stm_fw.ihx, self.stream, mod_print=0x40, \
elapsed_ops_cb = progress_dialog.progress, \
erase = not self.erase_stm)
self.stop_flash()
self._write("")
progress_dialog.close()
# Flash NAP if out of date.
try:
local_nap_version = parse_version(
self.settings['system_info']['nap_version'].value)
remote_nap_version = parse_version(self.newest_nap_vers)
nap_out_of_date = local_nap_version != remote_nap_version
except KeyError:
nap_out_of_date = True
if nap_out_of_date:
text = "Updating NAP"
self._write(text)
self.create_flash("M25")
nap_n_ops = self.pk_flash.ihx_n_ops(self.nap_fw.ihx)
progress_dialog = PulsableProgressDialog(nap_n_ops, True)
progress_dialog.title = text
GUI.invoke_later(progress_dialog.open)
self.pk_flash.write_ihx(self.nap_fw.ihx, self.stream, mod_print=0x40, \
elapsed_ops_cb = progress_dialog.progress)
self.stop_flash()
self._write("")
progress_dialog.close()
# Must tell Piksi to jump to application after updating firmware.
self.link.send(SBP_MSG_BOOTLOADER_JUMP_TO_APP, '\x00')
self._write("Firmware updates finished.")
self._write("")
self.updating = False
def create_flash(self, flash_type):
"""
Create flash.Flash instance and set Piksi into bootloader mode, prompting
user to reset if necessary.
Parameter
---------
flash_type : string
Either "STM" or "M25".
"""
# Reset device if the application is running to put into bootloader mode.
self.link.send(SBP_MSG_RESET, '')
self.pk_boot = bootload.Bootloader(self.link)
self._write("Waiting for bootloader handshake message from Piksi ...")
reset_prompt = None
handshake_received = self.pk_boot.handshake(1)
# Prompt user to reset Piksi if we don't receive the handshake message
    # within a reasonable amount of time (firmware might be corrupted).
while not handshake_received:
reset_prompt = \
prompt.CallbackPrompt(
title="Please Reset Piksi",
actions=[prompt.close_button],
)
reset_prompt.text = \
"You must press the reset button on your Piksi in order\n" + \
"to update your firmware.\n\n" + \
"Please press it now.\n\n"
reset_prompt.run(block=False)
while not reset_prompt.closed and not handshake_received:
handshake_received = self.pk_boot.handshake(1)
reset_prompt.kill()
reset_prompt.wait()
self._write("received bootloader handshake message.")
self._write("Piksi Onboard Bootloader Version: " + self.pk_boot.version)
self.pk_flash = flash.Flash(self.link, flash_type, self.pk_boot.sbp_version)
def stop_flash(self):
"""
Stop Flash and Bootloader instances (removes callback from SerialLink).
"""
self.pk_flash.stop()
self.pk_boot.stop()
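# --- Illustrative sketch (not part of the original module) -------------------
# The parse_version comparison used by _compare_versions to decide whether the
# console or firmware is out of date; the version strings are made up.
def _version_check_sketch():
    local = parse_version('0.27')
    remote = parse_version('0.28')
    return remote > local   # True -> an update prompt would be shown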
|
henryhallam/piksi_tools
|
piksi_tools/console/update_view.py
|
Python
|
lgpl-3.0
| 22,270
|
[
"VisIt"
] |
e6450cb3e3f246a96bc35f620e9924a15573ef506a751a94a7b2b447ec2fb9d4
|
"""
This file is part of Giswater 3
The program is free software: you can redistribute it and/or modify it under the terms of the GNU
General Public License as published by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.
"""
# -*- coding: utf-8 -*-
import csv
import json
import os
import operator
import re
import sys
from collections import OrderedDict
from functools import partial
from sip import isdeleted
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtGui import QDoubleValidator, QIntValidator, QKeySequence, QColor
from qgis.PyQt.QtSql import QSqlQueryModel, QSqlTableModel
from qgis.PyQt.QtWidgets import QAbstractItemView, QAction, QCheckBox, QComboBox, QDateEdit, QLabel, \
QLineEdit, QTableView, QWidget, QDoubleSpinBox, QTextEdit, QPushButton, QGridLayout
from qgis.core import QgsLayoutExporter, QgsProject, QgsRectangle, QgsPointXY, QgsGeometry
from qgis.gui import QgsMapToolEmitPoint
from .document import GwDocument, global_vars
from ..shared.psector_duplicate import GwPsectorDuplicate
from ..ui.ui_manager import GwPsectorUi, GwPsectorRapportUi, GwPsectorManagerUi, GwPriceManagerUi, GwReplaceArc
from ..utils import tools_gw
from ...lib import tools_db, tools_qgis, tools_qt, tools_log, tools_os
from ..utils.snap_manager import GwSnapManager
class GwPsector:
def __init__(self):
""" Class to control 'New Psector' of toolbar 'master' """
self.iface = global_vars.iface
self.canvas = global_vars.canvas
self.schema_name = global_vars.schema_name
self.rubber_band = tools_gw.create_rubberband(self.canvas)
self.emit_point = None
self.vertex_marker = None
self.dict_to_update = {}
self.my_json = {}
def get_psector(self, psector_id=None, list_coord=None):
""" Buttons 45 and 81: New psector """
row = tools_gw.get_config_value(parameter='admin_currency', columns='value::text', table='config_param_system')
if row:
self.sys_currency = json.loads(row[0], object_pairs_hook=OrderedDict)
# Create the dialog and signals
self.dlg_plan_psector = GwPsectorUi()
tools_gw.load_settings(self.dlg_plan_psector)
# Capture the current layer to return it at the end of the operation
cur_active_layer = self.iface.activeLayer()
widget_list = self.dlg_plan_psector.findChildren(QTableView)
for widget in widget_list:
tools_qt.set_tableview_config(widget)
self.project_type = tools_gw.get_project_type()
# Get layers of every feature_type
self.list_elemets = {}
self.dict_to_update = {}
# Setting lists
self.ids = []
self.list_ids = {}
self.list_ids['arc'] = []
self.list_ids['node'] = []
self.list_ids['connec'] = []
self.list_ids['gully'] = []
self.list_ids['element'] = []
# Setting layers
self.layers = {}
self.layers['gully'] = []
self.layers['element'] = []
self.layers['arc'] = tools_gw.get_layers_from_feature_type('arc')
self.layers['node'] = tools_gw.get_layers_from_feature_type('node')
self.layers['connec'] = tools_gw.get_layers_from_feature_type('connec')
if self.project_type.upper() == 'UD':
self.layers['gully'] = tools_gw.get_layers_from_feature_type('gully')
else:
tools_qt.remove_tab(self.dlg_plan_psector.tab_feature, 'tab_gully')
self.update = False # if false: insert; if true: update
self.feature_type = "arc"
self.all_layers_checked = self._check_for_layers()
if self.all_layers_checked:
tools_qt.set_checked(self.dlg_plan_psector, self.dlg_plan_psector.chk_enable_all, True)
# Remove all previous selections
self.layers = tools_gw.remove_selection(True, layers=self.layers)
# Set icons
tools_gw.add_icon(self.dlg_plan_psector.btn_insert, "111", sub_folder="24x24")
tools_gw.add_icon(self.dlg_plan_psector.btn_delete, "112", sub_folder="24x24")
tools_gw.add_icon(self.dlg_plan_psector.btn_snapping, "137")
tools_gw.add_icon(self.dlg_plan_psector.btn_select_arc, "310", sub_folder="24x24")
tools_gw.add_icon(self.dlg_plan_psector.btn_set_to_arc, "209")
tools_gw.add_icon(self.dlg_plan_psector.btn_doc_insert, "111", sub_folder="24x24")
tools_gw.add_icon(self.dlg_plan_psector.btn_doc_delete, "112", sub_folder="24x24")
tools_gw.add_icon(self.dlg_plan_psector.btn_doc_new, "34", sub_folder="24x24")
tools_gw.add_icon(self.dlg_plan_psector.btn_open_doc, "170")
table_object = "psector"
# tab General elements
self.psector_id = self.dlg_plan_psector.findChild(QLineEdit, "psector_id")
self.ext_code = self.dlg_plan_psector.findChild(QLineEdit, "ext_code")
self.cmb_psector_type = self.dlg_plan_psector.findChild(QComboBox, "psector_type")
self.cmb_expl_id = self.dlg_plan_psector.findChild(QComboBox, "expl_id")
self.cmb_status = self.dlg_plan_psector.findChild(QComboBox, "status")
self.workcat_id = self.dlg_plan_psector.findChild(QComboBox, "workcat_id")
self.parent_id = self.dlg_plan_psector.findChild(QLineEdit, "parent_id")
scale = self.dlg_plan_psector.findChild(QLineEdit, "scale")
scale.setValidator(QDoubleValidator())
rotation = self.dlg_plan_psector.findChild(QLineEdit, "rotation")
rotation.setValidator(QDoubleValidator())
atlas_id = self.dlg_plan_psector.findChild(QLineEdit, "atlas_id")
atlas_id.setValidator(QIntValidator())
num_value = self.dlg_plan_psector.findChild(QLineEdit, "num_value")
num_value.setValidator(QIntValidator())
where = " WHERE typevalue = 'psector_type' "
self.populate_combos(self.dlg_plan_psector.psector_type, 'idval', 'id', 'plan_typevalue', where)
# Manage other_price tab variables
self.price_loaded = False
self.header_exist = None
self.load_signals = False
# Populate combo status
sql = "SELECT id, idval FROM plan_typevalue WHERE typevalue = 'value_priority'"
rows = tools_db.get_rows(sql)
tools_qt.fill_combo_values(self.dlg_plan_psector.priority, rows, 1)
# Populate combo expl_id
sql = ("SELECT expl_id, name from exploitation "
" JOIN selector_expl USING (expl_id) "
" WHERE exploitation.expl_id != 0 and cur_user = current_user")
rows = tools_db.get_rows(sql)
tools_qt.fill_combo_values(self.cmb_expl_id, rows, 1)
# Populate combo workcat_id
sql = "SELECT id as id, id as idval FROM cat_work"
rows = tools_db.get_rows(sql)
tools_qt.fill_combo_values(self.dlg_plan_psector.workcat_id, rows, add_empty=True)
# Populate combo status
sql = "SELECT id, idval FROM plan_typevalue WHERE typevalue = 'psector_status'"
rows = tools_db.get_rows(sql)
tools_qt.fill_combo_values(self.cmb_status, rows, 1)
# tab Budget
gexpenses = self.dlg_plan_psector.findChild(QLineEdit, "gexpenses")
tools_qt.double_validator(gexpenses)
vat = self.dlg_plan_psector.findChild(QLineEdit, "vat")
tools_qt.double_validator(vat)
other = self.dlg_plan_psector.findChild(QLineEdit, "other")
tools_qt.double_validator(other)
self.set_tabs_enabled(False)
self.enable_buttons(False)
# Tables
# tab Elements
self.qtbl_arc = self.dlg_plan_psector.findChild(QTableView, "tbl_psector_x_arc")
self.qtbl_arc.setSelectionBehavior(QAbstractItemView.SelectRows)
self.qtbl_node = self.dlg_plan_psector.findChild(QTableView, "tbl_psector_x_node")
self.qtbl_node.setSelectionBehavior(QAbstractItemView.SelectRows)
self.qtbl_connec = self.dlg_plan_psector.findChild(QTableView, "tbl_psector_x_connec")
self.qtbl_connec.setSelectionBehavior(QAbstractItemView.SelectRows)
if self.project_type.upper() == 'UD':
self.qtbl_gully = self.dlg_plan_psector.findChild(QTableView, "tbl_psector_x_gully")
self.qtbl_gully.setSelectionBehavior(QAbstractItemView.SelectRows)
all_rows = self.dlg_plan_psector.findChild(QTableView, "all_rows")
all_rows.setSelectionBehavior(QAbstractItemView.SelectRows)
all_rows.horizontalHeader().setSectionResizeMode(3)
# If a row is selected from mg_psector_management (button 46 or button 81), psector_id may arrive as a
# boolean: True means no real value was passed, i.e. a new psector. We convert it to 0 because no existing
# id will ever be 0; any value other than 0 means the psector already exists and we must do an update.
if isinstance(psector_id, bool):
psector_id = 0
self.delete_psector_selector('selector_plan_psector')
# tab 'Document'
self.doc_id = self.dlg_plan_psector.findChild(QLineEdit, "doc_id")
self.tbl_document = self.dlg_plan_psector.findChild(QTableView, "tbl_document")
if psector_id is not None:
self.set_tabs_enabled(True)
self.enable_buttons(True)
self.dlg_plan_psector.name.setEnabled(True)
self.fill_table(self.dlg_plan_psector, self.qtbl_arc, "plan_psector_x_arc",
set_edit_triggers=QTableView.DoubleClicked)
tools_gw.set_tablemodel_config(self.dlg_plan_psector, self.qtbl_arc, "plan_psector_x_arc")
self.fill_table(self.dlg_plan_psector, self.qtbl_node, "plan_psector_x_node",
set_edit_triggers=QTableView.DoubleClicked)
tools_gw.set_tablemodel_config(self.dlg_plan_psector, self.qtbl_node, "plan_psector_x_node")
self.fill_table(self.dlg_plan_psector, self.qtbl_connec, "plan_psector_x_connec",
set_edit_triggers=QTableView.DoubleClicked)
tools_gw.set_tablemodel_config(self.dlg_plan_psector, self.qtbl_connec, "plan_psector_x_connec")
if self.project_type.upper() == 'UD':
self.fill_table(self.dlg_plan_psector, self.qtbl_gully, "plan_psector_x_gully",
set_edit_triggers=QTableView.DoubleClicked)
tools_gw.set_tablemodel_config(self.dlg_plan_psector, self.qtbl_gully, "plan_psector_x_gully")
sql = (f"SELECT psector_id, name, psector_type, expl_id, priority, descript, text1, text2, "
f"text3, text4, text5, text6, num_value, observ, atlas_id, scale, rotation, active, ext_code, status, workcat_id, parent_id"
f" FROM plan_psector "
f"WHERE psector_id = {psector_id}")
row = tools_db.get_row(sql)
if not row:
return
self.dlg_plan_psector.setWindowTitle(f"Plan psector - {row['name']} ({row['psector_id']})")
self.psector_id.setText(str(row['psector_id']))
if str(row['ext_code']) != 'None':
self.ext_code.setText(str(row['ext_code']))
sql = (f"SELECT id, idval FROM plan_typevalue WHERE typevalue = 'psector_type' AND "
f"id = '{row['psector_type']}'")
result = tools_db.get_row(sql)
tools_qt.set_combo_value(self.cmb_psector_type, str(result['idval']), 1)
sql = (f"SELECT name FROM exploitation "
f"WHERE expl_id = {row['expl_id']}")
result = tools_db.get_row(sql)
tools_qt.set_combo_value(self.cmb_expl_id, str(result['name']), 1)
tools_qt.set_combo_value(self.cmb_status, str(row['status']), 0)
# Check if expl_id already exists in expl_selector
sql = ("SELECT DISTINCT(expl_id, cur_user)"
" FROM selector_expl"
f" WHERE expl_id = '{row['expl_id']}' AND cur_user = current_user")
exist = tools_db.get_row(sql)
if exist is None:
sql = ("INSERT INTO selector_expl (expl_id, cur_user) "
f" VALUES ({str(row['expl_id'])}, current_user)"
f" ON CONFLICT DO NOTHING;")
tools_db.execute_sql(sql)
msg = "Your exploitation selector has been updated"
tools_qgis.show_warning(msg, 1)
workcat_id = row['workcat_id']
tools_qt.set_combo_value(self.workcat_id, workcat_id, 0)
tools_qt.set_checked(self.dlg_plan_psector, "active", row['active'])
self.fill_widget(self.dlg_plan_psector, "name", row)
self.fill_widget(self.dlg_plan_psector, "descript", row)
tools_qt.set_combo_value(self.dlg_plan_psector.priority, str(row["priority"]), 0)
self.fill_widget(self.dlg_plan_psector, "text1", row)
self.fill_widget(self.dlg_plan_psector, "text2", row)
self.fill_widget(self.dlg_plan_psector, "text3", row)
self.fill_widget(self.dlg_plan_psector, "text4", row)
self.fill_widget(self.dlg_plan_psector, "text5", row)
self.fill_widget(self.dlg_plan_psector, "text6", row)
self.fill_widget(self.dlg_plan_psector, "num_value", row)
self.fill_widget(self.dlg_plan_psector, "observ", row)
self.fill_widget(self.dlg_plan_psector, "atlas_id", row)
self.fill_widget(self.dlg_plan_psector, "scale", row)
self.fill_widget(self.dlg_plan_psector, "rotation", row)
self.fill_widget(self.dlg_plan_psector, "parent_id", row)
# Fill tables tbl_arc_plan, tbl_node_plan, tbl_v_plan/om_other_x_psector with selected filter
expr = " psector_id = " + str(psector_id)
self.qtbl_arc.model().setFilter(expr)
self.qtbl_arc.model().select()
self.qtbl_arc.clicked.connect(
partial(tools_qgis.hilight_feature_by_id, self.qtbl_arc, "v_edit_arc", "arc_id", self.rubber_band, 5))
expr = " psector_id = " + str(psector_id)
self.qtbl_node.model().setFilter(expr)
self.qtbl_node.model().select()
self.qtbl_node.clicked.connect(partial(
tools_qgis.hilight_feature_by_id, self.qtbl_node, "v_edit_node", "node_id", self.rubber_band, 10))
expr = " psector_id = " + str(psector_id)
self.qtbl_connec.model().setFilter(expr)
self.qtbl_connec.model().select()
self.qtbl_connec.clicked.connect(partial(
tools_qgis.hilight_feature_by_id, self.qtbl_connec, "v_edit_connec", "connec_id", self.rubber_band, 10))
self.qtbl_connec.clicked.connect(partial(self._enable_set_to_arc))
if self.project_type.upper() == 'UD':
expr = " psector_id = " + str(psector_id)
self.qtbl_gully.model().setFilter(expr)
self.qtbl_gully.model().select()
self.qtbl_gully.clicked.connect(partial(
tools_qgis.hilight_feature_by_id, self.qtbl_gully, "v_edit_gully", "gully_id", self.rubber_band, 10))
self.qtbl_gully.clicked.connect(partial(self._enable_set_to_arc))
self.populate_budget(self.dlg_plan_psector, psector_id)
self.update = True
psector_id_aux = tools_qt.get_text(self.dlg_plan_psector, self.dlg_plan_psector.psector_id)
if psector_id_aux != 'null':
sql = (f"DELETE FROM selector_plan_psector "
f"WHERE cur_user = current_user")
tools_db.execute_sql(sql)
self.insert_psector_selector('selector_plan_psector', 'psector_id', psector_id_aux)
sql = (f"DELETE FROM selector_psector "
f"WHERE cur_user = current_user AND psector_id = '{psector_id_aux}'")
tools_db.execute_sql(sql)
self.insert_psector_selector('selector_psector', 'psector_id', psector_id_aux)
self.dlg_plan_psector.rejected.connect(self.rubber_band.reset)
if not list_coord:
sql = f"SELECT st_astext(st_envelope(the_geom)) FROM v_edit_plan_psector WHERE psector_id = {psector_id}"
row = tools_db.get_row(sql)
if row[0]:
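# row[0] is the envelope as WKT, e.g. 'POLYGON((x1 y1,x2 y2,...))'; the regex keeps only the
# coordinate list between the double parentheses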
list_coord = re.search(r'\(\((.*)\)\)', str(row[0]))
else:
msg = "Empty coordinate list"
tools_qgis.show_warning(msg)
return
# Get canvas extend in order to create a QgsRectangle
ext = self.canvas.extent()
start_point = QgsPointXY(ext.xMinimum(), ext.yMaximum())
end_point = QgsPointXY(ext.xMaximum(), ext.yMinimum())
canvas_rec = QgsRectangle(start_point, end_point)
canvas_width = ext.xMaximum() - ext.xMinimum()
canvas_height = ext.yMaximum() - ext.yMinimum()
points = tools_qgis.get_geometry_vertex(list_coord)
polygon = QgsGeometry.fromPolygonXY([points])
psector_rec = polygon.boundingBox()
tools_gw.reset_rubberband(self.rubber_band)
rb_duration = tools_gw.get_config_parser("system", "show_psector_ruberband_duration", "user", "init", prefix=False)
if rb_duration == "0": rb_duration = None
tools_qgis.draw_polygon(points, self.rubber_band, duration_time=rb_duration)
# Manage Zoom to rectangle
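# Zoom when the psector polygon lies outside the current canvas extent, or when it covers less than
# 10% of the canvas width or height, so it is always clearly visible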
if not canvas_rec.intersects(psector_rec) or (psector_rec.width() < (canvas_width * 10) / 100 or psector_rec.height() < (canvas_height * 10) / 100):
max_x, max_y, min_x, min_y = tools_qgis.get_max_rectangle_from_coords(list_coord)
tools_qgis.zoom_to_rectangle(max_x, max_y, min_x, min_y, margin=50)
filter_ = "psector_id = '" + str(psector_id) + "'"
message = tools_qt.fill_table(self.tbl_document, f"v_ui_doc_x_psector", filter_)
if message:
tools_qgis.show_warning(message)
self.tbl_document.doubleClicked.connect(partial(tools_qt.document_open, self.tbl_document, 'path'))
self._connect_editing_finished()
else:
# Set psector_status vdefault
sql = "SELECT id, idval FROM plan_typevalue WHERE typevalue = 'psector_status' and id = '2'"
result = tools_db.get_row(sql)
tools_qt.set_combo_value(self.cmb_status, str(result[1]), 1)
# Set check active True as default for new psectors
tools_qt.set_checked(self.dlg_plan_psector, "active", True)
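# btn_set_to_arc is only meaningful on the connec/gully feature tabs (indexes 2 and 3 in this dialog)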
if self.dlg_plan_psector.tab_feature.currentIndex() not in (2, 3):
self.dlg_plan_psector.btn_set_to_arc.setEnabled(False)
sql = "SELECT state_id FROM selector_state WHERE cur_user = current_user"
rows = tools_db.get_rows(sql)
self.all_states = rows
self.delete_psector_selector('selector_state')
self.insert_psector_selector('selector_state', 'state_id', '1')
# Exclude the layer v_edit_element for adding relations
self.excluded_layers = ['v_edit_element']
# Set signals
excluded_layers = ["v_edit_arc", "v_edit_node", "v_edit_connec", "v_edit_element", "v_edit_gully",
"v_edit_element"]
layers_visibility = tools_gw.get_parent_layers_visibility()
self.dlg_plan_psector.rejected.connect(partial(tools_gw.restore_parent_layers_visibility, layers_visibility))
self.dlg_plan_psector.btn_accept.clicked.connect(partial(self._manage_accept, psector_id))
self.dlg_plan_psector.btn_accept.clicked.connect(
partial(self.insert_or_update_new_psector, 'v_edit_plan_psector', True))
self.dlg_plan_psector.tabWidget.currentChanged.connect(partial(self.check_tab_position))
self.dlg_plan_psector.btn_cancel.clicked.connect(partial(self.close_psector, cur_active_layer))
if hasattr(self, 'dlg_psector_mng'):
self.dlg_plan_psector.rejected.connect(partial(self.fill_table, self.dlg_psector_mng, self.qtbl_psm, 'v_ui_plan_psector'))
self.dlg_plan_psector.rejected.connect(partial(self.close_psector, cur_active_layer))
self.dlg_plan_psector.chk_enable_all.stateChanged.connect(partial(self._enable_layers))
self.lbl_descript = self.dlg_plan_psector.findChild(QLabel, "lbl_descript")
self.dlg_plan_psector.all_rows.clicked.connect(partial(self.show_description))
self.dlg_plan_psector.btn_delete.setShortcut(QKeySequence(Qt.Key_Delete))
self.dlg_plan_psector.btn_insert.clicked.connect(
partial(tools_gw.insert_feature, self, self.dlg_plan_psector, table_object, True, True, None, None))
self.dlg_plan_psector.btn_delete.clicked.connect(
partial(tools_gw.delete_records, self, self.dlg_plan_psector, table_object, True, None, None))
self.dlg_plan_psector.btn_snapping.clicked.connect(
partial(tools_gw.selection_init, self, self.dlg_plan_psector, table_object, True))
self.dlg_plan_psector.btn_select_arc.clicked.connect(
partial(self._replace_arc))
self.dlg_plan_psector.btn_set_to_arc.clicked.connect(
partial(self._set_to_arc))
self.dlg_plan_psector.btn_rapports.clicked.connect(partial(self.open_dlg_rapports))
self.dlg_plan_psector.tab_feature.currentChanged.connect(
partial(tools_gw.get_signal_change_tab, self.dlg_plan_psector, excluded_layers))
self.dlg_plan_psector.tab_feature.currentChanged.connect(
partial(tools_qgis.disconnect_snapping, False, self.emit_point, self.vertex_marker))
self.dlg_plan_psector.tab_feature.currentChanged.connect(
partial(self._enable_arc_replace))
self.dlg_plan_psector.tab_feature.currentChanged.connect(
partial(self._enable_set_to_arc))
self.dlg_plan_psector.name.textChanged.connect(partial(self.enable_relation_tab, 'plan_psector'))
viewname = 'v_edit_plan_psector_x_other'
self.dlg_plan_psector.txt_name.textChanged.connect(
partial(self.query_like_widget_text, self.dlg_plan_psector, self.dlg_plan_psector.txt_name,
self.dlg_plan_psector.all_rows, 'v_price_compost', viewname, "id"))
self.dlg_plan_psector.gexpenses.returnPressed.connect(partial(self.calculate_percents, 'plan_psector', 'gexpenses'))
self.dlg_plan_psector.vat.returnPressed.connect(partial(self.calculate_percents, 'plan_psector', 'vat'))
self.dlg_plan_psector.other.returnPressed.connect(partial(self.calculate_percents, 'plan_psector', 'other'))
self.dlg_plan_psector.btn_doc_insert.clicked.connect(self.document_insert)
self.dlg_plan_psector.btn_doc_delete.clicked.connect(partial(tools_qt.delete_rows_tableview, self.tbl_document))
self.dlg_plan_psector.btn_doc_new.clicked.connect(partial(self.manage_document, self.tbl_document))
self.dlg_plan_psector.btn_open_doc.clicked.connect(partial(tools_qt.document_open, self.tbl_document, 'path'))
self.cmb_status.currentIndexChanged.connect(partial(self.show_status_warning))
# Create list for completer QLineEdit
sql = "SELECT DISTINCT(id) FROM v_ui_document ORDER BY id"
list_items = tools_db.create_list_for_completer(sql)
tools_qt.set_completer_lineedit(self.dlg_plan_psector.doc_id, list_items)
if psector_id is not None:
sql = (f"SELECT other, gexpenses, vat "
f"FROM plan_psector "
f"WHERE psector_id = '{psector_id}'")
row = tools_db.get_row(sql)
other = 0
gexpenses = 0
vat = 0
if row:
other = float(row[0]) if row[0] is not None else 0
gexpenses = float(row[1]) if row[1] is not None else 0
vat = float(row[2]) if row[2] is not None else 0
tools_qt.set_widget_text(self.dlg_plan_psector, self.dlg_plan_psector.other, other)
tools_qt.set_widget_text(self.dlg_plan_psector, self.dlg_plan_psector.gexpenses, gexpenses)
tools_qt.set_widget_text(self.dlg_plan_psector, self.dlg_plan_psector.vat, vat)
tools_qt.set_widget_text(self.dlg_plan_psector, 'cur_total_node', self.sys_currency['symbol'])
tools_qt.set_widget_text(self.dlg_plan_psector, 'cur_total_arc', self.sys_currency['symbol'])
tools_qt.set_widget_text(self.dlg_plan_psector, 'cur_total_other', self.sys_currency['symbol'])
tools_qt.set_widget_text(self.dlg_plan_psector, 'cur_pem', self.sys_currency['symbol'])
tools_qt.set_widget_text(self.dlg_plan_psector, 'cur_pec_pem', self.sys_currency['symbol'])
tools_qt.set_widget_text(self.dlg_plan_psector, 'cur_pec', self.sys_currency['symbol'])
tools_qt.set_widget_text(self.dlg_plan_psector, 'cur_pecvat_pem', self.sys_currency['symbol'])
tools_qt.set_widget_text(self.dlg_plan_psector, 'cur_pec_vat', self.sys_currency['symbol'])
tools_qt.set_widget_text(self.dlg_plan_psector, 'cur_pca_pecvat', self.sys_currency['symbol'])
tools_qt.set_widget_text(self.dlg_plan_psector, 'cur_pca', self.sys_currency['symbol'])
# Adding auto-completion to a QLineEdit for default feature
viewname = "v_edit_" + self.feature_type
tools_gw.set_completer_widget(viewname, self.dlg_plan_psector.feature_id, str(self.feature_type) + "_id")
# Set default tab 'arc'
self.dlg_plan_psector.tab_feature.setCurrentIndex(0)
tools_gw.get_signal_change_tab(self.dlg_plan_psector, excluded_layers)
widget_to_ignore = ('btn_accept', 'btn_cancel', 'btn_rapports', 'btn_open_doc')
restriction = ('role_basic', 'role_om', 'role_epa', 'role_om')
self.set_restriction_by_role(self.dlg_plan_psector, widget_to_ignore, restriction)
# Open dialog
tools_gw.open_dialog(self.dlg_plan_psector, dlg_name='plan_psector')
def fill_widget(self, dialog, widget, row):
if isinstance(widget, str):
widget = dialog.findChild(QWidget, widget)
if not widget:
return
key = widget.objectName()
if key in row:
if row[key] is not None:
value = str(row[key])
if type(widget) is QLineEdit or type(widget) is QTextEdit:
if value == 'None':
value = ""
widget.setText(value)
else:
widget.setText("")
else:
widget.setText("")
def update_total(self, dialog):
""" Show description of product plan/om _psector as label """
total_result = 0
widgets = dialog.tab_other_prices.findChildren(QLabel)
symbol = tools_gw.get_config_value(parameter="admin_currency", columns="value::json->> 'symbol'",
table="config_param_system")[0]
for widget in widgets:
if 'widget_total' in widget.objectName():
total_result = float(total_result) + float(widget.text().replace(symbol, '').strip())
tools_qt.set_widget_text(dialog, 'lbl_total_count', f'{"{:.2f}".format(total_result)} {symbol}')
def open_dlg_rapports(self):
default_file_name = tools_qt.get_text(self.dlg_plan_psector, self.dlg_plan_psector.name)
self.dlg_psector_rapport = GwPsectorRapportUi()
tools_gw.load_settings(self.dlg_psector_rapport)
tools_qt.set_widget_text(self.dlg_psector_rapport, 'txt_composer_path', default_file_name + " comp.pdf")
tools_qt.set_widget_text(self.dlg_psector_rapport, 'txt_csv_detail_path', default_file_name + " detail.csv")
tools_qt.set_widget_text(self.dlg_psector_rapport, 'txt_csv_path', default_file_name + ".csv")
self.dlg_psector_rapport.btn_cancel.clicked.connect(partial(tools_gw.close_dialog, self.dlg_psector_rapport))
self.dlg_psector_rapport.btn_ok.clicked.connect(partial(self.generate_rapports))
self.dlg_psector_rapport.btn_path.clicked.connect(
partial(tools_qt.get_folder_path, self.dlg_psector_rapport, self.dlg_psector_rapport.txt_path))
value = tools_gw.get_config_parser('btn_psector', 'psector_rapport_path', "user", "session")
tools_qt.set_widget_text(self.dlg_psector_rapport, self.dlg_psector_rapport.txt_path, value)
value = tools_gw.get_config_parser('btn_psector', 'psector_rapport_chk_composer', "user", "session")
tools_qt.set_checked(self.dlg_psector_rapport, self.dlg_psector_rapport.chk_composer, value)
value = tools_gw.get_config_parser('btn_psector', 'psector_rapport_chk_csv_detail', "user", "session")
tools_qt.set_checked(self.dlg_psector_rapport, self.dlg_psector_rapport.chk_csv_detail, value)
value = tools_gw.get_config_parser('btn_psector', 'psector_rapport_chk_csv', "user", "session")
tools_qt.set_checked(self.dlg_psector_rapport, self.dlg_psector_rapport.chk_csv, value)
if tools_qt.get_text(self.dlg_psector_rapport, self.dlg_psector_rapport.txt_path) == 'null':
if 'nt' in sys.builtin_module_names:
plugin_dir = os.path.expanduser("~\Documents")
else:
plugin_dir = os.path.expanduser("~")
tools_qt.set_widget_text(self.dlg_psector_rapport, self.dlg_psector_rapport.txt_path, plugin_dir)
self.populate_cmb_templates()
# Open dialog
tools_gw.open_dialog(self.dlg_psector_rapport, dlg_name='psector_rapport')
def populate_cmb_templates(self):
index = 0
records = []
layout_manager = QgsProject.instance().layoutManager()
layouts = layout_manager.layouts() # QgsPrintLayout
for layout in layouts:
elem = [index, layout.name()]
records.append(elem)
index = index + 1
if records in ([], None):
# If no composer configured, disable composer pdf file widgets
self.dlg_psector_rapport.chk_composer.setEnabled(False)
self.dlg_psector_rapport.chk_composer.setChecked(False)
self.dlg_psector_rapport.cmb_templates.setEnabled(False)
self.dlg_psector_rapport.txt_composer_path.setEnabled(False)
self.dlg_psector_rapport.lbl_composer_disabled.setText('No composers defined.')
self.dlg_psector_rapport.lbl_composer_disabled.setStyleSheet('color: red')
return
else:
# If composer configured, enable composer pdf file widgets
self.dlg_psector_rapport.chk_composer.setEnabled(True)
self.dlg_psector_rapport.cmb_templates.setEnabled(True)
self.dlg_psector_rapport.txt_composer_path.setEnabled(True)
self.dlg_psector_rapport.lbl_composer_disabled.setText('')
tools_qt.fill_combo_values(self.dlg_psector_rapport.cmb_templates, records, 1)
row = tools_gw.get_config_value(f'composer_plan_vdefault')
if row:
tools_qt.set_combo_value(self.dlg_psector_rapport.cmb_templates, row[0], 1)
def generate_rapports(self):
txt_path = f"{tools_qt.get_text(self.dlg_psector_rapport, 'txt_path')}"
tools_gw.set_config_parser('btn_psector', 'psector_rapport_path', txt_path)
chk_composer = f"{tools_qt.is_checked(self.dlg_psector_rapport, 'chk_composer')}"
tools_gw.set_config_parser('btn_psector', 'psector_rapport_chk_composer', chk_composer)
chk_csv_detail = f"{tools_qt.is_checked(self.dlg_psector_rapport, 'chk_csv_detail')}"
tools_gw.set_config_parser('btn_psector', 'psector_rapport_chk_csv_detail', chk_csv_detail)
chk_csv = f"{tools_qt.is_checked(self.dlg_psector_rapport, 'chk_csv')}"
tools_gw.set_config_parser('btn_psector', 'psector_rapport_chk_csv', chk_csv)
folder_path = tools_qt.get_text(self.dlg_psector_rapport, self.dlg_psector_rapport.txt_path)
if folder_path is None or folder_path == 'null' or not os.path.exists(folder_path):
tools_qt.get_folder_path(self.dlg_psector_rapport.txt_path)
folder_path = tools_qt.get_text(self.dlg_psector_rapport, self.dlg_psector_rapport.txt_path)
# Generate Composer
if tools_qt.is_checked(self.dlg_psector_rapport, self.dlg_psector_rapport.chk_composer):
file_name = tools_qt.get_text(self.dlg_psector_rapport, 'txt_composer_path')
if file_name is None or file_name == 'null':
message = "File name is required"
tools_qgis.show_warning(message)
if not file_name.endswith('.pdf'):
file_name += '.pdf'
path = folder_path + '/' + file_name
self.generate_composer(path)
# Generate csv detail
if tools_qt.is_checked(self.dlg_psector_rapport, self.dlg_psector_rapport.chk_csv_detail):
file_name = tools_qt.get_text(self.dlg_psector_rapport, 'txt_csv_detail_path')
viewname = "v_plan_current_psector_budget_detail"
if file_name is None or file_name == 'null':
message = "Price list csv file name is required"
tools_qgis.show_warning(message)
if not file_name.endswith('.csv'):
file_name += '.csv'
path = folder_path + '/' + file_name
self.generate_csv(path, viewname)
# Generate csv
if tools_qt.is_checked(self.dlg_psector_rapport, self.dlg_psector_rapport.chk_csv):
file_name = tools_qt.get_text(self.dlg_psector_rapport, 'txt_csv_path')
viewname = "v_plan_current_psector_budget"
if file_name is None or file_name == 'null':
message = "Price list csv file name is required"
tools_qgis.show_warning(message)
if not file_name.endswith('.csv'):
file_name += '.csv'
path = folder_path + '/' + file_name
self.generate_csv(path, viewname)
tools_gw.close_dialog(self.dlg_psector_rapport)
def generate_composer(self, path):
# Get layout manager object
layout_manager = QgsProject.instance().layoutManager()
# Get our layout
layout_name = tools_qt.get_text(self.dlg_psector_rapport, self.dlg_psector_rapport.cmb_templates)
layout = layout_manager.layoutByName(layout_name)
# Since QGIS 3.4 we can't call .setAtlasMode(QgsComposition.PreviewAtlas), so we need to force the
# opening of the layout designer, trigger the mActionAtlasPreview action and close the layout
# designer again (in the finally clause)
designer = self.iface.openLayoutDesigner(layout)
layout_view = designer.view()
designer_window = layout_view.window()
action = designer_window.findChild(QAction, 'mActionAtlasPreview')
action.trigger()
# Export to PDF file
if layout:
try:
exporter = QgsLayoutExporter(layout)
exporter.exportToPdf(path, QgsLayoutExporter.PdfExportSettings())
if os.path.exists(path):
message = "Document PDF created in"
tools_qgis.show_info(message, parameter=path)
status, message = tools_os.open_file(path)
if status is False and message is not None:
tools_qgis.show_warning(message, parameter=path)
else:
message = "Cannot create file, check if its open"
tools_qgis.show_warning(message, parameter=path)
except Exception as e:
tools_log.log_warning(str(e))
msg = "Cannot create file, check if selected composer is the correct composer"
tools_qgis.show_warning(msg, parameter=path)
finally:
designer_window.close()
else:
tools_qgis.show_warning("Layout not found", parameter=layout_name)
def generate_csv(self, path, viewname):
# Get columns name in order of the table
sql = (f"SELECT column_name FROM information_schema.columns"
f" WHERE table_name = '{viewname}'"
f" AND table_schema = '" + self.schema_name.replace('"', '') + "'"
f" ORDER BY ordinal_position")
rows = tools_db.get_rows(sql)
columns = []
if not rows or rows is None or rows == '':
message = "CSV not generated. Check fields from table or view"
tools_qgis.show_warning(message, parameter=viewname)
return
for i in range(0, len(rows)):
column_name = rows[i]
columns.append(str(column_name[0]))
psector_id = f"{tools_qt.get_text(self.dlg_plan_psector, self.dlg_plan_psector.psector_id)}"
sql = f"SELECT * FROM {viewname} WHERE psector_id = '{psector_id}'"
rows = tools_db.get_rows(sql)
all_rows = []
all_rows.append(columns)
if not rows or rows is None or rows == '':
return
for i in rows:
all_rows.append(i)
with open(path, "w") as output:
writer = csv.writer(output, lineterminator='\n')
writer.writerows(all_rows)
def populate_budget(self, dialog, psector_id):
sql = (f"SELECT DISTINCT(column_name) FROM information_schema.columns"
f" WHERE table_name = 'v_plan_current_psector'")
rows = tools_db.get_rows(sql)
columns = []
for i in range(0, len(rows)):
column_name = rows[i]
columns.append(str(column_name[0]))
sql = (f"SELECT total_arc, total_node, total_other, pem, pec, pec_vat, gexpenses, vat, other, pca"
f" FROM v_plan_current_psector"
f" WHERE psector_id = '{psector_id}'")
row = tools_db.get_row(sql)
if row:
for column_name in columns:
if column_name in row:
if row[column_name] is not None:
tools_qt.set_widget_text(dialog, column_name, f"{row[column_name]:.02f}")
else:
tools_qt.set_widget_text(dialog, column_name, f"{0:.02f}")
self.calc_pec_pem(dialog)
self.calc_pecvat_pec(dialog)
self.calc_pca_pecvat(dialog)
def calc_pec_pem(self, dialog):
if tools_qt.get_text(dialog, 'pec') not in ('null', None):
pec = float(tools_qt.get_text(dialog, 'pec'))
else:
pec = 0
if tools_qt.get_text(dialog, 'pem') not in ('null', None):
pem = float(tools_qt.get_text(dialog, 'pem'))
else:
pem = 0
res = f"{round(pec - pem, 2):.02f}"
tools_qt.set_widget_text(dialog, 'pec_pem', res)
def calc_pecvat_pec(self, dialog):
if tools_qt.get_text(dialog, 'pec_vat') not in ('null', None):
pec_vat = float(tools_qt.get_text(dialog, 'pec_vat'))
else:
pec_vat = 0
if tools_qt.get_text(dialog, 'pec') not in ('null', None):
pec = float(tools_qt.get_text(dialog, 'pec'))
else:
pec = 0
res = f"{round(pec_vat - pec, 2):.02f}"
tools_qt.set_widget_text(dialog, 'pecvat_pem', res)
def calc_pca_pecvat(self, dialog):
if tools_qt.get_text(dialog, 'pca') not in ('null', None):
pca = float(tools_qt.get_text(dialog, 'pca'))
else:
pca = 0
if tools_qt.get_text(dialog, 'pec_vat') not in ('null', None):
pec_vat = float(tools_qt.get_text(dialog, 'pec_vat'))
else:
pec_vat = 0
res = f"{round(pca - pec_vat, 2):.02f}"
tools_qt.set_widget_text(dialog, 'pca_pecvat', res)
def calculate_percents(self, tablename, field):
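# Persist the edited percentage field (gexpenses, vat or other) for the current psector and refresh the budget totals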
field_value = f"{tools_qt.get_text(self.dlg_plan_psector, field)}"
psector_id = tools_qt.get_text(self.dlg_plan_psector, "psector_id")
sql = f"UPDATE {tablename} SET {field} = '{field_value}' WHERE psector_id = '{psector_id}'"
tools_db.execute_sql(sql)
self.populate_budget(self.dlg_plan_psector, psector_id)
def show_description(self):
""" Show description of product plan/om _psector as label"""
selected_list = self.dlg_plan_psector.all_rows.selectionModel().selectedRows()
des = ""
for i in range(0, len(selected_list)):
row = selected_list[i].row()
des = self.dlg_plan_psector.all_rows.model().record(row).value('descript')
tools_qt.set_widget_text(self.dlg_plan_psector, self.lbl_descript, des)
def set_tabs_enabled(self, enabled):
self.dlg_plan_psector.tabWidget.setTabEnabled(1, enabled)
self.dlg_plan_psector.tabWidget.setTabEnabled(2, enabled)
self.dlg_plan_psector.tabWidget.setTabEnabled(3, enabled)
self.dlg_plan_psector.tabWidget.setTabEnabled(4, enabled)
self.dlg_plan_psector.tabWidget.setTabEnabled(5, enabled)
def enable_buttons(self, enabled):
self.dlg_plan_psector.btn_insert.setEnabled(enabled)
self.dlg_plan_psector.btn_delete.setEnabled(enabled)
self.dlg_plan_psector.btn_snapping.setEnabled(enabled)
widget_to_ignore = ('btn_accept', 'btn_cancel', 'btn_rapports', 'btn_open_doc')
restriction = ('role_basic', 'role_om', 'role_epa', 'role_om')
self.set_restriction_by_role(self.dlg_plan_psector, widget_to_ignore, restriction)
def enable_relation_tab(self, tablename):
psector_name = f"{tools_qt.get_text(self.dlg_plan_psector, self.dlg_plan_psector.name)}"
sql = f"SELECT name FROM {tablename} WHERE LOWER(name) = '{psector_name}'"
rows = tools_db.get_rows(sql)
if not rows:
if self.dlg_plan_psector.name.text() != '':
self.set_tabs_enabled(True)
else:
self.set_tabs_enabled(False)
else:
self.set_tabs_enabled(False)
def delete_psector_selector(self, tablename):
sql = (f"DELETE FROM {tablename}"
f" WHERE cur_user = current_user;")
tools_db.execute_sql(sql)
def insert_psector_selector(self, tablename, field, value):
sql = (f"INSERT INTO {tablename} ({field}, cur_user) "
f"VALUES ('{value}', current_user);")
tools_db.execute_sql(sql)
def check_tab_position(self):
self.dlg_plan_psector.name.setEnabled(False)
self.insert_or_update_new_psector(tablename=f'v_edit_plan_psector', close_dlg=False)
self.update = True
psector_id = tools_qt.get_text(self.dlg_plan_psector, 'psector_id')
if self.dlg_plan_psector.tabWidget.currentIndex() == 3:
tableleft = "v_price_compost"
tableright = f"v_edit_plan_psector_x_other"
if not self.load_signals:
self.price_selector(self.dlg_plan_psector, tableleft, tableright)
elif self.dlg_plan_psector.tabWidget.currentIndex() == 4:
self.populate_budget(self.dlg_plan_psector, psector_id)
elif self.dlg_plan_psector.tabWidget.currentIndex() == 5:
expr = f"psector_id = '{psector_id}'"
message = tools_qt.fill_table(self.tbl_document, f"{self.schema_name}.v_ui_doc_x_psector", expr)
tools_gw.set_tablemodel_config(self.dlg_plan_psector, self.tbl_document, "v_ui_doc_x_psector")
if message:
tools_qgis.show_warning(message)
sql = f"SELECT other, gexpenses, vat FROM plan_psector WHERE psector_id = '{psector_id}'"
row = tools_db.get_row(sql)
if row:
tools_qt.set_widget_text(self.dlg_plan_psector, self.dlg_plan_psector.other, row[0])
tools_qt.set_widget_text(self.dlg_plan_psector, self.dlg_plan_psector.gexpenses, row[1])
tools_qt.set_widget_text(self.dlg_plan_psector, self.dlg_plan_psector.vat, row[2])
widget_to_ignore = ('btn_accept', 'btn_cancel', 'btn_rapports', 'btn_open_doc')
restriction = ('role_basic', 'role_om', 'role_epa', 'role_om')
self.set_restriction_by_role(self.dlg_plan_psector, widget_to_ignore, restriction)
def set_restriction_by_role(self, dialog, widget_to_ignore, restriction):
"""
Set all widget enabled(False) or readOnly(True) except those on the tuple
:param dialog:
:param widget_to_ignore: tuple = ('widgetname1', 'widgetname2', 'widgetname3', ...)
:param restriction: roles that do not have access. tuple = ('role1', 'role1', 'role1', ...)
"""
role = global_vars.project_vars['project_role']
role = tools_gw.get_role_permissions(role)
if role in restriction:
widget_list = dialog.findChildren(QWidget)
for widget in widget_list:
if widget.objectName() in widget_to_ignore:
continue
# Set editable/readonly
if type(widget) in (QLineEdit, QDoubleSpinBox, QTextEdit):
widget.setReadOnly(True)
widget.setStyleSheet("QWidget {background: rgb(242, 242, 242);color: rgb(100, 100, 100)}")
elif type(widget) in (QComboBox, QCheckBox, QTableView, QPushButton):
widget.setEnabled(False)
def populate_combos(self, combo, field_name, field_id, table_name, where=None):
sql = f"SELECT DISTINCT({field_id}), {field_name} FROM {table_name} "
if where:
sql += where
sql += f" ORDER BY {field_name}"
rows = tools_db.get_rows(sql)
if not rows:
return
combo.blockSignals(True)
combo.clear()
records_sorted = sorted(rows, key=operator.itemgetter(1))
for record in records_sorted:
combo.addItem(record[1], record)
combo.blockSignals(False)
def reload_states_selector(self):
self.delete_psector_selector('selector_state')
try:
for x in range(0, len(self.all_states)):
sql = (f"INSERT INTO selector_state (state_id, cur_user)"
f" VALUES ('{self.all_states[x][0]}', current_user)")
tools_db.execute_sql(sql)
except TypeError:
# Control if self.all_states is None (object of type 'NoneType' has no len())
pass
def close_psector(self, cur_active_layer=None):
""" Close dialog and disconnect snapping """
tools_gw.reset_rubberband(self.rubber_band)
self._clear_my_json()
self.reload_states_selector()
if cur_active_layer:
self.iface.setActiveLayer(cur_active_layer)
self.layers = tools_gw.remove_selection(True, layers=self.layers)
self.reset_model_psector("arc")
self.reset_model_psector("node")
self.reset_model_psector("connec")
if self.project_type.upper() == 'UD':
self.reset_model_psector("gully")
self.reset_model_psector("other")
tools_gw.close_dialog(self.dlg_plan_psector)
tools_qgis.disconnect_snapping()
tools_gw.disconnect_signal('psector')
tools_qgis.disconnect_signal_selection_changed()
def reset_model_psector(self, feature_type):
""" Reset model of the widget """
table_relation = "" + feature_type + "_plan"
widget_name = "tbl_" + table_relation
widget = tools_qt.get_widget(self.dlg_plan_psector, widget_name)
if widget:
widget.setModel(None)
def check_name(self, psector_name):
""" Check if name of new psector exist or not """
sql = (f"SELECT name FROM plan_psector"
f" WHERE name = '{psector_name}'")
row = tools_db.get_row(sql)
if row is None:
return False
return True
def insert_or_update_new_psector(self, tablename, close_dlg=False):
psector_name = tools_qt.get_text(self.dlg_plan_psector, "name", return_string_null=False)
if psector_name == "":
message = "Mandatory field is missing. Please, set a value"
tools_qgis.show_warning(message, parameter='Name')
return
rotation = tools_qt.get_text(self.dlg_plan_psector, "rotation", return_string_null=False)
if rotation == "":
tools_qt.set_widget_text(self.dlg_plan_psector, self.dlg_plan_psector.rotation, 0)
name_exist = self.check_name(psector_name)
if name_exist and not self.update:
message = "The name is current in use"
tools_qgis.show_warning(message)
return
else:
self.set_tabs_enabled(True)
self.enable_buttons(True)
viewname = f"'v_edit_plan_psector'"
sql = (f"SELECT column_name FROM information_schema.columns "
f"WHERE table_name = {viewname} "
f"AND table_schema = '" + self.schema_name.replace('"', '') + "' "
f"ORDER BY ordinal_position;")
rows = tools_db.get_rows(sql)
if not rows or rows is None or rows == '':
message = "Check fields from table or view"
tools_qgis.show_warning(message, parameter=viewname)
return
columns = []
for row in rows:
columns.append(str(row[0]))
if not self.update:
values = "VALUES("
if columns:
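# Build the INSERT statement from the dialog widgets: one column per widget matching a view column,
# with text values wrapped in PostgreSQL dollar-quoting ($$...$$) to avoid quote escaping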
sql = f"INSERT INTO {tablename} ("
for column_name in columns:
if column_name != 'psector_id':
widget_type = tools_qt.get_widget_type(self.dlg_plan_psector, column_name)
if widget_type is not None:
value = None
if widget_type is QCheckBox:
value = str(tools_qt.is_checked(self.dlg_plan_psector, column_name)).upper()
elif widget_type is QDateEdit:
date = self.dlg_plan_psector.findChild(QDateEdit, str(column_name))
value = date.dateTime().toString('yyyy-MM-dd HH:mm:ss')
elif widget_type is QComboBox:
combo = tools_qt.get_widget(self.dlg_plan_psector, column_name)
value = str(tools_qt.get_combo_value(self.dlg_plan_psector, combo))
else:
value = tools_qt.get_text(self.dlg_plan_psector, column_name)
if value is None or value == 'null':
sql += column_name + ", "
values += "null, "
else:
values += f"$${value}$$, "
sql += column_name + ", "
sql = sql[:len(sql) - 2] + ") "
values = values[:len(values) - 2] + ")"
sql += values
sql += " RETURNING psector_id;"
new_psector_id = tools_db.execute_returning(sql)
tools_qt.set_widget_text(self.dlg_plan_psector, self.dlg_plan_psector.psector_id, str(new_psector_id[0]))
if new_psector_id:
row = tools_gw.get_config_value('plan_psector_vdefault')
if row:
sql = (f"UPDATE config_param_user "
f" SET value = $${new_psector_id[0]}$$ "
f" WHERE parameter = 'plan_psector_vdefault'"
f" AND cur_user=current_user; ")
else:
sql = (f"INSERT INTO config_param_user (parameter, value, cur_user) "
f" VALUES ('plan_psector_vdefault', '{new_psector_id[0]}', current_user);")
tools_db.execute_sql(sql)
self.dlg_plan_psector.tabWidget.setTabEnabled(1, True)
self.delete_psector_selector('selector_plan_psector')
psector_id = tools_qt.get_text(self.dlg_plan_psector, self.dlg_plan_psector.psector_id)
self.insert_psector_selector('selector_plan_psector', 'psector_id', psector_id)
if close_dlg:
json_result = self.set_plan()
if 'status' in json_result and json_result['status'] == 'Accepted':
self.reload_states_selector()
tools_gw.close_dialog(self.dlg_plan_psector)
def set_plan(self):
# TODO: Check this
extras = f'"psectorId":"{tools_qt.get_text(self.dlg_plan_psector, self.psector_id)}"'
body = tools_gw.create_body(extras=extras)
json_result = tools_gw.execute_procedure('gw_fct_setplan', body)
tools_gw.manage_current_selections_docker(json_result)
return json_result
def price_selector(self, dialog, tableleft, tableright):
self.load_signals = True
# fill QTableView all_rows
tbl_all_rows = dialog.findChild(QTableView, "all_rows")
tbl_all_rows.setSelectionBehavior(QAbstractItemView.SelectRows)
self.fill_table(dialog, tbl_all_rows, tableleft)
tools_gw.set_tablemodel_config(dialog, tbl_all_rows, tableleft)
if not self.price_loaded:
self.price_loaded = True
self.count = -1
psector_id = tools_qt.get_text(dialog, 'psector_id')
self._manage_widgets_price(dialog, tableright, psector_id, print_all_rows=True, print_headers=True)
# Button select (Create new labels)
dialog.btn_select.clicked.connect(
partial(self.create_label, dialog, tbl_all_rows, 'id', tableright, "price_id"))
tbl_all_rows.doubleClicked.connect(
partial(self.create_label, dialog, tbl_all_rows, 'id', tableright, "price_id"))
# Button unselect
dialog.btn_remove.clicked.connect(
partial(self.rows_unselector, dialog, tableright))
def create_label(self, dialog, tbl_all_rows, id_ori, tableright, id_des):
selected_list = tbl_all_rows.selectionModel().selectedRows()
if len(selected_list) == 0:
message = "Any record selected"
tools_qgis.show_warning(message)
return
expl_id = []
for i in range(0, len(selected_list)):
row = selected_list[i].row()
id_ = tbl_all_rows.model().record(row).value(id_ori)
expl_id.append(id_)
psector_id = tools_qt.get_text(dialog, 'psector_id')
for i in range(0, len(selected_list)):
row = selected_list[i].row()
values = ""
values += f"'{psector_id}', "
if tbl_all_rows.model().record(row).value('unit') not in (None, 'null', 'NULL'):
values += f"'{tbl_all_rows.model().record(row).value('unit')}', "
else:
values += 'null, '
if tbl_all_rows.model().record(row).value('id') not in (None, 'null', 'NULL'):
values += f"'{tbl_all_rows.model().record(row).value('id')}', "
else:
values += 'null, '
if tbl_all_rows.model().record(row).value('description') not in (None, 'null', 'NULL'):
values += f"'{tbl_all_rows.model().record(row).value('description')}', "
else:
values += 'null, '
if tbl_all_rows.model().record(row).value('price') not in (None, 'null', 'NULL'):
values += f"'{tbl_all_rows.model().record(row).value('price')}', "
else:
values += 'null, '
values = values[:len(values) - 2]
# Check if expl_id already exists in expl_selector
sql = (f"SELECT DISTINCT({id_des})"
f" FROM {tableright}"
f" WHERE {id_des} = '{expl_id[i]}'"
f" AND psector_id = '{psector_id}'")
row = tools_db.get_row(sql)
if row is not None:
# if exist - show warning
message = "Id already selected"
tools_qt.show_info_box(message, "Info", parameter=str(expl_id[i]))
else:
sql = (f"INSERT INTO {tableright}"
f" (psector_id, unit, price_id, observ, price) "
f" VALUES ({values})")
tools_db.execute_sql(sql)
self._manage_widgets_price(dialog, tableright, psector_id, expl_id)
def _manage_widgets_price(self, dialog, tableright, psector_id, print_all_rows=False, print_headers=True):
layout = dialog.findChild(QGridLayout, 'lyt_price')
for i in reversed(range(layout.count())):
if layout.itemAt(i).widget():
layout.itemAt(i).widget().deleteLater()
self._add_price_widgets(dialog, tableright, psector_id, print_all_rows=print_all_rows, print_headers=print_headers)
self.update_total(dialog)
def _add_price_widgets(self, dialog, tableright, psector_id, expl_id=[], editable_widgets=['measurement','observ']
, print_all_rows=False, print_headers=False):
extras = (f'"tableName":"{tableright}", "psectorId":{psector_id}')
body = tools_gw.create_body(extras=extras)
complet_result = tools_gw.execute_procedure('gw_fct_getwidgetprices', body)
if not complet_result or complet_result['fields'] == {}:
return
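# complet_result['fields'] is expected to hold one dict per price row; its keys (minus 'id')
# become the grid headers and its values fill the corresponding row widgets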
if print_headers or self.header_exist is None:
self.header_exist = True
pos = 1
self.count = self.count + 1
for key in complet_result['fields'][0].keys():
if key != 'id':
lbl = QLabel()
lbl.setObjectName(f"lbl_{key}_{self.count}")
lbl.setText(f" {key} ")
layout = dialog.findChild(QGridLayout, 'lyt_price')
layout.addWidget(lbl, self.count, pos)
layout.setColumnStretch(2, 1)
pos = pos + 1
for field in complet_result['fields']:
if field['price_id'] in expl_id or print_all_rows:
self.count = self.count + 1
pos = 0
# Create check
check = QCheckBox()
check.setObjectName(f"{field['id']}")
layout = dialog.findChild(QGridLayout, 'lyt_price')
layout.addWidget(check, self.count, pos)
layout.setColumnStretch(2, 1)
pos = pos + 1
for key in complet_result['fields'][0].keys():
if key != 'id':
if key not in editable_widgets:
widget = QLabel()
widget.setObjectName(f"widget_{key}_{field['price_id']}")
widget.setText(f" {field.get(key)} ")
else:
widget = QLineEdit()
widget.setObjectName(f"widget_{key}_{field['price_id']}")
if f"widget_{key}_{field['price_id']}" in self.dict_to_update:
text = self.dict_to_update[f"widget_{key}_{field['price_id']}"][key]
else:
text = field.get(key) if field.get(key) is not None else ''
widget.setText(f"{text}")
widget.editingFinished.connect(partial(self._manage_updates_prices, widget, key, field['price_id']))
widget.editingFinished.connect(partial(self._manage_widgets_price, dialog, tableright, psector_id, print_all_rows=True ))
widget.editingFinished.connect(partial(self.update_total, dialog))
layout = dialog.findChild(QGridLayout, 'lyt_price')
layout.addWidget(widget, self.count, pos)
layout.setColumnStretch(2, 1)
pos = pos + 1
def _manage_updates_prices(self, widget, key, price_id):
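# Keep the edited value keyed by its widget name; each entry stores the price_id plus the changed
# field so _update_otherprice can build one UPDATE statement per modified widget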
self.dict_to_update[f"widget_{key}_{price_id}"] = {"price_id":price_id, key:widget.text()}
self._update_otherprice()
def _update_otherprice(self):
sql = ""
_filter = ""
if self.dict_to_update:
for main_key in self.dict_to_update:
sub_list = list(self.dict_to_update[main_key].keys())
for sub_key in sub_list:
if sub_key == 'price_id':
_filter = self.dict_to_update[main_key][sub_key]
else:
sql += f"UPDATE v_edit_plan_psector_x_other SET {sub_key} = '{self.dict_to_update[main_key][sub_key]}' " \
f"WHERE psector_id = {tools_qt.get_text(self.dlg_plan_psector, self.psector_id)} AND price_id = '{_filter}';\n"
tools_db.execute_sql(sql)
def _manage_widgets(self, dialog, lbl, widget, count, pos):
layout = dialog.findChild(QGridLayout, 'lyt_price')
layout.addWidget(lbl, count, pos)
layout.addWidget(widget, count, pos+1)
layout.setColumnStretch(2, 1)
def rows_unselector(self, dialog, tableright):
query = (f"DELETE FROM {tableright}"
f" WHERE {tableright}.id = ")
select_widgets = dialog.tab_other_prices.findChildren(QCheckBox)
selected_ids = []
count = 0
for check in select_widgets:
if check.isChecked():
selected_ids.append(check.objectName())
else:
count = count + 1
psector_id = tools_qt.get_text(dialog, 'psector_id')
for i in range(0, len(selected_ids)):
sql = f"{query}'{selected_ids[i]}' AND psector_id = '{psector_id}'"
tools_db.execute_sql(sql)
layout = dialog.findChild(QGridLayout, 'lyt_price')
for i in reversed(range(layout.count())):
if layout.itemAt(i).widget():
layout.itemAt(i).widget().deleteLater()
self._add_price_widgets(dialog, tableright, psector_id, print_all_rows=True, print_headers=True)
self.update_total(dialog)
def query_like_widget_text(self, dialog, text_line, qtable, tableleft, tableright, field_id):
""" Populate the QTableView by filtering through the QLineEdit """
schema_name = self.schema_name.replace('"', '')
psector_id = tools_qt.get_text(dialog, 'psector_id')
query = tools_qt.get_text(dialog, text_line).lower()
if query == 'null':
query = ""
sql = (f"SELECT * FROM {schema_name}.{tableleft} WHERE LOWER ({field_id})"
f" LIKE '%{query}%' AND {field_id} NOT IN (SELECT price_id FROM {schema_name}.{tableright}"
f" WHERE psector_id = '{psector_id}')")
self.fill_table_by_query(qtable, sql)
def fill_table_by_query(self, qtable, query):
"""
:param qtable: QTableView to show
:param query: query to set model
"""
model = QSqlQueryModel()
model.setQuery(query, db=global_vars.qgis_db_credentials)
qtable.setModel(model)
qtable.show()
# Check for errors
if model.lastError().isValid():
tools_qgis.show_warning(model.lastError().text())
def fill_table(self, dialog, widget, table_name, hidde=False, set_edit_triggers=QTableView.NoEditTriggers, expr=None):
""" Set a model with selected filter.
Attach that model to selected table
@setEditStrategy:
0: OnFieldChange
1: OnRowChange
2: OnManualSubmit
"""
# Manage exception if dialog is closed
if isdeleted(dialog):
return
if self.schema_name not in table_name:
table_name = self.schema_name + "." + table_name
# Set model
model = QSqlTableModel(db=global_vars.qgis_db_credentials)
model.setTable(table_name)
model.setEditStrategy(QSqlTableModel.OnFieldChange)
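# Sort by the first column in ascending order (Qt.AscendingOrder == 0)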
model.setSort(0, 0)
model.select()
# When change some field we need to refresh Qtableview and filter by psector_id
model.beforeUpdate.connect(partial(self.manage_update_state, model))
model.dataChanged.connect(partial(self.refresh_table, dialog, widget))
widget.setEditTriggers(set_edit_triggers)
# Check for errors
if model.lastError().isValid():
tools_qgis.show_warning(model.lastError().text())
# Attach model to table view
if expr:
widget.setModel(model)
widget.model().setFilter(expr)
else:
widget.setModel(model)
if hidde:
self.refresh_table(dialog, widget)
def refresh_table(self, dialog, widget):
""" Refresh qTableView """
widget.selectAll()
selected_list = widget.selectionModel().selectedRows()
widget.clearSelection()
for i in range(0, len(selected_list)):
row = selected_list[i].row()
if str(widget.model().record(row).value('psector_id')) != tools_qt.get_text(dialog, 'psector_id'):
widget.hideRow(i)
def manage_update_state(self, model, row, record):
"""
Manage new state of planned features.
:param model: QSqlModel of QTableView
:param row: index of updating row (passed by signal)
:param record: QSqlRecord (passed by signal)
"""
# Get table name via current tab name (arc, node, connec or gully)
index_tab = self.dlg_plan_psector.tab_feature.currentIndex()
tab_name = self.dlg_plan_psector.tab_feature.tabText(index_tab)
table_name = tab_name.lower()
# Get selected feature's state
feature_id = record.value(f'{table_name}_id') # Get the id
sql = f"SELECT {table_name}.state FROM {table_name} WHERE {table_name}_id='{feature_id}';"
sql_row = tools_db.get_row(sql)
if sql_row:
old_state = sql_row[0] # Original state
new_state = record.value('state') # New state
if old_state == 2 and new_state == 0:
msg = "This value is mandatory for planned feature. If you are looking to unlink feature from this " \
"psector please delete row. If delete is not allowed its because feature is only used on this " \
"psector and needs to be removed from canvas"
tools_qgis.show_warning(msg)
model.revert()
def document_insert(self):
""" Insert a document related to the current visit """
doc_id = self.doc_id.text()
psector_id = self.psector_id.text()
if not doc_id:
message = "You need to insert doc_id"
tools_qgis.show_warning(message)
return
if not psector_id:
message = "You need to insert psector_id"
tools_qgis.show_warning(message)
return
# Check if document already exist
sql = (f"SELECT doc_id"
f" FROM doc_x_psector"
f" WHERE doc_id = '{doc_id}' AND psector_id = '{psector_id}'")
row = tools_db.get_row(sql)
if row:
msg = "Document already exist"
tools_qgis.show_warning(msg)
return
# Insert into new table
sql = (f"INSERT INTO doc_x_psector (doc_id, psector_id)"
f" VALUES ('{doc_id}', {psector_id})")
status = tools_db.execute_sql(sql)
if status:
message = "Document inserted successfully"
tools_qgis.show_info(message)
self.dlg_plan_psector.tbl_document.model().select()
def manage_document(self, qtable):
""" Access GUI to manage documents e.g Execute action of button 34 """
psector_id = tools_qt.get_text(self.dlg_plan_psector, self.dlg_plan_psector.psector_id)
manage_document = GwDocument(single_tool=False)
dlg_docman = manage_document.get_document(tablename='psector', qtable=qtable, item_id=psector_id)
dlg_docman.btn_accept.clicked.connect(partial(tools_gw.set_completer_object, dlg_docman, 'doc'))
tools_qt.remove_tab(dlg_docman.tabWidget, 'tab_rel')
def show_status_warning(self):
mode = tools_gw.get_config_value('plan_psector_execute_action', table='config_param_system')
if mode is None:
return
mode = json.loads(mode[0])
if mode['mode'] == 'obsolete':
msg = "WARNING: You have updated the status value. If you click 'Accept' on the main dialog, " \
"a process that updates the state & state_type values of all that features that belong to the " \
"psector, according to the system variables plan_psector_statetype, " \
"plan_statetype_planned and plan_statetype_ficticious, will be triggered."
tools_qt.show_details(msg, 'Message warning')
elif mode['mode'] == 'onService':
if tools_qt.get_combo_value(self.dlg_plan_psector, self.cmb_status) == '0':
msg = "WARNING: You have updated the status value. If you click 'Accept' on the main dialog, " \
"this psector will be executed. Planified features will turn on service and deleted features " \
"will turn obsolete. To mantain traceability, a copy of planified features will be inserted " \
"on the psector."
tools_qt.show_details(msg, 'Message warning')
def master_new_psector(self, psector_id=None):
""" Button 45: New psector """
self.get_psector(psector_id)
def manage_psectors(self):
""" Button 46: Psector management """
# Create the dialog and signals
self.dlg_psector_mng = GwPsectorManagerUi()
tools_gw.load_settings(self.dlg_psector_mng)
table_name = "v_ui_plan_psector"
column_id = "psector_id"
# Tables
self.qtbl_psm = self.dlg_psector_mng.findChild(QTableView, "tbl_psm")
self.qtbl_psm.setSelectionBehavior(QAbstractItemView.SelectRows)
# Set signals
self.dlg_psector_mng.btn_cancel.clicked.connect(partial(tools_gw.close_dialog, self.dlg_psector_mng))
self.dlg_psector_mng.rejected.connect(partial(tools_gw.close_dialog, self.dlg_psector_mng))
self.dlg_psector_mng.btn_delete.clicked.connect(partial(
self.multi_rows_delete, self.dlg_psector_mng, self.qtbl_psm, table_name, column_id, 'lbl_vdefault_psector', 'psector'))
self.dlg_psector_mng.btn_update_psector.clicked.connect(
partial(self.update_current_psector, self.dlg_psector_mng, self.qtbl_psm))
self.dlg_psector_mng.btn_duplicate.clicked.connect(self.psector_duplicate)
self.dlg_psector_mng.txt_name.textChanged.connect(partial(
self.filter_by_text, self.dlg_psector_mng, self.qtbl_psm, self.dlg_psector_mng.txt_name, table_name))
self.dlg_psector_mng.tbl_psm.doubleClicked.connect(partial(self.charge_psector, self.qtbl_psm))
self.fill_table(self.dlg_psector_mng, self.qtbl_psm, table_name)
tools_gw.set_tablemodel_config(self.dlg_psector_mng, self.qtbl_psm, table_name)
selection_model = self.qtbl_psm.selectionModel()
selection_model.selectionChanged.connect(partial(self._fill_txt_infolog))
self.set_label_current_psector(self.dlg_psector_mng)
# Open form
self.dlg_psector_mng.setWindowFlags(Qt.WindowStaysOnTopHint)
tools_gw.open_dialog(self.dlg_psector_mng, dlg_name="psector_manager")
def update_current_psector(self, dialog, qtbl_psm):
selected_list = qtbl_psm.selectionModel().selectedRows()
if len(selected_list) == 0:
message = "Any record selected"
tools_qgis.show_warning(message)
return
row = selected_list[0].row()
psector_id = qtbl_psm.model().record(row).value("psector_id")
aux_widget = QLineEdit()
aux_widget.setText(str(psector_id))
self.upsert_config_param_user(dialog, aux_widget, "plan_psector_vdefault")
message = "Values has been updated"
tools_qgis.show_info(message)
self.fill_table(dialog, qtbl_psm, "v_ui_plan_psector")
tools_gw.set_tablemodel_config(dialog, qtbl_psm, "v_ui_plan_psector")
self.set_label_current_psector(dialog)
tools_gw.open_dialog(dialog)
def upsert_config_param_user(self, dialog, widget, parameter):
""" Insert or update values in tables with current_user control """
tablename = "config_param_user"
sql = (f"SELECT * FROM {tablename}"
f" WHERE cur_user = current_user")
rows = tools_db.get_rows(sql)
exist_param = False
if type(widget) != QDateEdit:
if tools_qt.get_text(dialog, widget) != "":
for row in rows:
if row[0] == parameter:
exist_param = True
if exist_param:
sql = f"UPDATE {tablename} SET value = "
if widget.objectName() != 'edit_state_vdefault':
sql += (f"'{tools_qt.get_text(dialog, widget)}'"
f" WHERE cur_user = current_user AND parameter = '{parameter}'")
else:
sql += (f"(SELECT id FROM value_state"
f" WHERE name = '{tools_qt.get_text(dialog, widget)}')"
f" WHERE cur_user = current_user AND parameter = 'edit_state_vdefault'")
else:
sql = f'INSERT INTO {tablename} (parameter, value, cur_user)'
if widget.objectName() != 'edit_state_vdefault':
sql += f" VALUES ('{parameter}', '{tools_qt.get_text(dialog, widget)}', current_user)"
else:
sql += (f" VALUES ('{parameter}',"
f" (SELECT id FROM value_state"
f" WHERE name = '{tools_qt.get_text(dialog, widget)}'), current_user)")
else:
for row in rows:
if row[0] == parameter:
exist_param = True
_date = widget.dateTime().toString('yyyy-MM-dd')
if exist_param:
sql = (f"UPDATE {tablename}"
f" SET value = '{_date}'"
f" WHERE cur_user = current_user AND parameter = '{parameter}'")
else:
sql = (f"INSERT INTO {tablename} (parameter, value, cur_user)"
f" VALUES ('{parameter}', '{_date}', current_user);")
tools_db.execute_sql(sql)
def filter_by_text(self, dialog, table, widget_txt, tablename):
result_select = tools_qt.get_text(dialog, widget_txt)
if result_select != 'null':
expr = f" name ILIKE '%{result_select}%'"
# Refresh model with selected filter
table.model().setFilter(expr)
table.model().select()
else:
self.fill_table(dialog, table, tablename)
def charge_psector(self, qtbl_psm):
selected_list = qtbl_psm.selectionModel().selectedRows()
if len(selected_list) == 0:
message = "Any record selected"
tools_qgis.show_warning(message)
return
row = selected_list[0].row()
psector_id = qtbl_psm.model().record(row).value("psector_id")
keep_open_form = tools_gw.get_config_parser('dialogs_actions', 'psector_manager_keep_open', "user", "init", prefix=True)
if tools_os.set_boolean(keep_open_form, False) is not True:
tools_gw.close_dialog(self.dlg_psector_mng)
self.master_new_psector(psector_id)
def multi_rows_delete(self, dialog, widget, table_name, column_id, label, action):
"""
Delete selected elements of the table
:param dialog: (QDialog)
:param QTableView widget: origin
:param table_name: table origin
:param column_id: Refers to the id of the source table
"""
# Get selected rows
selected_list = widget.selectionModel().selectedRows()
if len(selected_list) == 0:
message = "Any record selected"
tools_qgis.show_warning(message)
return
cur_psector = tools_gw.get_config_value('plan_psector_vdefault')
inf_text = ""
list_id = ""
for i in range(0, len(selected_list)):
row = selected_list[i].row()
id_ = widget.model().record(row).value(str(column_id))
if cur_psector and (str(id_) == str(cur_psector[0])):
message = ("You are trying to delete your current psector. "
"Please, change your current psector before delete.")
tools_qt.show_exception_message('Current psector', tools_qt.tr(message))
return
inf_text += f'"{id_}", '
list_id += f'"{id_}", '
inf_text = inf_text[:-2]
list_id = list_id[:-2]
if action == 'psector':
feature = f'"id":[{inf_text}], "featureType":"PSECTOR"'
body = tools_gw.create_body(feature=feature)
result = tools_gw.execute_procedure('gw_fct_getcheckdelete', body)
if result is not None and result['status'] == "Accepted":
if result['message']:
answer = tools_qt.show_question(result['message']['text'])
if answer:
feature += f', "tableName":"{table_name}", "idName":"{column_id}"'
body = tools_gw.create_body(feature=feature)
tools_gw.execute_procedure('gw_fct_setdelete', body)
elif action == 'price':
message = "Are you sure you want to delete these records?"
answer = tools_qt.show_question(message, "Delete records", inf_text)
if answer:
sql = "DELETE FROM selector_plan_result WHERE result_id in ("
if list_id != '':
sql += f"{list_id}) AND cur_user = current_user;"
tools_db.execute_sql(sql)
tools_qt.set_widget_text(dialog, label, '')
sql = (f"DELETE FROM {table_name}"
f" WHERE {column_id} IN ({list_id});")
tools_db.execute_sql(sql)
widget.model().select()
def manage_prices(self):
""" Button 50: Plan estimate result manager """
# Create the dialog and signals
self.dlg_merm = GwPriceManagerUi()
tools_gw.load_settings(self.dlg_merm)
# Set current value
sql = (f"SELECT name FROM plan_result_cat WHERE result_id IN (SELECT result_id FROM selector_plan_result "
f"WHERE cur_user = current_user)")
row = tools_db.get_row(sql)
if row:
tools_qt.set_widget_text(self.dlg_merm, 'lbl_vdefault_price', str(row[0]))
# Tables
tablename = 'plan_result_cat'
self.tbl_om_result_cat = self.dlg_merm.findChild(QTableView, "tbl_om_result_cat")
tools_qt.set_tableview_config(self.tbl_om_result_cat)
# Set signals
self.dlg_merm.btn_cancel.clicked.connect(partial(tools_gw.close_dialog, self.dlg_merm))
self.dlg_merm.rejected.connect(partial(tools_gw.close_dialog, self.dlg_merm))
self.dlg_merm.btn_delete.clicked.connect(partial(self.delete_merm, self.dlg_merm))
self.dlg_merm.btn_update_result.clicked.connect(partial(self.update_price_vdefault))
self.dlg_merm.txt_name.textChanged.connect(partial(self.filter_merm, self.dlg_merm, tablename))
self.fill_table(self.dlg_merm, self.tbl_om_result_cat, tablename)
tools_gw.set_tablemodel_config(self.tbl_om_result_cat, self.dlg_merm.tbl_om_result_cat, tablename)
# Open form
self.dlg_merm.setWindowFlags(Qt.WindowStaysOnTopHint)
tools_gw.open_dialog(self.dlg_merm, dlg_name="price_manager")
def update_price_vdefault(self):
selected_list = self.dlg_merm.tbl_om_result_cat.selectionModel().selectedRows()
if len(selected_list) == 0:
message = "Any record selected"
tools_qgis.show_warning(message)
return
row = selected_list[0].row()
price_name = self.dlg_merm.tbl_om_result_cat.model().record(row).value("name")
result_id = self.dlg_merm.tbl_om_result_cat.model().record(row).value("result_id")
tools_qt.set_widget_text(self.dlg_merm, 'lbl_vdefault_price', price_name)
sql = (f"DELETE FROM selector_plan_result WHERE current_user = cur_user;"
f"\nINSERT INTO selector_plan_result (result_id, cur_user)"
f" VALUES({result_id}, current_user);")
status = tools_db.execute_sql(sql)
if status:
message = "Values has been updated"
tools_qgis.show_info(message)
# Refresh canvas
self.iface.mapCanvas().refreshAllLayers()
def delete_merm(self, dialog):
""" Delete selected row from 'manage_prices' dialog from selected tab """
self.multi_rows_delete(dialog, dialog.tbl_om_result_cat, 'plan_result_cat',
'result_id', 'lbl_vdefault_price', 'price')
def filter_merm(self, dialog, tablename):
""" Filter rows from 'manage_prices' dialog from selected tab """
self.filter_by_text(dialog, dialog.tbl_om_result_cat, dialog.txt_name, tablename)
def psector_duplicate(self):
"""" Button 51: Duplicate psector """
selected_list = self.qtbl_psm.selectionModel().selectedRows()
if len(selected_list) == 0:
message = "Any record selected"
tools_qgis.show_warning(message)
return
row = selected_list[0].row()
psector_id = self.qtbl_psm.model().record(row).value("psector_id")
self.duplicate_psector = GwPsectorDuplicate()
self.duplicate_psector.is_duplicated.connect(partial(self.fill_table, self.dlg_psector_mng, self.qtbl_psm, 'v_ui_plan_psector'))
self.duplicate_psector.is_duplicated.connect(partial(self.set_label_current_psector, self.dlg_psector_mng))
self.duplicate_psector.manage_duplicate_psector(psector_id)
def set_label_current_psector(self, dialog):
sql = ("SELECT t1.psector_id, t1.name FROM plan_psector AS t1 "
" INNER JOIN config_param_user AS t2 ON t1.psector_id::text = t2.value "
" WHERE t2.parameter='plan_psector_vdefault' AND cur_user = current_user")
row = tools_db.get_row(sql)
if not row:
return
tools_qt.set_widget_text(dialog, 'lbl_vdefault_psector', row[1])
extras = (f'"selectorType":"selector_basic", "tabName":"tab_psector", "id":{row[0]}, '
f'"isAlone":"False", "disableParent":"False", '
f'"value":"True"')
body = tools_gw.create_body(extras=extras)
result = tools_gw.execute_procedure("gw_fct_getselectors", body)
tools_gw.manage_current_selections_docker(result)
def zoom_to_selected_features(self, layer, feature_type=None, zoom=None):
""" Zoom to selected features of the @layer with @feature_type """
if not layer:
return
global_vars.iface.setActiveLayer(layer)
global_vars.iface.actionZoomToSelected().trigger()
if feature_type and zoom:
# Set scale = scale_zoom
if feature_type in ('node', 'connec', 'gully'):
scale = zoom
# Set scale = max(current_scale, scale_zoom)
elif feature_type == 'arc':
scale = global_vars.iface.mapCanvas().scale()
if int(scale) < int(zoom):
scale = zoom
else:
scale = 5000
if zoom is not None:
scale = zoom
global_vars.iface.mapCanvas().zoomScale(float(scale))
# region private functions
def _manage_accept(self, psector_id):
if not self.my_json:
return
updates = ""
for key, value in self.my_json.items():
updates += f"{key} = '{value}', "
if updates:
updates = updates[:-2]
sql = f"UPDATE v_edit_plan_psector SET {updates} WHERE psector_id = {psector_id}"
if tools_db.execute_sql(sql):
msg = "Psector values updated successfully"
tools_qgis.show_info(msg)
self._clear_my_json()
def _clear_my_json(self):
self.my_json = {}
def _connect_editing_finished(self):
try:
# Widgets
dialog = self.dlg_plan_psector
widgets = dialog.General.findChildren(QWidget)
more_widgets = dialog.additional_info.findChildren(QWidget)
widgets.extend(more_widgets)
for widget in widgets:
if type(widget) == QLineEdit:
widget.editingFinished.connect(partial(tools_gw.get_values, dialog, widget, self.my_json))
elif type(widget) == QComboBox:
widget.currentIndexChanged.connect(partial(tools_gw.get_values, dialog, widget, self.my_json))
elif type(widget) == QCheckBox:
widget.stateChanged.connect(partial(tools_gw.get_values, dialog, widget, self.my_json))
elif type(widget) == QTextEdit:
widget.textChanged.connect(partial(tools_gw.get_values, dialog, widget, self.my_json))
except RuntimeError:
pass
def _fill_txt_infolog(self, selected):
"""
Fill txt_infolog of the psector manager form with the data of the selected row for columns:
'name', 'priority', 'status', 'expl_id', 'descript', 'text1', 'text2', 'observ'
"""
# Get id of selected row
row = selected.indexes()
if not row:
return
msg = ""
cols = ['Name', 'Priority', 'Status', 'expl_id', 'Descript', 'text1', 'text2', 'Observ']
for col in cols:
# Get column index for column
col_ind = tools_qt.get_col_index_by_col_name(self.qtbl_psm, f"{col.lower()}")
text = f'{row[col_ind].data()}'
msg += f"<b>{col}: </b><br>{text}<br><br>"
# Set message text into widget
tools_qt.set_widget_text(self.dlg_psector_mng, 'txt_infolog', msg)
def _enable_layers(self, is_checked):
""" Manage checkbox state and act accordingly with the layers """
layers = ['v_plan_psector_arc', 'v_plan_psector_connec', 'v_plan_psector_gully', 'v_plan_psector_link',
'v_plan_psector_node']
if is_checked == 0:  # user unchecked it
for layer_name in layers:
layer = tools_qgis.get_layer_by_tablename(layer_name)
if layer:
tools_qgis.set_layer_visible(layer, False, False)
elif is_checked == 2:  # user checked it
self._check_layers_visible('v_plan_psector_arc', 'the_geom', 'arc_id')
self._check_layers_visible('v_plan_psector_connec', 'the_geom', 'connec_id')
self._check_layers_visible('v_plan_psector_link', 'the_geom', 'link_id')
self._check_layers_visible('v_plan_psector_node', 'the_geom', 'node_id')
if self.project_type == 'ud':
self._check_layers_visible('v_plan_psector_gully', 'the_geom', 'gully_id')
def _check_layers_visible(self, layer_name, the_geom, field_id):
""" Check layers visibility and add it if it is not in the toc """
layer = tools_qgis.get_layer_by_tablename(layer_name)
if layer is None:
tools_gw.add_layer_database(layer_name, the_geom, field_id)
if layer and QgsProject.instance().layerTreeRoot().findLayer(layer).isVisible() is False:
tools_qgis.set_layer_visible(layer, True, True)
def _check_for_layers(self):
""" Return if ALL this layers in the list are checked or not """
all_checked = True
layers = ['v_plan_psector_arc', 'v_plan_psector_connec', 'v_plan_psector_gully', 'v_plan_psector_link',
'v_plan_psector_node']
for layer_name in layers:
if self.project_type == 'ws' and layer_name == 'v_plan_psector_gully':
continue
layer = tools_qgis.get_layer_by_tablename(layer_name)
if layer is None or QgsProject.instance().layerTreeRoot().findLayer(layer).isVisible() is False:
all_checked = False
return all_checked
def _set_to_arc(self):
if hasattr(self, 'emit_point') and self.emit_point is not None:
tools_gw.disconnect_signal('psector', 'set_to_arc_ep_canvasClicked_set_arc_id')
self.emit_point = QgsMapToolEmitPoint(self.canvas)
self.canvas.setMapTool(self.emit_point)
self.snapper_manager = GwSnapManager(self.iface)
self.snapper = self.snapper_manager.get_snapper()
self.layer_arc = tools_qgis.get_layer_by_tablename("v_edit_arc")
# Vertex marker
self.vertex_marker = self.snapper_manager.vertex_marker
# Store user snapping configuration
self.previous_snapping = self.snapper_manager.get_snapping_options()
# Set signals
tools_gw.connect_signal(self.canvas.xyCoordinates, self._mouse_move_arc, 'psector',
'set_to_arc_xyCoordinates_mouse_move_arc')
tools_gw.connect_signal(self.emit_point.canvasClicked, partial(self._set_arc_id),
'psector', 'set_to_arc_ep_canvasClicked_set_arc_id')
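# Summary of the tool flow: once the map tool is active, moving the mouse highlights snapped arcs via
# _mouse_move_arc, and a canvas click triggers _set_arc_id, which snaps to v_edit_arc and writes the
# snapped arc_id into the rows currently selected in the connec/gully tab.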
def _set_arc_id(self, point):
# Check if there is a connec/gully selected
tab_idx = self.dlg_plan_psector.tab_feature.currentIndex()
selected_rows = []
selected_qtbl = None
if tab_idx == 2:
selected_rows = self.qtbl_connec.selectionModel().selectedRows()
selected_qtbl = self.qtbl_connec
if len(selected_rows) == 0:
message = "Any record selected"
tools_qgis.show_warning(message)
return
elif tab_idx == 3:
selected_rows = self.qtbl_gully.selectionModel().selectedRows()
selected_qtbl = self.qtbl_gully
if len(selected_rows) == 0:
message = "Any record selected"
tools_qgis.show_warning(message)
return
# Get the point
event_point = self.snapper_manager.get_event_point(point=point)
self.arc_id = None
# Manage current psector
sql = ("SELECT t1.psector_id FROM plan_psector AS t1 "
" INNER JOIN config_param_user AS t2 ON t1.psector_id::text = t2.value "
" WHERE t2.parameter='plan_psector_vdefault' AND cur_user = current_user")
row = tools_db.get_row(sql)
current_psector = row[0]
selected_psector = tools_qt.get_text(self.dlg_plan_psector, self.psector_id)
if str(current_psector) != str(selected_psector):
message = "This psector does not match the current one. Value of current psector will be updated."
tools_qt.show_info_box(message)
sql = (f"UPDATE config_param_user "
f"SET value = '{selected_psector}' "
f"WHERE parameter = 'plan_psector_vdefault' AND cur_user=current_user")
tools_db.execute_sql(sql)
# Snap point
result = self.snapper_manager.snap_to_current_layer(event_point)
if result.isValid():
# Check feature
layer = self.snapper_manager.get_snapped_layer(result)
if layer == self.layer_arc:
# Get the point
snapped_feat = self.snapper_manager.get_snapped_feature(result)
self.arc_id = snapped_feat.attribute('arc_id')
self.arc_cat_id = snapped_feat.attribute('arccat_id')
# Set highlight
feature = tools_qt.get_feature_by_id(layer, self.arc_id, 'arc_id')
try:
geometry = feature.geometry()
self.rubber_band.setToGeometry(geometry, None)
self.rubber_band.setColor(QColor(255, 0, 0, 100))
self.rubber_band.setWidth(5)
self.rubber_band.show()
except AttributeError:
pass
if self.arc_id is None: return
for row in selected_rows:
cell = row.siblingAtColumn(tools_qt.get_col_index_by_col_name(selected_qtbl, 'arc_id'))
selected_qtbl.model().setData(cell, self.arc_id)
def _replace_arc(self):
if hasattr(self, 'emit_point') and self.emit_point is not None:
tools_gw.disconnect_signal('psector', 'replace_arc_ep_canvasClicked_open_arc_replace_form')
self.emit_point = QgsMapToolEmitPoint(self.canvas)
self.canvas.setMapTool(self.emit_point)
self.snapper_manager = GwSnapManager(self.iface)
self.snapper = self.snapper_manager.get_snapper()
self.layer_arc = tools_qgis.get_layer_by_tablename("v_edit_arc")
# Vertex marker
self.vertex_marker = self.snapper_manager.vertex_marker
# Store user snapping configuration
self.previous_snapping = self.snapper_manager.get_snapping_options()
# Set signals
tools_gw.connect_signal(self.canvas.xyCoordinates, self._mouse_move_arc, 'psector',
'replace_arc_xyCoordinates_mouse_move_arc')
tools_gw.connect_signal(self.emit_point.canvasClicked, self._open_arc_replace_form, 'psector',
'replace_arc_ep_canvasClicked_open_arc_replace_form')
def _open_arc_replace_form(self, point):
self.dlg_replace_arc = GwReplaceArc()
tools_gw.load_settings(self.dlg_replace_arc)
event_point = self.snapper_manager.get_event_point(point=point)
self.arc_id = None
# Manage current psector
sql = ("SELECT t1.psector_id FROM plan_psector AS t1 "
" INNER JOIN config_param_user AS t2 ON t1.psector_id::text = t2.value "
" WHERE t2.parameter='plan_psector_vdefault' AND cur_user = current_user")
row = tools_db.get_row(sql)
current_psector = row[0]
selected_psector = tools_qt.get_text(self.dlg_plan_psector, self.psector_id)
if str(current_psector) != str(selected_psector):
message = "This psector does not match the current one. Value of current psector will be updated."
tools_qt.show_info_box(message)
sql = (f"UPDATE config_param_user "
f"SET value = '{selected_psector}' "
f"WHERE parameter = 'plan_psector_vdefault' AND cur_user=current_user")
tools_db.execute_sql(sql)
# Snap point
result = self.snapper_manager.snap_to_current_layer(event_point)
if result.isValid():
# Check feature
layer = self.snapper_manager.get_snapped_layer(result)
if layer == self.layer_arc:
# Get the point
snapped_feat = self.snapper_manager.get_snapped_feature(result)
self.arc_id = snapped_feat.attribute('arc_id')
self.arc_cat_id = snapped_feat.attribute('arccat_id')
# Set highlight
feature = tools_qt.get_feature_by_id(layer, self.arc_id, 'arc_id')
try:
geometry = feature.geometry()
self.rubber_band.setToGeometry(geometry, None)
self.rubber_band.setColor(QColor(255, 0, 0, 100))
self.rubber_band.setWidth(5)
self.rubber_band.show()
except AttributeError:
pass
if self.arc_id is None: return
# Populate combo arccat
sql = "SELECT cat_arc.id AS id, cat_arc.id as idval FROM cat_arc WHERE id IS NOT NULL AND active IS TRUE "
rows = tools_db.get_rows(sql)
tools_qt.fill_combo_values(self.dlg_replace_arc.cmb_newarccat, rows, 1)
# Set text current arccat
self.dlg_replace_arc.txt_current_arccat.setText(self.arc_cat_id)
# Disconnect Snapping
self.snapper_manager.recover_snapping_options()
tools_qgis.disconnect_snapping(False, None, self.vertex_marker)
tools_gw.disconnect_signal('psector')
# Disable tab log
tools_gw.disable_tab_log(self.dlg_replace_arc)
# Triggers
self.dlg_replace_arc.btn_accept.clicked.connect(partial(self._set_plan_replace_feature))
self.dlg_replace_arc.btn_cancel.clicked.connect(partial(tools_gw.close_dialog, self.dlg_replace_arc))
self.dlg_replace_arc.rejected.connect(partial(tools_gw.reset_rubberband, self.rubber_band))
# Open form
tools_gw.open_dialog(self.dlg_replace_arc, dlg_name="replace_arc")
def _set_plan_replace_feature(self):
new_arc_cat = f'"{tools_qt.get_combo_value(self.dlg_replace_arc, self.dlg_replace_arc.cmb_newarccat)}"'
feature = f'"featureType":"ARC", "ids":["{self.arc_id}"]'
extras = f'"catalog":{new_arc_cat}'
body = tools_gw.create_body(feature=feature, extras=extras)
json_result = tools_gw.execute_procedure('gw_fct_setfeaturereplaceplan', body)
# Refresh tableview tbl_psector_x_arc, tbl_psector_x_connec, tbl_psector_x_gully
tools_gw.load_tableview_psector(self.dlg_plan_psector, 'arc')
tools_gw.load_tableview_psector(self.dlg_plan_psector, 'node')
tools_gw.load_tableview_psector(self.dlg_plan_psector, 'connec')
if self.project_type.upper() == 'UD':
tools_gw.load_tableview_psector(self.dlg_plan_psector, 'gully')
tools_gw.set_tablemodel_config(self.dlg_plan_psector, "tbl_psector_x_arc", 'plan_psector_x_arc', isQStandardItemModel=True)
tools_gw.set_tablemodel_config(self.dlg_plan_psector, "tbl_psector_x_connec", 'plan_psector_x_connec', isQStandardItemModel=True)
if self.project_type.upper() == 'UD':
tools_gw.set_tablemodel_config(self.dlg_plan_psector, "tbl_psector_x_gully", 'plan_psector_x_gully', isQStandardItemModel=True)
message = json_result['message']['text']
if message is not None:
tools_qt.show_info_box(message)
text_result, change_tab = tools_gw.fill_tab_log(self.dlg_replace_arc, json_result['body']['data'])
if not change_tab:
self.dlg_replace_arc.close()
tools_gw.reset_rubberband(self.rubber_band)
def _enable_arc_replace(self):
tab_idx = self.dlg_plan_psector.tab_feature.currentIndex()
self.dlg_plan_psector.btn_select_arc.setEnabled(False)
if tab_idx == 0:
self.dlg_plan_psector.btn_select_arc.setEnabled(True)
def _enable_set_to_arc(self):
tab_idx = self.dlg_plan_psector.tab_feature.currentIndex()
self.dlg_plan_psector.btn_set_to_arc.setEnabled(False)
if self.qtbl_connec.selectionModel() is None:
return
if tab_idx == 2 and self.qtbl_connec.selectionModel().selectedRows():
self.dlg_plan_psector.btn_set_to_arc.setEnabled(True)
elif tab_idx == 3 and self.qtbl_gully.selectionModel().selectedRows():
self.dlg_plan_psector.btn_set_to_arc.setEnabled(True)
def _mouse_move_arc(self, point):
if not self.layer_arc:
return
# Set active layer
self.iface.setActiveLayer(self.layer_arc)
# Get clicked point and add marker
self.vertex_marker.hide()
event_point = self.snapper_manager.get_event_point(point=point)
result = self.snapper_manager.snap_to_current_layer(event_point)
if result.isValid():
self.snapper_manager.add_marker(result, self.vertex_marker)
# endregion
|
Giswater/giswater_qgis_plugin
|
core/shared/psector.py
|
Python
|
gpl-3.0
| 100,688
|
[
"VisIt"
] |
aaaeed0daa48afa021c35c5c2ba8592a81fb69400e58a7f6a8fc30539dbb8f6b
|
import os
import ntpath
import json
from math import sqrt
from json_utils import create_json_file  # required by _generate_parameters_to_complexo_dm below
def get_files_mol2(mypath):
only_mol2_file = []
for root, dirs, files in os.walk(mypath):
for file in files:
if file.endswith(".mol2"):
f_path = os.path.join(root, file)
only_mol2_file.append(f_path)
return only_mol2_file
""" This function obtains all pdb files
in mypath
"""
def get_files_pdb(mypath):
only_pdb_file = []
for root, dirs, files in os.walk(mypath):
for file in files:
if file.endswith(".pdb"):
f_path = os.path.join(root, file)
only_pdb_file.append(f_path)
return only_pdb_file
""" This function obtains all pdb files
in mypath filtered by reference
"""
def get_files_pdb_filter(mypath, reference):
only_pdb_file = []
for root, dirs, files in os.walk(mypath):
for file in files:
if file.endswith(".pdb"):
if file.find(reference) > -1:
f_path = os.path.join(root, file)
only_pdb_file.append(f_path)
return only_pdb_file
""" This function obtains all pdbqt files
in mypath
"""
def get_files_pdbqt(mypath):
only_pdb_file = []
for root, dirs, files in os.walk(mypath):
for file in files:
if file.endswith(".pdbqt"):
f_path = os.path.join(root, file)
only_pdb_file.append(f_path)
return only_pdb_file
""" This function obtains all log files
in mypath
"""
def get_files_log(mypath):
only_pdb_file = []
for root, dirs, files in os.walk(mypath):
for file in files:
if file.endswith(".log"):
f_path = os.path.join(root, file)
only_pdb_file.append(f_path)
return only_pdb_file
""" This function obtains the name of
sorted energy file
"""
def get_file_name_sorted_energy():
return 'vs_energies_sorted.txt'
def get_separator_filename_mode():
"""
Returns the separator used between the receptor_ligand part and the mode part of a filename.
Example:
>>> get_separator_filename_mode()
'+----+'
@return: the separator file mode
@rtype: string
"""
return '+----+'
""" This function obtains the name of
path that saving pdbqt files for analysis
"""
def get_directory_pdbqt_analysis(path_analysis):
path_analysis_pdbqt = os.path.join(path_analysis, "pdbqt_model")
# Checking path_analysis
if not os.path.exists(path_analysis_pdbqt):
os.makedirs(path_analysis_pdbqt)
return path_analysis_pdbqt
def get_structure_file_name(myfile):
"""
This function obtains the name of myfile without filename extension
"""
path, filename = ntpath.split(myfile)
name = str(filename.split(".")[0]) # remove .pdbqt
return name
def get_name_model_pdb(myfile):
"""
This function obtains the name of myfile without filename extension
"""
path, filename = ntpath.split(myfile)
name = str(filename.split(".")[0]) # remove .pdb
return name
def get_name_model_pdbqt(myfile):
"""
This function obtains the name of myfile without filename extension
"""
path, filename = ntpath.split(myfile)
name = str(filename.split(".")[0]) # remove .pdbqt
return name
def get_name_receptor_pdb(myfile):
"""
This function obtains the name of myfile without filename extension
"""
path, filename = ntpath.split(myfile)
name = str(filename.split(".")[0]) # remove .pdb
return name
def get_name_receptor_pdbqt(myfile):
"""
This function obtains the name of myfile without filename extension
"""
path, filename = ntpath.split(myfile)
name = str(filename.split(".")[0]) # remove .pdbqt
return name
def get_ligand_from_receptor_ligand_model(receptor_ligand_model):
"""
This function obtains the name of ligand based on receptor_ligand_model
Example of input: compl_ns3pro_dm_0_-_NuBBE_485_obabel_3D+----+20
"""
separator_model = get_separator_filename_mode()
separator_receptor = "_-_"
string_ref = receptor_ligand_model
s = string_ref.split(separator_receptor) # Removing receptor
s = str(s[1]).split(separator_model) # Removing model
ligand_name = str(s[0])  # getting the name of the ligand
return ligand_name
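# Illustrative walk-through using the docstring example (no new behaviour, just the parsing steps):
#   "compl_ns3pro_dm_0_-_NuBBE_485_obabel_3D+----+20"
#   split on "_-_"    -> ["compl_ns3pro_dm_0", "NuBBE_485_obabel_3D+----+20"]
#   split on "+----+" -> ["NuBBE_485_obabel_3D", "20"]
#   returned ligand name -> "NuBBE_485_obabel_3D"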
""" This function obtains the name of
path that saving pdbqt files for analysis
"""
def get_directory_pdb_analysis(path_analysis):
path_analysis_pdb = os.path.join(path_analysis, "pdb_model")
# Checking path_analysis
if not os.path.exists(path_analysis_pdb):
os.makedirs(path_analysis_pdb)
return path_analysis_pdb
""" This function obtains the name of
path that saving pdbqt files for analysis
"""
def get_directory_complex_pdb_analysis(path_analysis):
path_analysis_pdb = os.path.join(path_analysis, "pdb_complex")
# Checking path_analysis
if not os.path.exists(path_analysis_pdb):
os.makedirs(path_analysis_pdb)
return path_analysis_pdb
""" This function loading pdb file to list.
list_ret is composed by pdb_path_file and loaded file.
"""
def loading_pdb_2_list(pdb_path_file):
list_pdb = []
f_PDB = open(pdb_path_file, "r")
for line in f_PDB:
if line.find("ATOM") > -1:
list_pdb.append(line)
f_PDB.close()
list_ret = (pdb_path_file, list_pdb)
return list_ret
""" Extract the numbers. GROMACS uses nm and autodock vina uses A as units.
Therefore, values from gromacs are multiplied by 10.
"""
def get_value_from_box_center(box):
splited_value_box = str(box).split()
return dict(box_center_x="{0:.2f}".format(float(splited_value_box[0]) * 10),
box_center_y="{0:.2f}".format(float(splited_value_box[1]) * 10),
box_center_z="{0:.2f}".format(float(splited_value_box[2]) * 10))
def get_value_from_box_size(box):
splited_value_box = str(box).split()
return dict(box_size_x="{0:.2f}".format(float(splited_value_box[0]) * 10),
box_size_y="{0:.2f}".format(float(splited_value_box[1]) * 10),
box_size_z="{0:.2f}".format(float(splited_value_box[2]) * 10))
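# Minimal usage sketch with hypothetical values: a GROMACS box centre of "1.50 2.00 2.50" (nm)
# becomes angstroms after the *10 conversion:
#   get_value_from_box_center("1.50 2.00 2.50")
#   -> {'box_center_x': '15.00', 'box_center_y': '20.00', 'box_center_z': '25.00'}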
def _generate_parameters_to_complexo_dm():
"""These parameters are generated in hardcoded form, so this function will be deprecated later"""
d = dict(num_modes=9999,
energy_range=9999,
exhaustiveness=10,
cpu=1)
create_json_file('parameters_complexo_dm.json', d)
# Use pdbid_box.json and general_parameters.json to create config_complexo_dm.txt
def generate_config_complexo_dm(box_json, general_parameters_json):
box = open(box_json, 'r')
box_dict = json.load(box)
parameters = open(general_parameters_json, 'r')
parameters_dict = json.load(parameters)
file = open('config_complexo_dm.txt', 'w')
for key, value in box_dict.items():
line = ''.join([str(key),
' = ',
str(value),
'\n'])
file.write(line)
for key, value in parameters_dict.items():
line = ''.join([str(key),
' = ',
str(value),
'\n'])
file.write(line)
file.close()
def calculate_avg_value(docking_output):
f_file = open(docking_output, "r")
err_list = []
value = 0
n = 0
for line in f_file:
splited_line = str(line).split()
if not len(splited_line) == 0 and splited_line[0].isdigit():
value += float(splited_line[1])
err_list.append(float(splited_line[2]))
n += 1
avg = value / n
avg = "%.1f" % avg
err = 0
for value in err_list:
err += (value - float(avg)) * (value - float(avg))  # accumulate squared deviations (the original assignment overwrote err on each iteration)
err = sqrt(err / (n - 1) / sqrt(n))
err = "%.1f" % err
f_file.close()
return dict(number_modes=n,
avg=avg,
err=err)
def get_receptor_from_receptor_ligand_model(receptor_ligand_model):
"""
This function obtains the name of receptor based on receptor_ligand_model
Example of input: compl_ns3pro_dm_0_-_NuBBE_485_obabel_3D+----+20
"""
separator_model = get_separator_filename_mode()
separator_receptor = "_-_"
string_ref = receptor_ligand_model
receptor_name = string_ref.split(separator_receptor)[0] #Removing all, except receptor name
return receptor_name
def get_model_from_receptor_ligand_model(receptor_ligand_model):
"""
This function obtains the model based on receptor_ligand_model
Example of input: compl_ns3pro_dm_0_-_NuBBE_485_obabel_3D+----+20
Return: 20
"""
separator_model = get_separator_filename_mode()
separator_receptor = "_-_"
string_ref = receptor_ligand_model
s = string_ref.split(separator_receptor) #Removing receptor
s = str(s[1]).split(separator_model) #Removing ligand name
# getting model
model = int(s[len(s)-1])
return model
""" This function obtains the name of
path that saving pdbqt files for analysis
"""
def get_directory_temp_analysis(path_analysis):
path_analysis_temp = os.path.join(path_analysis,"temp")
#Checking path_analysis
if not os.path.exists(path_analysis_temp):
os.makedirs(path_analysis_temp)
return path_analysis_temp
|
rodrigofaccioli/drugdesign
|
virtualscreening/vina/spark/vina_utils.py
|
Python
|
apache-2.0
| 9,638
|
[
"Gromacs"
] |
796f869cb78aec2f0436fe0e17033f1ce33beff85c23f4420b96a03640c7c054
|
import xml.etree.ElementTree as ET
import numpy as np
from uncertainties import ufloat
import openmc
import pytest
from tests.unit_tests import assert_unbounded
from openmc.data import atomic_mass, AVOGADRO
def test_contains():
# Cell with specified region
s = openmc.XPlane()
c = openmc.Cell(region=+s)
assert (1.0, 0.0, 0.0) in c
assert (-1.0, 0.0, 0.0) not in c
# Cell with no region
c = openmc.Cell()
assert (10.0, -4., 2.0) in c
def test_repr(cell_with_lattice):
cells, mats, univ, lattice = cell_with_lattice
repr(cells[0]) # cell with distributed materials
repr(cells[1]) # cell with material
repr(cells[2]) # cell with lattice
# Empty cell
c = openmc.Cell()
repr(c)
# Empty cell with volume
c.volume = 3.0
repr(c)
# Empty cell with uncertain volume
c.volume = ufloat(3.0, 0.2)
repr(c)
def test_bounding_box():
zcyl = openmc.ZCylinder()
c = openmc.Cell(region=-zcyl)
ll, ur = c.bounding_box
assert ll == pytest.approx((-1., -1., -np.inf))
assert ur == pytest.approx((1., 1., np.inf))
# Cell with no region specified
c = openmc.Cell()
assert_unbounded(c)
def test_clone():
m = openmc.Material()
cyl = openmc.ZCylinder()
c = openmc.Cell(fill=m, region=-cyl)
c.temperature = 650.
c2 = c.clone()
assert c2.id != c.id
assert c2.fill != c.fill
assert c2.region != c.region
assert c2.temperature == c.temperature
c3 = c.clone(clone_materials=False)
assert c3.id != c.id
assert c3.fill == c.fill
assert c3.region != c.region
assert c3.temperature == c.temperature
c4 = c.clone(clone_regions=False)
assert c4.id != c.id
assert c4.fill != c.fill
assert c4.region == c.region
assert c4.temperature == c.temperature
def test_temperature(cell_with_lattice):
# Make sure temperature propagates through universes
m = openmc.Material()
s = openmc.XPlane()
c1 = openmc.Cell(fill=m, region=+s)
c2 = openmc.Cell(fill=m, region=-s)
u1 = openmc.Universe(cells=[c1, c2])
c = openmc.Cell(fill=u1)
c.temperature = 400.0
assert c1.temperature == 400.0
assert c2.temperature == 400.0
with pytest.raises(ValueError):
c.temperature = -100.
# distributed temperature
cells, _, _, _ = cell_with_lattice
c = cells[0]
c.temperature = (300., 600., 900.)
def test_rotation():
u = openmc.Universe()
c = openmc.Cell(fill=u)
c.rotation = (180.0, 0.0, 0.0)
assert np.allclose(c.rotation_matrix, [
[1., 0., 0.],
[0., -1., 0.],
[0., 0., -1.]
])
c.rotation = (0.0, 90.0, 0.0)
assert np.allclose(c.rotation_matrix, [
[0., 0., -1.],
[0., 1., 0.],
[1., 0., 0.]
])
def test_get_nuclides(uo2):
c = openmc.Cell(fill=uo2)
nucs = c.get_nuclides()
assert nucs == ['U235', 'O16']
def test_volume_setting():
c = openmc.Cell()
# Test ordinary volume and uncertain volume
c.volume = 3
c.volume = ufloat(3, 0.7)
# Allow volume to be set to 0.0
c.volume = 0.0
c.volume = ufloat(0.0, 0.1)
# Test errors for negative volume
with pytest.raises(ValueError):
c.volume = -1.0
with pytest.raises(ValueError):
c.volume = ufloat(-0.05, 0.1)
def test_atoms_material_cell(uo2, water):
""" Test if correct number of atoms is returned.
Also check if Cell.atoms still works after volume/material was changed
"""
c = openmc.Cell(fill=uo2)
c.volume = 2.0
expected_nucs = ['U235', 'O16']
# Precalculate the expected number of atoms
M = (atomic_mass('U235') + 2 * atomic_mass('O16')) / 3
expected_atoms = [
1/3 * uo2.density/M * AVOGADRO * 2.0, # U235
2/3 * uo2.density/M * AVOGADRO * 2.0 # O16
]
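# Number-density bookkeeping behind the expected values: atoms of nuclide i =
# (atom fraction_i) * density / M * AVOGADRO * volume, with M the average molar mass per atom of UO2.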
tuples = c.atoms.items()
for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples):
assert nuc == t[0]
assert atom_num == pytest.approx(t[1])
# Change volume and check if OK
c.volume = 3.0
expected_atoms = [
1/3 * uo2.density/M * AVOGADRO * 3.0, # U235
2/3 * uo2.density/M * AVOGADRO * 3.0 # O16
]
tuples = c.atoms.items()
for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples):
assert nuc == t[0]
assert atom_num == pytest.approx(t[1])
# Change material and check if OK
c.fill = water
expected_nucs = ['H1', 'O16']
M = (2 * atomic_mass('H1') + atomic_mass('O16')) / 3
expected_atoms = [
2/3 * water.density/M * AVOGADRO * 3.0, # H1
1/3 * water.density/M * AVOGADRO * 3.0 # O16
]
tuples = c.atoms.items()
for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples):
assert nuc == t[0]
assert atom_num == pytest.approx(t[1])
def test_atoms_distribmat_cell(uo2, water):
""" Test if correct number of atoms is returned for a cell with
'distribmat' fill
"""
c = openmc.Cell(fill=[uo2, water])
c.volume = 6.0
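# With a distributed-material fill of two materials and a total cell volume of 6.0, the expected
# values below assume each material instance occupies half the volume (3.0).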
# Calculate the expected number of atoms
expected_nucs = ['U235', 'O16', 'H1']
M_uo2 = (atomic_mass('U235') + 2 * atomic_mass('O16')) / 3
M_water = (2 * atomic_mass('H1') + atomic_mass('O16')) / 3
expected_atoms = [
1/3 * uo2.density/M_uo2 * AVOGADRO * 3.0, # U235
(2/3 * uo2.density/M_uo2 * AVOGADRO * 3.0 +
1/3 * water.density/M_water * AVOGADRO * 3.0), # O16
2/3 * water.density/M_water * AVOGADRO * 3.0 # H1
]
tuples = c.atoms.items()
for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples):
assert nuc == t[0]
assert atom_num == pytest.approx(t[1])
def test_atoms_errors(cell_with_lattice):
cells, mats, univ, lattice = cell_with_lattice
# Material Cell with no volume
with pytest.raises(ValueError):
cells[1].atoms
# Cell with lattice
cells[2].volume = 3
with pytest.raises(ValueError):
cells[2].atoms
# Cell with volume but with void fill
cells[1].volume = 2
cells[1].fill = None
with pytest.raises(ValueError):
cells[1].atoms
def test_nuclide_densities(uo2):
c = openmc.Cell(fill=uo2)
expected_nucs = ['U235', 'O16']
expected_density = [1.0, 2.0]
tuples = list(c.get_nuclide_densities().values())
for nuc, density, t in zip(expected_nucs, expected_density, tuples):
assert nuc == t[0]
assert density == pytest.approx(t[1])
# Empty cell
c = openmc.Cell()
assert not c.get_nuclide_densities()
def test_get_all_universes(cell_with_lattice):
# Cell with nested universes
c1 = openmc.Cell()
u1 = openmc.Universe(cells=[c1])
c2 = openmc.Cell(fill=u1)
u2 = openmc.Universe(cells=[c2])
c3 = openmc.Cell(fill=u2)
univs = set(c3.get_all_universes().values())
assert not (univs ^ {u1, u2})
# Cell with lattice
cells, mats, univ, lattice = cell_with_lattice
univs = set(cells[-1].get_all_universes().values())
assert not (univs ^ {univ})
def test_get_all_materials(cell_with_lattice):
# Normal cell
m = openmc.Material()
c = openmc.Cell(fill=m)
test_mats = set(c.get_all_materials().values())
assert not(test_mats ^ {m})
# Cell filled with distributed materials
cells, mats, univ, lattice = cell_with_lattice
c = cells[0]
test_mats = set(c.get_all_materials().values())
assert not (test_mats ^ set(m for m in c.fill if m is not None))
# Cell filled with universe
c = cells[-1]
test_mats = set(c.get_all_materials().values())
assert not (test_mats ^ set(mats))
def test_to_xml_element(cell_with_lattice):
cells, mats, univ, lattice = cell_with_lattice
c = cells[-1]
root = ET.Element('geometry')
elem = c.create_xml_subelement(root)
assert elem.tag == 'cell'
assert elem.get('id') == str(c.id)
assert elem.get('region') is None
surf_elem = root.find('surface')
assert surf_elem.get('id') == str(cells[0].region.surface.id)
c = cells[0]
c.temperature = 900.0
elem = c.create_xml_subelement(root)
assert elem.get('region') == str(c.region)
assert elem.get('temperature') == str(c.temperature)
|
walshjon/openmc
|
tests/unit_tests/test_cell.py
|
Python
|
mit
| 8,229
|
[
"Avogadro"
] |
8cd8f3cf42dce03918937f1aa87063c687409bb048781a26f1daf0c86fdd4888
|
"""Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..utils.validation import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount
from ..utils.multiclass import check_classification_targets
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score fuction."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
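# Together these two helpers define the bootstrap / out-of-bag split: indices drawn (with replacement)
# by _generate_sample_indices are used to fit a tree, while indices never drawn (count == 0) are the
# out-of-bag samples consumed by _set_oob_score.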
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
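# Multiplying the sample weights by the bootstrap draw counts is equivalent to fitting the tree on
# the resampled dataset, without materialising a resampled copy of X.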
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
def _parallel_helper(obj, methodname, *args, **kwargs):
"""Private helper to workaround Python 2 pickle limitations"""
return getattr(obj, methodname)(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
# np.reshape is used here because, unlike indexing with [:, np.newaxis],
# it preserves the data contiguity.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False)
tree.set_params(random_state=random_state.randint(MAX_INT))
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('auto', 'balanced', 'balanced_subsample', 'subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.class_weight == "subsample":
warn("class_weight='subsample' is deprecated and will be removed in 0.18."
" It was replaced by class_weight='balanced_subsample' "
"using the balanced strategy.", DeprecationWarning)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight not in ['subsample', 'balanced_subsample'] or
not self.bootstrap):
if self.class_weight == 'subsample':
class_weight = 'auto'
elif self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
with warnings.catch_warnings():
if class_weight == "auto":
warnings.simplefilter('ignore', DeprecationWarning)
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
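# Illustrative sketch (not part of the original module): the out-of-bag
# bookkeeping used in _set_oob_score, shown with hypothetical per-estimator
# predictions on the samples each estimator did not see during fitting.
def _example_oob_accumulation():
    import numpy as np
    n_samples = 4
    predictions = np.zeros(n_samples)
    n_predictions = np.zeros(n_samples)
    # Each tuple: (indices the estimator did NOT see, its predictions there).
    per_estimator = [(np.array([0, 2]), np.array([1.0, 3.0])),
                     (np.array([1, 2]), np.array([2.0, 5.0]))]
    for unsampled_indices, p_estimator in per_estimator:
        predictions[unsampled_indices] += p_estimator
        n_predictions[unsampled_indices] += 1
    n_predictions[n_predictions == 0] = 1  # avoid division by zero
    return predictions / n_predictions     # array([1., 2., 4., 0.])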
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The class labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
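# Illustrative sketch (not part of the original module): a minimal usage
# example for RandomForestClassifier; the synthetic dataset is hypothetical
# and assumes sklearn.datasets.make_classification is available.
def _example_random_forest_classifier():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=200, n_features=8, random_state=0)
    clf = RandomForestClassifier(n_estimators=25, random_state=0)
    clf.fit(X, y)
    # Per-class probabilities for the first three samples and training accuracy.
    return clf.predict_proba(X[:3]), clf.score(X, y)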
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of decision tree
regressors on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
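# Illustrative sketch (not part of the original module): using the
# out-of-bag estimate of RandomForestRegressor; the synthetic dataset is
# hypothetical and assumes sklearn.datasets.make_regression is available.
def _example_random_forest_regressor_oob():
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=300, n_features=10, random_state=0)
    reg = RandomForestRegressor(n_estimators=50, bootstrap=True,
                                oob_score=True, random_state=0)
    reg.fit(X, y)
    # R^2 computed on the samples each tree did not see during fitting.
    return reg.oob_score_, reg.oob_prediction_.shape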
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The class labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
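# Illustrative sketch (not part of the original module): inspecting the
# feature_importances_ of an ExtraTreesRegressor; the synthetic dataset is
# hypothetical and assumes sklearn.datasets.make_regression is available.
def _example_extra_trees_importances():
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=200, n_features=5, n_informative=2,
                           random_state=0)
    est = ExtraTreesRegressor(n_estimators=50, random_state=0).fit(X, y)
    # One importance value per feature; the values sum to 1.
    return est.feature_importances_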
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
# ensure_2d=False because there are actually unit tests checking that we
# fail for 1d input.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
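# Illustrative sketch (not part of the original module): embedding a small
# random dataset into the sparse leaf-indicator space produced by
# RandomTreesEmbedding; the input data are hypothetical.
def _example_random_trees_embedding():
    import numpy as np
    X = np.random.RandomState(0).rand(20, 4)
    embedder = RandomTreesEmbedding(n_estimators=5, max_depth=3,
                                    random_state=0)
    X_sparse = embedder.fit_transform(X)
    # One one-hot block of leaves per tree; the width is at most
    # n_estimators * 2 ** max_depth columns.
    return X_sparse.shape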
| yanlend/scikit-learn | sklearn/ensemble/forest.py | Python | bsd-3-clause | 62656 | ["Brian"] | 373a46165512f9daaab85d890647c0960a4abcfe0626cb2a3fc34bb8afe97b94 |
# Author: Travis Oliphant
# 1999 -- 2002
from __future__ import division, print_function, absolute_import
import operator
import threading
import sys
import timeit
from . import sigtools, dlti
from ._upfirdn import upfirdn, _output_len
from scipy._lib.six import callable
from scipy._lib._version import NumpyVersion
from scipy import fftpack, linalg
from scipy.fftpack.helper import _init_nd_shape_and_axes_sorted
from numpy import (allclose, angle, arange, argsort, array, asarray,
atleast_1d, atleast_2d, cast, dot, exp, expand_dims,
iscomplexobj, mean, ndarray, newaxis, ones, pi,
poly, polyadd, polyder, polydiv, polymul, polysub, polyval,
product, r_, ravel, real_if_close, reshape,
roots, sort, take, transpose, unique, where, zeros,
zeros_like)
import numpy as np
import math
from scipy.special import factorial
from .windows import get_window
from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
from .filter_design import cheby1, _validate_sos
from .fir_filter_design import firwin
if sys.version_info.major >= 3 and sys.version_info.minor >= 5:
from math import gcd
else:
from fractions import gcd
__all__ = ['correlate', 'fftconvolve', 'convolve', 'convolve2d', 'correlate2d',
'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
'residuez', 'resample', 'resample_poly', 'detrend',
'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method',
'filtfilt', 'decimate', 'vectorstrength']
_modedict = {'valid': 0, 'same': 1, 'full': 2}
_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
'symmetric': 1, 'reflect': 4}
_rfft_mt_safe = (NumpyVersion(np.__version__) >= '1.9.0.dev-e24486e')
_rfft_lock = threading.Lock()
def _valfrommode(mode):
try:
return _modedict[mode]
except KeyError:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
def _bvalfromboundary(boundary):
try:
return _boundarydict[boundary] << 2
except KeyError:
raise ValueError("Acceptable boundary flags are 'fill', 'circular' "
"(or 'wrap'), and 'symmetric' (or 'symm').")
def _inputs_swap_needed(mode, shape1, shape2):
"""
If in 'valid' mode, returns whether or not the input arrays need to be
swapped depending on whether `shape1` is at least as large as `shape2` in
every dimension.
This is important for some of the correlation and convolution
implementations in this module, where the larger array input needs to come
before the smaller array input when operating in this mode.
Note that if the mode provided is not 'valid', False is immediately
returned.
"""
if mode == 'valid':
ok1, ok2 = True, True
for d1, d2 in zip(shape1, shape2):
if not d1 >= d2:
ok1 = False
if not d2 >= d1:
ok2 = False
if not (ok1 or ok2):
raise ValueError("For 'valid' mode, one must be at least "
"as large as the other in every dimension")
return not ok1
return False
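# Illustrative sketch (not part of the original module): in 'valid' mode the
# larger input must come first, so a swap is reported for these shapes.
def _example_inputs_swap_needed():
    needs_swap = _inputs_swap_needed('valid', (3, 3), (5, 5))  # True
    no_swap = _inputs_swap_needed('valid', (5, 5), (3, 3))     # False
    return needs_swap, no_swap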
def correlate(in1, in2, mode='full', method='auto'):
r"""
Cross-correlate two N-dimensional arrays.
Cross-correlate `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
method : str {'auto', 'direct', 'fft'}, optional
A string indicating which method to use to calculate the correlation.
``direct``
The correlation is determined directly from sums, the definition of
correlation.
``fft``
The Fast Fourier Transform is used to perform the correlation more
quickly (only available for numerical arrays.)
``auto``
Automatically chooses direct or Fourier method based on an estimate
of which is faster (default). See `convolve` Notes for more detail.
.. versionadded:: 0.19.0
Returns
-------
correlate : array
An N-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
See Also
--------
choose_conv_method : contains more documentation on `method`.
Notes
-----
The correlation z of two d-dimensional arrays x and y is defined as::
z[...,k,...] = sum[..., i_l, ...] x[..., i_l,...] * conj(y[..., i_l - k,...])
This way, if x and y are 1-D arrays and ``z = correlate(x, y, 'full')``
then
.. math::
z[k] = (x * y)(k - N + 1)
= \sum_{l=0}^{||x||-1}x_l y_{l-k+N-1}^{*}
for :math:`k = 0, 1, ..., ||x|| + ||y|| - 2`
where :math:`||x||` is the length of ``x``, :math:`N = \max(||x||,||y||)`,
and :math:`y_m` is 0 when m is outside the range of y.
``method='fft'`` only works for numerical arrays as it relies on
`fftconvolve`. In certain cases (i.e., arrays of objects or when
rounding integers can lose precision), ``method='direct'`` is always used.
Examples
--------
Implement a matched filter using cross-correlation, to recover a signal
that has passed through a noisy channel.
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
>>> sig_noise = sig + np.random.randn(len(sig))
>>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128
>>> import matplotlib.pyplot as plt
>>> clock = np.arange(64, len(sig), 128)
>>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.plot(clock, sig[clock], 'ro')
>>> ax_orig.set_title('Original signal')
>>> ax_noise.plot(sig_noise)
>>> ax_noise.set_title('Signal with noise')
>>> ax_corr.plot(corr)
>>> ax_corr.plot(clock, corr[clock], 'ro')
>>> ax_corr.axhline(0.5, ls=':')
>>> ax_corr.set_title('Cross-correlated with rectangular pulse')
>>> ax_orig.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if in1.ndim == in2.ndim == 0:
return in1 * in2.conj()
elif in1.ndim != in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
# Don't use _valfrommode, since correlate should not accept numeric modes
try:
val = _modedict[mode]
except KeyError:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
# this either calls fftconvolve or this function with method=='direct'
if method in ('fft', 'auto'):
return convolve(in1, _reverse_and_conj(in2), mode, method)
elif method == 'direct':
# fastpath to faster numpy.correlate for 1d inputs when possible
if _np_conv_ok(in1, in2, mode):
return np.correlate(in1, in2, mode)
# _correlateND is far slower when in2.size > in1.size, so swap them
# and then undo the effect afterward if mode == 'full'. Also, it fails
# with 'valid' mode if in2 is larger than in1, so swap those, too.
# Don't swap inputs for 'same' mode, since shape of in1 matters.
swapped_inputs = ((mode == 'full') and (in2.size > in1.size) or
_inputs_swap_needed(mode, in1.shape, in2.shape))
if swapped_inputs:
in1, in2 = in2, in1
if mode == 'valid':
ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
out = np.empty(ps, in1.dtype)
z = sigtools._correlateND(in1, in2, out, val)
else:
ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
# zero pad input
in1zpadded = np.zeros(ps, in1.dtype)
sc = tuple(slice(0, i) for i in in1.shape)
in1zpadded[sc] = in1.copy()
if mode == 'full':
out = np.empty(ps, in1.dtype)
elif mode == 'same':
out = np.empty(in1.shape, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
if swapped_inputs:
# Reverse and conjugate to undo the effect of swapping inputs
z = _reverse_and_conj(z)
return z
else:
raise ValueError("Acceptable method flags are 'auto',"
" 'direct', or 'fft'.")
def _centered(arr, newshape):
# Return the center newshape portion of the array.
newshape = asarray(newshape)
currshape = array(arr.shape)
startind = (currshape - newshape) // 2
endind = startind + newshape
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
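# Illustrative sketch (not part of the original module): _centered keeps the
# central `newshape` portion of an array, here the middle four elements.
def _example_centered():
    import numpy as np
    arr = np.arange(10)
    return _centered(arr, (4,))  # array([3, 4, 5, 6])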
def fftconvolve(in1, in2, mode="full", axes=None):
"""Convolve two N-dimensional arrays using FFT.
Convolve `in1` and `in2` using the fast Fourier transform method, with
the output size determined by the `mode` argument.
This is generally much faster than `convolve` for large arrays (n > ~500),
but can be slower when only a few output values are needed, and can only
output float arrays (int or object array inputs will be cast to float).
As of v0.19, `convolve` automatically chooses this method or the direct
method based on an estimation of which is faster.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
axes : int or array_like of ints or None, optional
Axes over which to compute the convolution.
The default is over all axes.
Returns
-------
out : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Autocorrelation of white noise is an impulse.
>>> from scipy import signal
>>> sig = np.random.randn(1000)
>>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('White noise')
>>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
>>> ax_mag.set_title('Autocorrelation')
>>> fig.tight_layout()
>>> fig.show()
Gaussian blur implemented using FFT convolution. Notice the dark borders
around the image, due to the zero-padding beyond its boundaries.
The `convolve2d` function allows for other types of image boundaries,
but is far slower.
>>> from scipy import misc
>>> face = misc.face(gray=True)
>>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8))
>>> blurred = signal.fftconvolve(face, kernel, mode='same')
>>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1,
... figsize=(6, 15))
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_kernel.imshow(kernel, cmap='gray')
>>> ax_kernel.set_title('Gaussian kernel')
>>> ax_kernel.set_axis_off()
>>> ax_blurred.imshow(blurred, cmap='gray')
>>> ax_blurred.set_title('Blurred')
>>> ax_blurred.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
noaxes = axes is None
if in1.ndim == in2.ndim == 0: # scalar inputs
return in1 * in2
elif in1.ndim != in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
elif in1.size == 0 or in2.size == 0: # empty arrays
return array([])
_, axes = _init_nd_shape_and_axes_sorted(in1, shape=None, axes=axes)
if not noaxes and not axes.size:
raise ValueError("when provided, axes cannot be empty")
if noaxes:
other_axes = array([], dtype=np.intc)
else:
other_axes = np.setdiff1d(np.arange(in1.ndim), axes)
s1 = array(in1.shape)
s2 = array(in2.shape)
if not np.all((s1[other_axes] == s2[other_axes])
| (s1[other_axes] == 1) | (s2[other_axes] == 1)):
raise ValueError("incompatible shapes for in1 and in2:"
" {0} and {1}".format(in1.shape, in2.shape))
complex_result = (np.issubdtype(in1.dtype, np.complexfloating)
or np.issubdtype(in2.dtype, np.complexfloating))
shape = np.maximum(s1, s2)
shape[axes] = s1[axes] + s2[axes] - 1
# Check that input sizes are compatible with 'valid' mode
if _inputs_swap_needed(mode, s1, s2):
# Convolution is commutative; order doesn't have any effect on output
in1, s1, in2, s2 = in2, s2, in1, s1
# Speed up FFT by padding to optimal size for FFTPACK
fshape = [fftpack.helper.next_fast_len(d) for d in shape[axes]]
fslice = tuple([slice(sz) for sz in shape])
# Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make
# sure we only call rfftn/irfftn from one thread at a time.
if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):
try:
sp1 = np.fft.rfftn(in1, fshape, axes=axes)
sp2 = np.fft.rfftn(in2, fshape, axes=axes)
ret = np.fft.irfftn(sp1 * sp2, fshape, axes=axes)[fslice].copy()
finally:
if not _rfft_mt_safe:
_rfft_lock.release()
else:
# If we're here, it's either because we need a complex result, or we
# failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
# is already in use by another thread). In either case, use the
# (threadsafe but slower) SciPy complex-FFT routines instead.
sp1 = fftpack.fftn(in1, fshape, axes=axes)
sp2 = fftpack.fftn(in2, fshape, axes=axes)
ret = fftpack.ifftn(sp1 * sp2, axes=axes)[fslice].copy()
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
return _centered(ret, s1)
elif mode == "valid":
shape_valid = shape.copy()
shape_valid[axes] = s1[axes] - s2[axes] + 1
return _centered(ret, shape_valid)
else:
raise ValueError("acceptable mode flags are 'valid',"
" 'same', or 'full'")
def _numeric_arrays(arrays, kinds='buifc'):
"""
Check whether all of the given arrays are numeric.
Parameters
----------
arrays : ndarray or list of ndarrays
Arrays to check for numeric dtypes.
kinds : string-like
The allowed dtype kinds. If the dtype.kind of any array is not in
this string, the function returns False; otherwise it returns True.
"""
if type(arrays) == ndarray:
return arrays.dtype.kind in kinds
for array_ in arrays:
if array_.dtype.kind not in kinds:
return False
return True
def _prod(iterable):
"""
Product of a list of numbers.
Faster than np.prod for short lists like array shapes.
"""
product = 1
for x in iterable:
product *= x
return product
def _fftconv_faster(x, h, mode):
"""
See if using `fftconvolve` or `_correlateND` is faster. The boolean value
returned depends on the sizes and shapes of the input values.
The big O ratios were found to hold across different machines, which makes
sense as it's the ratio that matters (the effective speed of the computer
is found in both big O constants). Regardless, this was tuned on an
early 2015 MacBook Pro with 8GB RAM and an Intel i5 processor.
"""
if mode == 'full':
out_shape = [n + k - 1 for n, k in zip(x.shape, h.shape)]
big_O_constant = 10963.92823819 if x.ndim == 1 else 8899.1104874
elif mode == 'same':
out_shape = x.shape
if x.ndim == 1:
if h.size <= x.size:
big_O_constant = 7183.41306773
else:
big_O_constant = 856.78174111
else:
big_O_constant = 34519.21021589
elif mode == 'valid':
out_shape = [n - k + 1 for n, k in zip(x.shape, h.shape)]
big_O_constant = 41954.28006344 if x.ndim == 1 else 66453.24316434
else:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
# see whether the Fourier transform convolution method or the direct
# convolution method is faster (discussed in scikit-image PR #1792)
direct_time = (x.size * h.size * _prod(out_shape))
fft_time = sum(n * math.log(n) for n in (x.shape + h.shape +
tuple(out_shape)))
return big_O_constant * fft_time < direct_time
def _reverse_and_conj(x):
"""
Reverse array `x` in all dimensions and perform the complex conjugate
"""
reverse = (slice(None, None, -1),) * x.ndim
return x[reverse].conj()
def _np_conv_ok(volume, kernel, mode):
"""
See if numpy supports convolution of `volume` and `kernel` (i.e. both are
1D ndarrays and of the appropriate shape). NumPy's 'same' mode uses the
size of the larger input, while SciPy's uses the size of the first input.
Invalid mode strings will return False and be caught by the calling func.
"""
if volume.ndim == kernel.ndim == 1:
if mode in ('full', 'valid'):
return True
elif mode == 'same':
return volume.size >= kernel.size
else:
return False
def _timeit_fast(stmt="pass", setup="pass", repeat=3):
"""
Returns the time the statement/function took, in seconds.
Faster, less precise version of IPython's timeit. `stmt` can be a statement
written as a string or a callable.
Will do only 1 loop (like IPython's timeit) with no repetitions
(unlike IPython) for very slow functions. For fast functions, only does
enough loops to take 5 ms, which seems to produce similar results (on
Windows at least), and avoids doing an extraneous cycle that isn't
measured.
"""
timer = timeit.Timer(stmt, setup)
# determine number of calls per rep so total time for 1 rep >= 5 ms
x = 0
for p in range(0, 10):
number = 10**p
x = timer.timeit(number) # seconds
if x >= 5e-3 / 10: # 5 ms for final test, 1/10th that for this one
break
if x > 1: # second
# If it's macroscopic, don't bother with repetitions
best = x
else:
number *= 10
r = timer.repeat(repeat, number)
best = min(r)
sec = best / number
return sec
def choose_conv_method(in1, in2, mode='full', measure=False):
"""
Find the fastest convolution/correlation method.
This primarily exists to be called during the ``method='auto'`` option in
`convolve` and `correlate`, but can also be used when performing many
convolutions of the same input shapes and dtypes, determining
which method to use for all of them, either to avoid the overhead of the
'auto' option or to use accurate real-world measurements.
Parameters
----------
in1 : array_like
The first argument passed into the convolution function.
in2 : array_like
The second argument passed into the convolution function.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
measure : bool, optional
If True, run and time the convolution of `in1` and `in2` with both
methods and return the fastest. If False (default), predict the fastest
method using precomputed values.
Returns
-------
method : str
A string indicating which convolution method is fastest, either
'direct' or 'fft'
times : dict, optional
A dictionary containing the times (in seconds) needed for each method.
This value is only returned if ``measure=True``.
See Also
--------
convolve
correlate
Notes
-----
For large n, ``measure=False`` is accurate and can quickly determine the
fastest method to perform the convolution. However, this is not as
accurate for small n (when any dimension in the input or output is small).
In practice, we found that this function estimates the faster method up to
a multiplicative factor of 5 (i.e., the estimated method is *at most* 5
times slower than the fastest method). The estimation values were tuned on
an early 2015 MacBook Pro with 8GB RAM but we found that the prediction
held *fairly* accurately across different machines.
If ``measure=True``, time the convolutions. Because this function uses
`fftconvolve`, an error will be thrown if it does not support the inputs.
There are cases when `fftconvolve` supports the inputs but this function
returns `direct` (e.g., to protect against floating point integer
precision).
.. versionadded:: 0.19
Examples
--------
Estimate the fastest method for a given input:
>>> from scipy import signal
>>> a = np.random.randn(1000)
>>> b = np.random.randn(1000000)
>>> method = signal.choose_conv_method(a, b, mode='same')
>>> method
'fft'
This can then be applied to other arrays of the same dtype and shape:
>>> c = np.random.randn(1000)
>>> d = np.random.randn(1000000)
>>> # `method` works with correlate and convolve
>>> corr1 = signal.correlate(a, b, mode='same', method=method)
>>> corr2 = signal.correlate(c, d, mode='same', method=method)
>>> conv1 = signal.convolve(a, b, mode='same', method=method)
>>> conv2 = signal.convolve(c, d, mode='same', method=method)
"""
volume = asarray(in1)
kernel = asarray(in2)
if measure:
times = {}
for method in ['fft', 'direct']:
times[method] = _timeit_fast(lambda: convolve(volume, kernel,
mode=mode, method=method))
chosen_method = 'fft' if times['fft'] < times['direct'] else 'direct'
return chosen_method, times
# fftconvolve doesn't support complex256
fftconv_unsup = "complex256" if sys.maxsize > 2**32 else "complex192"
if hasattr(np, fftconv_unsup):
if volume.dtype == fftconv_unsup or kernel.dtype == fftconv_unsup:
return 'direct'
# For integer input, catch when more precision is required than float
# provides (representing an integer as float can lose precision in
# fftconvolve if the value is larger than 2**52).
if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]):
max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max())
max_value *= int(min(volume.size, kernel.size))
if max_value > 2**np.finfo('float').nmant - 1:
return 'direct'
if _numeric_arrays([volume, kernel], kinds='b'):
return 'direct'
if _numeric_arrays([volume, kernel]):
if _fftconv_faster(volume, kernel, mode):
return 'fft'
return 'direct'
def convolve(in1, in2, mode='full', method='auto'):
"""
Convolve two N-dimensional arrays.
Convolve `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
method : str {'auto', 'direct', 'fft'}, optional
A string indicating which method to use to calculate the convolution.
``direct``
The convolution is determined directly from sums, the definition of
convolution.
``fft``
The Fourier Transform is used to perform the convolution by calling
`fftconvolve`.
``auto``
Automatically chooses direct or Fourier method based on an estimate
of which is faster (default). See Notes for more detail.
.. versionadded:: 0.19.0
Returns
-------
convolve : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
See Also
--------
numpy.polymul : performs polynomial multiplication (same operation, but
also accepts poly1d objects)
choose_conv_method : chooses the fastest appropriate convolution method
fftconvolve
Notes
-----
By default, `convolve` and `correlate` use ``method='auto'``, which calls
`choose_conv_method` to choose the fastest method using pre-computed
values (`choose_conv_method` can also measure real-world timing with a
keyword argument). Because `fftconvolve` relies on floating point numbers,
there are certain constraints that may force `method=direct` (more detail
in `choose_conv_method` docstring).
Examples
--------
Smooth a square pulse using a Hann window:
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 0.], 100)
>>> win = signal.hann(50)
>>> filtered = signal.convolve(sig, win, mode='same') / sum(win)
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('Original pulse')
>>> ax_orig.margins(0, 0.1)
>>> ax_win.plot(win)
>>> ax_win.set_title('Filter impulse response')
>>> ax_win.margins(0, 0.1)
>>> ax_filt.plot(filtered)
>>> ax_filt.set_title('Filtered signal')
>>> ax_filt.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
volume = asarray(in1)
kernel = asarray(in2)
if volume.ndim == kernel.ndim == 0:
return volume * kernel
elif volume.ndim != kernel.ndim:
raise ValueError("volume and kernel should have the same "
"dimensionality")
if _inputs_swap_needed(mode, volume.shape, kernel.shape):
# Convolution is commutative; order doesn't have any effect on output
volume, kernel = kernel, volume
if method == 'auto':
method = choose_conv_method(volume, kernel, mode=mode)
if method == 'fft':
out = fftconvolve(volume, kernel, mode=mode)
result_type = np.result_type(volume, kernel)
if result_type.kind in {'u', 'i'}:
out = np.around(out)
return out.astype(result_type)
elif method == 'direct':
# fastpath to faster numpy.convolve for 1d inputs when possible
if _np_conv_ok(volume, kernel, mode):
return np.convolve(volume, kernel, mode)
return correlate(volume, _reverse_and_conj(kernel), mode, 'direct')
else:
raise ValueError("Acceptable method flags are 'auto',"
" 'direct', or 'fft'.")
def order_filter(a, domain, rank):
"""
Perform an order filter on an N-dimensional array.
Perform an order filter on the input array `a`. The domain argument acts as a
mask centered over each pixel. The non-zero elements of domain are
used to select elements surrounding each input pixel which are placed
in a list. The list is sorted, and the output for that pixel is the
element corresponding to rank in the sorted list.
Parameters
----------
a : ndarray
The N-dimensional input array.
domain : array_like
A mask array with the same number of dimensions as `a`.
Each dimension should have an odd number of elements.
rank : int
A non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element, 1 is the
next smallest element, etc.).
Returns
-------
out : ndarray
The results of the order filter in an array with the same
shape as `a`.
Examples
--------
>>> from scipy import signal
>>> x = np.arange(25).reshape(5, 5)
>>> domain = np.identity(3)
>>> x
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> signal.order_filter(x, domain, 0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 2., 0.],
[ 0., 5., 6., 7., 0.],
[ 0., 10., 11., 12., 0.],
[ 0., 0., 0., 0., 0.]])
>>> signal.order_filter(x, domain, 2)
array([[ 6., 7., 8., 9., 4.],
[ 11., 12., 13., 14., 9.],
[ 16., 17., 18., 19., 14.],
[ 21., 22., 23., 24., 19.],
[ 20., 21., 22., 23., 24.]])
"""
domain = asarray(domain)
size = domain.shape
for k in range(len(size)):
if (size[k] % 2) != 1:
raise ValueError("Each dimension of domain argument "
" should have an odd number of elements.")
return sigtools._order_filterND(a, domain, rank)
def medfilt(volume, kernel_size=None):
"""
Perform a median filter on an N-dimensional array.
Apply a median filter to the input array using a local window-size
given by `kernel_size`. The array will automatically be zero-padded.
Parameters
----------
volume : array_like
An N-dimensional input array.
kernel_size : array_like, optional
A scalar or an N-length list giving the size of the median filter
window in each dimension. Elements of `kernel_size` should be odd.
If `kernel_size` is a scalar, then this scalar is used as the size in
each dimension. Default size is 3 for each dimension.
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
See Also
--------
scipy.ndimage.median_filter
Notes
-----
The more general function `scipy.ndimage.median_filter` has a more
efficient implementation of a median filter and therefore runs much faster.
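Examples
--------
A minimal illustrative sketch on hypothetical 1-D data (the values noted in
the comment are approximate):
>>> from scipy import signal
>>> x = np.array([2., 80., 6., 3., 5., 9., 1.])   # hypothetical data with a spike
>>> y = signal.medfilt(x, kernel_size=3)
>>> # y is approximately [2., 6., 6., 5., 5., 5., 1.]: the spike at 80 is removed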
"""
volume = atleast_1d(volume)
if kernel_size is None:
kernel_size = [3] * volume.ndim
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), volume.ndim)
for k in range(volume.ndim):
if (kernel_size[k] % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
domain = ones(kernel_size)
numels = product(kernel_size, axis=0)
order = numels // 2
return sigtools._order_filterND(volume, domain, order)
def wiener(im, mysize=None, noise=None):
"""
Perform a Wiener filter on an N-dimensional array.
Apply a Wiener filter to the N-dimensional array `im`.
Parameters
----------
im : ndarray
An N-dimensional array.
mysize : int or array_like, optional
A scalar or an N-length list giving the size of the Wiener filter
window in each dimension. Elements of mysize should be odd.
If mysize is a scalar, then this scalar is used as the size
in each dimension.
noise : float, optional
The noise-power to use. If None, then noise is estimated as the
average of the local variance of the input.
Returns
-------
out : ndarray
Wiener filtered result with the same shape as `im`.
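Examples
--------
A minimal illustrative sketch using hypothetical random data:
>>> from scipy import signal
>>> rng = np.random.RandomState(0)
>>> img = rng.randn(64, 64)                   # hypothetical noisy image
>>> filtered = signal.wiener(img, mysize=5)   # 5x5 Wiener filter window
>>> filtered.shape                            # output has the same shape as the input
(64, 64)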
"""
im = asarray(im)
if mysize is None:
mysize = [3] * im.ndim
mysize = asarray(mysize)
if mysize.shape == ():
mysize = np.repeat(mysize.item(), im.ndim)
# Estimate the local mean
lMean = correlate(im, ones(mysize), 'same') / product(mysize, axis=0)
# Estimate the local variance
lVar = (correlate(im ** 2, ones(mysize), 'same') /
product(mysize, axis=0) - lMean ** 2)
# Estimate the noise power if needed.
if noise is None:
noise = mean(ravel(lVar), axis=0)
res = (im - lMean)
res *= (1 - noise / lVar)
res += lMean
out = where(lVar < noise, lMean, res)
return out
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Convolve two 2-dimensional arrays.
Convolve `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Compute the gradient of an image by 2D convolution with a complex Scharr
operator. (Horizontal operator is real, vertical is imaginary.) Use
symmetric boundary condition to avoid creating edges at the image
boundaries.
>>> from scipy import signal
>>> from scipy import misc
>>> ascent = misc.ascent()
>>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j],
... [-10+0j, 0+ 0j, +10 +0j],
... [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy
>>> grad = signal.convolve2d(ascent, scharr, boundary='symm', mode='same')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15))
>>> ax_orig.imshow(ascent, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_mag.imshow(np.absolute(grad), cmap='gray')
>>> ax_mag.set_title('Gradient magnitude')
>>> ax_mag.set_axis_off()
>>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles
>>> ax_ang.set_title('Gradient orientation')
>>> ax_ang.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if not in1.ndim == in2.ndim == 2:
raise ValueError('convolve2d inputs must both be 2D arrays')
if _inputs_swap_needed(mode, in1.shape, in2.shape):
in1, in2 = in2, in1
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
return out
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Cross-correlate two 2-dimensional arrays.
Cross correlate `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
correlate2d : ndarray
A 2-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Examples
--------
Use 2D cross-correlation to find the location of a template in a noisy
image:
>>> from scipy import signal
>>> from scipy import misc
>>> face = misc.face(gray=True) - misc.face(gray=True).mean()
>>> template = np.copy(face[300:365, 670:750]) # right eye
>>> template -= template.mean()
>>> face = face + np.random.randn(*face.shape) * 50 # add noise
>>> corr = signal.correlate2d(face, template, boundary='symm', mode='same')
>>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1,
... figsize=(6, 15))
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_template.imshow(template, cmap='gray')
>>> ax_template.set_title('Template')
>>> ax_template.set_axis_off()
>>> ax_corr.imshow(corr, cmap='gray')
>>> ax_corr.set_title('Cross-correlation')
>>> ax_corr.set_axis_off()
>>> ax_orig.plot(x, y, 'ro')
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if not in1.ndim == in2.ndim == 2:
raise ValueError('correlate2d inputs must both be 2D arrays')
swapped_inputs = _inputs_swap_needed(mode, in1.shape, in2.shape)
if swapped_inputs:
in1, in2 = in2, in1
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
out = sigtools._convolve2d(in1, in2.conj(), 0, val, bval, fillvalue)
if swapped_inputs:
out = out[::-1, ::-1]
return out
def medfilt2d(input, kernel_size=3):
"""
Median filter a 2-dimensional array.
Apply a median filter to the `input` array using a local window-size
given by `kernel_size` (must be odd). The array is zero-padded
automatically.
Parameters
----------
input : array_like
A 2-dimensional input array.
kernel_size : array_like, optional
A scalar or a list of length 2, giving the size of the
median filter window in each dimension. Elements of
`kernel_size` should be odd. If `kernel_size` is a scalar,
then this scalar is used as the size in each dimension.
Default is a kernel of size (3, 3).
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
See Also
--------
scipy.ndimage.median_filter
Notes
-----
The more general function `scipy.ndimage.median_filter` has a more
efficient implementation of a median filter and therefore runs much faster.
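Examples
--------
A minimal illustrative sketch on hypothetical 2-D data:
>>> from scipy import signal
>>> x = np.arange(25, dtype=float).reshape(5, 5)   # hypothetical image
>>> y = signal.medfilt2d(x, kernel_size=3)         # 3x3 median window, zero-padded
>>> y.shape
(5, 5)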
"""
image = asarray(input)
if kernel_size is None:
kernel_size = [3] * 2
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), 2)
for size in kernel_size:
if (size % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
return sigtools._medfilt2d(image, kernel_size)
def lfilter(b, a, x, axis=-1, zi=None):
"""
Filter data along one-dimension with an IIR or FIR filter.
Filter a data sequence, `x`, using a digital filter. This works for many
fundamental data types (including Object type). The filter is a direct
form II transposed implementation of the standard difference equation
(see Notes).
Parameters
----------
b : array_like
The numerator coefficient vector in a 1-D sequence.
a : array_like
The denominator coefficient vector in a 1-D sequence. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the filter delays. It is a vector
(or array of vectors for an N-dimensional input) of length
``max(len(a), len(b)) - 1``. If `zi` is None or is not given then
initial rest is assumed. See `lfiltic` for more information.
Returns
-------
y : array
The output of the digital filter.
zf : array, optional
If `zi` is None, this is not returned; otherwise, `zf` holds the
final filter delay values.
See Also
--------
lfiltic : Construct initial conditions for `lfilter`.
lfilter_zi : Compute initial state (steady state of step response) for
`lfilter`.
filtfilt : A forward-backward filter, to obtain a filter with linear phase.
savgol_filter : A Savitzky-Golay filter.
sosfilt: Filter data using cascaded second-order sections.
sosfiltfilt: A forward-backward filter using second-order sections.
Notes
-----
The filter function is implemented as a direct II transposed structure.
This means that the filter implements::
a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M]
- a[1]*y[n-1] - ... - a[N]*y[n-N]
where `M` is the degree of the numerator, `N` is the degree of the
denominator, and `n` is the sample number. It is implemented using
the following difference equations (assuming M = N)::
a[0]*y[n] = b[0] * x[n] + d[0][n-1]
d[0][n] = b[1] * x[n] - a[1] * y[n] + d[1][n-1]
d[1][n] = b[2] * x[n] - a[2] * y[n] + d[2][n-1]
...
d[N-2][n] = b[N-1]*x[n] - a[N-1]*y[n] + d[N-1][n-1]
d[N-1][n] = b[N] * x[n] - a[N] * y[n]
where `d` are the state variables.
The rational transfer function describing this filter in the
z-transform domain is::
-1 -M
b[0] + b[1]z + ... + b[M] z
Y(z) = -------------------------------- X(z)
-1 -N
a[0] + a[1]z + ... + a[N] z
Examples
--------
Generate a noisy signal to be filtered:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 201)
>>> x = (np.sin(2*np.pi*0.75*t*(1-t) + 2.1) +
... 0.1*np.sin(2*np.pi*1.25*t + 1) +
... 0.18*np.cos(2*np.pi*3.85*t))
>>> xn = x + np.random.randn(len(t)) * 0.08
Create an order 3 lowpass butterworth filter:
>>> b, a = signal.butter(3, 0.05)
Apply the filter to xn. Use lfilter_zi to choose the initial condition of
the filter:
>>> zi = signal.lfilter_zi(b, a)
>>> z, _ = signal.lfilter(b, a, xn, zi=zi*xn[0])
Apply the filter again, to have a result filtered at an order the same as
filtfilt:
>>> z2, _ = signal.lfilter(b, a, z, zi=zi*z[0])
Use filtfilt to apply the filter:
>>> y = signal.filtfilt(b, a, xn)
Plot the original signal and the various filtered versions:
>>> plt.figure
>>> plt.plot(t, xn, 'b', alpha=0.75)
>>> plt.plot(t, z, 'r--', t, z2, 'r', t, y, 'k')
>>> plt.legend(('noisy signal', 'lfilter, once', 'lfilter, twice',
... 'filtfilt'), loc='best')
>>> plt.grid(True)
>>> plt.show()
"""
a = np.atleast_1d(a)
if len(a) == 1:
# This path only supports types fdgFDGO to mirror _linear_filter below.
# Any of b, a, x, or zi can set the dtype, but there is no default
# casting of other types; instead a NotImplementedError is raised.
b = np.asarray(b)
a = np.asarray(a)
if b.ndim != 1 and a.ndim != 1:
raise ValueError('object of too small depth for desired array')
x = np.asarray(x)
inputs = [b, a, x]
if zi is not None:
# _linear_filter does not broadcast zi, but does do expansion of
# singleton dims.
zi = np.asarray(zi)
if zi.ndim != x.ndim:
raise ValueError('object of too small depth for desired array')
expected_shape = list(x.shape)
expected_shape[axis] = b.shape[0] - 1
expected_shape = tuple(expected_shape)
# check the trivial case where zi is the right shape first
if zi.shape != expected_shape:
strides = zi.ndim * [None]
if axis < 0:
axis += zi.ndim
for k in range(zi.ndim):
if k == axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == 1:
strides[k] = 0
else:
raise ValueError('Unexpected shape for zi: expected '
'%s, found %s.' %
(expected_shape, zi.shape))
zi = np.lib.stride_tricks.as_strided(zi, expected_shape,
strides)
inputs.append(zi)
dtype = np.result_type(*inputs)
if dtype.char not in 'fdgFDGO':
raise NotImplementedError("input type '%s' not supported" % dtype)
b = np.array(b, dtype=dtype)
a = np.array(a, dtype=dtype, copy=False)
b /= a[0]
x = np.array(x, dtype=dtype, copy=False)
out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x)
ind = out_full.ndim * [slice(None)]
if zi is not None:
ind[axis] = slice(zi.shape[axis])
out_full[tuple(ind)] += zi
ind[axis] = slice(out_full.shape[axis] - len(b) + 1)
out = out_full[tuple(ind)]
if zi is None:
return out
else:
ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None)
zf = out_full[tuple(ind)]
return out, zf
else:
if zi is None:
return sigtools._linear_filter(b, a, x, axis)
else:
return sigtools._linear_filter(b, a, x, axis, zi)
def lfiltic(b, a, y, x=None):
"""
Construct initial conditions for lfilter given input and output vectors.
Given a linear filter (b, a) and initial conditions on the output `y`
and the input `x`, return the initial conditions on the state vector zi
which is used by `lfilter` to generate the output given the input.
Parameters
----------
b : array_like
Linear filter term.
a : array_like
Linear filter term.
y : array_like
Initial conditions.
If ``N = len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
If `y` is too short, it is padded with zeros.
x : array_like, optional
Initial conditions.
If ``M = len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
If `x` is not given, its initial conditions are assumed zero.
If `x` is too short, it is padded with zeros.
Returns
-------
zi : ndarray
The state vector ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``,
where ``K = max(M, N)``.
See Also
--------
lfilter, lfilter_zi
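Examples
--------
A minimal illustrative sketch, assuming a lowpass filter designed with `butter`:
construct initial conditions from past inputs and outputs that are all 1.0, so
that filtering a constant input of ones shows no start-up transient.
>>> from scipy import signal
>>> b, a = signal.butter(3, 0.25)
>>> zi = signal.lfiltic(b, a, y=np.ones(len(a) - 1), x=np.ones(len(b) - 1))
>>> y, zf = signal.lfilter(b, a, np.ones(8), zi=zi)
>>> # y stays at (approximately) 1.0 throughout, i.e. the filter starts in
>>> # its steady state for a unit step input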
"""
N = np.size(a) - 1
M = np.size(b) - 1
K = max(M, N)
y = asarray(y)
if y.dtype.kind in 'bui':
# ensure calculations are floating point
y = y.astype(np.float64)
zi = zeros(K, y.dtype)
if x is None:
x = zeros(M, y.dtype)
else:
x = asarray(x)
L = np.size(x)
if L < M:
x = r_[x, zeros(M - L)]
L = np.size(y)
if L < N:
y = r_[y, zeros(N - L)]
for m in range(M):
zi[m] = np.sum(b[m + 1:] * x[:M - m], axis=0)
for m in range(N):
zi[m] -= np.sum(a[m + 1:] * y[:N - m], axis=0)
return zi
def deconvolve(signal, divisor):
"""Deconvolves ``divisor`` out of ``signal`` using inverse filtering.
Returns the quotient and remainder such that
``signal = convolve(divisor, quotient) + remainder``
Parameters
----------
signal : array_like
Signal data, typically a recorded signal
divisor : array_like
Divisor data, typically an impulse response or filter that was
applied to the original signal
Returns
-------
quotient : ndarray
Quotient, typically the recovered original signal
remainder : ndarray
Remainder
Examples
--------
Deconvolve a signal that's been filtered:
>>> from scipy import signal
>>> original = [0, 1, 0, 0, 1, 1, 0, 0]
>>> impulse_response = [2, 1]
>>> recorded = signal.convolve(impulse_response, original)
>>> recorded
array([0, 2, 1, 0, 2, 3, 1, 0, 0])
>>> recovered, remainder = signal.deconvolve(recorded, impulse_response)
>>> recovered
array([ 0., 1., 0., 0., 1., 1., 0., 0.])
See Also
--------
numpy.polydiv : performs polynomial division (same operation, but
also accepts poly1d objects)
"""
num = atleast_1d(signal)
den = atleast_1d(divisor)
N = len(num)
D = len(den)
if D > N:
quot = []
rem = num
else:
input = zeros(N - D + 1, float)
input[0] = 1
quot = lfilter(num, den, input)
rem = num - convolve(den, quot, mode='full')
return quot, rem
def hilbert(x, N=None, axis=-1):
"""
Compute the analytic signal, using the Hilbert transform.
The transformation is done along the last axis by default.
Parameters
----------
x : array_like
Signal data. Must be real.
N : int, optional
Number of Fourier components. Default: ``x.shape[axis]``
axis : int, optional
Axis along which to do the transformation. Default: -1.
Returns
-------
xa : ndarray
Analytic signal of `x`, of each 1-D array along `axis`
See Also
--------
scipy.fftpack.hilbert : Return Hilbert transform of a periodic sequence x.
Notes
-----
The analytic signal ``x_a(t)`` of signal ``x(t)`` is:
.. math:: x_a = F^{-1}(F(x) 2U) = x + i y
where `F` is the Fourier transform, `U` the unit step function,
and `y` the Hilbert transform of `x`. [1]_
In other words, the negative half of the frequency spectrum is zeroed
out, turning the real-valued signal into a complex signal. The Hilbert
transformed signal can be obtained from ``np.imag(hilbert(x))``, and the
original signal from ``np.real(hilbert(x))``.
Examples
--------
In this example we use the Hilbert transform to determine the amplitude
envelope and instantaneous frequency of an amplitude-modulated signal.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import hilbert, chirp
>>> duration = 1.0
>>> fs = 400.0
>>> samples = int(fs*duration)
>>> t = np.arange(samples) / fs
We create a chirp whose frequency increases from 20 Hz to 100 Hz and
apply an amplitude modulation.
>>> signal = chirp(t, 20.0, t[-1], 100.0)
>>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) )
The amplitude envelope is given by the magnitude of the analytic signal. The
instantaneous frequency can be obtained by differentiating the
instantaneous phase with respect to time. The instantaneous phase corresponds
to the phase angle of the analytic signal.
>>> analytic_signal = hilbert(signal)
>>> amplitude_envelope = np.abs(analytic_signal)
>>> instantaneous_phase = np.unwrap(np.angle(analytic_signal))
>>> instantaneous_frequency = (np.diff(instantaneous_phase) /
... (2.0*np.pi) * fs)
>>> fig = plt.figure()
>>> ax0 = fig.add_subplot(211)
>>> ax0.plot(t, signal, label='signal')
>>> ax0.plot(t, amplitude_envelope, label='envelope')
>>> ax0.set_xlabel("time in seconds")
>>> ax0.legend()
>>> ax1 = fig.add_subplot(212)
>>> ax1.plot(t[1:], instantaneous_frequency)
>>> ax1.set_xlabel("time in seconds")
>>> ax1.set_ylim(0.0, 120.0)
References
----------
.. [1] Wikipedia, "Analytic signal".
https://en.wikipedia.org/wiki/Analytic_signal
.. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2.
.. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal
Processing, Third Edition, 2009. Chapter 12.
ISBN 13: 978-1292-02572-8
"""
x = asarray(x)
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape[axis]
if N <= 0:
raise ValueError("N must be positive.")
Xf = fftpack.fft(x, N, axis=axis)
h = zeros(N)
if N % 2 == 0:
h[0] = h[N // 2] = 1
h[1:N // 2] = 2
else:
h[0] = 1
h[1:(N + 1) // 2] = 2
if x.ndim > 1:
ind = [newaxis] * x.ndim
ind[axis] = slice(None)
h = h[tuple(ind)]
x = fftpack.ifft(Xf * h, axis=axis)
return x
def hilbert2(x, N=None):
"""
Compute the '2-D' analytic signal of `x`
Parameters
----------
x : array_like
2-D signal data.
N : int or tuple of two ints, optional
Number of Fourier components. Default is ``x.shape``
Returns
-------
xa : ndarray
Analytic signal of `x` taken along axes (0,1).
References
----------
.. [1] Wikipedia, "Analytic signal",
https://en.wikipedia.org/wiki/Analytic_signal
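Examples
--------
A minimal illustrative sketch on hypothetical real-valued 2-D data:
>>> from scipy import signal
>>> x = np.random.randn(8, 8)     # hypothetical real 2-D signal
>>> xa = signal.hilbert2(x)       # complex 2-D analytic signal
>>> xa.shape
(8, 8)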
"""
x = atleast_2d(x)
if x.ndim > 2:
raise ValueError("x must be 2-D.")
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape
elif isinstance(N, int):
if N <= 0:
raise ValueError("N must be positive.")
N = (N, N)
elif len(N) != 2 or np.any(np.asarray(N) <= 0):
raise ValueError("When given as a tuple, N must hold exactly "
"two positive integers")
Xf = fftpack.fft2(x, N, axes=(0, 1))
h1 = zeros(N[0], 'd')
h2 = zeros(N[1], 'd')
# Build the 1-D spectral masks for each axis in place (h1 and h2 are
# modified directly, so no rebinding of the names is needed).
for h, N1 in zip((h1, h2), N):
if N1 % 2 == 0:
h[0] = h[N1 // 2] = 1
h[1:N1 // 2] = 2
else:
h[0] = 1
h[1:(N1 + 1) // 2] = 2
h = h1[:, newaxis] * h2[newaxis, :]
k = x.ndim
while k > 2:
h = h[:, newaxis]
k -= 1
x = fftpack.ifft2(Xf * h, axes=(0, 1))
return x
def cmplx_sort(p):
"""Sort roots based on magnitude.
Parameters
----------
p : array_like
The roots to sort, as a 1-D array.
Returns
-------
p_sorted : ndarray
Sorted roots.
indx : ndarray
Array of indices needed to sort the input `p`.
Examples
--------
>>> from scipy import signal
>>> vals = [1, 4, 1+1.j, 3]
>>> p_sorted, indx = signal.cmplx_sort(vals)
>>> p_sorted
array([1.+0.j, 1.+1.j, 3.+0.j, 4.+0.j])
>>> indx
array([0, 2, 3, 1])
"""
p = asarray(p)
if iscomplexobj(p):
indx = argsort(abs(p))
else:
indx = argsort(p)
return take(p, indx, 0), indx
def unique_roots(p, tol=1e-3, rtype='min'):
"""
Determine unique roots and their multiplicities from a list of roots.
Parameters
----------
p : array_like
The list of roots.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
pout : ndarray
The list of unique roots, sorted from low to high.
mult : ndarray
The multiplicity of each root.
Notes
-----
This utility function is not specific to roots but can be used for any
sequence of values for which uniqueness and multiplicity has to be
determined. For a more general routine, see `numpy.unique`.
Examples
--------
>>> from scipy import signal
>>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
>>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')
Check which roots have multiplicity larger than 1:
>>> uniq[mult > 1]
array([ 1.305])
"""
if rtype in ['max', 'maximum']:
comproot = np.max
elif rtype in ['min', 'minimum']:
comproot = np.min
elif rtype in ['avg', 'mean']:
comproot = np.mean
else:
raise ValueError("`rtype` must be one of "
"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
p = asarray(p) * 1.0
tol = abs(tol)
p, indx = cmplx_sort(p)
pout = []
mult = []
indx = -1
curp = p[0] + 5 * tol
sameroots = []
for k in range(len(p)):
tr = p[k]
if abs(tr - curp) < tol:
sameroots.append(tr)
curp = comproot(sameroots)
pout[indx] = curp
mult[indx] += 1
else:
pout.append(tr)
curp = tr
sameroots = [tr]
indx += 1
mult.append(1)
return array(pout), array(mult)
def invres(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(s) and a(s) from partial fraction expansion.
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M]
H(s) = ------ = ------------------------------------------
a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N]
then the partial-fraction expansion H(s) is defined as::
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
This function is used for polynomials in positive powers of s or z,
such as analog filters or digital filters in controls engineering. For
negative powers of z (typical for digital filters in DSP), use `invresz`.
Parameters
----------
r : array_like
Residues.
p : array_like
Poles.
k : array_like
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
See Also
--------
residue, invresz, unique_roots
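Examples
--------
A minimal illustrative sketch (values noted in the comments are approximate):
rebuild the filter whose expansion is H(s) = -1/(s + 2) + 2/(s + 1).
>>> from scipy import signal
>>> b, a = signal.invres([-1.0, 2.0], [-2.0, -1.0], [])
>>> # b is approximately [1., 3.] and a is [1., 3., 2.],
>>> # i.e. H(s) = (s + 3) / (s**2 + 3*s + 2)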
"""
extra = k
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
for k in range(len(pout)):
temp = []
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
b = polyadd(b, r[indx] * atleast_1d(poly(t2)))
indx += 1
b = real_if_close(b)
while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1):
b = b[1:]
return b, a
def residue(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(s) / a(s).
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M]
H(s) = ------ = ------------------------------------------
a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N]
then the partial-fraction expansion H(s) is defined as::
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
This function is used for polynomials in positive powers of s or z,
such as analog filters or digital filters in controls engineering. For
negative powers of z (typical for digital filters in DSP), use `residuez`.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invres, residuez, numpy.poly, unique_roots
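Examples
--------
A minimal illustrative sketch (values noted in the comments are approximate):
expand H(s) = (s + 3) / (s**2 + 3*s + 2) into partial fractions.
>>> from scipy import signal
>>> r, p, k = signal.residue([1.0, 3.0], [1.0, 3.0, 2.0])
>>> # poles p are [-2., -1.], residues r are [-1., 2.] and k is a zero
>>> # direct term, i.e. H(s) = -1/(s + 2) + 2/(s + 1)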
"""
b, a = map(asarray, (b, a))
rscale = a[0]
k, b = polydiv(b, a)
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula
indx = 0
for n in range(len(pout)):
bn = b.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))
# bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
# multiplicity of pole at po[n]
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, pout[n]) / polyval(an, pout[n]) /
factorial(sig - m))
indx += sig
return r / rscale, p, k
def residuez(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(z) / a(z).
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
H(z) = ------ = ------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N)
then the partial-fraction expansion H(z) is defined as::
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than `tol`), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
This function is used for polynomials in negative powers of z,
such as digital filters in DSP. For positive powers, use `residue`.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invresz, residue, unique_roots
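Examples
--------
A minimal illustrative sketch (values noted in the comments are approximate):
expand H(z) = 1 / (1 - 0.75*z**-1 + 0.125*z**-2) into partial fractions.
>>> from scipy import signal
>>> r, p, k = signal.residuez([1.0], [1.0, -0.75, 0.125])
>>> # poles p are [0.25, 0.5], residues r are [-1., 2.] and k is a zero
>>> # direct term, i.e. H(z) = -1/(1 - 0.25*z**-1) + 2/(1 - 0.5*z**-1)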
"""
b, a = map(asarray, (b, a))
gain = a[0]
brev, arev = b[::-1], a[::-1]
krev, brev = polydiv(brev, arev)
if krev == []:
k = []
else:
k = krev[::-1]
b = brev[::-1]
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula (for discrete-time)
# the polynomial is in z**(-1) and the multiplication is by terms
# like this (1-p[i] z**(-1))**mult[i]. After differentiation,
# we must divide by (-p[i])**(m-k) as well as (m-k)!
indx = 0
for n in range(len(pout)):
bn = brev.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))[::-1]
# bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
# multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, 1.0 / pout[n]) /
polyval(an, 1.0 / pout[n]) /
factorial(sig - m) / (-pout[n]) ** (sig - m))
indx += sig
return r / gain, p, k
def invresz(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(z) and a(z) from partial fraction expansion.
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
H(z) = ------ = ------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N)
then the partial-fraction expansion H(z) is defined as::
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than `tol`), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
This function is used for polynomials in negative powers of z,
such as digital filters in DSP. For positive powers, use `invres`.
Parameters
----------
r : array_like
Residues.
p : array_like
Poles.
k : array_like
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
See Also
--------
residuez, unique_roots, invres
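Examples
--------
A minimal illustrative sketch (values noted in the comments are approximate):
rebuild the filter whose expansion is
H(z) = -1/(1 - 0.25*z**-1) + 2/(1 - 0.5*z**-1).
>>> from scipy import signal
>>> b, a = signal.invresz([-1.0, 2.0], [0.25, 0.5], [])
>>> # a is [1., -0.75, 0.125] and b is [1., 0.] (with a trailing zero
>>> # coefficient), i.e. H(z) = 1 / (1 - 0.75*z**-1 + 0.125*z**-2)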
"""
extra = asarray(k)
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
brev = asarray(b)[::-1]
for k in range(len(pout)):
temp = []
# Construct polynomial which does not include any of this root
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
brev = polyadd(brev, (r[indx] * atleast_1d(poly(t2)))[::-1])
indx += 1
b = real_if_close(brev[::-1])
return b, a
def resample(x, num, t=None, axis=0, window=None):
"""
Resample `x` to `num` samples using Fourier method along the given axis.
The resampled signal starts at the same value as `x` but is sampled
with a spacing of ``len(x) / num * (spacing of x)``. Because a
Fourier method is used, the signal is assumed to be periodic.
Parameters
----------
x : array_like
The data to be resampled.
num : int
The number of samples in the resampled signal.
t : array_like, optional
If `t` is given, it is assumed to be the sample positions
associated with the signal data in `x`.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : array_like, callable, string, float, or tuple, optional
Specifies the window applied to the signal in the Fourier
domain. See below for details.
Returns
-------
resampled_x or (resampled_x, resampled_t)
Either the resampled array, or, if `t` was given, a tuple
containing the resampled array and the corresponding resampled
positions.
See Also
--------
decimate : Downsample the signal after applying an FIR or IIR filter.
resample_poly : Resample using polyphase filtering and an FIR filter.
Notes
-----
The argument `window` controls a Fourier-domain window that tapers
the Fourier spectrum before zero-padding to alleviate ringing in
the resampled values for sampled signals you didn't intend to be
interpreted as band-limited.
If `window` is a function, then it is called with a vector of inputs
indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).
If `window` is an array of the same length as `x.shape[axis]` it is
assumed to be the window to be applied directly in the Fourier
domain (with dc and low-frequency first).
For any other type of `window`, the function `scipy.signal.get_window`
is called to generate the window.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * len(x) / num``.
If `t` is not None, then it represents the old sample positions,
and the new sample positions will be returned as well as the new
samples.
As noted, `resample` uses FFT transformations, which can be very
slow if the number of input or output samples is large and prime;
see `scipy.fftpack.fft`.
Examples
--------
Note that the end of the resampled data rises to meet the first
sample of the next cycle:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f = signal.resample(y, 100)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro')
>>> plt.legend(['data', 'resampled'], loc='best')
>>> plt.show()
"""
x = asarray(x)
X = fftpack.fft(x, axis=axis)
Nx = x.shape[axis]
if window is not None:
if callable(window):
W = window(fftpack.fftfreq(Nx))
elif isinstance(window, ndarray):
if window.shape != (Nx,):
raise ValueError('window must have the same length as data')
W = window
else:
W = fftpack.ifftshift(get_window(window, Nx))
newshape = [1] * x.ndim
newshape[axis] = len(W)
W.shape = newshape
X = X * W
W.shape = (Nx,)
sl = [slice(None)] * x.ndim
newshape = list(x.shape)
newshape[axis] = num
N = int(np.minimum(num, Nx))
Y = zeros(newshape, 'D')
sl[axis] = slice(0, (N + 1) // 2)
Y[tuple(sl)] = X[tuple(sl)]
sl[axis] = slice(-(N - 1) // 2, None)
Y[tuple(sl)] = X[tuple(sl)]
if N % 2 == 0: # special treatment if low number of points is even. So far we have set Y[-N/2]=X[-N/2]
if N < Nx: # if downsampling
sl[axis] = slice(N//2,N//2+1,None) # select the component at frequency N/2
Y[tuple(sl)] += X[tuple(sl)] # add the component of X at N/2
elif N < num: # if upsampling
sl[axis] = slice(num-N//2,num-N//2+1,None) # select the component at frequency -N/2
Y[tuple(sl)] /= 2 # halve the component at -N/2
temp = Y[tuple(sl)]
sl[axis] = slice(N//2,N//2+1,None) # select the component at +N/2
Y[tuple(sl)] = temp # set that equal to the component at -N/2
y = fftpack.ifft(Y, axis=axis) * (float(num) / float(Nx))
if x.dtype.char not in ['F', 'D']:
y = y.real
if t is None:
return y
else:
new_t = arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
return y, new_t
def resample_poly(x, up, down, axis=0, window=('kaiser', 5.0)):
"""
Resample `x` along the given axis using polyphase filtering.
The signal `x` is upsampled by the factor `up`, a zero-phase low-pass
FIR filter is applied, and then it is downsampled by the factor `down`.
The resulting sample rate is ``up / down`` times the original sample
rate. Values beyond the boundary of the signal are assumed to be zero
during the filtering step.
Parameters
----------
x : array_like
The data to be resampled.
up : int
The upsampling factor.
down : int
The downsampling factor.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : string, tuple, or array_like, optional
Desired window to use to design the low-pass filter, or the FIR filter
coefficients to employ. See below for details.
Returns
-------
resampled_x : array
The resampled array.
See Also
--------
decimate : Downsample the signal after applying an FIR or IIR filter.
resample : Resample up or down using the FFT method.
Notes
-----
This polyphase method will likely be faster than the Fourier method
in `scipy.signal.resample` when the number of samples is large and
prime, or when the number of samples is large and `up` and `down`
share a large greatest common divisor. The length of the FIR
filter used will depend on ``max(up, down) // gcd(up, down)``, and
the number of operations during polyphase filtering will depend on
the filter length and `down` (see `scipy.signal.upfirdn` for details).
The argument `window` specifies the FIR low-pass filter design.
If `window` is an array_like it is assumed to be the FIR filter
coefficients. Note that the FIR filter is applied after the upsampling
step, so it should be designed to operate on a signal at a sampling
frequency higher than the original by a factor of `up//gcd(up, down)`.
This function's output will be centered with respect to this array, so it
is best to pass a symmetric filter with an odd number of samples if, as
is usually the case, a zero-phase filter is desired.
For any other type of `window`, the functions `scipy.signal.get_window`
and `scipy.signal.firwin` are called to generate the appropriate filter
coefficients.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * down / float(up)``.
Examples
--------
Note that the end of the resampled data rises to meet the first
sample of the next cycle for the FFT method, and gets closer to zero
for the polyphase method:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f_fft = signal.resample(y, 100)
>>> f_poly = signal.resample_poly(y, 100, 20)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(xnew, f_fft, 'b.-', xnew, f_poly, 'r.-')
>>> plt.plot(x, y, 'ko-')
>>> plt.plot(10, y[0], 'bo', 10, 0., 'ro') # boundaries
>>> plt.legend(['resample', 'resamp_poly', 'data'], loc='best')
>>> plt.show()
"""
x = asarray(x)
if up != int(up):
raise ValueError("up must be an integer")
if down != int(down):
raise ValueError("down must be an integer")
up = int(up)
down = int(down)
if up < 1 or down < 1:
raise ValueError('up and down must be >= 1')
# Determine our up and down factors
# Use a rational approximation to save computation time on really long
# signals
g_ = gcd(up, down)
up //= g_
down //= g_
if up == down == 1:
return x.copy()
n_out = x.shape[axis] * up
n_out = n_out // down + bool(n_out % down)
if isinstance(window, (list, np.ndarray)):
window = array(window) # use array to force a copy (we modify it)
if window.ndim > 1:
raise ValueError('window must be 1-D')
half_len = (window.size - 1) // 2
h = window
else:
# Design a linear-phase low-pass FIR filter
max_rate = max(up, down)
f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist)
half_len = 10 * max_rate # reasonable cutoff for our sinc-like function
h = firwin(2 * half_len + 1, f_c, window=window)
h *= up
# Zero-pad our filter to put the output samples at the center
n_pre_pad = (down - half_len % down)
n_post_pad = 0
n_pre_remove = (half_len + n_pre_pad) // down
# We should rarely need to do this given our filter lengths...
while _output_len(len(h) + n_pre_pad + n_post_pad, x.shape[axis],
up, down) < n_out + n_pre_remove:
n_post_pad += 1
h = np.concatenate((np.zeros(n_pre_pad, dtype=h.dtype), h,
np.zeros(n_post_pad, dtype=h.dtype)))
n_pre_remove_end = n_pre_remove + n_out
# filter then remove excess
y = upfirdn(h, x, up, down, axis=axis)
keep = [slice(None), ]*x.ndim
keep[axis] = slice(n_pre_remove, n_pre_remove_end)
return y[tuple(keep)]
def vectorstrength(events, period):
'''
Determine the vector strength of the events corresponding to the given
period.
The vector strength is a measure of phase synchrony: how well the
timing of the events is synchronized to a single period of a periodic
signal.
If multiple periods are used, the vector strength is calculated for each;
this is called the "resonating vector strength".
Parameters
----------
events : 1D array_like
An array of time points containing the timing of the events.
period : float or array_like
The period of the signal that the events should synchronize to.
The period is in the same units as `events`. It can also be an array
of periods, in which case the outputs are arrays of the same length.
Returns
-------
strength : float or 1D array
The strength of the synchronization. 1.0 is perfect synchronization
and 0.0 is no synchronization. If `period` is an array, this is also
an array with each element containing the vector strength at the
corresponding period.
phase : float or array
The phase that the events are most strongly synchronized to in radians.
If `period` is an array, this is also an array with each element
containing the phase for the corresponding period.
References
----------
van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector
strength: Auditory system, electric fish, and noise.
Chaos 21, 047508 (2011);
:doi:`10.1063/1.3670512`.
van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises:
biological and mathematical perspectives. Biol Cybern.
2013 Aug;107(4):385-96. :doi:`10.1007/s00422-013-0561-7`.
van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens
when we vary the "probing" frequency while keeping the spike times
fixed. Biol Cybern. 2013 Aug;107(4):491-94.
:doi:`10.1007/s00422-013-0560-8`.
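Examples
--------
A minimal illustrative sketch with hypothetical event times: events that all
occur at the same phase of the period give a vector strength of 1.
>>> from scipy import signal
>>> events = np.array([0.25, 1.25, 2.25, 3.25])   # one event per 1.0-second period
>>> strength, phase = signal.vectorstrength(events, period=1.0)
>>> # strength is (approximately) 1.0 and phase is pi/2 (about 1.571 radians)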
'''
events = asarray(events)
period = asarray(period)
if events.ndim > 1:
raise ValueError('events cannot have dimensions more than 1')
if period.ndim > 1:
raise ValueError('period cannot have dimensions more than 1')
# we need to know later if period was originally a scalar
scalarperiod = not period.ndim
events = atleast_2d(events)
period = atleast_2d(period)
if (period <= 0).any():
raise ValueError('periods must be positive')
# this converts the times to vectors
vectors = exp(dot(2j*pi/period.T, events))
# the vector strength is just the magnitude of the mean of the vectors
# the vector phase is the angle of the mean of the vectors
vectormean = mean(vectors, axis=1)
strength = abs(vectormean)
phase = angle(vectormean)
# if the original period was a scalar, return scalars
if scalarperiod:
strength = strength[0]
phase = phase[0]
return strength, phase
def detrend(data, axis=-1, type='linear', bp=0):
"""
Remove linear trend along axis from data.
Parameters
----------
data : array_like
The input data.
axis : int, optional
The axis along which to detrend the data. By default this is the
last axis (-1).
type : {'linear', 'constant'}, optional
The type of detrending. If ``type == 'linear'`` (default),
the result of a linear least-squares fit to `data` is subtracted
from `data`.
If ``type == 'constant'``, only the mean of `data` is subtracted.
bp : array_like of ints, optional
A sequence of break points. If given, an individual linear fit is
performed for each part of `data` between two break points.
Break points are specified as indices into `data`.
Returns
-------
ret : ndarray
The detrended input data.
Examples
--------
>>> from scipy import signal
>>> randgen = np.random.RandomState(9)
>>> npoints = 1000
>>> noise = randgen.randn(npoints)
>>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
>>> (signal.detrend(x) - noise).max() < 0.01
True
"""
if type not in ['linear', 'l', 'constant', 'c']:
raise ValueError("Trend type must be 'linear' or 'constant'.")
data = asarray(data)
dtype = data.dtype.char
if dtype not in 'dfDF':
dtype = 'd'
if type in ['constant', 'c']:
ret = data - expand_dims(mean(data, axis), axis)
return ret
else:
dshape = data.shape
N = dshape[axis]
bp = sort(unique(r_[0, bp, N]))
if np.any(bp > N):
raise ValueError("Breakpoints must be less than length "
"of data along given axis.")
Nreg = len(bp) - 1
# Restructure data so that axis is along first dimension and
# all other dimensions are collapsed into second dimension
rnk = len(dshape)
if axis < 0:
axis = axis + rnk
newdims = r_[axis, 0:axis, axis + 1:rnk]
newdata = reshape(transpose(data, tuple(newdims)),
(N, _prod(dshape) // N))
newdata = newdata.copy() # make sure we have a copy
if newdata.dtype.char not in 'dfDF':
newdata = newdata.astype(dtype)
# Find leastsq fit and remove it for each piece
for m in range(Nreg):
Npts = bp[m + 1] - bp[m]
A = ones((Npts, 2), dtype)
A[:, 0] = cast[dtype](arange(1, Npts + 1) * 1.0 / Npts)
sl = slice(bp[m], bp[m + 1])
coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
newdata[sl] = newdata[sl] - dot(A, coef)
# Put data back in original shape.
tdshape = take(dshape, newdims, 0)
ret = reshape(newdata, tuple(tdshape))
vals = list(range(1, rnk))
olddims = vals[:axis] + [0] + vals[axis:]
ret = transpose(ret, tuple(olddims))
return ret
def lfilter_zi(b, a):
"""
Construct initial conditions for lfilter for step response steady-state.
Compute an initial state `zi` for the `lfilter` function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
b, a : array_like (1-D)
The IIR filter coefficients. See `lfilter` for more
information.
Returns
-------
zi : 1-D ndarray
The initial state for the filter.
See Also
--------
lfilter, lfiltic, filtfilt
Notes
-----
A linear filter with order m has a state space representation (A, B, C, D),
for which the output y of the filter can be expressed as::
z(n+1) = A*z(n) + B*x(n)
y(n) = C*z(n) + D*x(n)
where z(n) is a vector of length m, A has shape (m, m), B has shape
(m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
a scalar). lfilter_zi solves::
zi = A*zi + B
In other words, it finds the initial condition for which the response
to an input of all ones is a constant.
Given the filter coefficients `a` and `b`, the state space matrices
for the transposed direct form II implementation of the linear filter,
which is the implementation used by scipy.signal.lfilter, are::
A = scipy.linalg.companion(a).T
B = b[1:] - a[1:]*b[0]
assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
divided by a[0].
Examples
--------
The following code creates a lowpass Butterworth filter. Then it
applies that filter to an array whose values are all 1.0; the
output is also all 1.0, as expected for a lowpass filter. If the
`zi` argument of `lfilter` had not been given, the output would have
shown the transient signal.
>>> from numpy import array, ones
>>> from scipy.signal import lfilter, lfilter_zi, butter
>>> b, a = butter(5, 0.25)
>>> zi = lfilter_zi(b, a)
>>> y, zo = lfilter(b, a, ones(10), zi=zi)
>>> y
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
Another example:
>>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
>>> y, zf = lfilter(b, a, x, zi=zi*x[0])
>>> y
array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,
0.44399389, 0.35505241])
Note that the `zi` argument to `lfilter` was computed using
`lfilter_zi` and scaled by `x[0]`. Then the output `y` has no
transient until the input drops from 0.5 to 0.0.
"""
# FIXME: Can this function be replaced with an appropriate
# use of lfiltic? For example, when b,a = butter(N,Wn),
# lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
#
# We could use scipy.signal.normalize, but it uses warnings in
# cases where a ValueError is more appropriate, and it allows
# b to be 2D.
b = np.atleast_1d(b)
if b.ndim != 1:
raise ValueError("Numerator b must be 1-D.")
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Denominator a must be 1-D.")
while len(a) > 1 and a[0] == 0.0:
a = a[1:]
if a.size < 1:
raise ValueError("There must be at least one nonzero `a` coefficient.")
if a[0] != 1.0:
# Normalize the coefficients so a[0] == 1.
b = b / a[0]
a = a / a[0]
n = max(len(a), len(b))
# Pad a or b with zeros so they are the same length.
if len(a) < n:
a = np.r_[a, np.zeros(n - len(a))]
elif len(b) < n:
b = np.r_[b, np.zeros(n - len(b))]
IminusA = np.eye(n - 1) - linalg.companion(a).T
B = b[1:] - a[1:] * b[0]
# Solve zi = A*zi + B
zi = np.linalg.solve(IminusA, B)
# For future reference: we could also use the following
# explicit formulas to solve the linear system:
#
# zi = np.zeros(n - 1)
# zi[0] = B.sum() / IminusA[:,0].sum()
# asum = 1.0
# csum = 0.0
# for k in range(1,n-1):
# asum += a[k]
# csum += b[k] - a[k]*b[0]
# zi[k] = asum*zi[0] - csum
return zi
def sosfilt_zi(sos):
"""
Construct initial conditions for sosfilt for step response steady-state.
Compute an initial state `zi` for the `sosfilt` function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
zi : ndarray
Initial conditions suitable for use with ``sosfilt``, shape
``(n_sections, 2)``.
See Also
--------
sosfilt, zpk2sos
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
Filter a rectangular pulse that begins at time 0, with and without
the use of the `zi` argument of `scipy.signal.sosfilt`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sos = signal.butter(9, 0.125, output='sos')
>>> zi = signal.sosfilt_zi(sos)
>>> x = (np.arange(250) < 100).astype(int)
>>> f1 = signal.sosfilt(sos, x)
>>> f2, zo = signal.sosfilt(sos, x, zi=zi)
>>> plt.plot(x, 'k--', label='x')
>>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered')
>>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi')
>>> plt.legend(loc='best')
>>> plt.show()
"""
sos = np.asarray(sos)
if sos.ndim != 2 or sos.shape[1] != 6:
raise ValueError('sos must be shape (n_sections, 6)')
n_sections = sos.shape[0]
zi = np.empty((n_sections, 2))
scale = 1.0
for section in range(n_sections):
b = sos[section, :3]
a = sos[section, 3:]
zi[section] = scale * lfilter_zi(b, a)
# If H(z) = B(z)/A(z) is this section's transfer function, then
# b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady
# state value of this section's step response.
scale *= b.sum() / a.sum()
return zi
def _filtfilt_gust(b, a, x, axis=-1, irlen=None):
"""Forward-backward IIR filter that uses Gustafsson's method.
Apply the IIR filter defined by `(b,a)` to `x` twice, first forward
then backward, using Gustafsson's initial conditions [1]_.
Let ``y_fb`` be the result of filtering first forward and then backward,
and let ``y_bf`` be the result of filtering first backward then forward.
Gustafsson's method is to compute initial conditions for the forward
pass and the backward pass such that ``y_fb == y_bf``.
Parameters
----------
b : scalar or 1-D ndarray
Numerator coefficients of the filter.
a : scalar or 1-D ndarray
Denominator coefficients of the filter.
x : ndarray
Data to be filtered.
axis : int, optional
Axis of `x` to be filtered. Default is -1.
irlen : int or None, optional
The length of the nonnegligible part of the impulse response.
If `irlen` is None, or if the length of the signal is less than
``2 * irlen``, then no part of the impulse response is ignored.
Returns
-------
y : ndarray
The filtered data.
x0 : ndarray
Initial condition for the forward filter.
x1 : ndarray
Initial condition for the backward filter.
Notes
-----
Typically the return values `x0` and `x1` are not needed by the
caller. The intended use of these return values is in unit tests.
References
----------
.. [1] F. Gustafsson. Determining the initial states in forward-backward
filtering. IEEE Transactions on Signal Processing, 44(4):988-992, 1996.
"""
# In the comments, "Gustafsson's paper" and [1] refer to the
# paper referenced in the docstring.
b = np.atleast_1d(b)
a = np.atleast_1d(a)
order = max(len(b), len(a)) - 1
if order == 0:
# The filter is just scalar multiplication, with no state.
scale = (b[0] / a[0])**2
y = scale * x
return y, np.array([]), np.array([])
if axis != -1 or axis != x.ndim - 1:
# Move the axis containing the data to the end.
x = np.swapaxes(x, axis, x.ndim - 1)
# n is the number of samples in the data to be filtered.
n = x.shape[-1]
if irlen is None or n <= 2*irlen:
m = n
else:
m = irlen
# Create Obs, the observability matrix (called O in the paper).
# This matrix can be interpreted as the operator that propagates
# an arbitrary initial state to the output, assuming the input is
# zero.
# In Gustafsson's paper, the forward and backward filters are not
# necessarily the same, so he has both O_f and O_b. We use the same
# filter in both directions, so we only need O. The same comment
# applies to S below.
Obs = np.zeros((m, order))
zi = np.zeros(order)
zi[0] = 1
Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0]
for k in range(1, order):
Obs[k:, k] = Obs[:-k, 0]
# Obsr is O^R (Gustafsson's notation for row-reversed O)
Obsr = Obs[::-1]
# Create S. S is the matrix that applies the filter to the reversed
# propagated initial conditions. That is,
# out = S.dot(zi)
# is the same as
# tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs.
# out = lfilter(b, a, tmp[::-1]) # Reverse and filter.
# Equations (5) & (6) of [1]
S = lfilter(b, a, Obs[::-1], axis=0)
# Sr is S^R (row-reversed S)
Sr = S[::-1]
# M is [(S^R - O), (O^R - S)]
if m == n:
M = np.hstack((Sr - Obs, Obsr - S))
else:
# Matrix described in section IV of [1].
M = np.zeros((2*m, 2*order))
M[:m, :order] = Sr - Obs
M[m:, order:] = Obsr - S
# Naive forward-backward and backward-forward filters.
# These have large transients because the filters use zero initial
# conditions.
y_f = lfilter(b, a, x)
y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1]
y_b = lfilter(b, a, x[..., ::-1])[..., ::-1]
y_bf = lfilter(b, a, y_b)
delta_y_bf_fb = y_bf - y_fb
if m == n:
delta = delta_y_bf_fb
else:
start_m = delta_y_bf_fb[..., :m]
end_m = delta_y_bf_fb[..., -m:]
delta = np.concatenate((start_m, end_m), axis=-1)
# ic_opt holds the "optimal" initial conditions.
# The following code computes the result shown in the formula
# of the paper between equations (6) and (7).
if delta.ndim == 1:
ic_opt = linalg.lstsq(M, delta)[0]
else:
# Reshape delta so it can be used as an array of multiple
# right-hand-sides in linalg.lstsq.
delta2d = delta.reshape(-1, delta.shape[-1]).T
ic_opt0 = linalg.lstsq(M, delta2d)[0].T
ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],))
# Now compute the filtered signal using equation (7) of [1].
# First, form [S^R, O^R] and call it W.
if m == n:
W = np.hstack((Sr, Obsr))
else:
W = np.zeros((2*m, 2*order))
W[:m, :order] = Sr
W[m:, order:] = Obsr
# Equation (7) of [1] says
# Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt]
# `wic` is (almost) the product on the right.
# W has shape (m, 2*order), and ic_opt has shape (..., 2*order),
# so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T,
# so wic has shape (..., m).
wic = ic_opt.dot(W.T)
# `wic` is "almost" the product of W and the optimal ICs in equation
# (7)--if we're using a truncated impulse response (m < n), `wic`
# contains only the adjustments required for the ends of the signal.
# Here we form y_opt, taking this into account if necessary.
y_opt = y_fb
if m == n:
y_opt += wic
else:
y_opt[..., :m] += wic[..., :m]
y_opt[..., -m:] += wic[..., -m:]
x0 = ic_opt[..., :order]
x1 = ic_opt[..., -order:]
if axis != -1 and axis != x.ndim - 1:
# Restore the data axis to its original position.
x0 = np.swapaxes(x0, axis, x.ndim - 1)
x1 = np.swapaxes(x1, axis, x.ndim - 1)
y_opt = np.swapaxes(y_opt, axis, x.ndim - 1)
return y_opt, x0, x1
def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad',
irlen=None):
"""
Apply a digital filter forward and backward to a signal.
This function applies a linear digital filter twice, once forward and
once backwards. The combined filter has zero phase and a filter order
twice that of the original.
The function provides options for handling the edges of the signal.
Parameters
----------
b : (N,) array_like
The numerator coefficient vector of the filter.
a : (N,) array_like
The denominator coefficient vector of the filter. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is ``3 * max(len(a), len(b))``.
method : str, optional
Determines the method for handling the edges of the signal, either
"pad" or "gust". When `method` is "pad", the signal is padded; the
type of padding is determined by `padtype` and `padlen`, and `irlen`
is ignored. When `method` is "gust", Gustafsson's method is used,
and `padtype` and `padlen` are ignored.
irlen : int or None, optional
When `method` is "gust", `irlen` specifies the length of the
impulse response of the filter. If `irlen` is None, no part
of the impulse response is ignored. For a long signal, specifying
`irlen` can significantly improve the performance of the filter.
Returns
-------
y : ndarray
The filtered output with the same shape as `x`.
See Also
--------
sosfiltfilt, lfilter_zi, lfilter, lfiltic, savgol_filter, sosfilt
Notes
-----
When `method` is "pad", the function pads the data along the given axis
in one of three ways: odd, even or constant. The odd and even extensions
have the corresponding symmetry about the end point of the data. The
constant extension extends the data with the values at the end points. On
both the forward and backward passes, the initial condition of the
filter is found by using `lfilter_zi` and scaling it by the end point of
the extended data.
When `method` is "gust", Gustafsson's method [1]_ is used. Initial
conditions are chosen for the forward and backward passes so that the
forward-backward filter gives the same result as the backward-forward
filter.
The option to use Gustafsson's method was added in scipy version 0.16.0.
References
----------
.. [1] F. Gustafsson, "Determining the initial states in forward-backward
filtering", IEEE Transactions on Signal Processing, Vol. 46, pp. 988-992,
1996.
Examples
--------
The examples will use several functions from `scipy.signal`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
First we create a one second signal that is the sum of two pure sine
waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.
>>> t = np.linspace(0, 1.0, 2001)
>>> xlow = np.sin(2 * np.pi * 5 * t)
>>> xhigh = np.sin(2 * np.pi * 250 * t)
>>> x = xlow + xhigh
Now create a lowpass Butterworth filter with a cutoff of 0.125 times
the Nyquist frequency, or 125 Hz, and apply it to ``x`` with `filtfilt`.
The result should be approximately ``xlow``, with no phase shift.
>>> b, a = signal.butter(8, 0.125)
>>> y = signal.filtfilt(b, a, x, padlen=150)
>>> np.abs(y - xlow).max()
9.1086182074789912e-06
We get a fairly clean result for this artificial example because
the odd extension is exact, and with the moderately long padding,
the filter's transients have dissipated by the time the actual data
is reached. In general, transient effects at the edges are
unavoidable.
The following example demonstrates the option ``method="gust"``.
First, create a filter.
>>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied.
>>> np.random.seed(123456)
`sig` is a random input signal to be filtered.
>>> n = 60
>>> sig = np.random.randn(n)**3 + 3*np.random.randn(n).cumsum()
Apply `filtfilt` to `sig`, once using the Gustafsson method, and
once using padding, and plot the results for comparison.
>>> fgust = signal.filtfilt(b, a, sig, method="gust")
>>> fpad = signal.filtfilt(b, a, sig, padlen=50)
>>> plt.plot(sig, 'k-', label='input')
>>> plt.plot(fgust, 'b-', linewidth=4, label='gust')
>>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad')
>>> plt.legend(loc='best')
>>> plt.show()
The `irlen` argument can be used to improve the performance
of Gustafsson's method.
Estimate the impulse response length of the filter.
>>> z, p, k = signal.tf2zpk(b, a)
>>> eps = 1e-9
>>> r = np.max(np.abs(p))
>>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
>>> approx_impulse_len
137
Apply the filter to a longer signal, with and without the `irlen`
argument. The difference between `y1` and `y2` is small. For long
signals, using `irlen` gives a significant performance improvement.
>>> x = np.random.randn(5000)
>>> y1 = signal.filtfilt(b, a, x, method='gust')
>>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len)
>>> print(np.max(np.abs(y1 - y2)))
1.80056858312e-10
"""
b = np.atleast_1d(b)
a = np.atleast_1d(a)
x = np.asarray(x)
if method not in ["pad", "gust"]:
raise ValueError("method must be 'pad' or 'gust'.")
if method == "gust":
y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
return y
# method == "pad"
edge, ext = _validate_pad(padtype, padlen, x, axis,
ntaps=max(len(a), len(b)))
# Get the steady state of the filter's step response.
zi = lfilter_zi(b, a)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
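# For example (illustrative), if x is 2-D and axis=0, zi (length `order`)
# is reshaped to (order, 1) so that zi * x0 broadcasts across the columns.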
zi_shape = [1] * x.ndim
zi_shape[axis] = zi.size
zi = np.reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
# Forward filter.
(y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)
# Backward filter.
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
# Reverse y.
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def _validate_pad(padtype, padlen, x, axis, ntaps):
"""Helper to validate padding for filtfilt"""
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype "
"must be 'even', 'odd', 'constant', or None.") %
padtype)
if padtype is None:
padlen = 0
if padlen is None:
# Original padding; preserved for backwards compatibility.
edge = ntaps * 3
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be at least "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
return edge, ext
def sosfilt(sos, x, axis=-1, zi=None):
"""
Filter data along one dimension using cascaded second-order sections.
Filter a data sequence, `x`, using a digital IIR filter defined by
`sos`. This is implemented by performing `lfilter` for each
second-order section. See `lfilter` for details.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the cascaded filter delays. It is a (at
least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where
``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``
replaced by 2. If `zi` is None or is not given then initial rest
(i.e. all zeros) is assumed.
Note that these initial conditions are *not* the same as the initial
conditions given by `lfiltic` or `lfilter_zi`.
Returns
-------
y : ndarray
The output of the digital filter.
zf : ndarray, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
zpk2sos, sos2zpk, sosfilt_zi, sosfiltfilt, sosfreqz
Notes
-----
The filter function is implemented as a series of second-order filters
with direct-form II transposed structure. It is designed to minimize
numerical precision errors for high-order filters.
.. versionadded:: 0.16.0
Examples
--------
Plot a 13th-order filter's impulse response using both `lfilter` and
`sosfilt`, showing the instability that results from trying to do a
13th-order filter in a single stage (the numerical error pushes some poles
outside of the unit circle):
>>> import matplotlib.pyplot as plt
>>> from scipy import signal
>>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')
>>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos')
>>> x = signal.unit_impulse(700)
>>> y_tf = signal.lfilter(b, a, x)
>>> y_sos = signal.sosfilt(sos, x)
>>> plt.plot(y_tf, 'r', label='TF')
>>> plt.plot(y_sos, 'k', label='SOS')
>>> plt.legend(loc='best')
>>> plt.show()
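As an illustrative sketch, assuming the ``sos`` and ``x`` defined above,
initial conditions with the shape described for `zi` can be built from
`sosfilt_zi`, scaled by the first input sample:
>>> zi = signal.sosfilt_zi(sos) * x[0]   # shape (n_sections, 2)
>>> y_sos_zi, zf = signal.sosfilt(sos, x, zi=zi)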
"""
x = np.asarray(x)
sos, n_sections = _validate_sos(sos)
use_zi = zi is not None
if use_zi:
zi = np.asarray(zi)
x_zi_shape = list(x.shape)
x_zi_shape[axis] = 2
x_zi_shape = tuple([n_sections] + x_zi_shape)
if zi.shape != x_zi_shape:
raise ValueError('Invalid zi shape. With axis=%r, an input with '
'shape %r, and an sos array with %d sections, zi '
'must have shape %r, got %r.' %
(axis, x.shape, n_sections, x_zi_shape, zi.shape))
zf = zeros_like(zi)
for section in range(n_sections):
if use_zi:
x, zf[section] = lfilter(sos[section, :3], sos[section, 3:],
x, axis, zi=zi[section])
else:
x = lfilter(sos[section, :3], sos[section, 3:], x, axis)
out = (x, zf) if use_zi else x
return out
def sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None):
"""
A forward-backward digital filter using cascaded second-order sections.
See `filtfilt` for more complete information about this method.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is::
3 * (2 * len(sos) + 1 - min((sos[:, 2] == 0).sum(),
(sos[:, 5] == 0).sum()))
The extra subtraction at the end attempts to compensate for poles
and zeros at the origin (e.g. for odd-order filters) to yield
equivalent estimates of `padlen` to those of `filtfilt` for
second-order section filters built with `scipy.signal` functions.
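For example (illustrative), for an SOS array with two sections in which no
``b2`` or ``a2`` coefficient is zero, this default evaluates to
``3 * (2*2 + 1 - 0) = 15``.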
Returns
-------
y : ndarray
The filtered output with the same shape as `x`.
See Also
--------
filtfilt, sosfilt, sosfilt_zi, sosfreqz
Notes
-----
.. versionadded:: 0.18.0
Examples
--------
>>> from scipy.signal import sosfiltfilt, butter
>>> import matplotlib.pyplot as plt
Create an interesting signal to filter.
>>> n = 201
>>> t = np.linspace(0, 1, n)
>>> np.random.seed(123)
>>> x = 1 + (t < 0.5) - 0.25*t**2 + 0.05*np.random.randn(n)
Create a lowpass Butterworth filter, and use it to filter `x`.
>>> sos = butter(4, 0.125, output='sos')
>>> y = sosfiltfilt(sos, x)
For comparison, apply an 8th order filter using `sosfilt`. The filter
is initialized using the mean of the first four values of `x`.
>>> from scipy.signal import sosfilt, sosfilt_zi
>>> sos8 = butter(8, 0.125, output='sos')
>>> zi = x[:4].mean() * sosfilt_zi(sos8)
>>> y2, zo = sosfilt(sos8, x, zi=zi)
Plot the results. Note that the phase of `y` matches the input, while
`y2` has a significant phase delay.
>>> plt.plot(t, x, alpha=0.5, label='x(t)')
>>> plt.plot(t, y, label='y(t)')
>>> plt.plot(t, y2, label='y2(t)')
>>> plt.legend(framealpha=1, shadow=True)
>>> plt.grid(alpha=0.25)
>>> plt.xlabel('t')
>>> plt.show()
"""
sos, n_sections = _validate_sos(sos)
# `method` is "pad"...
ntaps = 2 * n_sections + 1
ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum())
edge, ext = _validate_pad(padtype, padlen, x, axis,
ntaps=ntaps)
# These steps follow the same form as filtfilt with modifications
zi = sosfilt_zi(sos) # shape (n_sections, 2) --> (n_sections, ..., 2, ...)
zi_shape = [1] * x.ndim
zi_shape[axis] = 2
zi.shape = [n_sections] + zi_shape
x_0 = axis_slice(ext, stop=1, axis=axis)
(y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x_0)
y_0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0)
y = axis_reverse(y, axis=axis)
if edge > 0:
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def decimate(x, q, n=None, ftype='iir', axis=-1, zero_phase=True):
"""
Downsample the signal after applying an anti-aliasing filter.
By default, an order 8 Chebyshev type I filter is used. If `ftype` is 'fir',
a Hamming-windowed FIR filter of order ``20 * q`` is used instead.
Parameters
----------
x : array_like
The signal to be downsampled, as an N-dimensional array.
q : int
The downsampling factor. When using IIR downsampling, it is recommended
to call `decimate` multiple times for downsampling factors higher than
13.
n : int, optional
The order of the filter (1 less than the length for 'fir'). Defaults to
8 for 'iir' and 20 times the downsampling factor for 'fir'.
ftype : str {'iir', 'fir'} or ``dlti`` instance, optional
If 'iir' or 'fir', specifies the type of lowpass filter. If an instance
of an `dlti` object, uses that object to filter before downsampling.
axis : int, optional
The axis along which to decimate.
zero_phase : bool, optional
Prevent phase shift by filtering with `filtfilt` instead of `lfilter`
when using an IIR filter, and shifting the outputs back by the filter's
group delay when using an FIR filter. The default value of ``True`` is
recommended, since a phase shift is generally not desired.
.. versionadded:: 0.18.0
Returns
-------
y : ndarray
The down-sampled signal.
See Also
--------
resample : Resample up or down using the FFT method.
resample_poly : Resample using polyphase filtering and an FIR filter.
Notes
-----
The ``zero_phase`` keyword was added in 0.18.0.
The possibility to use instances of ``dlti`` as ``ftype`` was added in
0.18.0.
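Examples
--------
An illustrative sketch, assuming a simple 5 Hz sinusoid sampled at 1 kHz
and downsampled by a factor of 4 with the default IIR filter:
>>> import numpy as np
>>> from scipy import signal
>>> t = np.linspace(0, 1, 1000, endpoint=False)
>>> x = np.sin(2 * np.pi * 5 * t)
>>> y = signal.decimate(x, 4)
>>> y.shape
(250,)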
"""
x = asarray(x)
q = operator.index(q)
if n is not None:
n = operator.index(n)
if ftype == 'fir':
if n is None:
half_len = 10 * q # reasonable cutoff for our sinc-like function
n = 2 * half_len
b, a = firwin(n+1, 1. / q, window='hamming'), 1.
elif ftype == 'iir':
if n is None:
n = 8
system = dlti(*cheby1(n, 0.05, 0.8 / q))
b, a = system.num, system.den
elif isinstance(ftype, dlti):
system = ftype._as_tf() # Avoids copying if already in TF form
b, a = system.num, system.den
else:
raise ValueError('invalid ftype')
sl = [slice(None)] * x.ndim
a = np.asarray(a)
if a.size == 1: # FIR case
b = b / a
if zero_phase:
y = resample_poly(x, 1, q, axis=axis, window=b)
else:
# upfirdn is generally faster than lfilter by a factor equal to the
# downsampling factor, since it only calculates the needed outputs
n_out = x.shape[axis] // q + bool(x.shape[axis] % q)
y = upfirdn(b, x, up=1, down=q, axis=axis)
sl[axis] = slice(None, n_out, None)
else: # IIR case
if zero_phase:
y = filtfilt(b, a, x, axis=axis)
else:
y = lfilter(b, a, x, axis=axis)
sl[axis] = slice(None, None, q)
return y[tuple(sl)]
|
Eric89GXL/scipy
|
scipy/signal/signaltools.py
|
Python
|
bsd-3-clause
| 119,638
|
[
"Gaussian"
] |
93ccaa6c04ddb10e5d2abf6e9fb015fec0e733ea63e6720565e8630befecd20c
|
# -*- coding:utf-8 -*-
#
# Copyright 2012 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.
from cStringIO import StringIO
from bson import json_util
import mock
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from pypln.web.core.models import Document, mongodb_storage
class TestWithMongo(TestCase):
def _pre_setup(self, *args, **kwargs):
super(TestWithMongo, self)._pre_setup(*args, **kwargs)
mongodb_storage._connection.drop_database(mongodb_storage._db.name)
if hasattr(self, 'fixtures') and self.fixtures is not None and 'documents' in self.fixtures:
filename = os.path.join(settings.PROJECT_ROOT, 'core/fixtures/mongodb/analysis.json')
with open(filename, 'r') as mongo_fixture:
for obj in json_util.loads(mongo_fixture.read()):
mongodb_storage._connection[settings.MONGODB_DBNAME][settings.MONGODB_COLLECTION].insert(obj)
for doc in Document.objects.all():
mongodb_storage.save(os.path.basename(doc.blob.name),
StringIO(u"Test file with non-ascii char: á.".encode('utf-8')))
def _post_teardown(self, *args, **kwargs):
mongodb_storage._connection.drop_database(mongodb_storage._db.name)
super(TestWithMongo, self)._post_teardown(*args, **kwargs)
|
NAMD/pypln.web
|
pypln/web/core/tests/utils.py
|
Python
|
gpl-3.0
| 2,062
|
[
"NAMD"
] |
e49c9a5e100023131a0fd8f28c4f3d769bb062a5707dd0c46c9b32b1c0aeca82
|
from django.core.exceptions import ValidationError
from cyder.base.tests import ModelTestMixin, TestCase
from cyder.cydns.soa.models import SOA
from cyder.cydns.domain.models import Domain
class SOATests(TestCase, ModelTestMixin):
@property
def objs(self):
"""Create objects for test_create_delete."""
d1 = Domain.objects.create(name='marp')
d2 = Domain.objects.create(name='blook')
d3 = Domain.objects.create(name='bluh')
d4 = Domain.objects.create(name='wep')
d5 = Domain.objects.create(name='blah')
return (
SOA.objects.create(
primary="ns2.oregonstate.edu", contact="admin.oregonstate.edu",
retry=1234, refresh=1234123, description="marp",
root_domain=d1),
SOA.objects.create(
primary="dddo.com", contact="admf.asdf", retry=432152,
refresh=1235146134, description="blook", root_domain=d2),
SOA.objects.create(
primary="ns1.oregonstate.edu", contact="admin.oregonstate.edu",
retry=1234, refresh=1234123, description="bluh",
root_domain=d3),
SOA.objects.create(
primary="do.com", contact="admf.asdf", retry=432152,
refresh=1235146134, description="wep", root_domain=d4),
SOA.objects.create(
primary='ns1.derp.com', contact='admf.asdf', root_domain=d5),
)
def test_duplicate(self):
d = Domain.objects.create(name='flop')
SOA.objects.create(
primary='hoo.ha', contact='me', retry=100009,
refresh=2003, description='flippy', root_domain=d)
# Same root_domain.
self.assertRaises(
ValidationError, SOA.objects.create, primary='hee.ha',
contact='you', retry=40404, refresh=10038, description='floppy',
root_domain=d)
def test_add_invalid(self):
self.assertRaises(
ValidationError, SOA.objects.create, primary='daf..fff',
contact='foo.com')
self.assertRaises(
ValidationError, SOA.objects.create, primary='foo.com',
contact='dkfa..')
self.assertRaises(
ValidationError, SOA.objects.create, primary='adf',
contact='*@#$;')
def test_chain_soa_domain_add(self):
d0 = Domain.objects.create(name='com')
soa = SOA.objects.create(
primary='ns1.foo.com', contact='email.foo.com', root_domain=d0)
d1 = Domain.objects.create(name='foo.com')
self.assertEqual(soa, d1.soa)
d2 = Domain.objects.create(name='bar.foo.com')
self.assertEqual(soa, d2.soa)
d3 = Domain.objects.create(name='new.foo.com')
self.assertEqual(soa, d3.soa)
d4 = Domain.objects.create(name='far.bar.foo.com')
self.assertEqual(soa, d4.soa)
d5 = Domain.objects.create(name='tee.new.foo.com')
self.assertEqual(soa, d5.soa)
d5.delete()
d4.delete()
self.assertEqual(soa, d1.soa)
self.assertEqual(soa, d2.soa)
self.assertEqual(soa, d3.soa)
def test_nested_zones(self):
self.domain_names = (
'y', 'x.y', 'p.x.y', 'q.x.y',
'a.q.x.y', 'b.q.x.y', 'c.q.x.y')
for name in self.domain_names:
d = Domain.objects.create(name=name)
soa_q_x_y = SOA.objects.create(
root_domain=Domain.objects.get(name='q.x.y'),
primary='bleh1', contact='bleh1')
for name in ('y', 'x.y', 'p.x.y'):
self.assertEqual(Domain.objects.get(name=name).soa, None)
for name in ('q.x.y', 'a.q.x.y', 'b.q.x.y', 'c.q.x.y'):
self.assertEqual(Domain.objects.get(name=name).soa, soa_q_x_y)
soa_x_y = SOA.objects.create(
root_domain=Domain.objects.get(name='x.y'),
primary='bleh2', contact='bleh2')
soa_q_x_y = SOA.objects.get(root_domain__name='q.x.y')
self.assertEqual(Domain.objects.get(name='y').soa, None)
for name in ('x.y', 'p.x.y'):
self.assertEqual(Domain.objects.get(name=name).soa, soa_x_y)
for name in ('q.x.y', 'a.q.x.y', 'b.q.x.y', 'c.q.x.y'):
self.assertEqual(Domain.objects.get(name=name).soa, soa_q_x_y)
soa_q_x_y.delete()
soa_x_y = SOA.objects.get(root_domain__name='x.y')
self.assertEqual(Domain.objects.get(name='y').soa, None)
for name in ('x.y', 'p.x.y', 'q.x.y', 'a.q.x.y', 'b.q.x.y', 'c.q.x.y'):
self.assertEqual(Domain.objects.get(name=name).soa, soa_x_y)
|
akeym/cyder
|
cyder/cydns/soa/tests/test_models.py
|
Python
|
bsd-3-clause
| 4,623
|
[
"ADF"
] |
9c15154f16c32bd269edb0bf1b861c2b2f0bdcb545081ff87502c0bd929a6755
|
#
# Copyright 2010 Suinova Designs Ltd.
#
__author__ = "Ted Wen"
__version__ = "$Revision: 1 $"
# $Source$
import logging, time, cgi, hashlib, os, base64, re, hmac, urllib
from datetime import datetime, timedelta
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
from google.appengine.runtime import DeadlineExceededError
from google.appengine.api import taskqueue
from google.appengine.api.memcache import get as from_cache,set as to_cache,delete as decache
from templite import render_text, jsonize
import helper
# change to use Facebook OpenGraph
import facebook
FACEBOOK_APP_ID = "" #Suinova Comics (suicomics.appspot.com)
FACEBOOK_APP_SECRET = ""
FACEBOOK_API_KEY = ""
COOKIE_SECRET = '5121acea344842ee835824525f0ba807' #from uuid.uuid4()
SNSES = ['fb','gg','ms','web']
realpath = os.path.dirname(__file__)
class PermitException(Exception):
pass
class BaseRequest(webapp.RequestHandler):
""" Common POST or GET request handler.
This is platform-independent. Facebook requests use FacebookRequest class derived from this class.
Another derived class AdminRequest is used for admin operations.
User authentication is done in the derived class initialization phase.
This design allows for extending the services to other networks such as Bebo etc.
All concrete operations will be implemented in separate modules such as party.py and
the functions are dynamically called in a RPC fashion. For example,
/fb/patch/plant?p=PatchKey&s=SeedKey
will call the plant(BaseRequest) function in module patch.py. The module is loaded
the first time it is used by the Python interpreter.
The user object is created in the initialization method after authentication, and
accessible as self.user in the module functions through the web parameter.
The web parameter is BaseRequest->webapp.RequestHandler in this case.
It is App Engine determined, but can be redefined if porting to other web servers.
The basic attributes used include BaseRequest.user, BaseRequest.sns (fb),
and operations include BaseRequest.renderPage, BaseRequest.addVar,
and webapp.RequestHandler.request.get[_all], webapp.RequestHandler.response.out.
"""
def post(self):
""" Facebook requests are POST requests.
REST-style requests are formatted as:
/[fb|gg|web|admin]/[module]/[function][/REST_terms/..]?[name=value&...]
"""
paths = self.parse_path(self.request.path)
# logging.debug(paths)
module_name = paths['module']
function_name = paths['function']
try:
mod = __import__(module_name) # the modules will be loaded only once whatsoever
except ImportError:
msg = 'Module "%s" not found' % module_name
logging.error(msg)
self.response.out.write(msg)
return
except DeadlineExceededError,e:
logging.error('Module "%s" caused DeadlineExceededError' % module_name)
self.response.clear()
self.response.out.write('%s (Please retry later)'%e)
return
#params = dict((k,self.request.get(k)) for k in self.request.arguments())
#logging.debug('post params=%s'%params)
try:
if hasattr(mod, function_name):
getattr(mod, function_name)(self, paths['args']) #mod.func(web,arg_list=None)
else:
getattr(mod, 'default')(self, [function_name] + paths['args'])
except DeadlineExceededError,e:
logging.error('Module "%s.%s" caused DeadlineExceededError' % (module_name, function_name))
self.response.clear()
self.response.out.write('%s (Please retry later)'%e)
return
except PermitException,e:
self.fail(str(e))
except Exception,e:
logging.exception(e)
self.fail(str(e))
#else:
#msg = 'Module "%s" has no function "%s"' % (modname,funcname)
#logging.error(msg)
#self.response.out.write(msg)
def get(self):
""" Admin uses GET requests.
/admin/[module]/[function]?[name=value&...]
"""
if self.request.path.startswith('/admin'):
self.post()
else:
self.response.out.write('GET is not allowed')
def parse_path(self,path):
""" Parse request.path to get sns, module, function, and arguments.
If no module is given, it uses 'home', if no function is given, it uses 'default'.
sns is also optional, if included, it's the first term, and module the next and function follows.
@return {'module':mod,'function':func,'sns':sns,'args':args}
"""
mod = func = sns = None
args = []
for s in path.split('/'):
if s == '': continue
if mod is None:
if s not in SNSES: #['fb','gg','ms','web']
mod = s
else:
sns = s
elif func is None:
func = s
else:
args.append(s)
if mod is None: mod = 'home'
if func is None: func = 'default'
return {'module':mod,'function':func,'sns':sns,'args':args}
def get_param(self,pname,default=None):
""" Returns the value for a request parameter.
@param pname: parameter key
@param default: value to return when the parameter is missing; if default is None and the parameter is missing, an exception is raised
@raise Exception if default is None and pname not given
"""
v = self.request.get(pname)
if v == '':
if default is None:
raise Exception('Param %s not given'%pname)
return default
return v
def get_params(self,pname):
""" Returns a list of values for a parameter or a list of parameters. """
if isinstance(pname,list):
return [self.request.get(k) for k in pname]
return self.request.get_all(pname)
def get_var(self, vname):
""" Returns the value of a variable in self.tempvars, or None if not found """
if vname in self.tempvars:
return self.tempvars[vname]
else:
return None
def add_var(self, kod, val=None):
""" Add a key:value pair or a dict of key:value pairs.
@param kod: key str or dict
@param val: value str or None for k=dict
"""
# logging.info('add_var: k,v=%s,%s'%(kod,val))
if not hasattr(self,'tempvars') or self.tempvars is None:
self.tempvars = {}
if isinstance(kod, dict):
# logging.info('kod is dict')
for k,v in kod.items():
self.tempvars[k] = v
elif val is not None:
# logging.info('kod,val')
self.tempvars[kod] = val
#logging.info('add_var: k=%s,v=%s'%(kod,self.tempvars[kod]))
def succeed(self, result=None):
""" Return successful JSON data to the client.
"""
if result is None:
self.response.out.write('{"RE":"OK"}')
elif isinstance(result, dict):
result['RE'] = 'OK'
self.response.out.write(jsonize(result))
elif isinstance(result, list):
self.response.out.write(jsonize(result))
elif isinstance(result, basestring):
self.response.out.write(result)
def fail(self, msg=None, format='JSON', page='error.html'):
""" Return a failure message to the client. If use error.html, replace {{ error }} with msg.
@param msg: message text to return or None, can also be a dict with 'error' in it.
@param format: JSON or HTML to return
@param page: error.html by default for format=='HTML'
@return: {"error":"msg"} #{'RE':'Fail',msg:'...'}
"""
if format == 'JSON':
if msg:
if isinstance(msg,basestring):
err = '{"error":"%s"}'%msg
else:
err = jsonize(msg)
else:
err = '{"error":"Unknown error"}'
self.response.out.write(err)
elif format == 'HTML':
if page == '':
logging.error('main.fail(): page not given')
return
if msg: self.add_var('error',msg)
self.render_page(page)
def redirect_with_msg(self, msg, path=''):
""" Redirect to page path with a msg before relocating the page. """
if path.startswith('/'): path = path[1:]
self.response.out.write('<html><head><meta http-equiv="refresh" content="3;url=/%s"></head><body><h3>%s</h3>Jump in 3 seconds.</body></html>'%(path,msg))
def render_page(self, page, render=True, vars=None):
""" Return an html page to the client.
@param page: html page file name without path or .html
@param render: if False, return file directly, if True, substitute variables with their values in the page by the template engine Templite.
@param vars: additional variable dict in addition to self.tempvars.
"""
if not page:
logging.error('main.return_page error: page is None')
raise Exception('Invalid parameter page')
if page.find('.') < 0:
page = '%s.html' % (page)
path = os.path.join(realpath, page)
try:
import codecs
fi = codecs.open(path,'r','utf-8')
text = fi.read()
fi.close()
except:
logging.error('render_page(%s) file not found'%path)
self.response.out.write('File open error')
return
if render:
if vars:
if isinstance(vars,dict):
self.tempvars.update(vars)
else:
logging.error('render_page error: vars is not dict')
#logging.info('self.tempvars: %s' % self.tempvars)
render_text(text,self.tempvars,self.response.out)
else:
self.response.out.write(text)
@property
def logged_in(self):
return hasattr(self,'user') and self.user
def require_login(self):
""" Returns nothing if user logged in, else raises an exception. """
if not self.logged_in:
raise PermitException('Login required')
def require_author(self):
""" Returns nothing if user logged in as an Author, else raises an exception. """
self.require_login()
if not self.user.isAuthor():
raise PermitException('Author only')
def require_admin(self):
""" Returns nothing if user is admin, else raises an exception. """
self.require_login()
#self.require_author()
from google.appengine.api import users
if not users.is_current_user_admin():
raise Exception('Admin only') #will log this behavior to see who did this
REDIRECT_HTML='''<html><head><meta http-equiv="refresh" content="0;url=/">
</head><body onload="javascript:location.replace('%s')"></body></html>
'''
FACEBOOK_LOGIN0="""<html><head></head><body><fb:login-button perms="email,status_update,publish_stream"></fb:login-button><div id="fb-root"></div>
<script>
window.fbAsyncInit = function() {
FB.init({appId: '%s', status: true, cookie: true, xfbml: true});
FB.Event.subscribe('auth.login',function(response){
var browser = navigator.userAgent.toLowerCase();
if (browser.indexOf('safari') > 0 && browser.indexOf('chrome') < 0){
//alert('safari');
var d=document.getElementsByTagName('body')[0];
var dv=document.createElement('div');
dv.innerHTML='<form id="sessionform" enctype="application/x-www-form-urlencoded" action="http://suicomics.appspot.com/fb/" action="post"></form>';
d.appendChild(dv);
var f=document.getElementById('sessionform');
f.submit();
}else
window.top.location = 'http://apps.facebook.com/suicomics';
});
};
(function(){
var e = document.createElement('script');
e.type = 'text/javascript';
e.src = document.location.protocol + '//connect.facebook.net/en_US/all.js';
e.async = true;
document.getElementById('fb-root').appendChild(e);
}());
</script></body></html>"""%FACEBOOK_APP_ID
FACEBOOK_LOGIN='''<html><head>
<script>
window.top.location = "https://graph.facebook.com/oauth/authorize?client_id=179008415461930"
+"&redirect_uri=http://apps.facebook.com/suicomics/";
</script>
</head></html>
'''
def padtrans(b64s):
""" Add = padding to multiple of 4.
And replace - with +, _ with /
"""
n = 4 - len(b64s) % 4 & 3
return b64s.replace('-','+').replace('_','/') + '='*n
PTN = re.compile(r'"([^"]+)"\s?:\s?"?([^"}]+)"?')
def parse_signed_request(signed_request, secret):
""" Parse Facebook OAuth 2.0 signed_request.user_id,oauth_token,expires,profile_id(on profile_tab)
"""
encoded_sig, payload = signed_request.split('.')
sig = base64.b64decode(padtrans(encoded_sig))
datas = base64.b64decode(padtrans(payload))
data = dict((k,v) for k,v in PTN.findall(datas))
if data['algorithm'].upper() != 'HMAC-SHA256':
logging.error('parse_signed_request error, hmac-sha256 expected')
return None
expected_sig = hmac.new(secret, msg=payload, digestmod=hashlib.sha256).digest()
if expected_sig != sig:
logging.error('parse_signed_request error: bad signature')
return None
return data
class FacebookRemove(webapp.RequestHandler):
def post(self):
logging.info('FacebookRemove.post: received a Deauthorize Call')
sr = self.request.get('signed_request')
if sr:
data = parse_signed_request(sr, FACEBOOK_APP_SECRET)
logging.debug('FacebookRemove.post: data=%s'%data)
if 'user_id' in data:
uid = data['user_id']
ukey = 'fb_%s'%uid
logging.debug('De-authorizing FB user %s'%uid)
helper.unregister_user(ukey)
else:
logging.warning('No user_id in signed_request for /fb/remove callback')
else:
logging.error('FB /fb/remove without signed_request')
class FacebookRequest(BaseRequest):
""" Web Request Handler for Facebook requests with URL as /fb/*.
User is authenticated through Facebook connection, and stored in the database
if first time. Once the user login, the self.user object is created.
self.sns is set to 'fb' and can be used as path '/fb/'.
"""
def initialize(self, request, response):
""" Check cookies, load user session before handling requests. Necessary here? can be merged into POST or GET.
"""
webapp.RequestHandler.initialize(self, request, response)
if self.request.get('use') == 'gift':
self.authorize()
return
sr = self.request.get('signed_request')
if sr:
data = parse_signed_request(sr, FACEBOOK_APP_SECRET)
logging.debug('FacebookRequest.initialize: signed_request = %s'%data)
if data:
if 'oauth_token' not in data:
logging.debug('FacebookRequest.initialize: signed_request has no oauth_token, redirect to oauth/authorize')
self.authorize()
return
else:
logging.debug('FacebookRequest.initialize: ready to login')
self.login(data['user_id'],data['oauth_token'])
else:
logging.debug('FacebookRequest.initialize: Bad signed_request, return home page without login')
else:
fbcookie = facebook.get_user_from_cookie(self.request.cookies, FACEBOOK_APP_ID, FACEBOOK_APP_SECRET)
if fbcookie:
logging.debug('FacebookRequest.initialize: fbcookie = %s'%fbcookie)
self.login(fbcookie['uid'],fbcookie['access_token'])
else:
code = self.request.get('code')
if code:
logging.debug('FacebookRequest.initialize: got code from oauth-authorize exchange for access_token')
aargs={'client_id':FACEBOOK_APP_ID,'client_secret':FACEBOOK_APP_SECRET,'code':code,'redirect_uri':self.request.path_url}
response = cgi.parse_qs(urllib.urlopen("https://graph.facebook.com/oauth/access_token?"+urllib.urlencode(aargs)).read())
logging.debug('FacebookRequest.initialize: response from oauth/access_token: %s'%response)
if response and 'access_token' in response:
access_token = response["access_token"][-1]
graph = facebook.GraphAPI(access_token)
profile = graph.get_object('me')
logging.debug('FacebookRequest.initialize: got graph profile of me:%s'%profile)
self.login(profile['id'],access_token)
else:
logging.debug('FacebookRequest.initialize: Bad result from oauth/access_token, return home page without login')
else:
logging.debug('FacebookRequest.initialize: no code, try oauth/authorize')
self.authorize()
def authorize(self):
use = self.request.get('use')
scope = 'email,status_update,publish_stream'
ex = ''
if use == 'gift':
logging.debug('FacebookRequest.authorize: use=gift')
scope += ',friends_birthday,offline_access'
ex = 'gift/permit'
self.get = self.post = (lambda *args: None)
#args={'client_id':FACEBOOK_APP_ID,'redirect_uri':self.request.path_url,'scope':'email,status_update,publish_stream'}
#self.redirect('https://graph.facebook.com/oauth/authorize?'+urllib.urlencode(args))
args={'client_id':FACEBOOK_APP_ID,'redirect_uri':'http://apps.facebook.com/suicomics/%s'%ex,'scope':scope}
fbs='''<script>top.location="https://graph.facebook.com/oauth/authorize?%s";</script>'''%urllib.urlencode(args)
self.response.out.write(fbs)
def login(self,uid,access_token):
""" Login routine.
Called from FacebookRequest with the Facebook user id and OAuth access token: login(facebook_uid, access_token).
"""
self.sns = 'fb'
ukey = '%s_%s' % (self.sns, uid)
u = helper.from_cache(ukey)
if not u:
u = helper.get_user_by_key(ukey,False) #memcache=False
if not u:
graph = facebook.GraphAPI(access_token)
profile = graph.get_object('me')
u = helper.create_user(ukey,profile['name'],profile.get('email',None),False) #save=False
if not u:
logging.error('FacebookRequest.login: helper.create_user failed')
self.redirect_with_msg('Server in maintenance, please try later, thank you.')
self.get = self.post = (lambda *args: None)
return
u.access_token = access_token
u.save()
logging.debug('FacebookRequest.login: New User %s saved'%ukey)
else:
now = datetime.utcnow()
u._cache_time = now
if helper.to_cache(ukey, u, helper.PLAYER_CACHE_SECS): #2 hours, if memcache fails, do not task/dau or send_email
# if u.lastime.day != now.day or u.lastime.month != now.month:
# taskqueue.add(url='/task/dau',params={'usr':ukey,'act':'login'})
taskqueue.add(url='/task/dau',params={'usr':ukey,'act':'login'})
if ukey not in ['fb_669391906','fb_1464710918','fb_1842536962','fb_1831016858']:
helper.send_email('Login SuiComics: %s(%s) @ %s'%(u.name,ukey,now), 'OK')
else:
helper.send_email('Login SuiComics: %s(%s) @ %s'%(u.name,ukey,now), 'OK - to remove this')
else:
#got user from memcache
if u.access_token != access_token:
u.access_token = access_token
u.save()
logging.debug('FacebookRequest.login: access_token updated while %s still in memcache'%ukey)
elif hasattr(u,'_cache_time'):
if (datetime.utcnow()-u._cache_time).seconds >= 3600:
u._cache_time = datetime.utcnow()
helper.to_cache(ukey, u, helper.PLAYER_CACHE_SECS)
if self.request.headers.get('User-Agent','').find('MSIE')>=0:
#logging.debug('addHeader P3P for MSIE')
#self.response.headers.add_header('P3P','CP="IDC DSP COR ADM DEVi TAIi PSA PSD IVAi IVDi CONi HIS OUR IND CNT"')
self.response.headers.add_header('P3P','CP="SuiComics"')
args = get_session_from_cookie(self.request.cookies)
if not args or args['uid'] != ukey:
put_cookie(self.response.headers, ukey, u.token, self.sns)
self.user = u
self.tempvars = {'user':u,'sns':'fb','login':True,'uname':u.name,'onFacebook':True}
if self.request.get('ref')=='bookmarks':
c = self.request.get('count')
if c != '0':
helper.clear_fb_count(uid)
def get(self):
self.post()
def jsondict(self,json):
""" Make a dict out of simple dict in JSON, only string and int allowed, string does not contain comma or }. """
ptn = re.compile(r'"(\w+)":\s*([^,}]+)')
data = {}
for k, v in ptn.findall(json):
if v.startswith('"'):
data[k] = v.strip('"')
else:
data[k] = int(v)
return data
def handle_order(self):
""" This is the routine to handle user payment transactions from facebook.
"""
signed_request = parse_signed_request(self.request.get('signed_request'), FACEBOOK_APP_SECRET)
if not signed_request:
logging.warning('fb/order: invalid signed_request')
self.fail('Unauthorized request')
return
logging.debug('/fb/order: signed_request = %s'%signed_request)
payload = signed_request['credits']
order_id = payload['order_id']
method = self.request.get('method')
data = {'content':[]}
if method == 'payments_get_items':
order_info = payload['order_info']
logging.debug('/fb/order: order_info=%s'%order_info)
item = self.jsondict(order_info)
logging.debug('self.jsondict: parsed item=%s'%item)
item['price'] = int(item['price'])
# if not item['product_url'].startswith('http://'):
# item['product_url'] = 'http://%s' % item['product_url']
# if not item['image_url'].startswith('http://'):
# item['image_url'] = 'http://%s' % item['image_url']
# if 'test_mode' in payload:
# item['title'] = '[Test Mode] %s' % item['title']
# item['description'] = '[Test Mode] %s' % item['description']
data['content'].append(item)
return_value = {"content":[{"title":"",
"description":"",
"item_id":"",
"image_url":"",
"product_url":"",
"price":10,
"data":""}],
"method":"payments_get_items"}
elif method == 'payments_status_update':
#get: order_id (int),status:(placed,reserved,settled,canceled),order_details:
status = payload['status']
ret = {'order_id':order_id}
if status == 'placed':
ret['status'] = 'settled'
elif status == 'settled':
#save user purchase transaction here
logging.debug('/fb/order: settled status received, about to save transaction')
order_info = payload['order_info']
item = self.jsondict(order_info)
logging.debug('/fb/order: status=%s,item=%s'%(status,item))
import pay
data = {'quantity':1,'price':item['price'],'item_id':item['item_id'],'buyer':item['data'],'currency':'FC','method':'FC','order_number':order_id}
logging.debug('/fb/order: save_exchange, data=%s'%data)
pay.save_exchange(item['data'], datetime.utcnow(), 0, data)
ret['status'] = 'settled'
elif status == 'refunded':
logging.warning('fb sent refunded')
data['content'].append(ret)
data['method'] = method
logging.debug('/fb/order: returning back to fb:%s'%data)
self.succeed(data)
class GoogleRequest(BaseRequest):
""" Google Request handler for public Web requests /gg/*.
The idea is that, by default the root request goes to WebRequest handler, and once user chooses login via Google,
it redirects to /gg/home location and processed by this class. After user logged in, set the cookie and let the
browser refreshes the location to / again and the WebRequest will load the logged in user according to the cookie.
If user logged in via Google account, it will return a cookie and redirect to / home.
If user not logged in, it will redirect to Google login page.
This requires user has a Google account.
TODO: how to use Google contact etc for social event.
"""
def initialize(self, request, response):
""" Authenticate through Google account.
"""
webapp.RequestHandler.initialize(self, request, response)
from google.appengine.api import users
user = users.get_current_user()
if not user:
logging.debug('GoogleRequest.initialize: not login, redirect to /gg')
self.redirect(users.create_login_url("/gg/home"))
self.get = (lambda *args: None)
self.post = (lambda *args: None)
else:
#user logged in google account,check our cookie
sns = 'gg' #Google: how to make use of GMail contact, chat etc? via OAuth
uid = '%s_%s' % (sns, user.user_id())
logging.debug('GoogleRequest.initialize: %s visit via Google, try login'%uid)
su = helper.from_cache(uid)
if not su:
su = helper.get_user_by_key(uid,False) #no memcache
if su is None:
logging.debug('GoogleRequest.initialize: New user, try create')
em = user.email()
name = em[:em.find('@')]
su = helper.create_user(uid, name, em) #auto cached if successful
if su is None:
logging.error('GoogleRequest.initialize: create_user(%s,%s,%s) failed'%(uid,name,em))
self.response.out.write('Server in maintenance, please come back later. Thank you.')
self.get = self.post = (lambda *args: None) #stop calling request handler
return
else:
logging.debug('GoogleRequest.initialize: new session today, try cache')
su._cache_time = datetime.utcnow()
if helper.to_cache(uid, su, helper.PLAYER_CACHE_SECS):
logging.debug('GoogleRequest.initialize: Memcached, task dau and send email to admin')
taskqueue.add(url='/task/dau',params={'usr':uid,'act':'login'})
#if uid not in ['gg_109722387073140662444','gg_108772542023352813713']:
helper.send_email('Login SuiComics: %s(%s) @ %s'%(su.name,uid,datetime.utcnow()), 'OK')
else:
#in memcache
logging.debug('GoogleRequest.initialize: in memcache, revisit')
if hasattr(su,'_cache_time'):
if (datetime.utcnow()-su._cache_time).seconds >= 3600:
su._cache_time = datetime.utcnow()
helper.to_cache(uid, su, helper.PLAYER_CACHE_SECS)
self.tempvars = {'user':su,'sns':'gg','login':True,'uname':su.name,'onFacebook':False}
args = get_session_from_cookie(self.request.cookies)
if not args:
put_cookie(self.response.headers, uid, su.token, sns) #a generated random token
else:
self.tempvars.update(args) #['sns','uid','token']
self.sns = sns
self.user = su
def get(self):
""" Get for direct access. """
self.post()
class AdminRequest(GoogleRequest):
""" Web Request Handler for Admin requests with URL as /admin/*.
"""
def initialize(self, request, response):
""" authenticate Admin through Google User.
"""
GoogleRequest.initialize(self, request, response)
from google.appengine.api import users
user = users.get_current_user()
if not users.is_current_user_admin():
url = users.create_logout_url('/')
greeting = ("Admin only. (<a href=\"%s\">Sign out</a> and retry)" % url)
self.response.out.write(greeting)
self.get = (lambda *args: None)
self.post = (lambda *args: None)
else:
self.add_var('admin',True)
self.admin = True
MIME = {'png':'image/png','jpg':'image/jpeg','gif':'image/gif','swf':'application/x-shockwave-flash','mid':'audio/mid','mp3':'audio/mpeg','jpeg':'image/jpeg'}
class MediaReader(webapp.RequestHandler):
""" Fetch images etc from MediaStore by key_name. """
def get(self):
""" get an image etc. by Key string.
/mm/filename.jpg?v=1.0 a filename.
"""
fname = self.request.path[self.request.path.rfind('/')+1:]
if fname.find('.') > 0:
fname = fname[:fname.find('.')]
if fname.find('?') > 0:
fname = fname[:fname.find('?')]
ie = helper.load_media(fname)
if ie:
self.response.headers['Content-Type'] = MIME[ie.format]
self.response.out.write(ie.stream)
elif fname.startswith('u_'):
#return default user logo
fname = 'avatar_1'
ie = helper.load_media(fname)
self.response.headers['Content-Type'] = MIME[ie.format]
self.response.out.write(ie.stream)
#self.redirect('/img/avatar.png')
else:
self.error(400)
def put_cookie(headers,uid,token,sns):
""" Create a session cookie after login onto the client browser so that the server will know who is the user during this session.
Set-Cookie: SC_Session="uid=FB1234&token=xxxx.3333_22xx&sns=fb&expires=2134353.3443&sig=abdf3434"; expires=Fri, 01 Jan 2010 11:48:41 GMT; path=/;
Note that sig = md5(expires=dddddtoken=xxxxxuid=xxxxxCOOKIE_SECRET), is used to verify this cookie is from this server without tampering.
@param headers : self.response.headers
@param uid : like FB1234353455
@param token : server generated token string, can be the access_token from FB
"""
xt = datetime.utcnow() + timedelta(hours=2)
session_vars = {'uid':uid,'token':token,'sns':sns,'expires':int(time.time())+7200}
s = ''.join('%s=%s'%(k,session_vars[k]) for k in sorted(session_vars.keys()))
sig = hashlib.md5(s + COOKIE_SECRET).hexdigest()
session_vars['sig'] = sig
sg_session = '&'.join('%s=%s'%(k,v) for k,v in session_vars.items())
cookies = 'SC_Session="%s"; expires=%s; path=/;' % (sg_session, xt.strftime('%a, %d %b %Y %H:%M:%S GMT'))
logging.info('put_cookie: %s'%cookies)
headers.add_header('Set-Cookie', cookies)
def delete_cookie(headers,cookies):
""" Call this with delete_cookie(self.response.headers,self.request.cookies)
"""
cookie = cookies.get('SC_Session','')
if cookie:
xt = datetime.utcnow() - timedelta(hours=2)
cookie = 'SC_Session="uid=&token=&sns=&expires=%d"; expires=%s; path=/;'%(int(time.time())-7200,xt.strftime('%a, %d %b %Y %H:%M:%S GMT'))
headers.add_header('Set-Cookie',cookie)
def get_session_from_cookie(cookies):
""" Check whether there is session cookie issued by this module after user login through a SNS such as Facebook.
Cookie: SC_Session="uid=FB1234&token=xxxx.3333_22xx&expires=2134353.3443&sig=abdf3434";
"""
cookie = cookies.get('SC_Session','')
if not cookie:
# logging.debug('get_session_from_cookie, no cookie SC_Session')
return None
return parse_session(cookie)
def get_session_from_request(request):
""" Check whether session data are in request parameters. This method is used by Flash clients.
The cookie text is the same, but in SC_Session="uid=xxx&token=xxxx..."
"""
session = request.get('SC_Session')
if session:
return parse_session(session)
return None
def parse_session(cookie):
args = dict((k,v[0]) for k,v in cgi.parse_qs(cookie.strip('"')).items())
sortit = ''.join('%s=%s'%(k,args[k]) for k in sorted(args.keys()) if k != 'sig')
sig = hashlib.md5(sortit + COOKIE_SECRET).hexdigest()
expires = int(args['expires'])
#logging.debug('get_session_from_cookie, args=%s'%args)
#if sig == args['sig'] and (expires == 0 or time.time() < expires):
# return args
#return None
if sig != args['sig']:
logging.warning('get_session_from_cookie, sig(%s) != args.sig args=%s'%(sig,args))
elif expires > 0 and time.time() > expires:
logging.warning('get_session_from_cookie, expires < time(%d) args=%s'%(time.time(),args))
else:
return args
return None
class LogoutRequest(webapp.RequestHandler):
""" Clear cookie if any, and redirect to home / """
def post(self):
delete_cookie(self.response.headers, self.request.cookies)
#self.redirect('/')
self.response.out.write('/')
SNS_PREPS = {'fb':'<div id="fb-root"></div>'}
class WebRequest(BaseRequest):
""" New design: Common request handler either before or after login.
The SNS-specific request handlers are for authentication only where cookies are set for a login session.
"""
def initialize(self, request, response):
""" Check cookies, load user session before handling requests. Necessary here? can be merged into POST or GET.
"""
webapp.RequestHandler.initialize(self, request, response)
self.tempvars = {}
args = get_session_from_cookie(self.request.cookies)
# logging.info('WebRequest.initialize args=%s'%args)
if not args:
args = get_session_from_request(self.request)
if not args:
fbcookie = facebook.get_user_from_cookie(self.request.cookies, FACEBOOK_APP_ID, FACEBOOK_APP_SECRET)
if fbcookie:
self.get = self.post = (lambda *a: None)
self.redirect('/fb/')
return
self.sns = 'web'
self.user = None
#self.tempvars = {'sbs':'web'}
self.tempvars['login'] = False
else:
self.tempvars['login'] = True
self.sns = args['sns']
if self.sns == 'fb':
self.tempvars['onFacebook'] = True
fbcookie = facebook.get_user_from_cookie(self.request.cookies, FACEBOOK_APP_ID, FACEBOOK_APP_SECRET)
if fbcookie and fbcookie['uid'] != args['uid'][3:]:
self.get = self.post = (lambda *a: None)
self.redirect('/fb/')
return
self.tempvars.update(args) #['sns','uid','token']
#self.tempvars['SNS_PREP'] = SNS_PREPS.get(args['sns'],'')
try:
self.user = helper.get_user_by_key(args['uid'])
self.tempvars['user'] = self.user #???
self.tempvars['uname'] = self.user.name
except Exception,e:
self.response.out.write('Error:%s'%e)
self.get = self.post = (lambda *args: None)
def get(self):
""" With public get, only the first page or an image is shown."""
BaseRequest.post(self)
def valid_signature(handler,secret=FACEBOOK_APP_SECRET):
""" Validate all parameters from self.request against the signature using md5 and raises an exception if invalid.
All parameters in handler.request.arguments() will be considered excluding 'sig'. These arguments are sorted
in alphabetical order and hashed using md5 and then compare with sig value.
@param handler : webapp.RequestHandler object
@exception : Invalid signature exception
"""
sig = handler.request.get('sig')
if sig != '':
s = ''.join(['%s=%s'%(k,handler.request.get(k)) for k in sorted(handler.request.arguments()) if k != 'sig'])
csig = hashlib.md5(s + secret).hexdigest()
if sig != csig:
logging.warning('Wrong sig: s=%s,\nsecret=%s,\nsig=%s,\ncsig=%s'%(s,secret,sig,csig))
return False
else:
logging.warning('main.valid_signature(): sig = "", signature not validated')
return True
class TaskQueueRequest(webapp.RequestHandler):
""" Sysadmin to update today's statistics.
And log important activities for admin purpose.
/task/dau?
/task/log?user=key_name&game=key_name&action=string&donetime=datetime
/task/genre/add_book?genre=Fantasy
"""
def post(self):
paths = self.request.path
if paths == '/task/dau':
helper.dau(self.request)
elif paths == '/task/log':
helper.log(self.request)
elif paths == '/task/mail':
helper.email(self.request)
elif paths == '/task/genre/add_book':
helper.add_book_to_genre(self.request)
elif paths.startswith('/task/feed/add_'):
helper.post_feed(paths[11:],self.request)
elif paths == '/task/uremind':
helper.remind_users(self)
elif paths == '/task/birthday':
#logging.debug('TaskQueueRequest.post: %s'%paths)
helper.send_birthday_gifts(self)
elif paths == '/task/bdaygift':
helper.send_gift(self)
else:
logging.error('Unknown TaskQueue request path:%s'%paths)
def get(self):
self.post()
class MySpaceRequest(webapp.RequestHandler):
"""
OAuth consumer key: http://www.myspace.com/552003400
OAuth secret: ee27668b30f241c7a6630fdd0715d911b67ea77cd9fa42479b69df7cf2ca3a81
"""
def post(self):
"""
http://suicomics.appspot.com/ms/install when MySpace user installs this app
http://suicomics.appspot.com/ms/remove when MySpace user uninstalls this app
e.g.:
http://www.example.com/install?oauth_consumer_key=http%3A%2F%2Ffoobartestapp%2Ftestappdsadfsafsd&oauth_nonce=633744074946959871&oauth_signature=e5UArG999f7Lo7rzdHq35Iiwrp8%3D&oauth_signature_method=HMAC-SHA1&oauth_timestamp=1238810694&oauth_version=1.0&opensocial_owner_id=198216895&opensocial_viewer_id=198216895&sourceURL=http%3A%2F%2Fwww.myspace.com%2F317482931
"""
paths = self.request.path
if paths.endswith('/ms/install'):
logging.info('MySpace user installs app') #request is signed with uid
consumer_key = self.request.get('oauth_consumer_key') #http://..
oauth_nonce = self.request.get('oauth_nonce') #6337934334343
oauth_signature = self.request.get('oauth_signature') #e5Ua..
oauth_signature_method = self.request.get('oauth_signature_method') #HMAC-SHA1
oauth_timestamp = self.request.get('oauth_timestamp') #12323423
oauth_version = self.request.get('oauth_version') #1.0
opensocial_owner_id = self.request.get('opensocial_owner_id') #23434
opensocial_viewer_id = self.request.get('opensocial_viewer_id') #23432
sourceURL = self.request.get('sourceURL') #http..
elif paths.endswith('/ms/remove'):
logging.info('MySpace user uninstalls app')
class CrossDomainAccess(webapp.RequestHandler):
def get(self):
f = open('crossdomain.xml')
txt=f.read()
f.close()
self.response.headers.add_header('Content-Type','application/xml')
self.response.out.write(txt)
class GooglebotRequest(webapp.RequestHandler):
def get(self):
f = open('robots.txt')
txt = f.read()
f.close()
self.response.out.write(txt)
class GiftViewRequest(webapp.RequestHandler):
def get(self):
gid = self.request.get('g')
if gid:
self.response.out.write(helper.gen_gift_view(gid))
def post(self):
self.get()
class FacebookPayment(webapp.RequestHandler):
def post(self):
import pay
pay.pay_via_facebook_credit(self, FACEBOOK_APP_SECRET)
def get(self):
logging.error('/fb/order using get')
class PageUploadRequest(webapp.RequestHandler):
""" Page image upload request via Flash file uploader (YUI).
No cookie is used, so can't use BaseRequest, the session is passed as parameter 'ck'.
"""
def post(self):
ck = self.request.get('ck')
if not ck or ck.find('SC_Session')<0:
self.fail('No cookie')
return
cki = re.findall(r'SC_Session="([^"]+)"',ck)[0]
args = parse_session(cki) #uid,token,sns,expires
if not args:
logging.warning('PageUploadRequest. parse_session return None')
self.fail('Invalid upload')
return
uid = args['uid']
u = helper.get_user_by_key(uid, None, False)
if not u:
logging.warning('User %s not found'%uid)
self.fail('Invalid user')
return
fname = self.request.get('Filename')
x = fname.rfind('.')
if x < 0:
self.fail('Invalid image file')
return
ext = fname[x+1:]
if not ext in ['jpg','png','gif','jpeg']:
self.fail("Not supported image format (only .jpg,.png,.gif)")
return
bkid = self.request.get('bk')
pgid = self.request.get('pg')
if not bkid or not pgid:
self.fail('No proper book or page')
return
try:
helper.save_page_image(u,bkid,pgid,ext,self.request.get('Filedata'))
self.response.out.write('OK')
except Exception,e:
logging.exception(e)
self.fail(e)
def fail(self,msg):
self.response.out.write('{"error":"%s"}' % msg)
def main():
handlers = [
('/fb/remove',FacebookRemove),
('/fb/gift/open',GiftViewRequest),
('/fb/order',FacebookPayment),
('/gift/open',GiftViewRequest),
('/fb/.*',FacebookRequest),
('/admin/.*',AdminRequest),
('/gg/.*',GoogleRequest),
('/task/.*',TaskQueueRequest),
('/mm/.*',MediaReader),
('/ms/.*',MySpaceRequest),
('/robots.txt',GooglebotRequest),
('/crossdomain.*',CrossDomainAccess),
('/upload',PageUploadRequest),
('/logout',LogoutRequest),
('/.*',WebRequest)
]
app = webapp.WSGIApplication(handlers, debug=True)
run_wsgi_app(app)
#webapp.template.register_template_library('dtfilter')
if __name__ == '__main__':
main()
|
tedwen/suicomics
|
src/main.py
|
Python
|
apache-2.0
| 43,823
|
[
"VisIt"
] |
be0b821cc1f6f9348e61a65b6ab89acf10825ae7697038e4ca8a9568419a29a5
|
# -*- coding: utf-8 -*-
from pd_make import entry_data, aq_correction, stable_entr, form_e, mke_pour_ion_entr
def pd_entries(mtnme_1,mtnme_2):
"""
    Creates the entry objects corresponding to a binary or single-component
Pourbaix diagram
Args:
mtnme_1: Name of element 1
mtnme_2: Name of element 2
"""
################################## INPUTS #######################
mprester_key = 'ZJhfHmMTTwbW29Sr' # Input your materials project id
# Local directory containing entry data (solid and ion)
direct_0 = '/home/flores12/01_ORR-MatStabScreen/01_virenv-pymatgen/01_data/01-1_local_MP_entry/'
#################################################################
entry_ion_data = entry_data(mtnme_1, mtnme_2, direct_0, mprester_key)
entries = entry_ion_data["entries"]
ion_dict_1 = entry_ion_data["ion_dict_1"]
if not mtnme_1==mtnme_2:
ion_dict_2 = entry_ion_data["ion_dict_2"]
print ion_dict_1
############################## 1 Element ########################
if mtnme_1 == mtnme_2:
ref_state_1=str(ion_dict_1[0]['Reference Solid'])
ref_dict_1 = {ref_state_1: ion_dict_1[0]['Reference solid energy']}
entries_aqcorr = aq_correction(entries)
# #TEMP
# for i in entries_aqcorr:
# i.correction=0
stable_solids_minus_h2o = stable_entr(entries_aqcorr)
pbx_solid_entries = form_e(stable_solids_minus_h2o,
entries_aqcorr)
pbx_ion_entries_1 = mke_pour_ion_entr(mtnme_1,
ion_dict_1, stable_solids_minus_h2o, ref_state_1,
entries_aqcorr, ref_dict_1)
all_entries = pbx_solid_entries + pbx_ion_entries_1
return all_entries
############################## 2 Elements #######################
else:
ref_state_1=str(ion_dict_1[0]['Reference Solid'])
ref_state_2=str(ion_dict_2[0]['Reference Solid'])
ref_dict_1 = {ref_state_1: ion_dict_1[0]['Reference solid energy']}
ref_dict_2 = {ref_state_2: ion_dict_2[0]['Reference solid energy']}
entries_aqcorr = aq_correction(entries)
# # TEMP
# for i in entries_aqcorr:
# i.correction=0
stable_solids_minus_h2o = stable_entr(entries_aqcorr)
pbx_solid_entries = form_e(stable_solids_minus_h2o,
entries_aqcorr)
pbx_ion_entries_1 = mke_pour_ion_entr(mtnme_1,
ion_dict_1, stable_solids_minus_h2o, ref_state_1,
entries_aqcorr, ref_dict_1)
pbx_ion_entries_2 = mke_pour_ion_entr(mtnme_2,
ion_dict_2, stable_solids_minus_h2o, ref_state_2,
entries_aqcorr, ref_dict_2)
all_entries = pbx_solid_entries + pbx_ion_entries_1 + pbx_ion_entries_2
return all_entries
|
raulf2012/pourbaix_pymatgen
|
pourdiag.py
|
Python
|
mit
| 2,551
|
[
"pymatgen"
] |
6dbd4a00af49ff75bd31cca531e2b52cb454890988aa9c003321b343d61680e1
|
#
# QE.py
#
# Interface to Quantum ESPRESSO (http://www.quantum-espresso.org)
#
# Copyright (c) 2014-2020 Terumasa Tadano
#
# This file is distributed under the terms of the MIT license.
# Please see the file 'LICENCE.txt' in the root directory
# or http://opensource.org/licenses/mit-license.php for information.
#
from __future__ import print_function
import numpy as np
import math
import copy
import sys
class QEParser(object):
def __init__(self):
self._prefix = None
self._lattice_vector = None
self._inverse_lattice_vector = None
self._nat = 0
self._x_fractional = None
self._kd = None
self._kdname = None
self._counter = 1
self._nzerofills = 0
self._disp_conversion_factor = 1.0
self._energy_conversion_factor = 1.0
self._force_conversion_factor = 1.0
self._initial_structure_loaded = False
self._print_disp = True
self._print_force = True
self._print_energy = False
self._print_born = False
self._list_CONTROL = []
self._list_SYSTEM = []
self._list_ELECTRONS = []
self._list_ATOMIC_SPECIES = []
self._list_ATOMIC_POSITIONS = []
self._list_K_POINTS = []
self._list_CELL_PARAMETERS = []
self._list_OCCUPATIONS = []
self._celldm = [None] * 6
self._BOHR_TO_ANGSTROM = 0.5291772108
self._RYDBERG_TO_EV = 13.60569253
def load_initial_structure(self, file_in):
# Parse fortran namelists
self._list_CONTROL = self._get_namelist(file_in, "&CONTROL")
self._list_SYSTEM = self._get_namelist(file_in, "&SYSTEM")
self._list_ELECTRONS = self._get_namelist(file_in, "&ELECTRONS")
# Parse general options
tags = ["ATOMIC_SPECIES", "ATOMIC_POSITIONS", "K_POINTS",
"CELL_PARAMETERS", "OCCUPATIONS", "CONSTRAINTS", "ATOMIC_FORCES"]
self._list_ATOMIC_SPECIES = self._get_options("ATOMIC_SPECIES", tags, file_in)
self._list_ATOMIC_POSITIONS = self._get_options("ATOMIC_POSITIONS", tags, file_in)
self._list_K_POINTS = self._get_options("K_POINTS", tags, file_in)
self._list_CELL_PARAMETERS = self._get_options("CELL_PARAMETERS", tags, file_in)
self._list_OCCUPATIONS = self._get_options("OCCUPATIONS", tags, file_in)
# Set lattice vectors and fractional coordinates
self._set_system_info()
self._initial_structure_loaded = True
def generate_structures(self, prefix, header_list, disp_list):
self._set_number_of_zerofill(len(disp_list))
self._prefix = prefix
self._counter = 1
for header, disp in zip(header_list, disp_list):
self._generate_input(header, disp)
def parse(self, initial_pwin, pwout_files, pwout_file_offset, str_unit,
output_flags, filter_emin=None, filter_emax=None):
if not self._initial_structure_loaded:
self.load_initial_structure(initial_pwin)
self._set_unit_conversion_factor(str_unit)
self._set_output_flags(output_flags)
if self._print_disp or self._print_force:
self._print_displacements_and_forces(pwout_files,
pwout_file_offset,
filter_emin,
filter_emax)
elif self._print_energy:
self._print_energies(pwout_files, pwout_file_offset)
elif self._print_born:
self._print_borninfo(pwout_files)
def get_displacements(self, pwout_files, unit="bohr"):
if not self._initial_structure_loaded:
raise RuntimeError("Please call load_initial_structure before using this method")
x0 = np.round(self._x_fractional, 8)
lavec_transpose = self._lattice_vector.transpose()
vec_refold = np.vectorize(self._refold)
disp_merged = []
if unit == "bohr":
unit_factor = 1.0 / self._BOHR_TO_ANGSTROM
elif unit == "angstrom":
unit_factor = 1.0
else:
raise RuntimeError("Invalid unit type. Valid values are 'bohr' and 'angstrom'.")
for search_target in pwout_files:
x = self._get_coordinates_pwout(search_target)
ndata, _, _ = np.shape(x)
disp = np.zeros((ndata, self._nat, 3))
for idata in range(ndata):
disp[idata, :, :] = x[idata, :, :] - x0
disp[idata, :, :] = np.dot(vec_refold(disp[idata, :, :]), lavec_transpose)
disp[idata, :, :] *= unit_factor
disp_merged.extend(disp)
return disp_merged
def _generate_input(self, header, disp):
filename = self._prefix + str(self._counter).zfill(self._nzerofills) + ".pw.in"
with open(filename, 'w') as f:
for entry in self._list_CONTROL:
f.write(entry)
for entry in self._list_SYSTEM:
f.write(entry)
for entry in self._list_ELECTRONS:
f.write(entry)
for entry in self._list_ATOMIC_SPECIES:
f.write(entry)
f.write("ATOMIC_POSITIONS crystal\n")
for i in range(self._nat):
f.write("%s %20.15f %20.15f %20.15f\n" % (self._kdname[self._kd[i]],
self._x_fractional[i][0] + disp[i, 0],
self._x_fractional[i][1] + disp[i, 1],
self._x_fractional[i][2] + disp[i, 2]))
for entry in self._list_K_POINTS:
f.write(entry)
for entry in self._list_CELL_PARAMETERS:
f.write(entry)
for entry in self._list_OCCUPATIONS:
f.write(entry)
f.write("\n")
self._counter += 1
def _print_displacements_and_forces(self, pwout_files,
file_offset, filter_emin, filter_emax):
x0 = np.round(self._x_fractional, 8)
lavec_transpose = self._lattice_vector.transpose() / self._BOHR_TO_ANGSTROM
vec_refold = np.vectorize(self._refold)
# Parse offset component
if file_offset is None:
disp_offset = np.zeros((1, self._nat, 3))
force_offset = np.zeros((self._nat, 3))
epot_offset = 0.0
else:
x_offset = self._get_coordinates_pwout(file_offset)
if x_offset is None:
raise RuntimeError("File %s does not contain position entry" % file_offset)
ndata_offset, _, _ = np.shape(x_offset)
if ndata_offset > 1:
raise RuntimeError("File %s contains too many position entries" % file_offset)
disp_offset = x_offset - x0
force_offset = self._get_atomicforces_pwout(file_offset)
if force_offset is None:
raise RuntimeError("File %s does not contain force entry" % file_offset)
try:
force_offset = np.reshape(force_offset, (self._nat, 3))
except:
raise RuntimeError("File %s contains too many force entries" % file_offset)
epot_offset = self._get_energies_pwout(file_offset)
if epot_offset is None:
raise RuntimeError("File %s does not contain energy entry" % file_offset)
epot_offset = np.array(epot_offset, dtype=np.float)
if len(epot_offset) > 1:
raise RuntimeError("File %s contains too many energy entries" % file_offset)
for search_target in pwout_files:
x = self._get_coordinates_pwout(search_target)
force = self._get_atomicforces_pwout(search_target)
epot = self._get_energies_pwout(search_target)
if x is None or force is None or epot is None:
continue
num_data_force = len(force) // (3 * self._nat)
force = np.reshape(force, (num_data_force, self._nat, 3))
num_data_disp, _, _ = np.shape(x)
if num_data_disp != num_data_force and self._print_disp and self._print_force:
print(
"Error: The number of entries of displacement and force is inconsistent.")
print("Ndata disp : %d, Ndata force : %d" %
(num_data_disp, num_data_force))
exit(1)
ndata_energy = len(epot)
if ndata_energy != num_data_disp:
raise RuntimeError("The numbers of displacement and energy entries are different.")
epot = np.array(epot, dtype=np.float)
epot -= epot_offset
epot *= self._RYDBERG_TO_EV
for idata in range(num_data_disp):
if filter_emin is not None:
if filter_emin > epot[idata]:
continue
if filter_emax is not None:
if filter_emax < epot[idata]:
continue
if self._print_disp:
disp = x[idata, :, :] - x0 - disp_offset
disp = np.dot(vec_refold(disp[0, :, :]), lavec_transpose)
disp *= self._disp_conversion_factor
if self._print_force:
f = force[idata, :, :] - force_offset
f *= self._force_conversion_factor
print("# Filename: %s, Snapshot: %d, E_pot (eV): %s" %
(search_target, idata + 1, epot[idata]))
if self._print_disp and self._print_force:
for i in range(self._nat):
print("%15.7F %15.7F %15.7F %20.8E %15.8E %15.8E" % (disp[i, 0],
disp[i, 1],
disp[i, 2],
f[i, 0],
f[i, 1],
f[i, 2]))
elif self._print_disp:
for i in range(self._nat):
print("%15.7F %15.7F %15.7F" % (disp[i, 0],
disp[i, 1],
disp[i, 2]))
elif self._print_force:
for i in range(self._nat):
print("%15.8E %15.8E %15.8E" % (f[i, 0],
f[i, 1],
f[i, 2]))
def _print_energies(self, pwout_files, file_offset):
if file_offset is None:
etot_offset = 0.0
else:
data = self._get_energies_pwout(file_offset)
if data is None:
raise RuntimeError("File %s does not contain energy entry" % file_offset)
if len(data) > 1:
raise RuntimeError("File %s contains too many energy entries" % file_offset)
etot_offset = data[0]
print("# Etot")
for search_target in pwout_files:
etot = self._get_energies_pwout(search_target)
if etot is None:
continue
for idata in range(len(etot)):
val = etot[idata] - etot_offset
val *= self._energy_conversion_factor
print("%19.11E" % val)
def _print_borninfo(self, phout_files):
for search_target in phout_files:
dielec, borncharge = self._get_borninfo_phout(search_target)
nat_prim, _, _ = np.shape(borncharge)
for i in range(3):
print("%16.8F %16.8F %16.8F" %
(dielec[i, 0], dielec[i, 1], dielec[i, 2]))
for j in range(nat_prim):
for i in range(3):
print("%16.8F %16.8F %16.8F" % (borncharge[j, i, 0],
borncharge[j, i, 1],
borncharge[j, i, 2]))
def _set_system_info(self):
list_mod = []
for obj in self._list_SYSTEM:
obj_split = obj.rstrip().split(',')
for subobj in obj_split:
if subobj:
index = subobj.find('=')
if index > 0:
subobj = subobj[:index] + " = " + subobj[index + 1:]
list_mod.append(subobj)
str_input = ""
for entry in list_mod:
str_input += entry + " "
entrylist = str_input.split()
for i in range(len(entrylist)):
if "ibrav" in entrylist[i]:
ibrav = int(entrylist[i + 2])
if "nat" in entrylist[i]:
self._nat = int(entrylist[i + 2])
if "ntyp" in entrylist[i]:
ntyp = int(entrylist[i + 2])
if "celldm(1)" in entrylist[i]:
# Do not assign the value if the comment character '!'
# appears in front of the celldm(1) keyword
has_comment = False
for elem in self._list_SYSTEM:
if "celldm(1)" in elem:
has_comment = ('!' == elem.strip().split()[0][0])
if not has_comment:
self._celldm[0] = float(entrylist[i + 2])
if "celldm(2)" in entrylist[i]:
self._celldm[1] = float(entrylist[i + 2])
if "celldm(3)" in entrylist[i]:
self._celldm[2] = float(entrylist[i + 2])
if "celldm(4)" in entrylist[i]:
self._celldm[3] = float(entrylist[i + 2])
if "celldm(5)" in entrylist[i]:
self._celldm[4] = float(entrylist[i + 2])
if "celldm(6)" in entrylist[i]:
self._celldm[5] = float(entrylist[i + 2])
self._set_lattice_vector(ibrav)
self._set_fractional_coordinate()
def _set_lattice_vector(self, ibrav):
""".
Computer lattice vector in units of Angstrom for given ibrav and celldm.
Doc/INPUT_PW.txt was used as a reference.
"""
lavec = np.zeros((3, 3))
if ibrav == 0:
            if not self._list_CELL_PARAMETERS:
raise RuntimeError("CELL_PARAMETERS must be given when ibrav = 0.")
mode = self._list_CELL_PARAMETERS[0].rstrip().split()
if len(mode) == 1:
raise RuntimeError(
"Error : Please specify either alat, bohr, or angstrom for CELL_PARAMETERS")
mode_str = mode[1].lower()
for i in range(3):
lavec[i][:] = [float(entry) for entry in
self._list_CELL_PARAMETERS[i + 1].rstrip().split()]
lavec = np.array(lavec)
if "alat" in mode_str:
if not self._celldm[0]:
raise RuntimeError(
"celldm(1) must be given when 'alat' is used for CELL_PARAMETERS")
for i in range(3):
for j in range(3):
lavec[i][j] *= self._celldm[0]
elif "angstrom" in mode_str:
# convert the lattice vectors in Bohr unit here to make them back to
# the angstrom unit at the end of this method.
for i in range(3):
for j in range(3):
lavec[i][j] /= self._BOHR_TO_ANGSTROM
elif "bohr" not in mode_str:
raise RuntimeError("Error : Invalid option for CELL_PARAMETERS: %s" %
mode[1])
elif ibrav == 1:
if not self._celldm[0]:
raise RuntimeError("celldm(1) must be given when ibrav = 1.")
else:
a = self._celldm[0]
lavec = np.array([[a, 0.0, 0.0],
[0.0, a, 0.0],
[0.0, 0.0, a]])
elif ibrav == 2:
if not self._celldm[0]:
raise RuntimeError("celldm(1) must be given when ibrav = 2.")
else:
a = self._celldm[0] / 2.0
lavec = np.array([[-a, 0.0, a],
[0.0, a, a],
[-a, a, 0.0]])
elif ibrav == 3:
if not self._celldm[0]:
raise RuntimeError("celldm(1) must be given when ibrav = 3.")
else:
a = self._celldm[0] / 2.0
lavec = np.array([[a, a, a],
[-a, a, a],
[-a, -a, a]])
elif ibrav == 4:
if not self._celldm[0] or not self._celldm[2]:
raise RuntimeError("celldm(1) and celldm(3) must be given when ibrav = 4.")
else:
a = self._celldm[0]
c = self._celldm[0] * self._celldm[2]
lavec = np.array([[a, 0.0, 0.0],
[-0.5 * a, math.sqrt(3.) / 2.0 * a, 0.0],
[0.0, 0.0, c]])
elif ibrav == 5 or ibrav == -5:
if not self._celldm[0] or not self._celldm[3]:
raise RuntimeError("celldm(1) and celldm(4) must be given when ibrav = 5, -5.")
else:
a = self._celldm[0]
cosalpha = self._celldm[3]
tx = a * math.sqrt((1.0 - cosalpha) / 2.)
ty = a * math.sqrt((1.0 - cosalpha) / 6.)
tz = a * math.sqrt((1.0 + 2.0 * cosalpha) / 3.)
if ibrav == 5:
lavec = np.array([[tx, -ty, tz],
[0.0, 2.0 * ty, tz],
[-tx, -ty, tz]])
else:
a_prime = a / math.sqrt(3.0)
u = tz - 2.0 * math.sqrt(2.0) * ty
v = tz + math.sqrt(2.0) * ty
u *= a_prime
v *= a_prime
lavec = np.array([[u, v, v],
[v, u, v],
[v, v, u]])
elif ibrav == 6:
if not self._celldm[0] or not self._celldm[2]:
raise RuntimeError("celldm(1) and celldm(3) must be given when ibrav = 6.")
else:
a = self._celldm[0]
c = self._celldm[0] * self._celldm[2]
lavec = np.array([[a, 0.0, 0.0],
[0.0, a, 0.0],
[0.0, 0.0, c]])
elif ibrav == 7:
if not self._celldm[0] or not self._celldm[2]:
raise RuntimeError("celldm(1) and celldm(3) must be given when ibrav = 7.")
else:
a = self._celldm[0]
c = self._celldm[0] * self._celldm[2]
lavec = np.array([[a / 2.0, -a / 2.0, c / 2.0],
[a / 2.0, a / 2.0, c / 2.0],
[-a / 2.0, -a / 2.0, c / 2.0]])
elif ibrav == 8:
if not self._celldm[0] or not self._celldm[1] or not self._celldm[2]:
raise RuntimeError("celldm(1), celldm(2), and celldm(3) must be given\
when ibrav = 8.")
else:
a = self._celldm[0]
b = self._celldm[0] * self._celldm[1]
c = self._celldm[0] * self._celldm[2]
lavec = np.array([[a, 0.0, 0.0],
[0.0, b, 0.0],
[0.0, 0.0, c]])
elif ibrav == 9 or ibrav == -9:
if not self._celldm[0] or not self._celldm[1] or not self._celldm[2]:
raise RuntimeError("celldm(1), celldm(2), and celldm(3) must be given\
when ibrav = 9 or -9.")
else:
a = self._celldm[0]
b = self._celldm[0] * self._celldm[1]
c = self._celldm[0] * self._celldm[2]
if ibrav == 9:
lavec = np.array([[a / 2., b / 2., 0.0],
[-a / 2., b / 2., 0.0],
[0.0, 0.0, c]])
else:
lavec = np.array([[a / 2., -b / 2., 0.0],
[a / 2., b / 2., 0.0],
[0.0, 0.0, c]])
elif ibrav == 10:
if not self._celldm[0] or not self._celldm[1] or not self._celldm[2]:
raise RuntimeError("celldm(1), celldm(2), and celldm(3) must be given\
when ibrav = 10.")
else:
a = self._celldm[0] / 2.0
b = self._celldm[0] * self._celldm[1] / 2.0
c = self._celldm[0] * self._celldm[2] / 2.0
lavec = np.array([[a, 0.0, c],
[a, b, 0.0],
[0.0, b, c]])
elif ibrav == 11:
if not self._celldm[0] or not self._celldm[1] or not self._celldm[2]:
raise RuntimeError("celldm(1), celldm(2), and celldm(3) must be given\
when ibrav = 11.")
else:
a = self._celldm[0] / 2.0
b = self._celldm[0] * self._celldm[1] / 2.0
c = self._celldm[0] * self._celldm[2] / 2.0
lavec = np.array([[a, b, c],
[-a, b, c],
[-a, -b, c]])
elif ibrav == 12:
if not self._celldm[0] or not self._celldm[1] or not self._celldm[2] or \
not self._celldm[3]:
raise RuntimeError("celldm(1), celldm(2), celldm(3), and celldm(4)\
must be given when ibrav = 12.")
else:
a = self._celldm[0]
b = self._celldm[0] * self._celldm[1]
c = self._celldm[0] * self._celldm[2]
gamma = math.acos(self._celldm[3])
lavec = np.array([[a, 0.0, 0.0],
[b * math.cos(gamma), b * math.sin(gamma), 0.0],
[0.0, 0.0, c]])
elif ibrav == -12:
if not self._celldm[0] or not self._celldm[1] or not self._celldm[2] or \
not self._celldm[4]:
raise RuntimeError("celldm(1), celldm(2), celldm(3), and celldm(5)\
must be given when ibrav = -12.")
else:
a = self._celldm[0]
b = self._celldm[0] * self._celldm[1]
c = self._celldm[0] * self._celldm[2]
beta = math.acos(self._celldm[4])
lavec = np.array([[a, 0.0, 0.0],
[0.0, b, 0.0],
[c * math.cos(beta), 0.0, c * math.sin(beta)]])
elif ibrav == 13:
if not self._celldm[0] or not self._celldm[1] or not self._celldm[2] or \
not self._celldm[3]:
raise RuntimeError("celldm(1), celldm(2), celldm(3), and celldm(4)\
must be given when ibrav = 13.")
else:
a = self._celldm[0]
b = self._celldm[0] * self._celldm[1]
c = self._celldm[0] * self._celldm[2]
gamma = math.acos(self._celldm[3])
lavec = np.array([[a / 2.0, 0.0, -c / 2.0],
[b * math.cos(gamma), b * math.sin(gamma), 0.0],
[a / 2.0, 0.0, c / 2.0]])
elif ibrav == 14:
if not self._celldm[0] or not self._celldm[1] or not self._celldm[2] or \
not self._celldm[3] or not self._celldm[4] or not self._celldm[5]:
raise RuntimeError("All celldm must be given when ibrav = 14.")
else:
a = self._celldm[0]
b = self._celldm[0] * self._celldm[1]
c = self._celldm[0] * self._celldm[2]
alpha = math.acos(self._celldm[3])
beta = math.acos(self._celldm[4])
gamma = math.acos(self._celldm[5])
lavec = np.array([[a, 0.0, 0.0],
[b * math.cos(gamma), b * math.sin(gamma), 0.0],
[c * math.cos(beta),
c * (math.cos(alpha) - math.cos(beta) *
math.cos(gamma)) / math.sin(gamma),
c * math.sqrt(1.0 + 2.0 * math.cos(alpha) * math.cos(beta) * math.cos(gamma)
- math.cos(alpha) ** 2 - math.cos(beta) ** 2 - math.cos(
gamma) ** 2) / math.sin(gamma)]])
else:
raise RuntimeError("Invalid ibrav = %s" % ibrav)
# if celldm(1) is empty, calculate it from the lattice vector for later use.
if not self._celldm[0]:
self._celldm[0] = math.sqrt(np.dot(lavec[0][:], lavec[0][:]))
# Transpose for later use
lavec = lavec.transpose()
# Convert to Angstrom unit
for i in range(3):
for j in range(3):
lavec[i][j] *= self._BOHR_TO_ANGSTROM
self._lattice_vector = lavec
self._inverse_lattice_vector = np.linalg.inv(lavec)
def _set_fractional_coordinate(self):
list_tmp = self._list_ATOMIC_POSITIONS[0].rstrip().split()
if len(list_tmp) == 1:
raise RuntimeError("Error : Please specify either alat, "
" bohr, angstrom, or crystal for ATOMIC_POSITIONS")
mode_str = list_tmp[1].lower()
if "crystal_sg" in mode_str:
raise RuntimeError(
"Error : Sorry. 'crystal_sg' is not supported in this script. "
"Please use another option.")
xtmp = np.zeros((self._nat, 3))
kd = []
for i in range(self._nat):
list_tmp = self._list_ATOMIC_POSITIONS[i + 1].rstrip().split()
kd.append(list_tmp[0])
xtmp[i][:] = [float(j) for j in list_tmp[1:4]]
# lattice_vector is in units of Angstrom, so the unit of aa_inv is (Angstrom)^-1
aa_inv = copy.deepcopy(self._inverse_lattice_vector)
if "alat" in mode_str:
# atomic positions are in cartesian coordinates in units of the lattice parameter (celldim(1))
a_angstrom = self._celldm[0] * self._BOHR_TO_ANGSTROM
for i in range(3):
for j in range(3):
aa_inv[i][j] *= a_angstrom
for i in range(self._nat):
xtmp[i][:] = np.dot(xtmp[i][:], aa_inv.transpose())
elif "bohr" in mode_str:
for i in range(3):
for j in range(3):
aa_inv[i][j] *= self._BOHR_TO_ANGSTROM
for i in range(self._nat):
xtmp[i][:] = np.dot(xtmp[i][:], aa_inv.transpose())
elif "angstrom" in mode_str:
for i in range(self._nat):
xtmp[i][:] = np.dot(xtmp[i][:], aa_inv.transpose())
elif "crystal" not in mode_str:
raise RuntimeError("Error : Invalid option for ATOMIC_POSITIONS: %s" % mode_str)
kdname = []
for entry in kd:
if entry not in kdname:
kdname.append(entry)
dict_kd = {}
counter = 0
for name in kdname:
dict_kd[name] = counter
counter += 1
kd_int = []
for entry in kd:
kd_int.append(dict_kd[entry])
self._kd = kd_int
self._kdname = kdname
self._x_fractional = xtmp
def _set_number_of_zerofill(self, npattern):
nzero = 1
while True:
npattern //= 10
if npattern == 0:
break
nzero += 1
self._nzerofills = nzero
def _set_unit_conversion_factor(self, str_unit):
if str_unit == "ev":
self._disp_conversion_factor = self._BOHR_TO_ANGSTROM
self._energy_conversion_factor = self._RYDBERG_TO_EV
elif str_unit == "rydberg":
self._disp_conversion_factor = 1.0
self._energy_conversion_factor = 1.0
elif str_unit == "hartree":
self._disp_conversion_factor = 1.0
self._energy_conversion_factor = 0.5
else:
raise RuntimeError("This cannot happen.")
self._force_conversion_factor = self._energy_conversion_factor / self._disp_conversion_factor
def _set_output_flags(self, output_flags):
self._print_disp, self._print_force, \
self._print_energy, self._print_born = output_flags
@property
def nat(self):
return self._nat
@nat.setter
def nat(self, nat):
self._nat = nat
@property
def lattice_vector(self):
return self._lattice_vector
@lattice_vector.setter
def lattice_vector(self, lattice_vector):
self._lattice_vector = lattice_vector
self._inverse_lattice_vector = np.linalg.inv(lattice_vector)
@property
def inverse_lattice_vector(self):
return self._inverse_lattice_vector
@property
def kd(self):
return self._kd
@property
def kd_in_str(self):
return [self._kdname[i] for i in self._kd]
@kd.setter
def kd(self, kd):
self._kd = kd
@kd_in_str.setter
def kd_in_str(self, kd_in_str):
map_name2num = {}
for i, name in enumerate(self._kdname):
map_name2num[name] = i
self._kd = [map_name2num[t] for t in kd_in_str]
@property
def atomic_kinds(self):
return self._kd
@property
def x_fractional(self):
return self._x_fractional
@x_fractional.setter
def x_fractional(self, x_fractional):
self._x_fractional = x_fractional
@property
def list_system(self):
return self._list_SYSTEM
@list_system.setter
def list_system(self, list_in):
self._list_SYSTEM = list_in
@property
def list_cell_parameters(self):
return self._list_CELL_PARAMETERS
@list_cell_parameters.setter
def list_cell_parameters(self, list_in):
self._list_CELL_PARAMETERS = list_in
@property
def list_k_points(self):
return self._list_K_POINTS
@list_k_points.setter
def list_k_points(self, list_in):
self._list_K_POINTS = list_in
@staticmethod
def _get_namelist(file_in, namelist_tag):
list_out = []
flag_add = False
with open(file_in) as openfileobject:
for line in openfileobject:
line_upper = line.upper()
if namelist_tag in line_upper:
flag_add = True
list_out.append(line)
elif line.strip() == "/":
flag_add = False
elif flag_add:
list_out.append(line)
if len(list_out) == 0:
print("%s field not found" % namelist_tag)
exit(1)
list_out.append("/\n")
return list_out
@staticmethod
def _get_options(option_tag, taglists, file_in):
list_out = []
flag_add = False
with open(file_in) as openfileobject:
for line in openfileobject:
if option_tag in line:
flag_add = True
list_out.append(line)
elif len(line.split()) > 0 and line.split()[0] in taglists:
flag_add = False
elif flag_add:
if line.strip():
list_out.append(line)
return list_out
@staticmethod
def _refold(x):
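        # Fold a fractional displacement into the range [-0.5, 0.5) (minimum-image convention).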
if x >= 0.5:
return x - 1.0
elif x < -0.5:
return x + 1.0
else:
return x
def _get_coordinates_pwout(self, pwout_file):
"""
Return the fractional coordinates of atoms
"""
search_flag = "site n. atom positions (alat units)"
x = np.zeros((self._nat, 3))
num_data_disp_extra = 0
basis = ""
found_tag = False
f = open(pwout_file, 'r')
line = f.readline()
while line:
if search_flag in line:
found_tag = True
for i in range(self._nat):
line = f.readline()
x[i][:] = [float(t) for t in line.rstrip().split()[6:9]]
break
line = f.readline()
if not found_tag:
#print("%s tag not found in %s" % (search_flag, pwout_file), file=sys.stderr)
return None
x = self._celldm[0] * np.dot(x, self._inverse_lattice_vector.transpose()) \
* self._BOHR_TO_ANGSTROM
# Search additional entries containing atomic position
# (for parsing MD trajectory)
search_flag2 = "ATOMIC_POSITIONS "
x_additional = []
while line:
if search_flag2 in line:
if not basis:
basis = line.rstrip().split()[1]
num_data_disp_extra += 1
for i in range(self._nat):
line = f.readline()
x_additional.extend([t for t in line.rstrip().split()[1:4]])
line = f.readline()
f.close()
x_additional = np.array(x_additional, dtype=np.float)
# The basis of the coordinate in x_additional can be different
# from that of x. Therefore, perform basis conversion here.
if num_data_disp_extra > 0:
if "alat" in basis:
conversion_mat = self._celldm[0] \
* self._inverse_lattice_vector.transpose() \
* self._BOHR_TO_ANGSTROM
elif "bohr" in basis:
                conversion_mat = self._inverse_lattice_vector.transpose() \
* self._BOHR_TO_ANGSTROM
elif "angstrom" in basis:
conversion_mat = self._inverse_lattice_vector.transpose()
elif "crystal" in basis:
conversion_mat = np.identity(3)
else:
raise RuntimeError("This cannot happen.")
x_additional = np.reshape(x_additional, (num_data_disp_extra, self._nat, 3))
for i in range(num_data_disp_extra):
x_additional[i, :, :] \
= np.dot(x_additional[i, :, :], conversion_mat)
if num_data_disp_extra <= 1:
return np.reshape(x, (1, self._nat, 3))
else:
x_merged = np.zeros((num_data_disp_extra, self._nat, 3))
x_merged[0, :, :] = x[:, :]
x_merged[1:, :, :] = x_additional[:-1, :, :]
return x_merged
def _get_atomicforces_pwout(self, pwout_file):
search_tag = "Forces acting on atoms (Ry/au):"
search_tag_QE6 = "Forces acting on atoms (cartesian axes, Ry/au):"
found_tag = False
f = open(pwout_file, 'r')
line = f.readline()
force = []
while line:
if search_tag in line or search_tag_QE6 in line:
found_tag = True
f.readline()
for i in range(self._nat):
line = f.readline()
force.extend([t for t in line.rstrip().split()[6:9]])
line = f.readline()
f.close()
if not found_tag:
print("following search tags not found in %s" % pwout_file, file=sys.stderr)
print(search_tag, file=sys.stderr)
print(search_tag_QE6, file=sys.stderr)
return None
return np.array(force, dtype=np.float)
@staticmethod
def _get_energies_pwout(pwout_file):
search_tag = "! total energy"
found_tag = False
etot = []
with open(pwout_file) as openfileobject:
for line in openfileobject:
if search_tag in line:
etot.extend([line.rstrip().split()[4]])
found_tag = True
if not found_tag:
print("%s tag not found in %s" % (search_tag, pwout_file), file=sys.stderr)
return None
return np.array(etot, dtype=np.float)
@staticmethod
def _get_borninfo_phout(phout_file):
dielec = []
borncharge = []
search_tag1 = "Dielectric constant in cartesian axis"
f = open(phout_file, 'r')
line = f.readline()
found_tag1 = False
found_tag2 = False
while line:
if search_tag1 in line:
found_tag1 = True
f.readline()
for i in range(3):
line = f.readline()
dielec.extend([float(t) for t in line.strip().split()[1:4]])
if "Px" in line or "Py" in line or "Pz" in line:
found_tag2 = True
borncharge.extend(float(t) for t in line.strip().split()[2:5])
line = f.readline()
f.close()
if not found_tag1 or not found_tag2:
print("Dielectric constants or Born effective charges are not found"
"in %s" % phout_file, file=sys.stderr)
return None
nat = len(borncharge) // 9
dielec = np.reshape(np.array(dielec[9:]), (3, 3))
borncharge = np.reshape(np.array(borncharge), (nat, 3, 3))
return dielec, borncharge
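# Minimal usage sketch of QEParser (file names below are placeholders, not shipped with this module):
#   parser = QEParser()
#   parser.load_initial_structure("scf.pw.in")
#   disps = parser.get_displacements(["disp01.pw.out", "disp02.pw.out"], unit="angstrom")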
|
ttadano/alamode
|
tools/interface/QE.py
|
Python
|
mit
| 37,994
|
[
"CRYSTAL",
"ESPResSo",
"Quantum ESPRESSO"
] |
81b33e32d2f8e32f807058ab06f58eba2ab222f9a3fab960538e4410b14494dd
|
import logging
logger = logging.getLogger('parse_cdr3.py')
from .all_genes import all_genes, gap_character
def get_cdr3_and_j_match_counts( organism, ab, qseq, j_gene, min_min_j_matchlen = 3,
extended_cdr3 = False ):
#fasta = all_fasta[organism]
jg = all_genes[organism][j_gene]
errors = []
## qseq starts at CA...
assert qseq[0] == 'C'
num_genome_j_positions_in_loop = len(jg.cdrs[0].replace(gap_character,''))-2
#num_genome_j_positions_in_loop = all_num_genome_j_positions_in_loop[organism][ab][j_gene]
if extended_cdr3: num_genome_j_positions_in_loop += 2 ## up to but not including GXG
## history: was only for alpha
aseq = qseq[:] ## starts at the C position
ja_gene = j_gene
#assert ja_gene in fasta
ja_seq = jg.protseq #fasta[ ja_gene ]
min_j_matchlen = min_min_j_matchlen+3
while min_j_matchlen >= min_min_j_matchlen:
ntrim =0
while ntrim+min_j_matchlen<len(ja_seq) and ja_seq[ntrim:ntrim+min_j_matchlen] not in aseq:
ntrim += 1
jatag = ja_seq[ntrim:ntrim+min_j_matchlen]
if jatag in aseq:
break
else:
min_j_matchlen -= 1
#print 'min_j_matchlen:',min_j_matchlen,'jatag:',jatag,'ntrim:',ntrim,'ja_seq:',ja_seq,'qseq',qseq
if jatag not in aseq:
        logger.error('J tag not found in CDR3 sequence: %s %s %s', ab, aseq, ja_seq)
errors.append( 'j{}tag_not_in_aseq'.format(ab) )
return '-',[100,0],errors
elif ja_seq.count( jatag ) != 1:
        logger.error('J tag occurs more than once in J sequence: %s %s %s', ab, aseq, ja_seq)
errors.append( 'multiple_j{}tag_in_jseq'.format(ab) )
return '-',[100,0],errors
else:
pos = aseq.find( jatag )
looplen = pos - ntrim + num_genome_j_positions_in_loop
if not extended_cdr3:
aseq = aseq[3:]
looplen -= 3 ## dont count CAX
if len(aseq)<looplen:
            logger.error('CDR3 sequence too short: %s %s %s', ab, aseq, ja_seq)
errors.append( ab+'seq_too_short' )
return '-',[100,0],errors
cdrseq = aseq[:looplen ]
## now count mismatches in the J gene, beyond the cdrseq
j_seq = jg.protseq #fasta[ j_gene ] ## not sure why we do this again (old legacy code)
if qseq.count( cdrseq ) > 1:
logger.error('multiple cdrseq occurrences %s %s'%(qseq,cdrseq))
errors.append('multiple_cdrseq_occ')
return '-',[100,0],errors
assert qseq.count(cdrseq) == 1
start_counting_qseq = qseq.find(cdrseq)+len(cdrseq)
start_counting_jseq = num_genome_j_positions_in_loop
j_match_counts = [0,0]
#assert extended_cdr3 ## otherwise I think this count is not right?
#print 'here',start_counting_qseq,start_counting_jseq,len(qseq)
for qpos in range( start_counting_qseq, len(qseq)):
jpos = start_counting_jseq + (qpos-start_counting_qseq)
#print 'here',qpos,jpos
if jpos>= len(j_seq): break
if qseq[qpos] == j_seq[jpos]:
j_match_counts[1] += 1
else:
j_match_counts[0] += 1
return cdrseq, j_match_counts,errors
def parse_cdr3( organism, ab, qseq, v_gene, j_gene, q2v_align, extended_cdr3 = False ):
## v_align is a mapping from 0-indexed qseq positions to 0-indexed v_gene protseq positions
#fasta = all_fasta[ organism ]
#align_fasta = all_align_fasta[ organism ]
vg = all_genes[organism][v_gene]
errors = []
## what is the C position in this v gene?
v_seq = vg.protseq #fasta[ v_gene ]
v_alseq = vg.alseq #align_fasta[ v_gene ]
assert v_seq == v_alseq.replace(gap_character,'')
alseq_cpos = vg.cdr_columns[-1][0] - 1 ## now 0-indexed
#alseq_cpos = alseq_C_pos[organism][ab] - 1 ## now 0-indexed
numgaps = v_alseq[:alseq_cpos].count(gap_character)
cpos = alseq_cpos - numgaps ## 0-indexed
cpos_match = -1
v_match_counts = [0,0]
qseq_len = len(qseq)
for (qpos,vpos) in sorted( q2v_align.iteritems() ):
#print 'q2v-align:',qpos, vpos, cpos
if qpos == len(qseq):
continue ## from a partial codon at the end
if vpos == cpos:
cpos_match = qpos
elif vpos <= cpos:
## only count v mismatches here
if qseq[qpos] == v_seq[vpos]:
v_match_counts[1] += 1
else:
v_match_counts[0] += 1
if cpos_match<0 or qseq[ cpos_match ] != 'C':
## problemo
logger.error('failed to find blast match to C position')
errors.append('no_V{}_Cpos_blastmatch'.format(ab))
return '-',[100,0],[100,0],errors
cdrseq, j_match_counts, other_errors = get_cdr3_and_j_match_counts( organism, ab, qseq[ cpos_match: ], j_gene,
extended_cdr3 = extended_cdr3 )
return cdrseq, v_match_counts, j_match_counts, errors+other_errors
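# Illustrative call sketch (gene names and alignment are placeholders, not real data):
#   cdr3, v_counts, j_counts, errs = parse_cdr3(
#       'human', 'A', qseq, 'TRAV1-1*01', 'TRAJ33*01', q2v_align, extended_cdr3=False)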
|
phbradley/tcr-dist
|
tcrdist/parse_cdr3.py
|
Python
|
mit
| 4,891
|
[
"BLAST"
] |
f759d0c93de25265b042a966076f814bf7ad4d8e50b6df63fcb1383eabbbd152
|
from time import localtime, sleep, time
import paho.mqtt.client as mqtt
from blinkt import set_pixel, show, clear
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("DSM/timer/test")
def on_message(client, userdata, msg):
global t, status
allowed_status = ["active","abort"]
message=msg.payload.decode('UTF-8')
split_message=message.split()
error=True
if len(split_message)==2:
if split_message[0].isdigit():
split_message[0]=int(split_message[0])
if split_message[1] in allowed_status:
error=False
if error==True:
print("SOMETHING FUCKED UP")
return
t=split_message[0]
status=split_message[1]
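# Example payloads expected on DSM/timer/test (values illustrative):
#   "1600000000 active"  -> count down to that epoch timestamp
#   "1600000000 abort"   -> stop the countdown and clear the LEDs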
#**** GLOBAL VARIABLES ****
t=0
status="stop"
previous_status="stop"
#**** MQTT Setup ****
client = mqtt.Client(protocol='MQTTv311')
client.on_connect = on_connect
client.on_message = on_message
client.connect("192.168.1.5", 1883, 60)
client.loop_start()
#count=int(input("Enter the seconds: "))
#timer(count)
#**** Main loop ****
print(time())
print(time()+10)
sleep(10)
print(time())
while True:
if t>0:
time_remaining=t-time()
print(time_remaining)
if time_remaining>=0:
print(time_remaining)
print(t)
if status=="active":
print("Countdown",time_remaining)
mins,secs = divmod(time_remaining,60)
print("t =",time_remaining)
print(mins,secs)
lights=[]
                if time_remaining>=60:
                    # Encode the whole minutes remaining as an 8-bit pattern (MSB first) in green.
                    for bit in (128, 64, 32, 16, 8, 4, 2, 1):
                        if mins>=bit:
                            lights.append("green")
                            mins=mins-bit
                        else:
                            lights.append(0)
                    print(lights)
                elif time_remaining>8:
                    # Encode the remaining seconds as an 8-bit pattern (MSB first) in amber.
                    for bit in (128, 64, 32, 16, 8, 4, 2, 1):
                        if secs>=bit:
                            lights.append("amber")
                            secs=secs-bit
                        else:
                            lights.append(0)
                    print(lights)
                elif time_remaining>=1:
                    # Final countdown: one red pixel per remaining second.
                    for i in range(8,0,-1):
                        if i>time_remaining:
                            lights.append(0)
                        else:
                            lights.append("red")
                    print(lights)
#t-=1
sleep(1)
elif status=="abort":
print("Stopping...")
clear()
show()
t=0
time_remaining=0
else:
t=0
time_remaining=0
|
dansmith9/MQTT-Blinkt-Countdown
|
MQTTcountdowntimer.py
|
Python
|
gpl-3.0
| 5,433
|
[
"Amber"
] |
bfa23cb44644226b5f5b5c6649de45239d21cff0bccfb32677b877422a485bed
|
# vim: fileencoding=utf-8 et ts=4 sts=4 sw=4 tw=0
"""
A base class for RPC services and proxies.
Authors:
* Brian Granger
* Alexander Glyzov
* Axel Voitier
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2012-2014. Brian Granger, Min Ragan-Kelley, Alexander Glyzov,
# Axel Voitier
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from abc import ABCMeta, abstractmethod
from random import randint, shuffle
from logging import getLogger
import zmq
from .utils import logger, detect_green_env, get_zmq_classes
from .serializer import PickleSerializer
from .datastruct import priority_dict
from .concurrency import get_tools
#-----------------------------------------------------------------------------
# RPC base
#-----------------------------------------------------------------------------
class RPCBase(object):
__metaclass__ = ABCMeta
logger = logger
def __init__(self, serializer=None, identity=None): #{
"""Base class for RPC service and proxy.
Parameters
==========
serializer : [optional] <Serializer>
An instance of a Serializer subclass that will be used to serialize
and deserialize args, kwargs and the result.
identity : [optional] <bytes>
"""
self.identity = identity or b'%08x' % randint(0, 0xFFFFFFFF)
self.socket = None
self._ready = False
self._serializer = serializer if serializer is not None else PickleSerializer()
self.bound = set()
self.connected = set()
self.reset()
#}
@abstractmethod
def _create_socket(self): #{
"A subclass has to create a socket here"
self._ready = False
#}
#-------------------------------------------------------------------------
# Public API
#-------------------------------------------------------------------------
def reset(self): #{
"""Reset the socket/stream."""
if self.socket is not None:
self.socket.close(linger=0)
self._create_socket()
self._ready = False
self.bound = set()
self.connected = set()
#}
def shutdown(self): #{
""" Deallocate resources (cleanup)
"""
self.logger.debug('closing the socket')
self.socket.close(0)
#}
def bind(self, urls, only=False): #{
"""Bind the service to a number of urls of the form proto://address"""
if isinstance(urls, basestring):
urls = [urls]
urls = set(urls)
bound = self.bound
fresh = urls - bound
for url in fresh:
self.socket.bind(url)
bound.add(url)
if only:
stale = bound - urls
for url in stale:
try: self.socket.unbind(url)
except: pass
bound.remove(url)
self._ready = bool(bound)
#}
def connect(self, urls, only=False): #{
"""Connect the service to a number of urls of the form proto://address"""
if isinstance(urls, basestring):
urls = [urls]
urls = set(urls)
connected = self.connected
fresh = urls - connected
for url in fresh:
self.socket.connect(url)
connected.add(url)
if only:
stale = connected - urls
for url in stale:
try: self.socket.disconnect(url)
except: pass
connected.remove(url)
self._ready = bool(connected)
#}
def bind_ports(self, ip, ports): #{
"""Try to bind a socket to the first available tcp port.
The ports argument can either be an integer valued port
or a list of ports to try. This attempts the following logic:
* If ports==0, we bind to a random port.
* If ports > 0, we bind to port.
* If ports is a list, we bind to the first free port in that list.
In all cases we save the eventual url that we bind to.
This raises zmq.ZMQBindError if no free port can be found.
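        For example (illustrative): port = self.bind_ports('127.0.0.1', [5555, 5556, 0])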
"""
if isinstance(ports, int):
ports = [ports]
for p in ports:
try:
if p==0:
port = self.socket.bind_to_random_port("tcp://%s" % ip)
else:
self.socket.bind("tcp://%s:%i" % (ip, p))
port = p
except zmq.ZMQError:
# bind raises this if the port is not free
continue
except zmq.ZMQBindError:
# bind_to_random_port raises this if no port could be found
continue
else:
break
else:
raise zmq.ZMQBindError('Could not find an available port')
url = 'tcp://%s:%i' % (ip, port)
self.bound.add(url)
self._ready = True
return port
#}
class RPCLoadBalancerBase(object):
""" RPC Load Balancer base class.
It is a smart ZMQ device with the ROUTER sockets on both sides.
It can be plugged in between an RPC client and RPC services to
achieve a _fair_ load balancing based on the total number of
running tasks in each connected service.
This device replaces the simplistic round-robin routing that is
built into the ZMQ DEALER socket. The round-robin approach is not
suited RPC because it makes for an uneven work spread among the
connected services.
Moreover the peer service discovery is done by a supplied function
`discovery_func` every `interval` seconds.
As a bonus such an intermediary quickly recognizes dead or
disconnected peers.
"""
__metaclass__ = ABCMeta
logger = getLogger('netcall.balancer')
def __init__(self, discover_func, interval=30, context=None, executor=None, bind_url=None):
""" Parameters
----------
discover_func - <callable> that returns a <dict> {<url>:<identity>} for active services
interval - (opt) <float> number of seconds to wait between service discoveries
context - (opt) ZMQ <Context> for sockets
executor - (opt) <Executor> for threads/greenlets
            bind_url      - (opt) <str> URL address for the client side ZMQ ROUTER socket
"""
self.discover_func = discover_func
self.interval = interval
Context, _ = get_zmq_classes() # auto detect green env
if context is None:
self.context = Context.instance()
else:
assert isinstance(context, Context)
self.context = context
self.green_env = detect_green_env()
self.tools = get_tools(env=self.green_env) # <netcall.concurrency.ConcurrencyTools>
self.executor = executor or self.tools.Executor(3)
self._ext_executor = executor is not None
# shared objects for tracking running tasks (protected with _lock)
self.addr_set = set() # set of connected service addresses
self.wid2addr_map = {} # {<worker_id> : <worker_addr>}
self.wid2nrun_map = priority_dict() # {<worker_id> : <n_running>}
self._lock = self.tools.Lock()
self._exit_ev = self.tools.Event()
self._pending = [] # pending requests
# client side socket
self.inp_sock = self.context.socket(zmq.ROUTER)
self.inp_addr = bind_url or 'inproc://%s-%s' % (
self.__class__.__name__,
b'%08x' % randint(0, 0xFFFFFFFF)
)
self.inp_sock.bind(self.inp_addr)
# worker side socket
self.out_sock = self.context.socket(zmq.ROUTER)
self.out_sock.ROUTER_MANDATORY = 1 # fail explicitly if route_id is unknown
def _update_connections(self, fresh_addrs, stale_addrs):
""" Updates connections to the services
"""
#self.logger.debug('_update_connections(%r, %r)', fresh_addrs, stale_addrs)
for addr in stale_addrs:
self.out_sock.disconnect(addr)
for addr in fresh_addrs:
self.out_sock.connect(addr)
def _init_peer_refresher(self):
"This might be used in a subclass to init the _peer_refresher thread"
pass
def _peer_refresher(self):
""" Refresher thread discovers active RPC services every `self.interval` secs
and makes sure `self.out_sock` is connected accordingly.
"""
exit_ev = self._exit_ev
lock = self._lock
logger = self.logger
addr_set = self.addr_set
wid2addr_map = self.wid2addr_map
wid2nrun_map = self.wid2nrun_map
self._init_peer_refresher()
while not exit_ev.is_set():
logger.debug('discovering active services')
url2wid_map = self.discover_func()
logger.debug('found %s services', len(url2wid_map))
addrs = set(url2wid_map)
fresh_addrs = addrs - addr_set
stale_addrs = addr_set - addrs
if fresh_addrs or stale_addrs:
fresh_wids = []
with lock:
for addr in fresh_addrs:
logger.debug(' + %-20s (new)', addr)
addr_set.add(addr)
wid = url2wid_map[addr]
wid2addr_map[wid] = addr
fresh_wids.append(wid)
for addr in stale_addrs:
logger.debug(' - %-20s (old)', addr)
addr_set.discard(addr)
wid = url2wid_map[addr]
wid2addr_map.pop(wid, None)
wid2nrun_map.pop(wid, None)
self._update_connections(fresh_addrs, stale_addrs)
if fresh_wids:
# wait a bit to make sure sockets are connected
exit_ev.wait(0.33)
# to make wid2nrun_map.smallest() non-deterministic on different machines
shuffle(fresh_wids)
# now expose fresh ids to the balancer
with lock:
wid2nrun_map.update((wid,0) for wid in fresh_wids)
# trigger sending of pending requests
if self._pending:
self.send_requests()
if addrs:
exit_ev.wait(self.interval)
else:
logger.warning('no workers, waiting')
exit_ev.wait(3)
logger.debug('peer_refresher exited')
def pick_worker(self):
""" Returns <worker_id> for the least used worker
or None if there are no workers
"""
with self._lock:
if self.wid2nrun_map:
return self.wid2nrun_map.smallest()
else:
return None
def send_answer(self, answer):
""" Sends an answer to the client
"""
wid2nrun_map = self.wid2nrun_map
wid = answer[0]
idx = answer.index(b'|')
typ = answer[idx+2]
if typ in (b'OK', b'FAIL'):
with self._lock:
# decrement the number of running tasks for this worker
wid2nrun_map[wid] = max(0, wid2nrun_map.get(wid, 1) - 1)
# we skip the first identity -- it's a <service_id> added by our out_sock just now.
# the next identity should be <client_id> for it was set by our inp_sock
# on the request -- thus a necessary routing is already in place.
self.inp_sock.send_multipart(answer[1:])
def send_requests(self, *requests):
""" Sends all requests (including pending) to the
least loaded connected services.
Returns a number of sent requests.
"""
pending = self._pending
pending.extend(requests)
logger = self.logger
pick_worker = self.pick_worker
wid = pick_worker()
if wid is None:
logger.debug('no workers, postponing sending')
return 0
wid2addr_map = self.wid2addr_map
wid2nrun_map = self.wid2nrun_map
lock = self._lock
exit_ev = self._exit_ev
to_service = self.out_sock
n_sent = 0
while wid and pending and not exit_ev.is_set():
request = pending.pop(0)
try:
# prepending worker_id we picked to explicitly route the request
to_service.send_multipart([wid] + request)
except Exception, err:
pending.insert(0, request)
logger.warning('disabling worker %s (id:%s): %s', wid2addr_map.get(wid), wid, err)
with lock:
wid2nrun_map.pop(wid, None)
wid2addr_map.pop(wid, None)
else:
n_sent += 1
idx = request.index(b'|')
try: ignore = bool(int(request[idx+5]))
except: ignore = True
if not ignore:
with lock:
# increment the number of running tasks for this worker
wid2nrun_map[wid] = wid2nrun_map.get(wid, 0) + 1
if pending:
wid = pick_worker()
return n_sent
def _close_sockets(self, linger=0):
self.inp_sock.close(linger)
self.out_sock.close(linger)
def shutdown(self):
logger = self.logger
# set the exit event for the threads/greenlets
logger.debug('setting exit_ev for the threads')
self._exit_ev.set()
# send QUIT signal to the threads/greenlets
logger.debug('sending a QUIT signal to the threads')
# client side
exiter1 = self.context.socket(zmq.DEALER)
exiter1.IDENTITY = b'QUIT'
exiter1.connect(self.inp_addr)
exiter1.send(b'')
# service side
exiter2 = self.context.socket(zmq.DEALER)
exiter2.IDENTITY = b'QUIT'
addr = 'inproc://exiter-%08x' % randint(0, 0xFFFFFFFF)
exiter2.bind(addr)
self.out_sock.connect(addr)
exiter2.send(b'')
# shutdown the executor
if not self._ext_executor:
logger.debug('shutting down the executor')
self.executor.shutdown()
# close ZMQ sockets
self.logger.debug('closing ZMQ sockets')
exiter1.close(0)
exiter2.close(0)
self._close_sockets(0)
# we never destroy the ZMQ context here because we did not create it
|
srault95/netcall
|
netcall/base.py
|
Python
|
bsd-3-clause
| 15,103
|
[
"Brian"
] |
31433d6358d34e711662f5049c396bb0d149978992398573b04bb117a44c3a25
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import print_function, absolute_import, unicode_literals
from itertools import chain
from commoncode import fileset
from commoncode import filetype
from commoncode import fileutils
"""
Support for ignoring some file patterns such as .git or .svn directories, used
typically when walking file systems.
Also handle .ignore-like file and provide common default ignores.
"""
def is_ignored(location, ignores, unignores, skip_special=True):
"""
    Return a tuple of (pattern, message) if a file at location is ignored,
    True if it is a skipped special file, or False otherwise.
`ignores` and `unignores` are mappings of patterns to a reason.
"""
if skip_special and filetype.is_special(location):
return True
return fileset.match(location, includes=ignores, excludes=unignores)
def is_ignore_file(location):
"""
Return True if the location is an ignore file.
"""
return (filetype.is_file(location)
and fileutils.file_name(location) == '.scancodeignore')
def get_ignores(location, include_defaults=True):
"""
    Return ignores and unignores pattern mappings loaded from the
    file at `location`. Optionally include the default patterns.
"""
ignores = {}
unignores = {}
if include_defaults:
ignores.update(default_ignores)
patterns = fileset.load(location)
ign, uni = fileset.includes_excludes(patterns, location)
ignores.update(ign)
unignores.update(uni)
return ignores, unignores
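# Illustrative usage (paths are placeholders):
#   ignores, unignores = get_ignores('/project/.scancodeignore')
#   if is_ignored('/project/build/output.o', ignores, unignores):
#       pass  # skip this file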
#
# Default ignores
#
ignores_MacOSX = {
'.DS_Store': 'Default ignore: MacOSX artifact',
'._.DS_Store': 'Default ignore: MacOSX artifact',
'__MACOSX': 'Default ignore: MacOSX artifact',
'.AppleDouble': 'Default ignore: MacOSX artifact',
'.LSOverride': 'Default ignore: MacOSX artifact',
'.DocumentRevisions-V100': 'Default ignore: MacOSX artifact',
'.fseventsd': 'Default ignore: MacOSX artifact',
'.Spotlight-V100': 'Default ignore: MacOSX artifact',
'.VolumeIcon.icns': 'Default ignore: MacOSX artifact',
'.journal': 'Default ignore: MacOSX DMG/HFS+ artifact',
'.journal_info_block': 'Default ignore: MacOSX DMG/HFS+ artifact',
'.Trashes': 'Default ignore: MacOSX DMG/HFS+ artifact',
'\[HFS+ Private Data\]': 'Default ignore: MacOSX DMG/HFS+ artifact private data',
}
ignores_Windows = {
'Thumbs.db': 'Default ignore: Windows artifact',
'ehthumbs.db': 'Default ignore: Windows artifact',
'Desktop.ini': 'Default ignore: Windows artifact',
'$RECYCLE.BIN': 'Default ignore: Windows artifact',
'*.lnk': 'Default ignore: Windows artifact',
'System Volume Information': 'Default ignore: Windows FS artifact',
'NTUSER.DAT*': 'Default ignore: Windows FS artifact',
}
ignores_Linux = {
'.directory': 'Default ignore: KDE artifact',
    '.Trash-*': 'Default ignore: Linux/Gnome/KDE artifact',
}
ignores_IDEs = {
'*.el': 'Default ignore: EMACS Elisp artifact',
'*.swp': 'Default ignore: VIM artifact',
'.project': 'Default ignore: Eclipse IDE artifact',
'.pydevproject': 'Default ignore: Eclipse IDE artifact',
'.settings': 'Default ignore: Eclipse IDE artifact',
'.eclipse': 'Default ignore: Eclipse IDE artifact',
'.loadpath': 'Default ignore: Eclipse IDE artifact',
'*.launch': 'Default ignore: Eclipse IDE artifact',
'.cproject': 'Default ignore: Eclipse IDE artifact',
'.cdtproject': 'Default ignore: Eclipse IDE artifact',
'.classpath': 'Default ignore: Eclipse IDE artifact',
'.buildpath': 'Default ignore: Eclipse IDE artifact',
'.texlipse': 'Default ignore: Eclipse IDE artifact',
'*.iml': 'Default ignore: JetBrains IDE artifact',
'*.ipr': 'Default ignore: JetBrains IDE artifact',
'*.iws': 'Default ignore: JetBrains IDE artifact',
'.idea/': 'Default ignore: JetBrains IDE artifact',
'.idea_modules/': 'Default ignore: JetBrains IDE artifact',
'*.kdev4': 'Default ignore: Kdevelop artifact',
'.kdev4/': 'Default ignore: Kdevelop artifact',
'*.nib': 'Default ignore: Apple Xcode artifact',
'*.plst': 'Default ignore: Apple Xcode plist artifact',
'*.pbxuser': 'Default ignore: Apple Xcode artifact',
'*.pbxproj': 'Default ignore: Apple Xcode artifact',
'xcuserdata': 'Default ignore: Apple Xcode artifact',
'*.xcuserstate': 'Default ignore: Apple Xcode artifact',
'*.csproj': 'Default ignore: Microsoft VS project artifact',
'*.unityproj': 'Default ignore: Microsoft VS project artifact',
'*.sln': 'Default ignore: Microsoft VS project artifact',
'*.sluo': 'Default ignore: Microsoft VS project artifact',
'*.suo': 'Default ignore: Microsoft VS project artifact',
'*.user': 'Default ignore: Microsoft VS project artifact',
'*.sln.docstates': 'Default ignore: Microsoft VS project artifact',
'*.dsw': 'Default ignore: Microsoft VS project artifact',
'.editorconfig': 'Default ignore: Editor config artifact',
' Leiningen.gitignore': 'Default ignore: Leiningen artifact',
'.architect': 'Default ignore: ExtJS artifact',
'*.tmproj': 'Default ignore: Textmate artifact',
'*.tmproject': 'Default ignore: Textmate artifact',
}
ignores_web = {
'.htaccess': 'Default ignore: .htaccess file',
'robots.txt': 'Default ignore: robots file',
    'humans.txt': 'Default ignore: humans file',
'web.config': 'Default ignore: web config',
'.htaccess.sample': 'Default ignore: .htaccess file',
}
ignores_Maven = {
'pom.xml.tag': 'Default ignore: Maven artifact',
'pom.xml.releaseBackup': 'Default ignore: Maven artifact',
'pom.xml.versionsBackup': 'Default ignore: Maven artifact',
'pom.xml.next': 'Default ignore: Maven artifact',
'release.properties': 'Default ignore: Maven artifact',
'dependency-reduced-pom.xml': 'Default ignore: Maven artifact',
'buildNumber.properties': 'Default ignore: Maven artifact',
}
ignores_VCS = {
'.bzr': 'Default ignore: Bazaar artifact',
'.bzrignore' : 'Default ignore: Bazaar config artifact',
'.git': 'Default ignore: Git artifact',
'.gitignore' : 'Default ignore: Git config artifact',
'.gitattributes': 'Default ignore: Git config artifact',
'.hg': 'Default ignore: Mercurial artifact',
'.hgignore' : 'Default ignore: Mercurial config artifact',
'.repo': 'Default ignore: Multiple Git repository artifact',
'.svn': 'Default ignore: SVN artifact',
'.svnignore': 'Default ignore: SVN config artifact',
    '.tfignore': 'Default ignore: Microsoft TFS config artifact',
'vssver.scc': 'Default ignore: Visual Source Safe artifact',
'CVS': 'Default ignore: CVS artifact',
'.cvsignore': 'Default ignore: CVS config artifact',
'*/RCS': 'Default ignore: CVS artifact',
'*/SCCS': 'Default ignore: CVS artifact',
'*/_MTN': 'Default ignore: Monotone artifact',
'*/_darcs': 'Default ignore: Darcs artifact',
'*/{arch}': 'Default ignore: GNU Arch artifact',
}
ignores_Medias = {
'pspbrwse.jbf': 'Default ignore: Paintshop browse file',
'Thumbs.db': 'Default ignore: Image thumbnails DB',
'Thumbs.db:encryptable': 'Default ignore: Image thumbnails DB',
'thumbs/': 'Default ignore: Image thumbnails DB',
'_thumbs/': 'Default ignore: Image thumbnails DB',
}
ignores_Build_scripts = {
'Makefile.in': 'Default ignore: automake artifact',
'Makefile.am': 'Default ignore: automake artifact',
'autom4te.cache': 'Default ignore: autoconf artifact',
'*.m4': 'Default ignore: autotools artifact',
'configure': 'Default ignore: Configure script',
'configure.bat': 'Default ignore: Configure script',
'configure.sh': 'Default ignore: Configure script',
'configure.ac': 'Default ignore: Configure script',
'config.guess': 'Default ignore: Configure script',
'config.sub': 'Default ignore: Configure script',
'compile': 'Default ignore: autoconf artifact',
'depcomp': 'Default ignore: autoconf artifact',
'ltmain.sh': 'Default ignore: libtool autoconf artifact',
'install-sh': 'Default ignore: autoconf artifact',
'missing': 'Default ignore: autoconf artifact',
'mkinstalldirs': 'Default ignore: autoconf artifact',
'stamp-h1': 'Default ignore: autoconf artifact',
'm4/': 'Default ignore: autoconf artifact',
'autogen.sh': 'Default ignore: autotools artifact',
'CMakeCache.txt': 'Default ignore: CMake artifact',
'cmake_install.cmake': 'Default ignore: CMake artifact',
'install_manifest.txt': 'Default ignore: CMake artifact',
}
ignores_CI = {
'.travis.yml' : 'Default ignore: Travis config',
'.coveragerc' : 'Default ignore: Coverall config',
}
ignores_Python = {
'pip-selfcheck.json': 'Default ignore: Pip workfile',
'pytest.ini': 'Default ignore: Python pytest config',
'tox.ini': 'Default ignore: Python tox config',
'__pycache__/': 'Default ignore: Python bytecode cache',
'.installed.cfg': 'Default ignore: Python Buildout artifact',
'pip-log.txt': 'Default ignore: Python pip artifact',
'pip-delete-this-directory.txt': 'Default ignore: Python pip artifact',
'pyvenv.cfg': 'Default ignore: Python virtualenv artifact',
}
ignores_I18N = {
'*.mo': 'Default ignore: Translation file',
'*.pot': 'Default ignore: Translation file',
'.localized': 'Default ignore: localized file',
}
ignores_coverage_and_tests = {
'*.gcno': 'Default ignore: GCC coverage',
'*.gcda': 'Default ignore: GCC coverage',
'*.gcov': 'Default ignore: GCC coverage',
'.last_cover_stats': 'Default ignore: Perl coverage',
'htmlcov/': 'Default ignore: Python coverage',
'.tox/': 'Default ignore: Tox temp dir',
'.coverage': 'Default ignore: Python coverage',
'.coverage.*': 'Default ignore: Python coverage',
'nosetests.xml': 'Default ignore: Python nose tests',
'coverage.xml': 'Default ignore: Python coverage',
'/spec/reports/': 'Default ignore: Ruby Rails test report',
'/rdoc/': 'Default ignore: Ruby doc',
'.rvmrc': 'Default ignore: Ruby RVM',
'.sass-cache': 'Default ignore: Sass cache',
'*.css.map': 'Default ignore: Sass map',
'phpunit.xml': 'Default ignore: phpunit',
'*.VisualState.xml': 'Default ignore: Nunit',
'TestResult.xml': 'Default ignore: Nunit',
}
ignores_Misc = {
'pax_global_header': 'Default ignore: Pax header file',
'C++.gitignore': 'Default ignore: C++.gitignore',
'.gwt/': 'Default ignore: GWT compilation logs',
'.gwt-tmp/': 'Default ignore: GWT temp files',
'gradle-app.setting': 'Default ignore: Gradle app settings',
'hs_err_pid*': 'Default ignore: Java VM crash logs',
'.grunt': 'Default ignore: Grunt intermediate storage',
'.history': 'Default ignore: History file',
'.~lock.*#': 'Default ignore: LibreOffice locks',
'/.ssh': 'Default ignore: SSH configuration',
}
default_ignores = {}
default_ignores.update(chain(*[d.items() for d in [
ignores_MacOSX,
ignores_Windows,
ignores_Linux,
ignores_IDEs,
ignores_web,
ignores_Maven,
ignores_VCS,
ignores_Medias,
ignores_Build_scripts,
ignores_CI,
ignores_Python,
ignores_I18N,
ignores_coverage_and_tests,
ignores_Misc,
]]))
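# Hedged sketch (not part of the scancode API; the helper name below is made up):
# the mappings above go from a glob-style pattern to a human-readable reason, so a
# lookup for a single file name can be done with fnmatch against the keys.
from fnmatch import fnmatch

def find_default_ignore_reason(filename, ignores=None):
    """Return the default-ignore reason matching `filename`, or None. Illustrative only."""
    ignores = default_ignores if ignores is None else ignores
    for pattern, reason in ignores.items():
        # patterns ending in '/' mark directories; match them against the bare name too
        if fnmatch(filename, pattern) or fnmatch(filename, pattern.rstrip('/')):
            return reason
    return None

# e.g. find_default_ignore_reason('Thumbs.db') -> 'Default ignore: Image thumbnails DB'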
|
yashdsaraf/scancode-toolkit
|
src/commoncode/ignore.py
|
Python
|
apache-2.0
| 12,598
|
[
"VisIt"
] |
27a489dd40da44a15d65a30a8d2673403e65a1b001edcb64c31f3da7db4ae7d0
|
import os
import numpy as np
import mmap
import dynaphopy.dynamics as dyn
import warnings
# VASP OUTCAR file parser
def read_vasp_trajectory(file_name, structure=None, time_step=None,
limit_number_steps=10000000, # Maximum number of steps read (safety limit)
last_steps=None,
initial_cut=1,
end_cut=None,
memmap=False,
template=None):
# warning
warnings.warn('This parser will be deprecated; use the XDATCAR parser instead', DeprecationWarning)
# Check file exists
if not os.path.isfile(file_name):
print('Trajectory file does not exist!')
exit()
# Check time step
if time_step is not None:
print('Warning! Time step flag has no effect reading from VASP OUTCAR file (time step will be read from file)')
if memmap:
print('Warning! Memory mapping is not implemented in VASP OUTCAR parser')
# Starting reading
print("Reading VASP trajectory")
print("This could take long, please wait..")
# Dimensionality of VASP calculation
number_of_dimensions = 3
with open(file_name, "r+") as f:
#Memory-map the file
file_map = mmap.mmap(f.fileno(), 0)
position_number=file_map.find(b'NIONS =')
file_map.seek(position_number+7)
number_of_atoms = int(file_map.readline())
#Read time step
position_number=file_map.find(b'POTIM =')
file_map.seek(position_number+8)
time_step = float(file_map.readline().split()[0])* 1E-3 # in picoseconds
#Reading super cell
position_number = file_map.find(b'direct lattice vectors')
file_map.seek(position_number)
file_map.readline()
super_cell = []
for i in range (number_of_dimensions):
super_cell.append(file_map.readline().split()[0:number_of_dimensions])
super_cell = np.array(super_cell, dtype='double')
file_map.seek(position_number)
file_map.readline()
# Check if number of atoms is multiple of cell atoms
if structure is not None:
if number_of_atoms % structure.get_number_of_cell_atoms() != 0:
print('Warning: Number of atoms not matching, check VASP output files')
# structure.set_number_of_atoms(number_of_atoms)
# Read coordinates and energy
trajectory = []
energy = []
counter = 0
while True:
counter +=1
#Initial cut control
if initial_cut > counter:
continue
position_number=file_map.find(b'POSITION')
if position_number < 0 : break
file_map.seek(position_number)
file_map.readline()
file_map.readline()
read_coordinates = []
for i in range (number_of_atoms):
read_coordinates.append(file_map.readline().split()[0:number_of_dimensions])
read_coordinates = np.array(read_coordinates, dtype=float) # in angstrom
if template is not None:
indexing = np.argsort(template)
read_coordinates = read_coordinates[indexing, :]
position_number=file_map.find(b'energy(')
file_map.seek(position_number)
read_energy = file_map.readline().split()[2]
trajectory.append(read_coordinates.flatten()) #in angstrom
energy.append(np.array(read_energy, dtype=float))
#Safety check to limit the maximum number of steps read into memory
if limit_number_steps+initial_cut < counter:
print("Warning! maximum number of steps reached! No more steps will be read")
break
if end_cut is not None and end_cut <= counter:
break
file_map.close()
trajectory = np.array([[[trajectory[i][j*number_of_dimensions+k]
for k in range(number_of_dimensions)]
for j in range(number_of_atoms)]
for i in range (len(trajectory))])
if last_steps is not None:
trajectory = trajectory[-last_steps:,:,:]
energy = energy[-last_steps:]
print('Total number of steps read: {0}'.format(trajectory.shape[0]))
time = np.array([i*time_step for i in range(trajectory.shape[0])], dtype=float)
print('Trajectory file read')
return dyn.Dynamics(structure=structure,
trajectory=np.array(trajectory, dtype=complex),
energy=np.array(energy),
time=time,
supercell=super_cell,
memmap=memmap)
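# Hedged usage sketch (the file name and structure object are hypothetical, not
# part of this module):
#
#     dynamics = read_vasp_trajectory('OUTCAR', structure=my_structure,
#                                     initial_cut=1, end_cut=5000)
#
# returns a dyn.Dynamics object carrying the trajectory, energies, time axis and
# supercell read from the OUTCAR header.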
# LAMMPS custom dump file parser
def read_lammps_trajectory(file_name, structure=None, time_step=None,
limit_number_steps=10000000,
last_steps=None,
initial_cut=1,
end_cut=None,
memmap=False,
template=None):
# Time in picoseconds
# Coordinates in Angstroms
# Read environment variables
try:
temp_directory = os.environ["DYNAPHOPY_TEMPDIR"]
if os.path.isdir(temp_directory):
print('Set temporary directory: {0}'.format(temp_directory))
temp_directory += '/'
else:
temp_directory = ''
except KeyError:
temp_directory = ''
number_of_atoms = None
bounds = None
#Check file exists
if not os.path.isfile(file_name):
print('Trajectory file does not exist!')
exit()
# Check time step
if time_step is None:
print('Warning! LAMMPS trajectory file does not contain time step information')
print('Using default: 0.001 ps')
time_step = 0.001
# Starting reading
print("Reading LAMMPS trajectory")
print("This could take long, please wait..")
# Dimensionality of LAMMPS calculation
if structure is None:
number_of_dimensions = 3
else:
number_of_dimensions = structure.get_number_of_dimensions()
time = []
data = []
counter = 0
lammps_labels = False
with open(file_name, "r+") as f:
file_map = mmap.mmap(f.fileno(), 0)
while True:
counter += 1
#Read time steps
position_number=file_map.find(b'TIMESTEP')
if position_number < 0: break
file_map.seek(position_number)
file_map.readline()
time.append(float(file_map.readline()))
if number_of_atoms is None:
#Read number of atoms
file_map = mmap.mmap(f.fileno(), 0)
position_number=file_map.find(b'NUMBER OF ATOMS')
file_map.seek(position_number)
file_map.readline()
number_of_atoms = int(file_map.readline())
# Check if number of atoms is multiple of cell atoms
if structure is not None:
if number_of_atoms % structure.get_number_of_cell_atoms() != 0:
print('Warning: Number of atoms not matching, check LAMMPS output file')
if bounds is None:
#Read cell
file_map = mmap.mmap(f.fileno(), 0)
position_number=file_map.find(b'BOX BOUNDS')
file_map.seek(position_number)
file_map.readline()
bounds = []
for i in range(3):
bounds.append(file_map.readline().split())
bounds = np.array(bounds, dtype=float)
if bounds.shape[1] == 2:
bounds = np.append(bounds, np.array([0, 0, 0])[None].T ,axis=1)
xy = bounds[0, 2]
xz = bounds[1, 2]
yz = bounds[2, 2]
xlo = bounds[0, 0] - np.min([0.0, xy, xz, xy+xz])
xhi = bounds[0, 1] - np.max([0.0, xy, xz, xy+xz])
ylo = bounds[1, 0] - np.min([0.0, yz])
yhi = bounds[1, 1] - np.max([0.0, yz])
zlo = bounds[2, 0]
zhi = bounds[2, 1]
supercell = np.array([[xhi-xlo, xy, xz],
[0, yhi-ylo, yz],
[0, 0, zhi-zlo]]).T
#for 2D
supercell = supercell[:number_of_dimensions, :number_of_dimensions]
# Testing cell
lx = xhi-xlo
ly = yhi-ylo
lz = zhi-zlo
a = lx
b = np.sqrt(pow(ly,2) + pow(xy,2))
c = np.sqrt(pow(lz,2) + pow(xz,2) + pow(yz,2))
alpha = np.arccos((xy*xz + ly*yz)/(b*c))
beta = np.arccos(xz/c)
gamma = np.arccos(xy/b)
# End testing cell
# rotate lammps supercell to match unitcell orientation
def unit_matrix(matrix):
return np.array([np.array(row)/np.linalg.norm(row) for row in matrix])
unit_structure = unit_matrix(structure.get_cell())
unit_supercell_lammps = unit_matrix(supercell)
transformation_mat = np.dot(np.linalg.inv(unit_structure), unit_supercell_lammps).T
supercell = np.dot(supercell, transformation_mat)
if memmap:
if end_cut:
data = np.memmap(temp_directory+'trajectory.{0}'.format(os.getpid()), dtype='complex', mode='w+', shape=(end_cut - initial_cut+1, number_of_atoms, number_of_dimensions))
else:
print('Memory mapping requires a reading range to be defined (use read_from/read_to option)')
exit()
position_number = file_map.find(b'ITEM: ATOMS')
file_map.seek(position_number)
lammps_labels=file_map.readline()
#Initial cut control
if initial_cut > counter:
time = []
continue
#Reading coordinates
read_coordinates = []
for i in range (number_of_atoms):
read_coordinates.append(file_map.readline().split()[0:number_of_dimensions])
read_coordinates = np.array(read_coordinates, dtype=float)
if template is not None:
indexing = np.argsort(template)
read_coordinates = read_coordinates[indexing, :]
try:
read_coordinates = np.dot(read_coordinates, transformation_mat)
if memmap:
data[counter-initial_cut, :, :] = read_coordinates #in angstroms
else:
data.append(read_coordinates) #in angstroms
except ValueError:
print("Error reading step {0}".format(counter))
break
# print(read_coordinates)
#Safety check to limit the maximum number of steps read into memory
if limit_number_steps+initial_cut < counter:
print("Warning! maximum number of steps reached! No more steps will be read")
break
if end_cut is not None and end_cut <= counter:
break
file_map.close()
time = np.array(time) * time_step
if not memmap:
data = np.array(data, dtype=complex)
if last_steps is not None:
data = data[-last_steps:, :, :]
time = time[-last_steps:]
# Check position/velocity dump
if b'vx vy' in lammps_labels:
return dyn.Dynamics(structure=structure,
velocity=data,
time=time,
supercell=supercell,
memmap=memmap)
if b'x y' in lammps_labels:
return dyn.Dynamics(structure=structure,
trajectory=data,
time=time,
supercell=supercell,
memmap=memmap)
print('LAMMPS parsing error. Data not recognized: {}'.format(lammps_labels))
exit()
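# Hedged usage sketch (paths and values are illustrative): unlike the OUTCAR
# parser, a LAMMPS dump carries no time step, so it should be passed explicitly
# in picoseconds:
#
#     dynamics = read_lammps_trajectory('dump.lammpstrj', structure=my_structure,
#                                       time_step=0.001, initial_cut=1, end_cut=2000)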
def read_VASP_XDATCAR(file_name, structure=None, time_step=None,
limit_number_steps=10000000,
last_steps=None,
initial_cut=1,
end_cut=None,
memmap=False,
template=None):
# Time in picoseconds
# Coordinates in Angstroms
#Read environment variables
try:
temp_directory = os.environ["DYNAPHOPY_TEMPDIR"]
if os.path.isdir(temp_directory):
print('Set temporary directory: {0}'.format(temp_directory))
temp_directory += '/'
else:
temp_directory = ''
except KeyError:
temp_directory = ''
number_of_atoms = None
bounds = None
#Check file exists
if not os.path.isfile(file_name):
print('Trajectory file does not exist!')
exit()
#Check time step
if time_step is None:
print('Warning! XDATCAR file does not contain time step information')
print('Using default: 0.001 ps')
time_step = 0.001
#Starting reading
print("Reading XDATCAR file")
print("This could take long, please wait..")
#Dimensionality of VASP calculation
number_of_dimensions = 3
time = []
data = []
counter = 0
with open(file_name, "r+b") as f:
file_map = mmap.mmap(f.fileno(), 0)
#Read cell
for i in range(2): file_map.readline()
a = file_map.readline().split()
b = file_map.readline().split()
c = file_map.readline().split()
super_cell = np.array([a, b, c], dtype='double')
for i in range(1): file_map.readline()
number_of_atoms = np.array(file_map.readline().split(), dtype=int).sum()
while True:
counter += 1
#Read time steps
position_number=file_map.find(b'Direct configuration')
if position_number < 0: break
file_map.seek(position_number)
time.append(float(file_map.readline().split(b'=')[1]))
if memmap:
if end_cut:
data = np.memmap(temp_directory+'trajectory.{0}'.format(os.getpid()), dtype='complex', mode='w+', shape=(end_cut - initial_cut+1, number_of_atoms, number_of_dimensions))
else:
print('Memory mapping requires a reading range to be defined (use read_from/read_to option)')
exit()
#Initial cut control
if initial_cut > counter:
continue
#Reading coordinates
read_coordinates = []
for i in range (number_of_atoms):
read_coordinates.append(file_map.readline().split()[0:number_of_dimensions])
read_coordinates = np.array(read_coordinates, dtype=float) # in angstroms
if template is not None:
indexing = np.argsort(template)
read_coordinates = read_coordinates[indexing, :]
try:
if memmap:
data[counter-initial_cut, :, :] = read_coordinates #in angstroms
else:
data.append(read_coordinates) #in angstroms
except ValueError:
print("Error reading step {0}".format(counter))
break
# print(read_coordinates)
#Safety check to limit the maximum number of steps read into memory
if limit_number_steps+initial_cut < counter:
print("Warning! maximum number of steps reached! No more steps will be read")
break
if end_cut is not None and end_cut <= counter:
break
file_map.close()
time = np.array(time) * time_step
if not memmap:
data = np.array(data, dtype=complex)
if last_steps is not None:
data = data[-last_steps:, :, :]
time = time[-last_steps:]
return dyn.Dynamics(structure=structure,
scaled_trajectory=data,
time=time,
supercell=super_cell,
memmap=memmap)
if __name__ == "__main__":
read_VASP_XDATCAR('/home/abel/VASP/MgO/MgO-FINAL/MgO_0.5_1600/No1/XDATCAR')
|
abelcarreras/DynaPhoPy
|
dynaphopy/interface/iofile/trajectory_parsers.py
|
Python
|
mit
| 16,636
|
[
"LAMMPS",
"VASP"
] |
48965cabac27a57ff80c02f4165fa9184ce529fac9bd0b3f2608ca2634ec4fb1
|
# coding: utf-8
#
# Copyright 2012 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.
import nltk
from nltk.collocations import TrigramCollocationFinder
from collections import defaultdict
from pypln.backend.celery_task import PyPLNTask
class Trigrams(PyPLNTask):
"""Create a NLTK trigram finder and returns a table in JSON format"""
def process(self, document):
trigram_measures = nltk.collocations.TrigramAssocMeasures()
metrics = ['chi_sq',
'jaccard',
'likelihood_ratio',
'mi_like',
'pmi',
'poisson_stirling',
'raw_freq',
'student_t']
trigram_finder = TrigramCollocationFinder.from_words(document['tokens'])
tr = defaultdict(lambda: [])
for m in metrics:
for res in trigram_finder.score_ngrams(getattr(trigram_measures,m)):
# We cannot store the trigram as a tuple (mongo keys need to be
# strings). We decided to join tokens using spaces since a
# space will never be in a token.
key = u' '.join(res[0])
# Mongo cannot have `.` or `$` in key names. Unfortunately
# this means we need to replace them with placeholders.
key = key.replace(u'$', u'\dollarsign')
key = key.replace(u'.', u'\dot')
tr[key].append(res[1])
return {'trigram_rank': tr, 'metrics':metrics}
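# Hedged illustration (not part of the PyPLN codebase; the sample tokens below are
# made up): how the loop above turns one scored trigram into a Mongo-safe key.
if __name__ == '__main__':
    tokens = (u'U.S.', u'$1', u'billion')
    key = u' '.join(tokens).replace(u'$', u'\dollarsign').replace(u'.', u'\dot')
    print(key)  # U\dotS\dot \dollarsign1 billion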
|
NAMD/pypln.backend
|
pypln/backend/workers/trigrams.py
|
Python
|
gpl-3.0
| 2,181
|
[
"NAMD"
] |
4064ae0905bebf35c68d57851163d2f62479dc21caa7cb36c4ba8629351668e2
|
"""Rewrite assertion AST to produce nice error messages"""
from __future__ import absolute_import, division, print_function
import ast
import _ast
import errno
import itertools
import imp
import marshal
import os
import re
import struct
import sys
import types
import py
from _pytest.assertion import util
# pytest caches rewritten pycs in __pycache__.
if hasattr(imp, "get_tag"):
PYTEST_TAG = imp.get_tag() + "-PYTEST"
else:
if hasattr(sys, "pypy_version_info"):
impl = "pypy"
elif sys.platform == "java":
impl = "jython"
else:
impl = "cpython"
ver = sys.version_info
PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
del ver, impl
PYC_EXT = ".py" + (__debug__ and "c" or "o")
PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2)
ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
if sys.version_info >= (3,5):
ast_Call = ast.Call
else:
ast_Call = lambda a,b,c: ast.Call(a, b, c, None, None)
class AssertionRewritingHook(object):
"""PEP302 Import hook which rewrites asserts."""
def __init__(self, config):
self.config = config
self.fnpats = config.getini("python_files")
self.session = None
self.modules = {}
self._rewritten_names = set()
self._register_with_pkg_resources()
self._must_rewrite = set()
def set_session(self, session):
self.session = session
def find_module(self, name, path=None):
state = self.config._assertstate
state.trace("find_module called for: %s" % name)
names = name.rsplit(".", 1)
lastname = names[-1]
pth = None
if path is not None:
# Starting with Python 3.3, path is a _NamespacePath(), which
# causes problems if not converted to list.
path = list(path)
if len(path) == 1:
pth = path[0]
if pth is None:
try:
fd, fn, desc = imp.find_module(lastname, path)
except ImportError:
return None
if fd is not None:
fd.close()
tp = desc[2]
if tp == imp.PY_COMPILED:
if hasattr(imp, "source_from_cache"):
try:
fn = imp.source_from_cache(fn)
except ValueError:
# Python 3 doesn't like orphaned but still-importable
# .pyc files.
fn = fn[:-1]
else:
fn = fn[:-1]
elif tp != imp.PY_SOURCE:
# Don't know what this is.
return None
else:
fn = os.path.join(pth, name.rpartition(".")[2] + ".py")
fn_pypath = py.path.local(fn)
if not self._should_rewrite(name, fn_pypath, state):
return None
self._rewritten_names.add(name)
# The requested module looks like a test file, so rewrite it. This is
# the most magical part of the process: load the source, rewrite the
# asserts, and load the rewritten source. We also cache the rewritten
# module code in a special pyc. We must be aware of the possibility of
# concurrent pytest processes rewriting and loading pycs. To avoid
# tricky race conditions, we maintain the following invariant: The
# cached pyc is always a complete, valid pyc. Operations on it must be
# atomic. POSIX's atomic rename comes in handy.
write = not sys.dont_write_bytecode
cache_dir = os.path.join(fn_pypath.dirname, "__pycache__")
if write:
try:
os.mkdir(cache_dir)
except OSError:
e = sys.exc_info()[1].errno
if e == errno.EEXIST:
# Either the __pycache__ directory already exists (the
# common case) or it's blocked by a non-dir node. In the
# latter case, we'll ignore it in _write_pyc.
pass
elif e in [errno.ENOENT, errno.ENOTDIR]:
# One of the path components was not a directory, likely
# because we're in a zip file.
write = False
elif e in [errno.EACCES, errno.EROFS, errno.EPERM]:
state.trace("read only directory: %r" % fn_pypath.dirname)
write = False
else:
raise
cache_name = fn_pypath.basename[:-3] + PYC_TAIL
pyc = os.path.join(cache_dir, cache_name)
# Notice that even if we're in a read-only directory, I'm going
# to check for a cached pyc. This may not be optimal...
co = _read_pyc(fn_pypath, pyc, state.trace)
if co is None:
state.trace("rewriting %r" % (fn,))
source_stat, co = _rewrite_test(self.config, fn_pypath)
if co is None:
# Probably a SyntaxError in the test.
return None
if write:
_make_rewritten_pyc(state, source_stat, pyc, co)
else:
state.trace("found cached rewritten pyc for %r" % (fn,))
self.modules[name] = co, pyc
return self
def _should_rewrite(self, name, fn_pypath, state):
# always rewrite conftest files
fn = str(fn_pypath)
if fn_pypath.basename == 'conftest.py':
state.trace("rewriting conftest file: %r" % (fn,))
return True
if self.session is not None:
if self.session.isinitpath(fn):
state.trace("matched test file (was specified on cmdline): %r" %
(fn,))
return True
# modules not passed explicitly on the command line are only
# rewritten if they match the naming convention for test files
for pat in self.fnpats:
if fn_pypath.fnmatch(pat):
state.trace("matched test file %r" % (fn,))
return True
for marked in self._must_rewrite:
if name.startswith(marked):
state.trace("matched marked file %r (from %r)" % (name, marked))
return True
return False
def mark_rewrite(self, *names):
"""Mark import names as needing to be re-written.
The named module or package as well as any nested modules will
be re-written on import.
"""
already_imported = set(names).intersection(set(sys.modules))
if already_imported:
for name in already_imported:
if name not in self._rewritten_names:
self._warn_already_imported(name)
self._must_rewrite.update(names)
def _warn_already_imported(self, name):
self.config.warn(
'P1',
'Module already imported so can not be re-written: %s' % name)
def load_module(self, name):
# If there is an existing module object named 'fullname' in
# sys.modules, the loader must use that existing module. (Otherwise,
# the reload() builtin will not work correctly.)
if name in sys.modules:
return sys.modules[name]
co, pyc = self.modules.pop(name)
# I wish I could just call imp.load_compiled here, but __file__ has to
# be set properly. In Python 3.2+, this all would be handled correctly
# by load_compiled.
mod = sys.modules[name] = imp.new_module(name)
try:
mod.__file__ = co.co_filename
# Normally, this attribute is 3.2+.
mod.__cached__ = pyc
mod.__loader__ = self
py.builtin.exec_(co, mod.__dict__)
except:
if name in sys.modules:
del sys.modules[name]
raise
return sys.modules[name]
def is_package(self, name):
try:
fd, fn, desc = imp.find_module(name)
except ImportError:
return False
if fd is not None:
fd.close()
tp = desc[2]
return tp == imp.PKG_DIRECTORY
@classmethod
def _register_with_pkg_resources(cls):
"""
Ensure package resources can be loaded from this loader. May be called
multiple times, as the operation is idempotent.
"""
try:
import pkg_resources
# access an attribute in case a deferred importer is present
pkg_resources.__name__
except ImportError:
return
# Since pytest tests are always located in the file system, the
# DefaultProvider is appropriate.
pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)
def get_data(self, pathname):
"""Optional PEP302 get_data API.
"""
with open(pathname, 'rb') as f:
return f.read()
def _write_pyc(state, co, source_stat, pyc):
# Technically, we don't have to have the same pyc format as
# (C)Python, since these "pycs" should never be seen by builtin
# import. However, there's little reason to deviate, and I hope
# sometime to be able to use imp.load_compiled to load them. (See
# the comment in load_module above.)
try:
fp = open(pyc, "wb")
except IOError:
err = sys.exc_info()[1].errno
state.trace("error writing pyc file at %s: errno=%s" %(pyc, err))
# we ignore any failure to write the cache file
# there are many reasons, permission-denied, __pycache__ being a
# file etc.
return False
try:
fp.write(imp.get_magic())
mtime = int(source_stat.mtime)
size = source_stat.size & 0xFFFFFFFF
fp.write(struct.pack("<ll", mtime, size))
marshal.dump(co, fp)
finally:
fp.close()
return True
RN = "\r\n".encode("utf-8")
N = "\n".encode("utf-8")
cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
BOM_UTF8 = '\xef\xbb\xbf'
def _rewrite_test(config, fn):
"""Try to read and rewrite *fn* and return the code object."""
state = config._assertstate
try:
stat = fn.stat()
source = fn.read("rb")
except EnvironmentError:
return None, None
if ASCII_IS_DEFAULT_ENCODING:
# ASCII is the default encoding in Python 2. Without a coding
# declaration, Python 2 will complain about any bytes in the file
# outside the ASCII range. Sadly, this behavior does not extend to
# compile() or ast.parse(), which prefer to interpret the bytes as
# latin-1. (At least they properly handle explicit coding cookies.) To
# preserve this error behavior, we could force ast.parse() to use ASCII
# as the encoding by inserting a coding cookie. Unfortunately, that
# messes up line numbers. Thus, we have to check ourselves if anything
# is outside the ASCII range in the case no encoding is explicitly
# declared. For more context, see issue #269. Yay for Python 3 which
# gets this right.
end1 = source.find("\n")
end2 = source.find("\n", end1 + 1)
if (not source.startswith(BOM_UTF8) and
cookie_re.match(source[0:end1]) is None and
cookie_re.match(source[end1 + 1:end2]) is None):
if hasattr(state, "_indecode"):
# encodings imported us again, so don't rewrite.
return None, None
state._indecode = True
try:
try:
source.decode("ascii")
except UnicodeDecodeError:
# Let it fail in real import.
return None, None
finally:
del state._indecode
# On Python versions which are not 2.7 and less than or equal to 3.1, the
# parser expects *nix newlines.
if REWRITE_NEWLINES:
source = source.replace(RN, N) + N
try:
tree = ast.parse(source)
except SyntaxError:
# Let this pop up again in the real import.
state.trace("failed to parse: %r" % (fn,))
return None, None
rewrite_asserts(tree, fn, config)
try:
co = compile(tree, fn.strpath, "exec", dont_inherit=True)
except SyntaxError:
# It's possible that this error is from some bug in the
# assertion rewriting, but I don't know of a fast way to tell.
state.trace("failed to compile: %r" % (fn,))
return None, None
return stat, co
def _make_rewritten_pyc(state, source_stat, pyc, co):
"""Try to dump rewritten code to *pyc*."""
if sys.platform.startswith("win"):
# Windows grants exclusive access to open files and doesn't have atomic
# rename, so just write into the final file.
_write_pyc(state, co, source_stat, pyc)
else:
# When not on windows, assume rename is atomic. Dump the code object
# into a file specific to this process and atomically replace it.
proc_pyc = pyc + "." + str(os.getpid())
if _write_pyc(state, co, source_stat, proc_pyc):
os.rename(proc_pyc, pyc)
def _read_pyc(source, pyc, trace=lambda x: None):
"""Possibly read a pytest pyc containing rewritten code.
Return rewritten code if successful or None if not.
"""
try:
fp = open(pyc, "rb")
except IOError:
return None
with fp:
try:
mtime = int(source.mtime())
size = source.size()
data = fp.read(12)
except EnvironmentError as e:
trace('_read_pyc(%s): EnvironmentError %s' % (source, e))
return None
# Check for invalid or out of date pyc file.
if (len(data) != 12 or data[:4] != imp.get_magic() or
struct.unpack("<ll", data[4:]) != (mtime, size)):
trace('_read_pyc(%s): invalid or out of date pyc' % source)
return None
try:
co = marshal.load(fp)
except Exception as e:
trace('_read_pyc(%s): marshal.load error %s' % (source, e))
return None
if not isinstance(co, types.CodeType):
trace('_read_pyc(%s): not a code object' % source)
return None
return co
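# Layout of the 12-byte header written by _write_pyc and validated above:
#   bytes 0-3   imp.get_magic()               - interpreter magic tag
#   bytes 4-7   mtime packed as "<l"          - source modification time
#   bytes 8-11  size & 0xFFFFFFFF, also "<l"  - source size truncated to 32 bits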
def rewrite_asserts(mod, module_path=None, config=None):
"""Rewrite the assert statements in mod."""
AssertionRewriter(module_path, config).run(mod)
def _saferepr(obj):
"""Get a safe repr of an object for assertion error messages.
The assertion formatting (util.format_explanation()) requires
newlines to be escaped since they are a special character for it.
Normally assertion.util.format_explanation() does this, but a custom
repr may itself contain one of the special escape sequences; in
particular '\n{' and '\n}' are likely to be present in JSON reprs.
"""
repr = py.io.saferepr(obj)
if py.builtin._istext(repr):
t = py.builtin.text
else:
t = py.builtin.bytes
return repr.replace(t("\n"), t("\\n"))
from _pytest.assertion.util import format_explanation as _format_explanation # noqa
def _format_assertmsg(obj):
"""Format the custom assertion message given.
For strings this simply replaces newlines with '\n~' so that
util.format_explanation() will preserve them instead of escaping
newlines. For other objects py.io.saferepr() is used first.
"""
# reprlib appears to have a bug which means that if a string
# contains a newline it gets escaped, however if an object has a
# .__repr__() which contains newlines it does not get escaped.
# However in either case we want to preserve the newline.
if py.builtin._istext(obj) or py.builtin._isbytes(obj):
s = obj
is_repr = False
else:
s = py.io.saferepr(obj)
is_repr = True
if py.builtin._istext(s):
t = py.builtin.text
else:
t = py.builtin.bytes
s = s.replace(t("\n"), t("\n~")).replace(t("%"), t("%%"))
if is_repr:
s = s.replace(t("\\n"), t("\n~"))
return s
def _should_repr_global_name(obj):
return not hasattr(obj, "__name__") and not py.builtin.callable(obj)
def _format_boolop(explanations, is_or):
explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")"
if py.builtin._istext(explanation):
t = py.builtin.text
else:
t = py.builtin.bytes
return explanation.replace(t('%'), t('%%'))
def _call_reprcompare(ops, results, expls, each_obj):
for i, res, expl in zip(range(len(ops)), results, expls):
try:
done = not res
except Exception:
done = True
if done:
break
if util._reprcompare is not None:
custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1])
if custom is not None:
return custom
return expl
unary_map = {
ast.Not: "not %s",
ast.Invert: "~%s",
ast.USub: "-%s",
ast.UAdd: "+%s"
}
binop_map = {
ast.BitOr: "|",
ast.BitXor: "^",
ast.BitAnd: "&",
ast.LShift: "<<",
ast.RShift: ">>",
ast.Add: "+",
ast.Sub: "-",
ast.Mult: "*",
ast.Div: "/",
ast.FloorDiv: "//",
ast.Mod: "%%", # escaped for string formatting
ast.Eq: "==",
ast.NotEq: "!=",
ast.Lt: "<",
ast.LtE: "<=",
ast.Gt: ">",
ast.GtE: ">=",
ast.Pow: "**",
ast.Is: "is",
ast.IsNot: "is not",
ast.In: "in",
ast.NotIn: "not in"
}
# Python 3.5+ compatibility
try:
binop_map[ast.MatMult] = "@"
except AttributeError:
pass
# Python 3.4+ compatibility
if hasattr(ast, "NameConstant"):
_NameConstant = ast.NameConstant
else:
def _NameConstant(c):
return ast.Name(str(c), ast.Load())
def set_location(node, lineno, col_offset):
"""Set node location information recursively."""
def _fix(node, lineno, col_offset):
if "lineno" in node._attributes:
node.lineno = lineno
if "col_offset" in node._attributes:
node.col_offset = col_offset
for child in ast.iter_child_nodes(node):
_fix(child, lineno, col_offset)
_fix(node, lineno, col_offset)
return node
class AssertionRewriter(ast.NodeVisitor):
"""Assertion rewriting implementation.
The main entrypoint is to call .run() with an ast.Module instance,
this will then find all the assert statements and re-write them to
provide intermediate values and a detailed assertion error. See
http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html
for an overview of how this works.
The entry point here is .run() which will iterate over all the
statements in an ast.Module and for each ast.Assert statement it
finds call .visit() with it. Then .visit_Assert() takes over and
is responsible for creating new ast statements to replace the
original assert statement: it re-writes the test of an assertion
to provide intermediate values and replace it with an if statement
which raises an assertion error with a detailed explanation in
case the expression is false.
For this .visit_Assert() uses the visitor pattern to visit all the
AST nodes of the ast.Assert.test field, each visit call returning
an AST node and the corresponding explanation string. During this
state is kept in several instance attributes:
:statements: All the AST statements which will replace the assert
statement.
:variables: This is populated by .variable() with each variable
used by the statements so that they can all be set to None at
the end of the statements.
:variable_counter: Counter to create new unique variables needed
by statements. Variables are created using .variable() and
have the form of "@py_assert0".
:on_failure: The AST statements which will be executed if the
assertion test fails. This is the code which will construct
the failure message and raises the AssertionError.
:explanation_specifiers: A dict filled by .explanation_param()
with %-formatting placeholders and their corresponding
expressions to use in the building of an assertion message.
This is used by .pop_format_context() to build a message.
:stack: A stack of the explanation_specifiers dicts maintained by
.push_format_context() and .pop_format_context() which allows
to build another %-formatted string while already building one.
This state is reset on every new assert statement visited and used
by the other visitors.
"""
def __init__(self, module_path, config):
super(AssertionRewriter, self).__init__()
self.module_path = module_path
self.config = config
def run(self, mod):
"""Find all assert statements in *mod* and rewrite them."""
if not mod.body:
# Nothing to do.
return
# Insert some special imports at the top of the module but after any
# docstrings and __future__ imports.
aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
ast.alias("_pytest.assertion.rewrite", "@pytest_ar")]
expect_docstring = True
pos = 0
lineno = 0
for item in mod.body:
if (expect_docstring and isinstance(item, ast.Expr) and
isinstance(item.value, ast.Str)):
doc = item.value.s
if "PYTEST_DONT_REWRITE" in doc:
# The module has disabled assertion rewriting.
return
lineno += len(doc) - 1
expect_docstring = False
elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
item.module != "__future__"):
lineno = item.lineno
break
pos += 1
imports = [ast.Import([alias], lineno=lineno, col_offset=0)
for alias in aliases]
mod.body[pos:pos] = imports
# Collect asserts.
nodes = [mod]
while nodes:
node = nodes.pop()
for name, field in ast.iter_fields(node):
if isinstance(field, list):
new = []
for i, child in enumerate(field):
if isinstance(child, ast.Assert):
# Transform assert.
new.extend(self.visit(child))
else:
new.append(child)
if isinstance(child, ast.AST):
nodes.append(child)
setattr(node, name, new)
elif (isinstance(field, ast.AST) and
# Don't recurse into expressions as they can't contain
# asserts.
not isinstance(field, ast.expr)):
nodes.append(field)
def variable(self):
"""Get a new variable."""
# Use a character invalid in python identifiers to avoid clashing.
name = "@py_assert" + str(next(self.variable_counter))
self.variables.append(name)
return name
def assign(self, expr):
"""Give *expr* a name."""
name = self.variable()
self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
return ast.Name(name, ast.Load())
def display(self, expr):
"""Call py.io.saferepr on the expression."""
return self.helper("saferepr", expr)
def helper(self, name, *args):
"""Call a helper in this module."""
py_name = ast.Name("@pytest_ar", ast.Load())
attr = ast.Attribute(py_name, "_" + name, ast.Load())
return ast_Call(attr, list(args), [])
def builtin(self, name):
"""Return the builtin called *name*."""
builtin_name = ast.Name("@py_builtins", ast.Load())
return ast.Attribute(builtin_name, name, ast.Load())
def explanation_param(self, expr):
"""Return a new named %-formatting placeholder for expr.
This creates a %-formatting placeholder for expr in the
current formatting context, e.g. ``%(py0)s``. The placeholder
and expr are placed in the current format context so that it
can be used on the next call to .pop_format_context().
"""
specifier = "py" + str(next(self.variable_counter))
self.explanation_specifiers[specifier] = expr
return "%(" + specifier + ")s"
def push_format_context(self):
"""Create a new formatting context.
The format context is used for when an explanation wants to
have a variable value formatted in the assertion message. In
this case the value required can be added using
.explanation_param(). Finally .pop_format_context() is used
to format a string of %-formatted values as added by
.explanation_param().
"""
self.explanation_specifiers = {}
self.stack.append(self.explanation_specifiers)
def pop_format_context(self, expl_expr):
"""Format the %-formatted string with current format context.
The expl_expr should be an ast.Str instance constructed from
the %-placeholders created by .explanation_param(). This will
add the required code to format said string to .on_failure and
return the ast.Name instance of the formatted string.
"""
current = self.stack.pop()
if self.stack:
self.explanation_specifiers = self.stack[-1]
keys = [ast.Str(key) for key in current.keys()]
format_dict = ast.Dict(keys, list(current.values()))
form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
name = "@py_format" + str(next(self.variable_counter))
self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
return ast.Name(name, ast.Load())
def generic_visit(self, node):
"""Handle expressions we don't have custom code for."""
assert isinstance(node, ast.expr)
res = self.assign(node)
return res, self.explanation_param(self.display(res))
def visit_Assert(self, assert_):
"""Return the AST statements to replace the ast.Assert instance.
This re-writes the test of an assertion to provide
intermediate values and replace it with an if statement which
raises an assertion error with a detailed explanation in case
the expression is false.
"""
if isinstance(assert_.test, ast.Tuple) and self.config is not None:
fslocation = (self.module_path, assert_.lineno)
self.config.warn('R1', 'assertion is always true, perhaps '
'remove parentheses?', fslocation=fslocation)
self.statements = []
self.variables = []
self.variable_counter = itertools.count()
self.stack = []
self.on_failure = []
self.push_format_context()
# Rewrite assert into a bunch of statements.
top_condition, explanation = self.visit(assert_.test)
# Create failure message.
body = self.on_failure
negation = ast.UnaryOp(ast.Not(), top_condition)
self.statements.append(ast.If(negation, body, []))
if assert_.msg:
assertmsg = self.helper('format_assertmsg', assert_.msg)
explanation = "\n>assert " + explanation
else:
assertmsg = ast.Str("")
explanation = "assert " + explanation
template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
msg = self.pop_format_context(template)
fmt = self.helper("format_explanation", msg)
err_name = ast.Name("AssertionError", ast.Load())
exc = ast_Call(err_name, [fmt], [])
if sys.version_info[0] >= 3:
raise_ = ast.Raise(exc, None)
else:
raise_ = ast.Raise(exc, None, None)
body.append(raise_)
# Clear temporary variables by setting them to None.
if self.variables:
variables = [ast.Name(name, ast.Store())
for name in self.variables]
clear = ast.Assign(variables, _NameConstant(None))
self.statements.append(clear)
# Fix line numbers.
for stmt in self.statements:
set_location(stmt, assert_.lineno, assert_.col_offset)
return self.statements
def visit_Name(self, name):
# Display the repr of the name if it's a local variable or
# _should_repr_global_name() thinks it's acceptable.
locs = ast_Call(self.builtin("locals"), [], [])
inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
dorepr = self.helper("should_repr_global_name", name)
test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
return name, self.explanation_param(expr)
def visit_BoolOp(self, boolop):
res_var = self.variable()
expl_list = self.assign(ast.List([], ast.Load()))
app = ast.Attribute(expl_list, "append", ast.Load())
is_or = int(isinstance(boolop.op, ast.Or))
body = save = self.statements
fail_save = self.on_failure
levels = len(boolop.values) - 1
self.push_format_context()
# Process each operand, short-circuting if needed.
for i, v in enumerate(boolop.values):
if i:
fail_inner = []
# cond is set in a prior loop iteration below
self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa
self.on_failure = fail_inner
self.push_format_context()
res, expl = self.visit(v)
body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
expl_format = self.pop_format_context(ast.Str(expl))
call = ast_Call(app, [expl_format], [])
self.on_failure.append(ast.Expr(call))
if i < levels:
cond = res
if is_or:
cond = ast.UnaryOp(ast.Not(), cond)
inner = []
self.statements.append(ast.If(cond, inner, []))
self.statements = body = inner
self.statements = save
self.on_failure = fail_save
expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
expl = self.pop_format_context(expl_template)
return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
def visit_UnaryOp(self, unary):
pattern = unary_map[unary.op.__class__]
operand_res, operand_expl = self.visit(unary.operand)
res = self.assign(ast.UnaryOp(unary.op, operand_res))
return res, pattern % (operand_expl,)
def visit_BinOp(self, binop):
symbol = binop_map[binop.op.__class__]
left_expr, left_expl = self.visit(binop.left)
right_expr, right_expl = self.visit(binop.right)
explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
return res, explanation
def visit_Call_35(self, call):
"""
visit `ast.Call` nodes on Python 3.5 and after
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
for arg in call.args:
res, expl = self.visit(arg)
arg_expls.append(expl)
new_args.append(res)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
if keyword.arg:
arg_expls.append(keyword.arg + "=" + expl)
else: ## **kwargs appear as keyword entries with an .arg of None
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl
def visit_Starred(self, starred):
# From Python 3.5, a Starred node can appear in a function call
res, expl = self.visit(starred.value)
return starred, '*' + expl
def visit_Call_legacy(self, call):
"""
visit `ast.Call` nodes on Python 3.4 and below
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
new_star = new_kwarg = None
for arg in call.args:
res, expl = self.visit(arg)
new_args.append(res)
arg_expls.append(expl)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
arg_expls.append(keyword.arg + "=" + expl)
if call.starargs:
new_star, expl = self.visit(call.starargs)
arg_expls.append("*" + expl)
if call.kwargs:
new_kwarg, expl = self.visit(call.kwargs)
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs,
new_star, new_kwarg)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl
# ast.Call signature changed on 3.5,
# conditionally change which methods is named
# visit_Call depending on Python version
if sys.version_info >= (3, 5):
visit_Call = visit_Call_35
else:
visit_Call = visit_Call_legacy
def visit_Attribute(self, attr):
if not isinstance(attr.ctx, ast.Load):
return self.generic_visit(attr)
value, value_expl = self.visit(attr.value)
res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
res_expl = self.explanation_param(self.display(res))
pat = "%s\n{%s = %s.%s\n}"
expl = pat % (res_expl, res_expl, value_expl, attr.attr)
return res, expl
def visit_Compare(self, comp):
self.push_format_context()
left_res, left_expl = self.visit(comp.left)
if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)):
left_expl = "({0})".format(left_expl)
res_variables = [self.variable() for i in range(len(comp.ops))]
load_names = [ast.Name(v, ast.Load()) for v in res_variables]
store_names = [ast.Name(v, ast.Store()) for v in res_variables]
it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
expls = []
syms = []
results = [left_res]
for i, op, next_operand in it:
next_res, next_expl = self.visit(next_operand)
if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)):
next_expl = "({0})".format(next_expl)
results.append(next_res)
sym = binop_map[op.__class__]
syms.append(ast.Str(sym))
expl = "%s %s %s" % (left_expl, sym, next_expl)
expls.append(ast.Str(expl))
res_expr = ast.Compare(left_res, [op], [next_res])
self.statements.append(ast.Assign([store_names[i]], res_expr))
left_res, left_expl = next_res, next_expl
# Use pytest.assertion.util._reprcompare if that's available.
expl_call = self.helper("call_reprcompare",
ast.Tuple(syms, ast.Load()),
ast.Tuple(load_names, ast.Load()),
ast.Tuple(expls, ast.Load()),
ast.Tuple(results, ast.Load()))
if len(comp.ops) > 1:
res = ast.BoolOp(ast.And(), load_names)
else:
res = load_names[0]
return res, self.explanation_param(self.pop_format_context(expl_call))
|
flub/pytest
|
_pytest/assertion/rewrite.py
|
Python
|
mit
| 36,227
|
[
"VisIt"
] |
3d884ea19b2bc8855298dc2183db9f11356227a7ba34e536471a01bd8e495777
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*******************************************************
**BerendsenThermostat** - Berendsen thermostat Object
*******************************************************
This is the Berendsen thermostat implementation according to the original paper [Berendsen84]_.
If the Berendsen thermostat is defined (as a property of the integrator), then at each run the system size
and the particle coordinates will be scaled by the scaling parameter :math:`\lambda` according to
the formula:
.. math:: \lambda = [1 + \Delta t/\tau_{T} (T_{0}/T - 1)]^{1/2}
where :math:`\Delta t` is the integration timestep, :math:`\tau_{T}` is the time parameter (coupling parameter),
:math:`T_{0}` is the external temperature and :math:`T` is the instantaneous temperature.
Example:
>>> berendsenT = espressopp.integrator.BerendsenThermostat(system)
>>> berendsenT.tau = 1.0
>>> berendsenT.temperature = 1.0
>>> integrator.addExtension(berendsenT)
Definition:
In order to define the Berendsen thermostat
>>> berendsenT = espressopp.integrator.BerendsenThermostat(system)
one should have the System_ defined.
.. _System: espressopp.System.html
Properties:
* *berendsenT.tau*
The property 'tau' defines the time parameter :math:`\tau_{T}`.
* *berendsenT.temperature*
The property 'temperature' defines the external temperature :math:`T_{0}`.
Setting the integration property:
>>> integrator.addExtension(berendsenT)
It will define Berendsen thermostat as a property of integrator.
One more example:
>>> berendsen_thermostat = espressopp.integrator.BerendsenThermostat(system)
>>> berendsen_thermostat.tau = 0.1
>>> berendsen_thermostat.temperature = 3.2
>>> integrator.addExtension(berendsen_thermostat)
Canceling the thermostat:
>>> # define thermostat with parameters
>>> berendsen = espressopp.integrator.BerendsenThermostat(system)
>>> berendsen.tau = 2.0
>>> berendsen.temperature = 5.0
>>> integrator.addExtension(berendsen)
>>> ...
>>> # some runs
>>> ...
>>> # disconnect Berendsen thermostat
>>> berendsen.disconnect()
Connecting the thermostat back after the disconnection
>>> berendsen.connect()
.. function:: espressopp.integrator.BerendsenThermostat(system)
:param system:
:type system:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.Extension import *
from _espressopp import integrator_BerendsenThermostat
class BerendsenThermostatLocal(ExtensionLocal, integrator_BerendsenThermostat):
def __init__(self, system):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or \
pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, integrator_BerendsenThermostat, system)
if pmi.isController:
class BerendsenThermostat(Extension):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.integrator.BerendsenThermostatLocal',
pmiproperty = [ 'tau', 'temperature' ]
)
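# Hedged numerical illustration of the scaling factor in the docstring above
# (the values below are made up, not taken from any ESPResSo++ run):
#
#     dt, tau_T, T0, T = 0.005, 1.0, 1.0, 1.1
#     lam = (1.0 + dt / tau_T * (T0 / T - 1.0)) ** 0.5   # ~0.99977, just below 1
#
# lam drops below one whenever the instantaneous temperature T exceeds the
# target temperature T0.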
|
capoe/espressopp.soap
|
src/integrator/BerendsenThermostat.py
|
Python
|
gpl-3.0
| 3,904
|
[
"ESPResSo"
] |
373911c11e751ed1d3182ac17149c119849b1120d922b7f6859f63ee947625c7
|
#!/usr/bin/env python3.2
'''
The goal here is to take any VCF file output by VCFannotator and summarize the SNPs
contained there by type.
INPUT
The input expected is from the VCFannotator documentation:
An example output line transposed to a column format would look like so (taken from
the sample data):
0 TY-2482_chromosome
1 5080
2 .
3 T
4 C
5 8101.55
6 PASS
7 AC=2;AF=1.00;AN=2;DP=212;Dels=0.00;FS=0.000;HRun=1;HaplotypeScore=0.0000;MQ=59.52;MQ0=0;QD=38.21;SB=-4130.96
8 GT:AD:DP:GQ:PL
9 1/1:0,212:212:99:8101,605,0
10 CDS,tmpTY2482_00008,tmpTY2482_00008T0,microcin H47 immunity protein mchI,trans_orient:+,loc_in_cds:46,codon_pos:1,codon:Tct-Cct,pep:S->P,Ser-16-Pro,(NSY)
The final field is bundled with the individual feature annotation data, comma-delimited.
This includes the feature type (eg. CDS, intron, UTR), gene and transcript identifiers,
name of the gene, and the transcribed orientation of the gene. If the SNP is localized
to a coding region, then the relative position within that CDS sequence is provided in
addition to the codon change. The types of coding mutations are provided as synonymous
(SYN), non-synonymous (NSY), read-thru (RTH), and nonsense (STP). SNPs that are localized
to intergenic regions are reported as such along with the identifiers of the neighboring
genes and distance to each.
This isn't directly followed by Brian's VCFannotator, but for my own notes these are the SNP types:
- Non-coding region
- Coding region
- Synonymous
- Nonsynonymous
- Missense (results in a different amino acid, but not one of the three below)
- Nonsense (results in a premature stop codon)
- Read-through (releases a stop codon)
- Initiating (translation-initiating fMet is changed)
'''
import argparse
import biocodeutils
import gzip
import matplotlib.pyplot as plt
import os
import re
def main():
parser = argparse.ArgumentParser( description='Summarize the SNPs in a VCFannotator output file by type')
## output file to be written
parser.add_argument('-v', '--vcf_file', type=str, required=True, help='Input VCFannotated file' )
args = parser.parse_args()
file = args.vcf_file
file_is_encoded = False
fh = None
if file.endswith(".gz"):
fh = gzip.GzipFile(file, "r")
file_is_encoded = True
else:
fh = open(file)
total_snp_c = 0
cds_snp_c = 0
intron_snp_c = 0
intergenic_snp_c = 0
## these are each subcategories of SNPs within a CDS
syn_c = 0
nonsyn_c = 0
readthru_c = 0
nonsense_c = 0
other_c = 0
for line in fh:
if file_is_encoded:
line = line.decode().rstrip()
else:
line = line.rstrip()
if line.startswith("#"):
continue
cols = line.split("\t")
## VCF annotated files should all have 11 columns (indices 0-10).
if len(cols) < 11:
raise Exception("ERROR: expected all non-comment lines to have 11 columns")
annot_col = cols[10]
snp_loc = None
snp_type = None
total_snp_c += 1
if annot_col.startswith("intergenic"):
intergenic_snp_c += 1
elif annot_col.startswith("intron"):
intron_snp_c += 1
elif annot_col.startswith("CDS"):
cds_snp_c += 1
cds_type = annot_col.split(",")[-1]
if cds_type == "(SYN)":
syn_c += 1
elif cds_type == "(NSY)":
nonsyn_c += 1
elif cds_type == "(RTH)":
readthru_c += 1
elif cds_type == "(STP)":
nonsense_c += 1
else:
other_c += 1
else:
raise Exception("ERROR: Unexpected SNP type at beginning of column: {0}".format(annot_col) )
print("Total SNPs: {}".format(total_snp_c) )
print("Intergenic: {}".format(intergenic_snp_c) )
print("Intronic : {}".format(intron_snp_c) )
print("Within CDS: {}".format(cds_snp_c) )
print("\tSynonymous : {}".format(syn_c) )
print("\tNon-synonymous: {}".format(nonsyn_c) )
print("\tRead-through : {}".format(readthru_c) )
print("\tNonsense : {}".format(nonsense_c) )
if __name__ == '__main__':
main()
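# Hedged usage note (the file name is hypothetical): the script accepts plain or
# gzip-compressed VCFannotator output, e.g.
#
#     ./summarize_vcfannotator.py -v annotated_snps.vcf.gz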
|
jonathancrabtree/biocode
|
sandbox/jorvis/summarize_vcfannotator.py
|
Python
|
gpl-3.0
| 4,385
|
[
"Brian"
] |
a7b9567d313298d594e472f4fd79fe742062e69b6174bcea065f701a37006e3b
|
"""
# Notes:
- This simulation seeks to emulate the COBAHH benchmark simulations of (Brette
et al. 2007) using the Brian2 simulator for speed benchmark comparison to
DynaSim. However, this simulation does NOT include synapses, for better
comparison to Figure 5 of (Goodman and Brette, 2008) - although it uses the
COBAHH model of (Brette et al. 2007), not CUBA.
- The time taken to simulate will be indicated in the stdout log file
'~/batchdirs/brian2_benchmark_COBAHH_nosyn_compiled_0032/pbsout/brian2_benchmark_COBAHH_nosyn_compiled_0032.out'
- Note that this code has been slightly modified from the original (Brette et
al. 2007) benchmarking code, available here on ModelDB:
https://senselab.med.yale.edu/modeldb/showModel.cshtml?model=83319
in order to work with version 2 of the Brian simulator (aka Brian2), and also
modified to change the model being benchmarked, etc.
# References:
- Brette R, Rudolph M, Carnevale T, Hines M, Beeman D, Bower JM, et al.
Simulation of networks of spiking neurons: A review of tools and strategies.
Journal of Computational Neuroscience 2007;23:349–98.
doi:10.1007/s10827-007-0038-6.
- Goodman D, Brette R. Brian: a simulator for spiking neural networks in Python.
Frontiers in Neuroinformatics 2008;2. doi:10.3389/neuro.11.005.2008.
"""
from brian2 import *
set_device('cpp_standalone')
prefs.codegen.cpp.extra_compile_args = ['-w', '-O3', '-ffast-math', '-march=native']
# Parameters
cells = 32
defaultclock.dt = 0.01*ms
area = 20000*umetre**2
Cm = (1*ufarad*cmetre**-2) * area
gl = (5e-5*siemens*cmetre**-2) * area
El = -60*mV
EK = -90*mV
ENa = 50*mV
g_na = (100*msiemens*cmetre**-2) * area
g_kd = (30*msiemens*cmetre**-2) * area
VT = -63*mV
# # Time constants
# taue = 5*ms
# taui = 10*ms
# # Reversal potentials
# Ee = 0*mV
# Ei = -80*mV
# we = 6*nS # excitatory synaptic weight
# wi = 67*nS # inhibitory synaptic weight
# The model
eqs = Equations('''
dv/dt = (gl*(El-v)-
g_na*(m*m*m)*h*(v-ENa)-
g_kd*(n*n*n*n)*(v-EK))/Cm : volt
dm/dt = alpha_m*(1-m)-beta_m*m : 1
dn/dt = alpha_n*(1-n)-beta_n*n : 1
dh/dt = alpha_h*(1-h)-beta_h*h : 1
alpha_m = 0.32*(mV**-1)*(13*mV-v+VT)/
(exp((13*mV-v+VT)/(4*mV))-1.)/ms : Hz
beta_m = 0.28*(mV**-1)*(v-VT-40*mV)/
(exp((v-VT-40*mV)/(5*mV))-1)/ms : Hz
alpha_h = 0.128*exp((17*mV-v+VT)/(18*mV))/ms : Hz
beta_h = 4./(1+exp((40*mV-v+VT)/(5*mV)))/ms : Hz
alpha_n = 0.032*(mV**-1)*(15*mV-v+VT)/
(exp((15*mV-v+VT)/(5*mV))-1.)/ms : Hz
beta_n = .5*exp((10*mV-v+VT)/(40*mV))/ms : Hz
''')
# dv/dt = (gl*(El-v)+ge*(Ee-v)+gi*(Ei-v)-
# dge/dt = -ge*(1./taue) : siemens
# dgi/dt = -gi*(1./taui) : siemens
P = NeuronGroup(cells, model=eqs, threshold='v>-20*mV', refractory=3*ms,
method='euler')
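# The threshold above is only used for spike detection; no reset is specified because the
# Hodgkin-Huxley dynamics repolarize the membrane on their own, and the refractory period
# merely prevents a single spike from being detected more than once.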
proportion=int(0.8*cells)
Pe = P[:proportion]
Pi = P[proportion:]
# Ce = Synapses(Pe, P, on_pre='ge+=we')
# Ci = Synapses(Pi, P, on_pre='gi+=wi')
# Ce.connect(p=0.98)
# Ci.connect(p=0.98)
# Initialization
P.v = 'El + (randn() * 5 - 5)*mV'
# P.ge = '(randn() * 1.5 + 4) * 10.*nS'
# P.gi = '(randn() * 12 + 20) * 10.*nS'
# Record a few traces
trace = StateMonitor(P, 'v', record=[1, 10, cells - 1])  # record indices must be < cells (32 here)
totaldata = StateMonitor(P, 'v', record=True)
run(0.5 * second, report='text')
# plot(trace.t/ms, trace[1].v/mV)
# plot(trace.t/ms, trace[10].v/mV)
# plot(trace.t/ms, trace[100].v/mV)
# xlabel('t (ms)')
# ylabel('v (mV)')
# show()
# print("Saving TC cell voltages!")
# numpy.savetxt("foo_totaldata.csv", totaldata.v/mV, delimiter=",")
|
asoplata/dynasim-benchmark-brette-2007
|
Brian2/brian2_benchmark_COBAHH_nosyn_compiled_0032.py
|
Python
|
gpl-3.0
| 3,482
|
[
"Brian"
] |
627cc15e91c947e6c4a5ecc657860a9d808ea83cedd790d27983154325058202
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import unittest
import mock
import subprocess
import mooseutils
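# These tests patch subprocess.call, so no external process is ever launched; each test
# only checks that mooseutils.run_executable assembles the expected command line.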
class Test(unittest.TestCase):
@mock.patch('subprocess.call')
def testRun(self, subproc):
mooseutils.run_executable('command', '-arg', '-arg2')
subproc.assert_called_with(['command', '-arg', '-arg2'], encoding='utf-8')
@mock.patch('subprocess.call')
def testRunMPI(self, subproc):
mooseutils.run_executable('command', '-arg', '-arg2', mpi=2)
subproc.assert_called_with(['mpiexec', '-n', '2', 'command', '-arg', '-arg2'], encoding='utf-8')
@mock.patch('subprocess.call')
    def testRunSuppressOutput(self, subproc):
mooseutils.run_executable('command', '-arg', '-arg2', suppress_output=True)
subproc.assert_called_with(['command', '-arg', '-arg2'], encoding='utf-8',
stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
if __name__ == '__main__':
unittest.main(verbosity=2, buffer=True)
|
harterj/moose
|
python/mooseutils/tests/test_run_executable.py
|
Python
|
lgpl-2.1
| 1,293
|
[
"MOOSE"
] |
98c5b648bce5aa9e102698c70dd520e5171390e61a9c315bcecfdd69ca9167fb
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The exponentially modified Gaussian distribution class."""
# Dependency imports
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import identity as identity_bijector
from tensorflow_probability.python.bijectors import softplus as softplus_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import exponential as exponential_lib
from tensorflow_probability.python.distributions import normal as normal_lib
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import special_math
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.math import generic as tfp_math
__all__ = [
'ExponentiallyModifiedGaussian',
]
class ExponentiallyModifiedGaussian(
distribution.AutoCompositeTensorDistribution):
"""Exponentially modified Gaussian distribution.
#### Mathematical details
The exponentially modified Gaussian distribution is the sum of a normal
distribution and an exponential distribution.
```none
X ~ Normal(loc, scale)
Y ~ Exponential(rate)
Z = X + Y
```
is equivalent to
```none
Z ~ ExponentiallyModifiedGaussian(loc, scale, rate)
```
#### Examples
```python
tfd = tfp.distributions
# Define a single scalar ExponentiallyModifiedGaussian distribution
dist = tfd.ExponentiallyModifiedGaussian(loc=0., scale=1., rate=3.)
  # Evaluate the pdf at 1, returning a scalar.
dist.prob(1.)
```
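  A batch of distributions can be constructed the same way; the following is an
  illustrative sketch (not part of the original docstring) that relies only on
  the usual broadcasting of `loc`, `scale`, and `rate`:

  ```python
  # Define a batch of two scalar ExponentiallyModifiedGaussian distributions.
  dist = tfd.ExponentiallyModifiedGaussian(loc=[0., 1.], scale=[1., 2.], rate=[3., 1.])
  # Draw 5 samples from each distribution; the result has shape [5, 2].
  dist.sample(5)
  ```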
"""
def __init__(self,
loc,
scale,
rate,
validate_args=False,
allow_nan_stats=True,
name='ExponentiallyModifiedGaussian'):
"""Construct an exponentially-modified Gaussian distribution.
    The Gaussian distribution has mean `loc` and stddev `scale`, and the
    Exponential distribution has rate parameter `rate`.
The parameters `loc`, `scale`, and `rate` must be shaped in a way that
supports broadcasting (e.g. `loc + scale + rate` is a valid operation).
Args:
loc: Floating-point `Tensor`; the means of the distribution(s).
scale: Floating-point `Tensor`; the stddevs of the distribution(s). Must
contain only positive values.
rate: Floating-point `Tensor`; the rate parameter for the exponential
distribution.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or more
of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `loc`, `scale`, and `rate` are not all the same `dtype`.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([loc, scale, rate], dtype_hint=tf.float32)
self._loc = tensor_util.convert_nonref_to_tensor(
loc, dtype=dtype, name='loc')
self._scale = tensor_util.convert_nonref_to_tensor(
scale, dtype=dtype, name='scale')
self._rate = tensor_util.convert_nonref_to_tensor(
rate, dtype=dtype, name='rate')
super(ExponentiallyModifiedGaussian, self).__init__(
dtype=dtype,
reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(('loc', 'scale', 'rate'),
([tf.convert_to_tensor(sample_shape, dtype=tf.int32)] * 3)))
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
return dict(
loc=parameter_properties.ParameterProperties(),
scale=parameter_properties.ParameterProperties(
default_constraining_bijector_fn=(
lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))),
rate=parameter_properties.ParameterProperties(
default_constraining_bijector_fn=(
lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))))
@property
def loc(self):
"""Distribution parameter for the mean of the normal distribution."""
return self._loc
@property
def scale(self):
"""Distribution parameter for standard deviation of the normal distribution."""
return self._scale
@property
def rate(self):
"""Distribution parameter for rate parameter of exponential distribution."""
return self._rate
def _event_shape_tensor(self):
return tf.constant([], dtype=tf.int32)
def _event_shape(self):
return tf.TensorShape([])
def _sample_n(self, n, seed=None):
normal_seed, exp_seed = samplers.split_seed(seed, salt='emg_sample')
# need to make sure component distributions are broadcast appropriately
# for correct generation of samples
loc = tf.convert_to_tensor(self.loc)
rate = tf.convert_to_tensor(self.rate)
scale = tf.convert_to_tensor(self.scale)
batch_shape = self._batch_shape_tensor(loc=loc, scale=scale, rate=rate)
loc_broadcast = tf.broadcast_to(loc, batch_shape)
rate_broadcast = tf.broadcast_to(rate, batch_shape)
normal_dist = normal_lib.Normal(loc=loc_broadcast, scale=scale)
exp_dist = exponential_lib.Exponential(rate_broadcast)
x = normal_dist.sample(n, normal_seed)
y = exp_dist.sample(n, exp_seed)
return x + y
def _log_prob(self, x):
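    # EMG log-density, as implemented below:
    #   log(rate) + (rate * scale)**2 / 2 - rate * (x - loc)
    #     + log Phi((x - loc) / scale - rate * scale),
    # where Phi is the standard normal CDF, evaluated via log_ndtr for numerical
    # stability in the tail.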
loc = tf.convert_to_tensor(self.loc)
rate = tf.convert_to_tensor(self.rate)
scale = tf.convert_to_tensor(self.scale)
two = dtype_util.as_numpy_dtype(x.dtype)(2.)
z = (x - loc) / scale
w = rate * scale
return (tf.math.log(rate) + w / two * (w - 2 * z) +
special_math.log_ndtr(z - w))
def _log_cdf(self, x):
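    # EMG log-CDF, computed in log space with log_sub_exp for stability:
    #   log[ Phi((x - loc) / scale)
    #        - exp(-rate * (x - loc) + (rate * scale)**2 / 2)
    #          * Phi((x - loc) / scale - rate * scale) ].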
rate = tf.convert_to_tensor(self.rate)
scale = tf.convert_to_tensor(self.scale)
x_centralized = x - self.loc
u = rate * x_centralized
v = rate * scale
vsquared = tf.square(v)
return tfp_math.log_sub_exp(
special_math.log_ndtr(x_centralized / scale),
-u + vsquared / 2. + special_math.log_ndtr((u - vsquared) / v))
def _mean(self):
return tf.broadcast_to(
self.loc + 1 / self.rate, self._batch_shape_tensor())
def _variance(self):
return tf.broadcast_to(
tf.square(self.scale) + 1 / tf.square(self.rate),
self._batch_shape_tensor())
def _parameter_control_dependencies(self, is_init):
assertions = []
if is_init:
try:
self._batch_shape()
except ValueError:
raise ValueError(
'Arguments `loc`, `scale`, and `rate` must have compatible shapes; '
'loc.shape={}, scale.shape={}, rate.shape={}.'.format(
self.loc.shape, self.scale.shape, self.rate.shape))
# We don't bother checking the shapes in the dynamic case because
# all member functions access both arguments anyway.
if not self.validate_args:
assert not assertions # Should never happen.
return []
if is_init != tensor_util.is_ref(self.scale):
assertions.append(assert_util.assert_positive(
self.scale, message='Argument `scale` must be positive.'))
if is_init != tensor_util.is_ref(self.rate):
assertions.append(assert_util.assert_positive(
self.rate, message='Argument `rate` must be positive.'))
return assertions
def _default_event_space_bijector(self):
return identity_bijector.Identity(validate_args=self.validate_args)
|
tensorflow/probability
|
tensorflow_probability/python/distributions/exponentially_modified_gaussian.py
|
Python
|
apache-2.0
| 8,896
|
[
"Gaussian"
] |
ca79173a9d0f3c2b1e7519c7b78c19ba949c72ab354ee156cc9bcd2fc21f47a4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 on 2017-03-22.
# 2017, SMART Health IT.
import io
import json
import os
import unittest
from . import questionnaireresponse
from .fhirdate import FHIRDate
class QuestionnaireResponseTests(unittest.TestCase):
def instantiate_from(self, filename):
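        # FHIR_UNITTEST_DATADIR is expected to point at the directory containing the
        # example resource JSON files; if unset, filenames are resolved relative to
        # the current working directory.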
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("QuestionnaireResponse", js["resourceType"])
return questionnaireresponse.QuestionnaireResponse(js)
def testQuestionnaireResponse1(self):
inst = self.instantiate_from("questionnaireresponse-example-bluebook.json")
self.assertIsNotNone(inst, "Must have instantiated a QuestionnaireResponse instance")
self.implQuestionnaireResponse1(inst)
js = inst.as_json()
self.assertEqual("QuestionnaireResponse", js["resourceType"])
inst2 = questionnaireresponse.QuestionnaireResponse(js)
self.implQuestionnaireResponse1(inst2)
def implQuestionnaireResponse1(self, inst):
self.assertEqual(inst.authored.date, FHIRDate("2013-02-19T14:15:00+10:00").date)
self.assertEqual(inst.authored.as_json(), "2013-02-19T14:15:00+10:00")
self.assertEqual(inst.id, "bb")
self.assertEqual(inst.item[0].item[0].item[0].answer[0].valueString, "Cathy Jones")
self.assertEqual(inst.item[0].item[0].item[0].linkId, "nameOfChild")
self.assertEqual(inst.item[0].item[0].item[0].text, "Name of child")
self.assertEqual(inst.item[0].item[0].item[1].answer[0].valueCoding.code, "f")
self.assertEqual(inst.item[0].item[0].item[1].linkId, "sex")
self.assertEqual(inst.item[0].item[0].item[1].text, "Sex")
self.assertEqual(inst.item[0].item[0].linkId, "group")
self.assertEqual(inst.item[0].item[1].item[0].answer[0].valueDecimal, 3.25)
self.assertEqual(inst.item[0].item[1].item[0].linkId, "birthWeight")
self.assertEqual(inst.item[0].item[1].item[0].text, "Birth weight (kg)")
self.assertEqual(inst.item[0].item[1].item[1].answer[0].valueDecimal, 44.3)
self.assertEqual(inst.item[0].item[1].item[1].linkId, "birthLength")
self.assertEqual(inst.item[0].item[1].item[1].text, "Birth length (cm)")
self.assertEqual(inst.item[0].item[1].item[2].answer[0].item[0].item[0].answer[0].valueDate.date, FHIRDate("1972-11-30").date)
self.assertEqual(inst.item[0].item[1].item[2].answer[0].item[0].item[0].answer[0].valueDate.as_json(), "1972-11-30")
self.assertEqual(inst.item[0].item[1].item[2].answer[0].item[0].item[0].linkId, "vitaminKDose1")
self.assertEqual(inst.item[0].item[1].item[2].answer[0].item[0].item[0].text, "1st dose")
self.assertEqual(inst.item[0].item[1].item[2].answer[0].item[0].item[1].answer[0].valueDate.date, FHIRDate("1972-12-11").date)
self.assertEqual(inst.item[0].item[1].item[2].answer[0].item[0].item[1].answer[0].valueDate.as_json(), "1972-12-11")
self.assertEqual(inst.item[0].item[1].item[2].answer[0].item[0].item[1].linkId, "vitaminKDose2")
self.assertEqual(inst.item[0].item[1].item[2].answer[0].item[0].item[1].text, "2nd dose")
self.assertEqual(inst.item[0].item[1].item[2].answer[0].item[0].linkId, "vitaminKgivenDoses")
self.assertEqual(inst.item[0].item[1].item[2].answer[0].valueCoding.code, "INJECTION")
self.assertEqual(inst.item[0].item[1].item[2].linkId, "vitaminKgiven")
self.assertEqual(inst.item[0].item[1].item[2].text, "Vitamin K given")
self.assertEqual(inst.item[0].item[1].item[3].answer[0].item[0].answer[0].valueDate.date, FHIRDate("1972-12-04").date)
self.assertEqual(inst.item[0].item[1].item[3].answer[0].item[0].answer[0].valueDate.as_json(), "1972-12-04")
self.assertEqual(inst.item[0].item[1].item[3].answer[0].item[0].linkId, "hepBgivenDate")
self.assertEqual(inst.item[0].item[1].item[3].answer[0].item[0].text, "Date given")
self.assertTrue(inst.item[0].item[1].item[3].answer[0].valueBoolean)
self.assertEqual(inst.item[0].item[1].item[3].linkId, "hepBgiven")
self.assertEqual(inst.item[0].item[1].item[3].text, "Hep B given y / n")
self.assertEqual(inst.item[0].item[1].item[4].answer[0].valueString, "Already able to speak Chinese")
self.assertEqual(inst.item[0].item[1].item[4].linkId, "abnormalitiesAtBirth")
self.assertEqual(inst.item[0].item[1].item[4].text, "Abnormalities noted at birth")
self.assertEqual(inst.item[0].item[1].linkId, "neonatalInformation")
self.assertEqual(inst.item[0].item[1].text, "Neonatal Information")
self.assertEqual(inst.item[0].linkId, "birthDetails")
self.assertEqual(inst.item[0].text, "Birth details - To be completed by health professional")
self.assertEqual(inst.status, "completed")
self.assertEqual(inst.text.status, "generated")
def testQuestionnaireResponse2(self):
inst = self.instantiate_from("questionnaireresponse-example-f201-lifelines.json")
self.assertIsNotNone(inst, "Must have instantiated a QuestionnaireResponse instance")
self.implQuestionnaireResponse2(inst)
js = inst.as_json()
self.assertEqual("QuestionnaireResponse", js["resourceType"])
inst2 = questionnaireresponse.QuestionnaireResponse(js)
self.implQuestionnaireResponse2(inst2)
def implQuestionnaireResponse2(self, inst):
self.assertEqual(inst.authored.date, FHIRDate("2013-06-18T00:00:00+01:00").date)
self.assertEqual(inst.authored.as_json(), "2013-06-18T00:00:00+01:00")
self.assertEqual(inst.id, "f201")
self.assertEqual(inst.item[0].item[0].answer[0].valueString, "I am allergic to house dust")
self.assertEqual(inst.item[0].item[0].linkId, "1.1")
self.assertEqual(inst.item[0].item[0].text, "Do you have allergies?")
self.assertEqual(inst.item[0].linkId, "1")
self.assertEqual(inst.item[1].item[0].answer[0].valueString, "Male")
self.assertEqual(inst.item[1].item[0].linkId, "2.1")
self.assertEqual(inst.item[1].item[0].text, "What is your gender?")
self.assertEqual(inst.item[1].item[1].answer[0].valueDate.date, FHIRDate("1960-03-13").date)
self.assertEqual(inst.item[1].item[1].answer[0].valueDate.as_json(), "1960-03-13")
self.assertEqual(inst.item[1].item[1].linkId, "2.2")
self.assertEqual(inst.item[1].item[1].text, "What is your date of birth?")
self.assertEqual(inst.item[1].item[2].answer[0].valueString, "The Netherlands")
self.assertEqual(inst.item[1].item[2].linkId, "2.3")
self.assertEqual(inst.item[1].item[2].text, "What is your country of birth?")
self.assertEqual(inst.item[1].item[3].answer[0].valueString, "married")
self.assertEqual(inst.item[1].item[3].linkId, "2.4")
self.assertEqual(inst.item[1].item[3].text, "What is your marital status?")
self.assertEqual(inst.item[1].linkId, "2")
self.assertEqual(inst.item[1].text, "General questions")
self.assertEqual(inst.item[2].item[0].answer[0].valueString, "No")
self.assertEqual(inst.item[2].item[0].linkId, "3.1")
self.assertEqual(inst.item[2].item[0].text, "Do you smoke?")
self.assertEqual(inst.item[2].item[1].answer[0].valueString, "No, but I used to drink")
self.assertEqual(inst.item[2].item[1].linkId, "3.2")
self.assertEqual(inst.item[2].item[1].text, "Do you drink alchohol?")
self.assertEqual(inst.item[2].linkId, "3")
self.assertEqual(inst.item[2].text, "Intoxications")
self.assertEqual(inst.status, "completed")
self.assertEqual(inst.text.status, "generated")
def testQuestionnaireResponse3(self):
inst = self.instantiate_from("questionnaireresponse-example-gcs.json")
self.assertIsNotNone(inst, "Must have instantiated a QuestionnaireResponse instance")
self.implQuestionnaireResponse3(inst)
js = inst.as_json()
self.assertEqual("QuestionnaireResponse", js["resourceType"])
inst2 = questionnaireresponse.QuestionnaireResponse(js)
self.implQuestionnaireResponse3(inst2)
def implQuestionnaireResponse3(self, inst):
self.assertEqual(inst.authored.date, FHIRDate("2014-12-11T04:44:16Z").date)
self.assertEqual(inst.authored.as_json(), "2014-12-11T04:44:16Z")
self.assertEqual(inst.id, "gcs")
self.assertEqual(inst.item[0].answer[0].valueCoding.code, "LA6560-2")
self.assertEqual(inst.item[0].answer[0].valueCoding.display, "Confused")
self.assertEqual(inst.item[0].answer[0].valueCoding.extension[0].url, "http://hl7.org/fhir/StructureDefinition/iso21090-CO-value")
self.assertEqual(inst.item[0].answer[0].valueCoding.extension[0].valueDecimal, 4)
self.assertEqual(inst.item[0].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[0].linkId, "1.1")
self.assertEqual(inst.item[1].answer[0].valueCoding.code, "LA6566-9")
self.assertEqual(inst.item[1].answer[0].valueCoding.display, "Localizing pain")
self.assertEqual(inst.item[1].answer[0].valueCoding.extension[0].url, "http://hl7.org/fhir/StructureDefinition/iso21090-CO-value")
self.assertEqual(inst.item[1].answer[0].valueCoding.extension[0].valueDecimal, 5)
self.assertEqual(inst.item[1].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[1].linkId, "1.2")
self.assertEqual(inst.item[2].answer[0].valueCoding.code, "LA6556-0")
self.assertEqual(inst.item[2].answer[0].valueCoding.display, "Eyes open spontaneously")
self.assertEqual(inst.item[2].answer[0].valueCoding.extension[0].url, "http://hl7.org/fhir/StructureDefinition/iso21090-CO-value")
self.assertEqual(inst.item[2].answer[0].valueCoding.extension[0].valueDecimal, 4)
self.assertEqual(inst.item[2].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].linkId, "1.3")
self.assertEqual(inst.status, "completed")
self.assertEqual(inst.text.status, "generated")
def testQuestionnaireResponse4(self):
inst = self.instantiate_from("questionnaireresponse-example-ussg-fht-answers.json")
self.assertIsNotNone(inst, "Must have instantiated a QuestionnaireResponse instance")
self.implQuestionnaireResponse4(inst)
js = inst.as_json()
self.assertEqual("QuestionnaireResponse", js["resourceType"])
inst2 = questionnaireresponse.QuestionnaireResponse(js)
self.implQuestionnaireResponse4(inst2)
def implQuestionnaireResponse4(self, inst):
self.assertEqual(inst.authored.date, FHIRDate("2008-01-17").date)
self.assertEqual(inst.authored.as_json(), "2008-01-17")
self.assertEqual(inst.id, "ussg-fht-answers")
self.assertEqual(inst.item[0].item[0].answer[0].valueDate.date, FHIRDate("2008-01-17").date)
self.assertEqual(inst.item[0].item[0].answer[0].valueDate.as_json(), "2008-01-17")
self.assertEqual(inst.item[0].item[0].linkId, "0.1")
self.assertEqual(inst.item[0].item[0].text, "Date Done")
self.assertEqual(inst.item[0].linkId, "0")
self.assertEqual(inst.item[1].definition, "http://loinc.org/fhir/DataElement/54126-8")
self.assertEqual(inst.item[1].item[0].item[0].answer[0].valueString, "Annie Proband")
self.assertEqual(inst.item[1].item[0].item[0].definition, "http://loinc.org/fhir/DataElement/54125-0")
self.assertEqual(inst.item[1].item[0].item[0].linkId, "1.1.1")
self.assertEqual(inst.item[1].item[0].item[0].text, "Name")
self.assertEqual(inst.item[1].item[0].item[1].answer[0].valueCoding.code, "LA3-6")
self.assertEqual(inst.item[1].item[0].item[1].answer[0].valueCoding.display, "Female")
self.assertEqual(inst.item[1].item[0].item[1].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[1].item[0].item[1].definition, "http://loinc.org/fhir/DataElement/54131-8")
self.assertEqual(inst.item[1].item[0].item[1].linkId, "1.1.2")
self.assertEqual(inst.item[1].item[0].item[1].text, "Gender")
self.assertEqual(inst.item[1].item[0].item[2].answer[0].valueDate.date, FHIRDate("1966-04-04").date)
self.assertEqual(inst.item[1].item[0].item[2].answer[0].valueDate.as_json(), "1966-04-04")
self.assertEqual(inst.item[1].item[0].item[2].definition, "http://loinc.org/fhir/DataElement/21112-8")
self.assertEqual(inst.item[1].item[0].item[2].linkId, "1.1.3")
self.assertEqual(inst.item[1].item[0].item[2].text, "Date of Birth")
self.assertEqual(inst.item[1].item[0].item[3].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[1].item[0].item[3].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[1].item[0].item[3].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[1].item[0].item[3].definition, "http://loinc.org/fhir/DataElement/54132-6")
self.assertEqual(inst.item[1].item[0].item[3].linkId, "1.1.4")
self.assertEqual(inst.item[1].item[0].item[3].text, "Were you born a twin?")
self.assertEqual(inst.item[1].item[0].item[4].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[1].item[0].item[4].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[1].item[0].item[4].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[1].item[0].item[4].definition, "http://loinc.org/fhir/DataElement/54128-4")
self.assertEqual(inst.item[1].item[0].item[4].linkId, "1.1.5")
self.assertEqual(inst.item[1].item[0].item[4].text, "Were you adopted?")
self.assertEqual(inst.item[1].item[0].item[5].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[1].item[0].item[5].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[1].item[0].item[5].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[1].item[0].item[5].definition, "http://loinc.org/fhir/DataElement/54135-9")
self.assertEqual(inst.item[1].item[0].item[5].linkId, "1.1.6")
self.assertEqual(inst.item[1].item[0].item[5].text, "Are your parents related to each other in any way other than marriage?")
self.assertEqual(inst.item[1].item[0].item[6].answer[0].item[0].item[0].answer[0].valueCoding.code, "[in_i]")
self.assertEqual(inst.item[1].item[0].item[6].answer[0].item[0].item[0].answer[0].valueCoding.display, "inches")
self.assertEqual(inst.item[1].item[0].item[6].answer[0].item[0].item[0].answer[0].valueCoding.system, "http://unitsofmeasure.org")
self.assertEqual(inst.item[1].item[0].item[6].answer[0].item[0].item[0].linkId, "1.1.7.1.1")
self.assertEqual(inst.item[1].item[0].item[6].answer[0].item[0].item[0].text, "Units")
self.assertEqual(inst.item[1].item[0].item[6].answer[0].item[0].linkId, "1.1.7.1")
self.assertEqual(inst.item[1].item[0].item[6].answer[0].valueDecimal, 63)
self.assertEqual(inst.item[1].item[0].item[6].definition, "http://loinc.org/fhir/DataElement/8302-2")
self.assertEqual(inst.item[1].item[0].item[6].linkId, "1.1.7")
self.assertEqual(inst.item[1].item[0].item[6].text, "Height")
self.assertEqual(inst.item[1].item[0].item[7].answer[0].item[0].item[0].answer[0].valueCoding.code, "lb")
self.assertEqual(inst.item[1].item[0].item[7].answer[0].item[0].item[0].answer[0].valueCoding.display, "pounds")
self.assertEqual(inst.item[1].item[0].item[7].answer[0].item[0].item[0].answer[0].valueCoding.system, "http://unitsofmeasure.org")
self.assertEqual(inst.item[1].item[0].item[7].answer[0].item[0].item[0].linkId, "1.1.8.1.1")
self.assertEqual(inst.item[1].item[0].item[7].answer[0].item[0].item[0].text, "Units")
self.assertEqual(inst.item[1].item[0].item[7].answer[0].item[0].linkId, "1.1.8.1")
self.assertEqual(inst.item[1].item[0].item[7].answer[0].valueDecimal, 127)
self.assertEqual(inst.item[1].item[0].item[7].definition, "http://loinc.org/fhir/DataElement/29463-7")
self.assertEqual(inst.item[1].item[0].item[7].linkId, "1.1.8")
self.assertEqual(inst.item[1].item[0].item[7].text, "Weight")
self.assertEqual(inst.item[1].item[0].item[8].answer[0].valueDecimal, 22.5)
self.assertEqual(inst.item[1].item[0].item[8].definition, "http://loinc.org/fhir/DataElement/39156-5")
self.assertEqual(inst.item[1].item[0].item[8].linkId, "1.1.9")
self.assertEqual(inst.item[1].item[0].item[8].text, "Body mass index (BMI) [Ratio]")
self.assertEqual(inst.item[1].item[0].item[9].answer[0].valueCoding.code, "LA4457-3")
self.assertEqual(inst.item[1].item[0].item[9].answer[0].valueCoding.display, "White")
self.assertEqual(inst.item[1].item[0].item[9].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[1].item[0].item[9].definition, "http://loinc.org/fhir/DataElement/54134-2")
self.assertEqual(inst.item[1].item[0].item[9].linkId, "1.1.10")
self.assertEqual(inst.item[1].item[0].item[9].text, "Race")
self.assertEqual(inst.item[1].item[0].linkId, "1.1")
self.assertEqual(inst.item[1].linkId, "1")
self.assertEqual(inst.item[1].text, "Your health information")
self.assertEqual(inst.item[2].definition, "http://loinc.org/fhir/DataElement/54114-4")
self.assertEqual(inst.item[2].item[0].item[0].item[0].answer[0].valueCoding.code, "LA10405-1")
self.assertEqual(inst.item[2].item[0].item[0].item[0].answer[0].valueCoding.display, "Daughter")
self.assertEqual(inst.item[2].item[0].item[0].item[0].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[0].item[0].item[0].definition, "http://loinc.org/fhir/DataElement/54136-7")
self.assertEqual(inst.item[2].item[0].item[0].item[0].linkId, "2.1.1.1")
self.assertEqual(inst.item[2].item[0].item[0].item[0].text, "Relationship to you")
self.assertEqual(inst.item[2].item[0].item[0].item[1].answer[0].valueString, "Susan")
self.assertEqual(inst.item[2].item[0].item[0].item[1].definition, "http://loinc.org/fhir/DataElement/54138-3")
self.assertEqual(inst.item[2].item[0].item[0].item[1].linkId, "2.1.1.2")
self.assertEqual(inst.item[2].item[0].item[0].item[1].text, "Name")
self.assertEqual(inst.item[2].item[0].item[0].item[2].answer[0].valueCoding.code, "LA3-6")
self.assertEqual(inst.item[2].item[0].item[0].item[2].answer[0].valueCoding.display, "Female")
self.assertEqual(inst.item[2].item[0].item[0].item[2].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[0].item[0].item[2].definition, "http://loinc.org/fhir/DataElement/54123-5")
self.assertEqual(inst.item[2].item[0].item[0].item[2].linkId, "2.1.1.3")
self.assertEqual(inst.item[2].item[0].item[0].item[2].text, "Gender")
self.assertEqual(inst.item[2].item[0].item[0].item[3].answer[0].item[0].item[0].answer[0].valueDecimal, 17)
self.assertEqual(inst.item[2].item[0].item[0].item[3].answer[0].item[0].item[0].definition, "http://loinc.org/fhir/DataElement/54141-7")
self.assertEqual(inst.item[2].item[0].item[0].item[3].answer[0].item[0].item[0].linkId, "2.1.1.4.2.2")
self.assertEqual(inst.item[2].item[0].item[0].item[3].answer[0].item[0].item[0].text, "Age")
self.assertEqual(inst.item[2].item[0].item[0].item[3].answer[0].item[0].linkId, "2.1.1.4.2")
self.assertEqual(inst.item[2].item[0].item[0].item[3].answer[0].valueCoding.code, "LA33-6")
self.assertEqual(inst.item[2].item[0].item[0].item[3].answer[0].valueCoding.display, "Yes")
self.assertEqual(inst.item[2].item[0].item[0].item[3].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[0].item[0].item[3].definition, "http://loinc.org/fhir/DataElement/54139-1")
self.assertEqual(inst.item[2].item[0].item[0].item[3].linkId, "2.1.1.4")
self.assertEqual(inst.item[2].item[0].item[0].item[3].text, "Living?")
self.assertEqual(inst.item[2].item[0].item[0].item[4].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[2].item[0].item[0].item[4].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[2].item[0].item[0].item[4].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[0].item[0].item[4].definition, "http://loinc.org/fhir/DataElement/54121-9")
self.assertEqual(inst.item[2].item[0].item[0].item[4].linkId, "2.1.1.5")
self.assertEqual(inst.item[2].item[0].item[0].item[4].text, "Was this person born a twin?")
self.assertEqual(inst.item[2].item[0].item[0].item[5].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[2].item[0].item[0].item[5].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[2].item[0].item[0].item[5].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[0].item[0].item[5].definition, "http://loinc.org/fhir/DataElement/54122-7")
self.assertEqual(inst.item[2].item[0].item[0].item[5].linkId, "2.1.1.6")
self.assertEqual(inst.item[2].item[0].item[0].item[5].text, "Was this person adopted?")
self.assertEqual(inst.item[2].item[0].item[0].linkId, "2.1.1")
self.assertEqual(inst.item[2].item[0].linkId, "2.1")
self.assertEqual(inst.item[2].item[1].item[0].item[0].answer[0].valueCoding.code, "LA10415-0")
self.assertEqual(inst.item[2].item[1].item[0].item[0].answer[0].valueCoding.display, "Brother")
self.assertEqual(inst.item[2].item[1].item[0].item[0].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[1].item[0].item[0].definition, "http://loinc.org/fhir/DataElement/54136-7")
self.assertEqual(inst.item[2].item[1].item[0].item[0].linkId, "2.1.1.1")
self.assertEqual(inst.item[2].item[1].item[0].item[0].text, "Relationship to you")
self.assertEqual(inst.item[2].item[1].item[0].item[1].answer[0].valueString, "Brian")
self.assertEqual(inst.item[2].item[1].item[0].item[1].definition, "http://loinc.org/fhir/DataElement/54138-3")
self.assertEqual(inst.item[2].item[1].item[0].item[1].linkId, "2.1.1.2")
self.assertEqual(inst.item[2].item[1].item[0].item[1].text, "Name")
self.assertEqual(inst.item[2].item[1].item[0].item[2].answer[0].valueCoding.code, "LA2-8")
self.assertEqual(inst.item[2].item[1].item[0].item[2].answer[0].valueCoding.display, "Male")
self.assertEqual(inst.item[2].item[1].item[0].item[2].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[1].item[0].item[2].definition, "http://loinc.org/fhir/DataElement/54123-5")
self.assertEqual(inst.item[2].item[1].item[0].item[2].linkId, "2.1.1.3")
self.assertEqual(inst.item[2].item[1].item[0].item[2].text, "Gender")
self.assertEqual(inst.item[2].item[1].item[0].item[3].answer[0].item[0].item[0].answer[0].valueDecimal, 32)
self.assertEqual(inst.item[2].item[1].item[0].item[3].answer[0].item[0].item[0].definition, "http://loinc.org/fhir/DataElement/54141-7")
self.assertEqual(inst.item[2].item[1].item[0].item[3].answer[0].item[0].item[0].linkId, "2.1.1.4.2.2")
self.assertEqual(inst.item[2].item[1].item[0].item[3].answer[0].item[0].item[0].text, "Age")
self.assertEqual(inst.item[2].item[1].item[0].item[3].answer[0].item[0].linkId, "2.1.1.4.2")
self.assertEqual(inst.item[2].item[1].item[0].item[3].answer[0].valueCoding.code, "LA33-6")
self.assertEqual(inst.item[2].item[1].item[0].item[3].answer[0].valueCoding.display, "Yes")
self.assertEqual(inst.item[2].item[1].item[0].item[3].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[1].item[0].item[3].definition, "http://loinc.org/fhir/DataElement/54139-1")
self.assertEqual(inst.item[2].item[1].item[0].item[3].linkId, "2.1.1.4")
self.assertEqual(inst.item[2].item[1].item[0].item[3].text, "Living?")
self.assertEqual(inst.item[2].item[1].item[0].item[4].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[2].item[1].item[0].item[4].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[2].item[1].item[0].item[4].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[1].item[0].item[4].definition, "http://loinc.org/fhir/DataElement/54121-9")
self.assertEqual(inst.item[2].item[1].item[0].item[4].linkId, "2.1.1.5")
self.assertEqual(inst.item[2].item[1].item[0].item[4].text, "Was this person born a twin?")
self.assertEqual(inst.item[2].item[1].item[0].item[5].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[2].item[1].item[0].item[5].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[2].item[1].item[0].item[5].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[1].item[0].item[5].definition, "http://loinc.org/fhir/DataElement/54122-7")
self.assertEqual(inst.item[2].item[1].item[0].item[5].linkId, "2.1.1.6")
self.assertEqual(inst.item[2].item[1].item[0].item[5].text, "Was this person adopted?")
self.assertEqual(inst.item[2].item[1].item[0].linkId, "2.1.1")
self.assertEqual(inst.item[2].item[1].item[1].item[0].answer[0].valueCoding.code, "LA10550-4")
self.assertEqual(inst.item[2].item[1].item[1].item[0].answer[0].valueCoding.display, "-- Other Cancer")
self.assertEqual(inst.item[2].item[1].item[1].item[0].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[1].item[1].item[0].linkId, "2.1.2.1")
self.assertEqual(inst.item[2].item[1].item[1].item[0].text, "Disease or Condition")
self.assertEqual(inst.item[2].item[1].item[1].item[1].answer[0].valueCoding.code, "LA10397-0")
self.assertEqual(inst.item[2].item[1].item[1].item[1].answer[0].valueCoding.display, "30-39")
self.assertEqual(inst.item[2].item[1].item[1].item[1].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[1].item[1].item[1].linkId, "2.1.2.2")
self.assertEqual(inst.item[2].item[1].item[1].item[1].text, "Age at Diagnosis")
self.assertEqual(inst.item[2].item[1].item[1].linkId, "2.1.2")
self.assertEqual(inst.item[2].item[1].item[1].text, "This family member's history of disease")
self.assertEqual(inst.item[2].item[1].linkId, "2.1")
self.assertEqual(inst.item[2].item[2].item[0].item[0].answer[0].valueCoding.code, "LA10418-4")
self.assertEqual(inst.item[2].item[2].item[0].item[0].answer[0].valueCoding.display, "Sister")
self.assertEqual(inst.item[2].item[2].item[0].item[0].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[2].item[0].item[0].definition, "http://loinc.org/fhir/DataElement/54136-7")
self.assertEqual(inst.item[2].item[2].item[0].item[0].linkId, "2.1.1.1")
self.assertEqual(inst.item[2].item[2].item[0].item[0].text, "Relationship to you")
self.assertEqual(inst.item[2].item[2].item[0].item[1].answer[0].valueString, "Janet")
self.assertEqual(inst.item[2].item[2].item[0].item[1].definition, "http://loinc.org/fhir/DataElement/54138-3")
self.assertEqual(inst.item[2].item[2].item[0].item[1].linkId, "2.1.1.2")
self.assertEqual(inst.item[2].item[2].item[0].item[1].text, "Name")
self.assertEqual(inst.item[2].item[2].item[0].item[2].answer[0].valueCoding.code, "LA3-6")
self.assertEqual(inst.item[2].item[2].item[0].item[2].answer[0].valueCoding.display, "Female")
self.assertEqual(inst.item[2].item[2].item[0].item[2].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[2].item[0].item[2].definition, "http://loinc.org/fhir/DataElement/54123-5")
self.assertEqual(inst.item[2].item[2].item[0].item[2].linkId, "2.1.1.3")
self.assertEqual(inst.item[2].item[2].item[0].item[2].text, "Gender")
self.assertEqual(inst.item[2].item[2].item[0].item[3].answer[0].item[0].item[0].answer[0].valueDecimal, 36)
self.assertEqual(inst.item[2].item[2].item[0].item[3].answer[0].item[0].item[0].definition, "http://loinc.org/fhir/DataElement/54141-7")
self.assertEqual(inst.item[2].item[2].item[0].item[3].answer[0].item[0].item[0].linkId, "2.1.1.4.2.2")
self.assertEqual(inst.item[2].item[2].item[0].item[3].answer[0].item[0].item[0].text, "Age")
self.assertEqual(inst.item[2].item[2].item[0].item[3].answer[0].item[0].linkId, "2.1.1.4.2")
self.assertEqual(inst.item[2].item[2].item[0].item[3].answer[0].valueCoding.code, "LA33-6")
self.assertEqual(inst.item[2].item[2].item[0].item[3].answer[0].valueCoding.display, "Yes")
self.assertEqual(inst.item[2].item[2].item[0].item[3].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[2].item[0].item[3].definition, "http://loinc.org/fhir/DataElement/54139-1")
self.assertEqual(inst.item[2].item[2].item[0].item[3].linkId, "2.1.1.4")
self.assertEqual(inst.item[2].item[2].item[0].item[3].text, "Living?")
self.assertEqual(inst.item[2].item[2].item[0].item[4].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[2].item[2].item[0].item[4].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[2].item[2].item[0].item[4].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[2].item[0].item[4].definition, "http://loinc.org/fhir/DataElement/54121-9")
self.assertEqual(inst.item[2].item[2].item[0].item[4].linkId, "2.1.1.5")
self.assertEqual(inst.item[2].item[2].item[0].item[4].text, "Was this person born a twin?")
self.assertEqual(inst.item[2].item[2].item[0].item[5].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[2].item[2].item[0].item[5].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[2].item[2].item[0].item[5].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[2].item[0].item[5].definition, "http://loinc.org/fhir/DataElement/54122-7")
self.assertEqual(inst.item[2].item[2].item[0].item[5].linkId, "2.1.1.6")
self.assertEqual(inst.item[2].item[2].item[0].item[5].text, "Was this person adopted?")
self.assertEqual(inst.item[2].item[2].item[0].linkId, "2.1.1")
self.assertEqual(inst.item[2].item[2].item[1].item[0].answer[0].valueCoding.code, "LA10536-3")
self.assertEqual(inst.item[2].item[2].item[1].item[0].answer[0].valueCoding.display, "-- Breast Cancer")
self.assertEqual(inst.item[2].item[2].item[1].item[0].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[2].item[1].item[0].linkId, "2.1.2.1")
self.assertEqual(inst.item[2].item[2].item[1].item[0].text, "Disease or Condition")
self.assertEqual(inst.item[2].item[2].item[1].item[1].answer[0].valueCoding.code, "LA10397-0")
self.assertEqual(inst.item[2].item[2].item[1].item[1].answer[0].valueCoding.display, "30-39")
self.assertEqual(inst.item[2].item[2].item[1].item[1].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[2].item[1].item[1].linkId, "2.1.2.2")
self.assertEqual(inst.item[2].item[2].item[1].item[1].text, "Age at Diagnosis")
self.assertEqual(inst.item[2].item[2].item[1].linkId, "2.1.2")
self.assertEqual(inst.item[2].item[2].item[1].text, "This family member's history of disease")
self.assertEqual(inst.item[2].item[2].linkId, "2.1")
self.assertEqual(inst.item[2].item[3].item[0].item[0].answer[0].valueCoding.code, "LA10419-2")
self.assertEqual(inst.item[2].item[3].item[0].item[0].answer[0].valueCoding.display, "Nephew")
self.assertEqual(inst.item[2].item[3].item[0].item[0].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[3].item[0].item[0].definition, "http://loinc.org/fhir/DataElement/54136-7")
self.assertEqual(inst.item[2].item[3].item[0].item[0].linkId, "2.1.1.1")
self.assertEqual(inst.item[2].item[3].item[0].item[0].text, "Relationship to you")
self.assertEqual(inst.item[2].item[3].item[0].item[1].answer[0].valueString, "Ian")
self.assertEqual(inst.item[2].item[3].item[0].item[1].definition, "http://loinc.org/fhir/DataElement/54138-3")
self.assertEqual(inst.item[2].item[3].item[0].item[1].linkId, "2.1.1.2")
self.assertEqual(inst.item[2].item[3].item[0].item[1].text, "Name")
self.assertEqual(inst.item[2].item[3].item[0].item[2].answer[0].valueCoding.code, "LA2-8")
self.assertEqual(inst.item[2].item[3].item[0].item[2].answer[0].valueCoding.display, "Male")
self.assertEqual(inst.item[2].item[3].item[0].item[2].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[3].item[0].item[2].definition, "http://loinc.org/fhir/DataElement/54123-5")
self.assertEqual(inst.item[2].item[3].item[0].item[2].linkId, "2.1.1.3")
self.assertEqual(inst.item[2].item[3].item[0].item[2].text, "Gender")
self.assertEqual(inst.item[2].item[3].item[0].item[3].answer[0].item[0].item[0].answer[0].valueDecimal, 16)
self.assertEqual(inst.item[2].item[3].item[0].item[3].answer[0].item[0].item[0].definition, "http://loinc.org/fhir/DataElement/54141-7")
self.assertEqual(inst.item[2].item[3].item[0].item[3].answer[0].item[0].item[0].linkId, "2.1.1.4.2.2")
self.assertEqual(inst.item[2].item[3].item[0].item[3].answer[0].item[0].item[0].text, "Age")
self.assertEqual(inst.item[2].item[3].item[0].item[3].answer[0].item[0].linkId, "2.1.1.4.2")
self.assertEqual(inst.item[2].item[3].item[0].item[3].answer[0].valueCoding.code, "LA33-6")
self.assertEqual(inst.item[2].item[3].item[0].item[3].answer[0].valueCoding.display, "Yes")
self.assertEqual(inst.item[2].item[3].item[0].item[3].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[3].item[0].item[3].definition, "http://loinc.org/fhir/DataElement/54139-1")
self.assertEqual(inst.item[2].item[3].item[0].item[3].linkId, "2.1.1.4")
self.assertEqual(inst.item[2].item[3].item[0].item[3].text, "Living?")
self.assertEqual(inst.item[2].item[3].item[0].item[4].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[2].item[3].item[0].item[4].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[2].item[3].item[0].item[4].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[3].item[0].item[4].definition, "http://loinc.org/fhir/DataElement/54121-9")
self.assertEqual(inst.item[2].item[3].item[0].item[4].linkId, "2.1.1.5")
self.assertEqual(inst.item[2].item[3].item[0].item[4].text, "Was this person born a twin?")
self.assertEqual(inst.item[2].item[3].item[0].item[5].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[2].item[3].item[0].item[5].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[2].item[3].item[0].item[5].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[3].item[0].item[5].definition, "http://loinc.org/fhir/DataElement/54122-7")
self.assertEqual(inst.item[2].item[3].item[0].item[5].linkId, "2.1.1.6")
self.assertEqual(inst.item[2].item[3].item[0].item[5].text, "Was this person adopted?")
self.assertEqual(inst.item[2].item[3].item[0].linkId, "2.1.1")
self.assertEqual(inst.item[2].item[3].linkId, "2.1")
self.assertEqual(inst.item[2].item[4].item[0].item[0].answer[0].valueCoding.code, "LA10420-0")
self.assertEqual(inst.item[2].item[4].item[0].item[0].answer[0].valueCoding.display, "Niece")
self.assertEqual(inst.item[2].item[4].item[0].item[0].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[4].item[0].item[0].definition, "http://loinc.org/fhir/DataElement/54136-7")
self.assertEqual(inst.item[2].item[4].item[0].item[0].linkId, "2.1.1.1")
self.assertEqual(inst.item[2].item[4].item[0].item[0].text, "Relationship to you")
self.assertEqual(inst.item[2].item[4].item[0].item[1].answer[0].valueString, "Helen")
self.assertEqual(inst.item[2].item[4].item[0].item[1].definition, "http://loinc.org/fhir/DataElement/54138-3")
self.assertEqual(inst.item[2].item[4].item[0].item[1].linkId, "2.1.1.2")
self.assertEqual(inst.item[2].item[4].item[0].item[1].text, "Name")
self.assertEqual(inst.item[2].item[4].item[0].item[2].answer[0].valueCoding.code, "LA3-6")
self.assertEqual(inst.item[2].item[4].item[0].item[2].answer[0].valueCoding.display, "Female")
self.assertEqual(inst.item[2].item[4].item[0].item[2].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[4].item[0].item[2].definition, "http://loinc.org/fhir/DataElement/54123-5")
self.assertEqual(inst.item[2].item[4].item[0].item[2].linkId, "2.1.1.3")
self.assertEqual(inst.item[2].item[4].item[0].item[2].text, "Gender")
self.assertEqual(inst.item[2].item[4].item[0].item[3].answer[0].item[0].item[0].answer[0].valueDecimal, 15)
self.assertEqual(inst.item[2].item[4].item[0].item[3].answer[0].item[0].item[0].definition, "http://loinc.org/fhir/DataElement/54141-7")
self.assertEqual(inst.item[2].item[4].item[0].item[3].answer[0].item[0].item[0].linkId, "2.1.1.4.2.2")
self.assertEqual(inst.item[2].item[4].item[0].item[3].answer[0].item[0].item[0].text, "Age")
self.assertEqual(inst.item[2].item[4].item[0].item[3].answer[0].item[0].linkId, "2.1.1.4.2")
self.assertEqual(inst.item[2].item[4].item[0].item[3].answer[0].valueCoding.code, "LA33-6")
self.assertEqual(inst.item[2].item[4].item[0].item[3].answer[0].valueCoding.display, "Yes")
self.assertEqual(inst.item[2].item[4].item[0].item[3].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[4].item[0].item[3].definition, "http://loinc.org/fhir/DataElement/54139-1")
self.assertEqual(inst.item[2].item[4].item[0].item[3].linkId, "2.1.1.4")
self.assertEqual(inst.item[2].item[4].item[0].item[3].text, "Living?")
self.assertEqual(inst.item[2].item[4].item[0].item[4].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[2].item[4].item[0].item[4].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[2].item[4].item[0].item[4].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[4].item[0].item[4].definition, "http://loinc.org/fhir/DataElement/54121-9")
self.assertEqual(inst.item[2].item[4].item[0].item[4].linkId, "2.1.1.5")
self.assertEqual(inst.item[2].item[4].item[0].item[4].text, "Was this person born a twin?")
self.assertEqual(inst.item[2].item[4].item[0].item[5].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[2].item[4].item[0].item[5].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[2].item[4].item[0].item[5].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[4].item[0].item[5].definition, "http://loinc.org/fhir/DataElement/54122-7")
self.assertEqual(inst.item[2].item[4].item[0].item[5].linkId, "2.1.1.6")
self.assertEqual(inst.item[2].item[4].item[0].item[5].text, "Was this person adopted?")
self.assertEqual(inst.item[2].item[4].item[0].linkId, "2.1.1")
self.assertEqual(inst.item[2].item[4].linkId, "2.1")
self.assertEqual(inst.item[2].item[5].item[0].item[0].answer[0].valueCoding.code, "LA10416-8")
self.assertEqual(inst.item[2].item[5].item[0].item[0].answer[0].valueCoding.display, "Father")
self.assertEqual(inst.item[2].item[5].item[0].item[0].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[5].item[0].item[0].definition, "http://loinc.org/fhir/DataElement/54136-7")
self.assertEqual(inst.item[2].item[5].item[0].item[0].linkId, "2.1.1.1")
self.assertEqual(inst.item[2].item[5].item[0].item[0].text, "Relationship to you")
self.assertEqual(inst.item[2].item[5].item[0].item[1].answer[0].valueString, "Donald")
self.assertEqual(inst.item[2].item[5].item[0].item[1].definition, "http://loinc.org/fhir/DataElement/54138-3")
self.assertEqual(inst.item[2].item[5].item[0].item[1].linkId, "2.1.1.2")
self.assertEqual(inst.item[2].item[5].item[0].item[1].text, "Name")
self.assertEqual(inst.item[2].item[5].item[0].item[2].answer[0].valueCoding.code, "LA2-8")
self.assertEqual(inst.item[2].item[5].item[0].item[2].answer[0].valueCoding.display, "Male")
self.assertEqual(inst.item[2].item[5].item[0].item[2].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[5].item[0].item[2].definition, "http://loinc.org/fhir/DataElement/54123-5")
self.assertEqual(inst.item[2].item[5].item[0].item[2].linkId, "2.1.1.3")
self.assertEqual(inst.item[2].item[5].item[0].item[2].text, "Gender")
self.assertEqual(inst.item[2].item[5].item[0].item[3].answer[0].item[0].item[0].answer[0].valueDecimal, 52)
self.assertEqual(inst.item[2].item[5].item[0].item[3].answer[0].item[0].item[0].definition, "http://loinc.org/fhir/DataElement/54141-7")
self.assertEqual(inst.item[2].item[5].item[0].item[3].answer[0].item[0].item[0].linkId, "2.1.1.4.2.2")
self.assertEqual(inst.item[2].item[5].item[0].item[3].answer[0].item[0].item[0].text, "Age")
self.assertEqual(inst.item[2].item[5].item[0].item[3].answer[0].item[0].linkId, "2.1.1.4.2")
self.assertEqual(inst.item[2].item[5].item[0].item[3].answer[0].valueCoding.code, "LA33-6")
self.assertEqual(inst.item[2].item[5].item[0].item[3].answer[0].valueCoding.display, "Yes")
self.assertEqual(inst.item[2].item[5].item[0].item[3].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[5].item[0].item[3].definition, "http://loinc.org/fhir/DataElement/54139-1")
self.assertEqual(inst.item[2].item[5].item[0].item[3].linkId, "2.1.1.4")
self.assertEqual(inst.item[2].item[5].item[0].item[3].text, "Living?")
self.assertEqual(inst.item[2].item[5].item[0].item[4].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[2].item[5].item[0].item[4].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[2].item[5].item[0].item[4].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[5].item[0].item[4].definition, "http://loinc.org/fhir/DataElement/54121-9")
self.assertEqual(inst.item[2].item[5].item[0].item[4].linkId, "2.1.1.5")
self.assertEqual(inst.item[2].item[5].item[0].item[4].text, "Was this person born a twin?")
self.assertEqual(inst.item[2].item[5].item[0].item[5].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[2].item[5].item[0].item[5].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[2].item[5].item[0].item[5].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[5].item[0].item[5].definition, "http://loinc.org/fhir/DataElement/54122-7")
self.assertEqual(inst.item[2].item[5].item[0].item[5].linkId, "2.1.1.6")
self.assertEqual(inst.item[2].item[5].item[0].item[5].text, "Was this person adopted?")
self.assertEqual(inst.item[2].item[5].item[0].linkId, "2.1.1")
self.assertEqual(inst.item[2].item[5].linkId, "2.1")
self.assertEqual(inst.item[2].item[6].item[0].item[0].answer[0].valueCoding.code, "LA10425-9")
self.assertEqual(inst.item[2].item[6].item[0].item[0].answer[0].valueCoding.display, "Paternal Uncle")
self.assertEqual(inst.item[2].item[6].item[0].item[0].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[6].item[0].item[0].definition, "http://loinc.org/fhir/DataElement/54136-7")
self.assertEqual(inst.item[2].item[6].item[0].item[0].linkId, "2.1.1.1")
self.assertEqual(inst.item[2].item[6].item[0].item[0].text, "Relationship to you")
self.assertEqual(inst.item[2].item[6].item[0].item[1].answer[0].valueString, "Eric")
self.assertEqual(inst.item[2].item[6].item[0].item[1].definition, "http://loinc.org/fhir/DataElement/54138-3")
self.assertEqual(inst.item[2].item[6].item[0].item[1].linkId, "2.1.1.2")
self.assertEqual(inst.item[2].item[6].item[0].item[1].text, "Name")
self.assertEqual(inst.item[2].item[6].item[0].item[2].answer[0].valueCoding.code, "LA2-8")
self.assertEqual(inst.item[2].item[6].item[0].item[2].answer[0].valueCoding.display, "Male")
self.assertEqual(inst.item[2].item[6].item[0].item[2].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[6].item[0].item[2].definition, "http://loinc.org/fhir/DataElement/54123-5")
self.assertEqual(inst.item[2].item[6].item[0].item[2].linkId, "2.1.1.3")
self.assertEqual(inst.item[2].item[6].item[0].item[2].text, "Gender")
self.assertEqual(inst.item[2].item[6].item[0].item[3].answer[0].item[0].item[0].answer[0].valueDecimal, 56)
self.assertEqual(inst.item[2].item[6].item[0].item[3].answer[0].item[0].item[0].definition, "http://loinc.org/fhir/DataElement/54141-7")
self.assertEqual(inst.item[2].item[6].item[0].item[3].answer[0].item[0].item[0].linkId, "2.1.1.4.2.2")
self.assertEqual(inst.item[2].item[6].item[0].item[3].answer[0].item[0].item[0].text, "Age")
self.assertEqual(inst.item[2].item[6].item[0].item[3].answer[0].item[0].linkId, "2.1.1.4.2")
self.assertEqual(inst.item[2].item[6].item[0].item[3].answer[0].valueCoding.code, "LA33-6")
self.assertEqual(inst.item[2].item[6].item[0].item[3].answer[0].valueCoding.display, "Yes")
self.assertEqual(inst.item[2].item[6].item[0].item[3].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[6].item[0].item[3].definition, "http://loinc.org/fhir/DataElement/54139-1")
self.assertEqual(inst.item[2].item[6].item[0].item[3].linkId, "2.1.1.4")
self.assertEqual(inst.item[2].item[6].item[0].item[3].text, "Living?")
self.assertEqual(inst.item[2].item[6].item[0].item[4].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[2].item[6].item[0].item[4].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[2].item[6].item[0].item[4].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[6].item[0].item[4].definition, "http://loinc.org/fhir/DataElement/54121-9")
self.assertEqual(inst.item[2].item[6].item[0].item[4].linkId, "2.1.1.5")
self.assertEqual(inst.item[2].item[6].item[0].item[4].text, "Was this person born a twin?")
self.assertEqual(inst.item[2].item[6].item[0].item[5].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[2].item[6].item[0].item[5].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[2].item[6].item[0].item[5].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[6].item[0].item[5].definition, "http://loinc.org/fhir/DataElement/54122-7")
self.assertEqual(inst.item[2].item[6].item[0].item[5].linkId, "2.1.1.6")
self.assertEqual(inst.item[2].item[6].item[0].item[5].text, "Was this person adopted?")
self.assertEqual(inst.item[2].item[6].item[0].linkId, "2.1.1")
self.assertEqual(inst.item[2].item[6].linkId, "2.1")
self.assertEqual(inst.item[2].item[7].item[0].item[0].answer[0].valueCoding.code, "LA10421-8")
self.assertEqual(inst.item[2].item[7].item[0].item[0].answer[0].valueCoding.display, "Paternal Aunt")
self.assertEqual(inst.item[2].item[7].item[0].item[0].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[7].item[0].item[0].definition, "http://loinc.org/fhir/DataElement/54136-7")
self.assertEqual(inst.item[2].item[7].item[0].item[0].linkId, "2.1.1.1")
self.assertEqual(inst.item[2].item[7].item[0].item[0].text, "Relationship to you")
self.assertEqual(inst.item[2].item[7].item[0].item[1].answer[0].valueString, "Fiona")
self.assertEqual(inst.item[2].item[7].item[0].item[1].definition, "http://loinc.org/fhir/DataElement/54138-3")
self.assertEqual(inst.item[2].item[7].item[0].item[1].linkId, "2.1.1.2")
self.assertEqual(inst.item[2].item[7].item[0].item[1].text, "Name")
self.assertEqual(inst.item[2].item[7].item[0].item[2].answer[0].valueCoding.code, "LA3-6")
self.assertEqual(inst.item[2].item[7].item[0].item[2].answer[0].valueCoding.display, "Female")
self.assertEqual(inst.item[2].item[7].item[0].item[2].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[7].item[0].item[2].definition, "http://loinc.org/fhir/DataElement/54123-5")
self.assertEqual(inst.item[2].item[7].item[0].item[2].linkId, "2.1.1.3")
self.assertEqual(inst.item[2].item[7].item[0].item[2].text, "Gender")
self.assertEqual(inst.item[2].item[7].item[0].item[3].answer[0].item[0].item[0].answer[0].valueDecimal, 57)
self.assertEqual(inst.item[2].item[7].item[0].item[3].answer[0].item[0].item[0].definition, "http://loinc.org/fhir/DataElement/54141-7")
self.assertEqual(inst.item[2].item[7].item[0].item[3].answer[0].item[0].item[0].linkId, "2.1.1.4.2.2")
self.assertEqual(inst.item[2].item[7].item[0].item[3].answer[0].item[0].item[0].text, "Age")
self.assertEqual(inst.item[2].item[7].item[0].item[3].answer[0].item[0].linkId, "2.1.1.4.2")
self.assertEqual(inst.item[2].item[7].item[0].item[3].answer[0].valueCoding.code, "LA33-6")
self.assertEqual(inst.item[2].item[7].item[0].item[3].answer[0].valueCoding.display, "Yes")
self.assertEqual(inst.item[2].item[7].item[0].item[3].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[7].item[0].item[3].definition, "http://loinc.org/fhir/DataElement/54139-1")
self.assertEqual(inst.item[2].item[7].item[0].item[3].linkId, "2.1.1.4")
self.assertEqual(inst.item[2].item[7].item[0].item[3].text, "Living?")
self.assertEqual(inst.item[2].item[7].item[0].item[4].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[2].item[7].item[0].item[4].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[2].item[7].item[0].item[4].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[7].item[0].item[4].definition, "http://loinc.org/fhir/DataElement/54121-9")
self.assertEqual(inst.item[2].item[7].item[0].item[4].linkId, "2.1.1.5")
self.assertEqual(inst.item[2].item[7].item[0].item[4].text, "Was this person born a twin?")
self.assertEqual(inst.item[2].item[7].item[0].item[5].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[2].item[7].item[0].item[5].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[2].item[7].item[0].item[5].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[7].item[0].item[5].definition, "http://loinc.org/fhir/DataElement/54122-7")
self.assertEqual(inst.item[2].item[7].item[0].item[5].linkId, "2.1.1.6")
self.assertEqual(inst.item[2].item[7].item[0].item[5].text, "Was this person adopted?")
self.assertEqual(inst.item[2].item[7].item[0].linkId, "2.1.1")
self.assertEqual(inst.item[2].item[7].item[1].item[0].answer[0].valueCoding.code, "LA10543-9")
self.assertEqual(inst.item[2].item[7].item[1].item[0].answer[0].valueCoding.display, "-- Skin Cancer")
self.assertEqual(inst.item[2].item[7].item[1].item[0].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[7].item[1].item[0].linkId, "2.1.2.1")
self.assertEqual(inst.item[2].item[7].item[1].item[0].text, "Disease or Condition")
self.assertEqual(inst.item[2].item[7].item[1].linkId, "2.1.2")
self.assertEqual(inst.item[2].item[7].item[1].text, "This family member's history of disease")
self.assertEqual(inst.item[2].item[7].linkId, "2.1")
self.assertEqual(inst.item[2].item[8].item[0].item[0].answer[0].valueCoding.code, "LA10423-4")
self.assertEqual(inst.item[2].item[8].item[0].item[0].answer[0].valueCoding.display, "Paternal Grandfather")
self.assertEqual(inst.item[2].item[8].item[0].item[0].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[8].item[0].item[0].definition, "http://loinc.org/fhir/DataElement/54136-7")
self.assertEqual(inst.item[2].item[8].item[0].item[0].linkId, "2.1.1.1")
self.assertEqual(inst.item[2].item[8].item[0].item[0].text, "Relationship to you")
self.assertEqual(inst.item[2].item[8].item[0].item[1].answer[0].valueString, "Bob")
self.assertEqual(inst.item[2].item[8].item[0].item[1].definition, "http://loinc.org/fhir/DataElement/54138-3")
self.assertEqual(inst.item[2].item[8].item[0].item[1].linkId, "2.1.1.2")
self.assertEqual(inst.item[2].item[8].item[0].item[1].text, "Name")
self.assertEqual(inst.item[2].item[8].item[0].item[2].answer[0].valueCoding.code, "LA2-8")
self.assertEqual(inst.item[2].item[8].item[0].item[2].answer[0].valueCoding.display, "Male")
self.assertEqual(inst.item[2].item[8].item[0].item[2].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[8].item[0].item[2].definition, "http://loinc.org/fhir/DataElement/54123-5")
self.assertEqual(inst.item[2].item[8].item[0].item[2].linkId, "2.1.1.3")
self.assertEqual(inst.item[2].item[8].item[0].item[2].text, "Gender")
self.assertEqual(inst.item[2].item[8].item[0].item[3].answer[0].item[0].item[0].answer[0].valueCoding.code, "LA10537-1")
self.assertEqual(inst.item[2].item[8].item[0].item[3].answer[0].item[0].item[0].answer[0].valueCoding.display, "-- Colon Cancer")
self.assertEqual(inst.item[2].item[8].item[0].item[3].answer[0].item[0].item[0].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[8].item[0].item[3].answer[0].item[0].item[0].definition, "http://loinc.org/fhir/DataElement/54112-8")
self.assertEqual(inst.item[2].item[8].item[0].item[3].answer[0].item[0].item[0].linkId, "2.1.1.4.1.1")
self.assertEqual(inst.item[2].item[8].item[0].item[3].answer[0].item[0].item[0].text, "Cause of Death")
self.assertEqual(inst.item[2].item[8].item[0].item[3].answer[0].item[0].item[1].answer[0].valueCoding.code, "LA10400-2")
self.assertEqual(inst.item[2].item[8].item[0].item[3].answer[0].item[0].item[1].answer[0].valueCoding.display, "OVER 60")
self.assertEqual(inst.item[2].item[8].item[0].item[3].answer[0].item[0].item[1].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[8].item[0].item[3].answer[0].item[0].item[1].definition, "http://loinc.org/fhir/DataElement/54113-6")
self.assertEqual(inst.item[2].item[8].item[0].item[3].answer[0].item[0].item[1].linkId, "2.1.1.4.1.2")
self.assertEqual(inst.item[2].item[8].item[0].item[3].answer[0].item[0].item[1].text, "Age at Death")
self.assertEqual(inst.item[2].item[8].item[0].item[3].answer[0].item[0].linkId, "2.1.1.4.1")
self.assertEqual(inst.item[2].item[8].item[0].item[3].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[2].item[8].item[0].item[3].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[2].item[8].item[0].item[3].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[8].item[0].item[3].definition, "http://loinc.org/fhir/DataElement/54139-1")
self.assertEqual(inst.item[2].item[8].item[0].item[3].linkId, "2.1.1.4")
self.assertEqual(inst.item[2].item[8].item[0].item[3].text, "Living?")
self.assertEqual(inst.item[2].item[8].item[0].item[4].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[2].item[8].item[0].item[4].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[2].item[8].item[0].item[4].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[8].item[0].item[4].definition, "http://loinc.org/fhir/DataElement/54121-9")
self.assertEqual(inst.item[2].item[8].item[0].item[4].linkId, "2.1.1.5")
self.assertEqual(inst.item[2].item[8].item[0].item[4].text, "Was this person born a twin?")
self.assertEqual(inst.item[2].item[8].item[0].item[5].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[2].item[8].item[0].item[5].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[2].item[8].item[0].item[5].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[8].item[0].item[5].definition, "http://loinc.org/fhir/DataElement/54122-7")
self.assertEqual(inst.item[2].item[8].item[0].item[5].linkId, "2.1.1.6")
self.assertEqual(inst.item[2].item[8].item[0].item[5].text, "Was this person adopted?")
self.assertEqual(inst.item[2].item[8].item[0].linkId, "2.1.1")
self.assertEqual(inst.item[2].item[8].item[1].item[0].answer[0].valueCoding.code, "LA10537-1")
self.assertEqual(inst.item[2].item[8].item[1].item[0].answer[0].valueCoding.display, "-- Colon Cancer")
self.assertEqual(inst.item[2].item[8].item[1].item[0].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[8].item[1].item[0].linkId, "2.1.2.1")
self.assertEqual(inst.item[2].item[8].item[1].item[0].text, "Disease or Condition")
self.assertEqual(inst.item[2].item[8].item[1].item[1].answer[0].valueCoding.code, "LA10400-2")
self.assertEqual(inst.item[2].item[8].item[1].item[1].answer[0].valueCoding.display, "OVER 60")
self.assertEqual(inst.item[2].item[8].item[1].item[1].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[8].item[1].item[1].linkId, "2.1.2.2")
self.assertEqual(inst.item[2].item[8].item[1].item[1].text, "Age at Diagnosis")
self.assertEqual(inst.item[2].item[8].item[1].linkId, "2.1.2")
self.assertEqual(inst.item[2].item[8].item[1].text, "This family member's history of disease")
self.assertEqual(inst.item[2].item[8].linkId, "2.1")
self.assertEqual(inst.item[2].item[9].item[0].item[0].answer[0].valueCoding.code, "LA10424-2")
self.assertEqual(inst.item[2].item[9].item[0].item[0].answer[0].valueCoding.display, "Paternal Grandmother")
self.assertEqual(inst.item[2].item[9].item[0].item[0].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[9].item[0].item[0].definition, "http://loinc.org/fhir/DataElement/54136-7")
self.assertEqual(inst.item[2].item[9].item[0].item[0].linkId, "2.1.1.1")
self.assertEqual(inst.item[2].item[9].item[0].item[0].text, "Relationship to you")
self.assertEqual(inst.item[2].item[9].item[0].item[1].answer[0].valueString, "Claire")
self.assertEqual(inst.item[2].item[9].item[0].item[1].definition, "http://loinc.org/fhir/DataElement/54138-3")
self.assertEqual(inst.item[2].item[9].item[0].item[1].linkId, "2.1.1.2")
self.assertEqual(inst.item[2].item[9].item[0].item[1].text, "Name")
self.assertEqual(inst.item[2].item[9].item[0].item[2].answer[0].valueCoding.code, "LA3-6")
self.assertEqual(inst.item[2].item[9].item[0].item[2].answer[0].valueCoding.display, "Female")
self.assertEqual(inst.item[2].item[9].item[0].item[2].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[9].item[0].item[2].definition, "http://loinc.org/fhir/DataElement/54123-5")
self.assertEqual(inst.item[2].item[9].item[0].item[2].linkId, "2.1.1.3")
self.assertEqual(inst.item[2].item[9].item[0].item[2].text, "Gender")
self.assertEqual(inst.item[2].item[9].item[0].item[3].answer[0].item[0].item[0].answer[0].item[0].answer[0].valueString, "Lou Gehrigs")
self.assertEqual(inst.item[2].item[9].item[0].item[3].answer[0].item[0].item[0].answer[0].item[0].linkId, "2.1.1.4.1.1.1")
self.assertEqual(inst.item[2].item[9].item[0].item[3].answer[0].item[0].item[0].answer[0].item[0].text, "Please specify")
self.assertEqual(inst.item[2].item[9].item[0].item[3].answer[0].item[0].item[0].answer[0].valueCoding.code, "LA10589-2")
self.assertEqual(inst.item[2].item[9].item[0].item[3].answer[0].item[0].item[0].answer[0].valueCoding.display, "-- Other/Unexpected")
self.assertEqual(inst.item[2].item[9].item[0].item[3].answer[0].item[0].item[0].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[9].item[0].item[3].answer[0].item[0].item[0].definition, "http://loinc.org/fhir/DataElement/54112-8")
self.assertEqual(inst.item[2].item[9].item[0].item[3].answer[0].item[0].item[0].linkId, "2.1.1.4.1.1")
self.assertEqual(inst.item[2].item[9].item[0].item[3].answer[0].item[0].item[0].text, "Cause of Death")
self.assertEqual(inst.item[2].item[9].item[0].item[3].answer[0].item[0].item[1].answer[0].valueCoding.code, "LA10400-2")
self.assertEqual(inst.item[2].item[9].item[0].item[3].answer[0].item[0].item[1].answer[0].valueCoding.display, "OVER 60")
self.assertEqual(inst.item[2].item[9].item[0].item[3].answer[0].item[0].item[1].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[9].item[0].item[3].answer[0].item[0].item[1].definition, "http://loinc.org/fhir/DataElement/54113-6")
self.assertEqual(inst.item[2].item[9].item[0].item[3].answer[0].item[0].item[1].linkId, "2.1.1.4.1.2")
self.assertEqual(inst.item[2].item[9].item[0].item[3].answer[0].item[0].item[1].text, "Age at Death")
self.assertEqual(inst.item[2].item[9].item[0].item[3].answer[0].item[0].linkId, "2.1.1.4.1")
self.assertEqual(inst.item[2].item[9].item[0].item[3].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[2].item[9].item[0].item[3].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[2].item[9].item[0].item[3].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[9].item[0].item[3].definition, "http://loinc.org/fhir/DataElement/54139-1")
self.assertEqual(inst.item[2].item[9].item[0].item[3].linkId, "2.1.1.4")
self.assertEqual(inst.item[2].item[9].item[0].item[3].text, "Living?")
self.assertEqual(inst.item[2].item[9].item[0].item[4].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[2].item[9].item[0].item[4].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[2].item[9].item[0].item[4].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[9].item[0].item[4].definition, "http://loinc.org/fhir/DataElement/54121-9")
self.assertEqual(inst.item[2].item[9].item[0].item[4].linkId, "2.1.1.5")
self.assertEqual(inst.item[2].item[9].item[0].item[4].text, "Was this person born a twin?")
self.assertEqual(inst.item[2].item[9].item[0].item[5].answer[0].valueCoding.code, "LA32-8")
self.assertEqual(inst.item[2].item[9].item[0].item[5].answer[0].valueCoding.display, "No")
self.assertEqual(inst.item[2].item[9].item[0].item[5].answer[0].valueCoding.system, "http://loinc.org")
self.assertEqual(inst.item[2].item[9].item[0].item[5].definition, "http://loinc.org/fhir/DataElement/54122-7")
self.assertEqual(inst.item[2].item[9].item[0].item[5].linkId, "2.1.1.6")
self.assertEqual(inst.item[2].item[9].item[0].item[5].text, "Was this person adopted?")
self.assertEqual(inst.item[2].item[9].item[0].linkId, "2.1.1")
self.assertEqual(inst.item[2].item[9].linkId, "2.1")
self.assertEqual(inst.item[2].linkId, "2")
self.assertEqual(inst.item[2].text, "Family member health information")
self.assertEqual(inst.status, "in-progress")
self.assertEqual(inst.text.status, "generated")
def testQuestionnaireResponse5(self):
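# Round-trip check: parse the example JSON, verify its fields, serialize back to JSON, re-parse, and verify again.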
inst = self.instantiate_from("questionnaireresponse-example.json")
self.assertIsNotNone(inst, "Must have instantiated a QuestionnaireResponse instance")
self.implQuestionnaireResponse5(inst)
js = inst.as_json()
self.assertEqual("QuestionnaireResponse", js["resourceType"])
inst2 = questionnaireresponse.QuestionnaireResponse(js)
self.implQuestionnaireResponse5(inst2)
def implQuestionnaireResponse5(self, inst):
self.assertEqual(inst.authored.date, FHIRDate("2013-02-19T14:15:00-05:00").date)
self.assertEqual(inst.authored.as_json(), "2013-02-19T14:15:00-05:00")
self.assertEqual(inst.contained[0].id, "patsub")
self.assertEqual(inst.contained[1].id, "order")
self.assertEqual(inst.contained[2].id, "questauth")
self.assertEqual(inst.id, "3141")
self.assertEqual(inst.identifier.system, "http://example.org/fhir/NamingSystem/questionnaire-ids")
self.assertEqual(inst.identifier.value, "Q12349876")
self.assertEqual(inst.item[0].item[0].answer[0].item[0].item[0].answer[0].valueCoding.code, "1")
self.assertEqual(inst.item[0].item[0].answer[0].item[0].item[0].answer[0].valueCoding.system, "http://cancer.questionnaire.org/system/code/yesno")
self.assertEqual(inst.item[0].item[0].answer[0].item[0].item[0].linkId, "1.1.1.1")
self.assertEqual(inst.item[0].item[0].answer[0].item[0].item[1].answer[0].valueCoding.code, "1")
self.assertEqual(inst.item[0].item[0].answer[0].item[0].item[1].answer[0].valueCoding.system, "http://cancer.questionnaire.org/system/code/yesno")
self.assertEqual(inst.item[0].item[0].answer[0].item[0].item[1].linkId, "1.1.1.2")
self.assertEqual(inst.item[0].item[0].answer[0].item[0].item[2].answer[0].valueCoding.code, "0")
self.assertEqual(inst.item[0].item[0].answer[0].item[0].item[2].answer[0].valueCoding.system, "http://cancer.questionnaire.org/system/code/yesno")
self.assertEqual(inst.item[0].item[0].answer[0].item[0].item[2].linkId, "1.1.1.3")
self.assertEqual(inst.item[0].item[0].answer[0].item[0].linkId, "1.1.1")
self.assertEqual(inst.item[0].item[0].answer[0].valueCoding.code, "1")
self.assertEqual(inst.item[0].item[0].answer[0].valueCoding.display, "Yes")
self.assertEqual(inst.item[0].item[0].answer[0].valueCoding.system, "http://cancer.questionnaire.org/system/code/yesno")
self.assertEqual(inst.item[0].item[0].linkId, "1.1")
self.assertEqual(inst.item[0].linkId, "1")
self.assertEqual(inst.status, "completed")
self.assertEqual(inst.text.status, "generated")
| all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/questionnaireresponse_tests.py | Python | bsd-3-clause | 68,284 | ["Brian"] | b628f2b6275b310f9fc609318486f80e543b8e65897fe8cf202f16ca3e61774d |
"""
Tests for enthought/tvtk/misc.py
"""
# Author: Prabhu Ramachandran <prabhu@aero.iitb.ac.in>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
import unittest
import tempfile
import os.path
import os
from tvtk.api import tvtk, write_data
class TestMisc(unittest.TestCase):
def setUp(self):
datasets = [tvtk.ImageData(),
tvtk.StructuredPoints(),
tvtk.RectilinearGrid(),
tvtk.StructuredGrid(),
tvtk.PolyData(),
tvtk.UnstructuredGrid(),
]
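# File extensions expected for each of the datasets above, in the same order.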
exts = ['.vti', '.vti', '.vtr', '.vts', '.vtp', '.vtu']
self.datasets = datasets
self.exts = exts
def test_write_data_xml_noext(self):
"XML file writing without extensions"
# Check if write_data writes out XML files with the correct
# extension when none is specified.
datasets = self.datasets
exts = self.exts
for d, ext in zip(datasets, exts):
fh, fname = tempfile.mkstemp(ext)
fbase = os.path.splitext(fname)[0]
os.close(fh)
os.remove(fname)
write_data(d, fbase)
self.assertEqual(os.path.exists(fname), True)
os.remove(fname)
def test_write_data_xml(self):
"XML file writing with specified extension"
datasets = self.datasets
for d in datasets:
fh, fname = tempfile.mkstemp('.xml')
os.close(fh)
os.remove(fname)
self.assertEqual(os.path.exists(fname), False)
write_data(d, fname)
self.assertEqual(os.path.exists(fname), True)
os.remove(fname)
def test_write_data_xml_kwargs(self):
"XML file writing with extra keyword arguments"
datasets = self.datasets
exts = self.exts
for d, ext in zip(datasets, exts):
fh, fname = tempfile.mkstemp(ext)
fbase = os.path.splitext(fname)[0]
os.close(fh)
os.remove(fname)
# Test if passing extra keyword args is supported.
write_data(d, fbase, compressor=None, data_mode='ascii')
self.assertEqual(os.path.exists(fname), True)
os.remove(fname)
def test_write_data_vtk(self):
"Old-style VTK file writing with specified extension"
datasets = self.datasets
for d in datasets:
fh, fname = tempfile.mkstemp('.vtk')
os.close(fh)
os.remove(fname)
self.assertEqual(os.path.exists(fname), False)
write_data(d, fname)
self.assertEqual(os.path.exists(fname), True)
r = tvtk.DataSetReader(file_name=fname)
r.update()
self.assertEqual(isinstance(r.output, d.__class__), True)
os.remove(fname)
if __name__ == '__main__':
unittest.main()
| dmsurti/mayavi | tvtk/tests/test_misc.py | Python | bsd-3-clause | 2,910 | ["VTK"] | d742f92f9acfeb508a90f3712dda7570ce42851f9ce9e90494d824715a717201 |
# -*- encoding: utf-8
from sqlalchemy.testing import eq_, engines, pickleable
import datetime
import os
from sqlalchemy import *
from sqlalchemy import types, exc, schema, event
from sqlalchemy.orm import *
from sqlalchemy.sql import table, column
from sqlalchemy.databases import mssql
from sqlalchemy.dialects.mssql import pyodbc, mxodbc, pymssql
from sqlalchemy.dialects.mssql.base import TIME
from sqlalchemy.engine import url
from sqlalchemy.testing import fixtures, AssertsCompiledSQL, \
AssertsExecutionResults, ComparesTables
from sqlalchemy import testing
from sqlalchemy.testing import emits_warning_on, assert_raises_message
import decimal
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.util.compat import b
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = mssql.dialect()
def test_select(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(t.select(),
'SELECT sometable.somecolumn FROM sometable')
def test_select_with_nolock(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(t.select().with_hint(t, 'WITH (NOLOCK)'),
'SELECT sometable.somecolumn FROM sometable WITH (NOLOCK)')
def test_join_with_hint(self):
t1 = table('t1',
column('a', Integer),
column('b', String),
column('c', String),
)
t2 = table('t2',
column("a", Integer),
column("b", Integer),
column("c", Integer),
)
join = t1.join(t2, t1.c.a==t2.c.a).\
select().with_hint(t1, 'WITH (NOLOCK)')
self.assert_compile(
join,
'SELECT t1.a, t1.b, t1.c, t2.a, t2.b, t2.c '
'FROM t1 WITH (NOLOCK) JOIN t2 ON t1.a = t2.a'
)
def test_insert(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(t.insert(),
'INSERT INTO sometable (somecolumn) VALUES '
'(:somecolumn)')
def test_update(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(t.update(t.c.somecolumn == 7),
'UPDATE sometable SET somecolumn=:somecolum'
'n WHERE sometable.somecolumn = '
':somecolumn_1', dict(somecolumn=10))
def test_insert_hint(self):
t = table('sometable', column('somecolumn'))
for targ in (None, t):
for darg in ("*", "mssql"):
self.assert_compile(
t.insert().
values(somecolumn="x").
with_hint("WITH (PAGLOCK)",
selectable=targ,
dialect_name=darg),
"INSERT INTO sometable WITH (PAGLOCK) "
"(somecolumn) VALUES (:somecolumn)"
)
def test_update_hint(self):
t = table('sometable', column('somecolumn'))
for targ in (None, t):
for darg in ("*", "mssql"):
self.assert_compile(
t.update().where(t.c.somecolumn=="q").
values(somecolumn="x").
with_hint("WITH (PAGLOCK)",
selectable=targ,
dialect_name=darg),
"UPDATE sometable WITH (PAGLOCK) "
"SET somecolumn=:somecolumn "
"WHERE sometable.somecolumn = :somecolumn_1"
)
def test_update_exclude_hint(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(
t.update().where(t.c.somecolumn=="q").
values(somecolumn="x").
with_hint("XYZ", "mysql"),
"UPDATE sometable SET somecolumn=:somecolumn "
"WHERE sometable.somecolumn = :somecolumn_1"
)
def test_delete_hint(self):
t = table('sometable', column('somecolumn'))
for targ in (None, t):
for darg in ("*", "mssql"):
self.assert_compile(
t.delete().where(t.c.somecolumn=="q").
with_hint("WITH (PAGLOCK)",
selectable=targ,
dialect_name=darg),
"DELETE FROM sometable WITH (PAGLOCK) "
"WHERE sometable.somecolumn = :somecolumn_1"
)
def test_delete_exclude_hint(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(
t.delete().\
where(t.c.somecolumn=="q").\
with_hint("XYZ", dialect_name="mysql"),
"DELETE FROM sometable WHERE "
"sometable.somecolumn = :somecolumn_1"
)
def test_update_from_hint(self):
t = table('sometable', column('somecolumn'))
t2 = table('othertable', column('somecolumn'))
for darg in ("*", "mssql"):
self.assert_compile(
t.update().where(t.c.somecolumn==t2.c.somecolumn).
values(somecolumn="x").
with_hint("WITH (PAGLOCK)",
selectable=t2,
dialect_name=darg),
"UPDATE sometable SET somecolumn=:somecolumn "
"FROM sometable, othertable WITH (PAGLOCK) "
"WHERE sometable.somecolumn = othertable.somecolumn"
)
# TODO: not supported yet.
#def test_delete_from_hint(self):
# t = table('sometable', column('somecolumn'))
# t2 = table('othertable', column('somecolumn'))
# for darg in ("*", "mssql"):
# self.assert_compile(
# t.delete().where(t.c.somecolumn==t2.c.somecolumn).
# with_hint("WITH (PAGLOCK)",
# selectable=t2,
# dialect_name=darg),
# ""
# )
def test_strict_binds(self):
"""test the 'strict' compiler binds."""
from sqlalchemy.dialects.mssql.base import MSSQLStrictCompiler
mxodbc_dialect = mxodbc.dialect()
mxodbc_dialect.statement_compiler = MSSQLStrictCompiler
t = table('sometable', column('foo'))
for expr, compile in [
(
select([literal("x"), literal("y")]),
"SELECT 'x' AS anon_1, 'y' AS anon_2",
),
(
select([t]).where(t.c.foo.in_(['x', 'y', 'z'])),
"SELECT sometable.foo FROM sometable WHERE sometable.foo "
"IN ('x', 'y', 'z')",
),
(
t.c.foo.in_([None]),
"sometable.foo IN (NULL)"
)
]:
self.assert_compile(expr, compile, dialect=mxodbc_dialect)
def test_in_with_subqueries(self):
"""Test removal of legacy behavior that converted "x==subquery"
to use IN.
"""
t = table('sometable', column('somecolumn'))
self.assert_compile(t.select().where(t.c.somecolumn
== t.select()),
'SELECT sometable.somecolumn FROM '
'sometable WHERE sometable.somecolumn = '
'(SELECT sometable.somecolumn FROM '
'sometable)')
self.assert_compile(t.select().where(t.c.somecolumn
!= t.select()),
'SELECT sometable.somecolumn FROM '
'sometable WHERE sometable.somecolumn != '
'(SELECT sometable.somecolumn FROM '
'sometable)')
def test_count(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(t.count(),
'SELECT count(sometable.somecolumn) AS '
'tbl_row_count FROM sometable')
def test_noorderby_insubquery(self):
"""test that the ms-sql dialect removes ORDER BY clauses from
subqueries"""
table1 = table('mytable',
column('myid', Integer),
column('name', String),
column('description', String),
)
q = select([table1.c.myid],
order_by=[table1.c.myid]).alias('foo')
crit = q.c.myid == table1.c.myid
self.assert_compile(select(['*'], crit),
"SELECT * FROM (SELECT mytable.myid AS "
"myid FROM mytable) AS foo, mytable WHERE "
"foo.myid = mytable.myid")
def test_delete_schema(self):
metadata = MetaData()
tbl = Table('test', metadata, Column('id', Integer,
primary_key=True), schema='paj')
self.assert_compile(tbl.delete(tbl.c.id == 1),
'DELETE FROM paj.test WHERE paj.test.id = '
':id_1')
s = select([tbl.c.id]).where(tbl.c.id == 1)
self.assert_compile(tbl.delete().where(tbl.c.id.in_(s)),
'DELETE FROM paj.test WHERE paj.test.id IN '
'(SELECT test_1.id FROM paj.test AS test_1 '
'WHERE test_1.id = :id_1)')
def test_delete_schema_multipart(self):
metadata = MetaData()
tbl = Table('test', metadata, Column('id', Integer,
primary_key=True), schema='banana.paj')
self.assert_compile(tbl.delete(tbl.c.id == 1),
'DELETE FROM banana.paj.test WHERE '
'banana.paj.test.id = :id_1')
s = select([tbl.c.id]).where(tbl.c.id == 1)
self.assert_compile(tbl.delete().where(tbl.c.id.in_(s)),
'DELETE FROM banana.paj.test WHERE '
'banana.paj.test.id IN (SELECT test_1.id '
'FROM banana.paj.test AS test_1 WHERE '
'test_1.id = :id_1)')
def test_delete_schema_multipart_needs_quoting(self):
metadata = MetaData()
tbl = Table('test', metadata, Column('id', Integer,
primary_key=True), schema='banana split.paj')
self.assert_compile(tbl.delete(tbl.c.id == 1),
'DELETE FROM [banana split].paj.test WHERE '
'[banana split].paj.test.id = :id_1')
s = select([tbl.c.id]).where(tbl.c.id == 1)
self.assert_compile(tbl.delete().where(tbl.c.id.in_(s)),
'DELETE FROM [banana split].paj.test WHERE '
'[banana split].paj.test.id IN (SELECT '
'test_1.id FROM [banana split].paj.test AS '
'test_1 WHERE test_1.id = :id_1)')
def test_delete_schema_multipart_both_need_quoting(self):
metadata = MetaData()
tbl = Table('test', metadata, Column('id', Integer,
primary_key=True),
schema='banana split.paj with a space')
self.assert_compile(tbl.delete(tbl.c.id == 1),
'DELETE FROM [banana split].[paj with a '
'space].test WHERE [banana split].[paj '
'with a space].test.id = :id_1')
s = select([tbl.c.id]).where(tbl.c.id == 1)
self.assert_compile(tbl.delete().where(tbl.c.id.in_(s)),
'DELETE FROM [banana split].[paj with a '
'space].test WHERE [banana split].[paj '
'with a space].test.id IN (SELECT '
'test_1.id FROM [banana split].[paj with a '
'space].test AS test_1 WHERE test_1.id = '
':id_1)')
def test_union(self):
t1 = table('t1', column('col1'), column('col2'), column('col3'
), column('col4'))
t2 = table('t2', column('col1'), column('col2'), column('col3'
), column('col4'))
s1, s2 = select([t1.c.col3.label('col3'), t1.c.col4.label('col4'
)], t1.c.col2.in_(['t1col2r1', 't1col2r2'])), \
select([t2.c.col3.label('col3'), t2.c.col4.label('col4')],
t2.c.col2.in_(['t2col2r2', 't2col2r3']))
u = union(s1, s2, order_by=['col3', 'col4'])
self.assert_compile(u,
'SELECT t1.col3 AS col3, t1.col4 AS col4 '
'FROM t1 WHERE t1.col2 IN (:col2_1, '
':col2_2) UNION SELECT t2.col3 AS col3, '
't2.col4 AS col4 FROM t2 WHERE t2.col2 IN '
'(:col2_3, :col2_4) ORDER BY col3, col4')
self.assert_compile(u.alias('bar').select(),
'SELECT bar.col3, bar.col4 FROM (SELECT '
't1.col3 AS col3, t1.col4 AS col4 FROM t1 '
'WHERE t1.col2 IN (:col2_1, :col2_2) UNION '
'SELECT t2.col3 AS col3, t2.col4 AS col4 '
'FROM t2 WHERE t2.col2 IN (:col2_3, '
':col2_4)) AS bar')
def test_function(self):
self.assert_compile(func.foo(1, 2), 'foo(:foo_1, :foo_2)')
self.assert_compile(func.current_time(), 'CURRENT_TIME')
self.assert_compile(func.foo(), 'foo()')
m = MetaData()
t = Table('sometable', m, Column('col1', Integer), Column('col2'
, Integer))
self.assert_compile(select([func.max(t.c.col1)]),
'SELECT max(sometable.col1) AS max_1 FROM '
'sometable')
def test_function_overrides(self):
self.assert_compile(func.current_date(), "GETDATE()")
self.assert_compile(func.length(3), "LEN(:length_1)")
def test_extract(self):
t = table('t', column('col1'))
for field in 'day', 'month', 'year':
self.assert_compile(
select([extract(field, t.c.col1)]),
'SELECT DATEPART("%s", t.col1) AS anon_1 FROM t' % field)
def test_update_returning(self):
table1 = table('mytable', column('myid', Integer), column('name'
, String(128)), column('description',
String(128)))
u = update(table1, values=dict(name='foo'
)).returning(table1.c.myid, table1.c.name)
self.assert_compile(u,
'UPDATE mytable SET name=:name OUTPUT '
'inserted.myid, inserted.name')
u = update(table1, values=dict(name='foo')).returning(table1)
self.assert_compile(u,
'UPDATE mytable SET name=:name OUTPUT '
'inserted.myid, inserted.name, '
'inserted.description')
u = update(table1, values=dict(name='foo'
)).returning(table1).where(table1.c.name == 'bar')
self.assert_compile(u,
'UPDATE mytable SET name=:name OUTPUT '
'inserted.myid, inserted.name, '
'inserted.description WHERE mytable.name = '
':name_1')
u = update(table1, values=dict(name='foo'
)).returning(func.length(table1.c.name))
self.assert_compile(u,
'UPDATE mytable SET name=:name OUTPUT '
'LEN(inserted.name) AS length_1')
def test_delete_returning(self):
table1 = table('mytable', column('myid', Integer), column('name'
, String(128)), column('description',
String(128)))
d = delete(table1).returning(table1.c.myid, table1.c.name)
self.assert_compile(d,
'DELETE FROM mytable OUTPUT deleted.myid, '
'deleted.name')
d = delete(table1).where(table1.c.name == 'bar'
).returning(table1.c.myid,
table1.c.name)
self.assert_compile(d,
'DELETE FROM mytable OUTPUT deleted.myid, '
'deleted.name WHERE mytable.name = :name_1')
def test_insert_returning(self):
table1 = table('mytable', column('myid', Integer), column('name'
, String(128)), column('description',
String(128)))
i = insert(table1, values=dict(name='foo'
)).returning(table1.c.myid, table1.c.name)
self.assert_compile(i,
'INSERT INTO mytable (name) OUTPUT '
'inserted.myid, inserted.name VALUES '
'(:name)')
i = insert(table1, values=dict(name='foo')).returning(table1)
self.assert_compile(i,
'INSERT INTO mytable (name) OUTPUT '
'inserted.myid, inserted.name, '
'inserted.description VALUES (:name)')
i = insert(table1, values=dict(name='foo'
)).returning(func.length(table1.c.name))
self.assert_compile(i,
'INSERT INTO mytable (name) OUTPUT '
'LEN(inserted.name) AS length_1 VALUES '
'(:name)')
def test_limit_using_top(self):
t = table('t', column('x', Integer), column('y', Integer))
s = select([t]).where(t.c.x==5).order_by(t.c.y).limit(10)
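# With a LIMIT and no OFFSET, the dialect renders TOP rather than a window function.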
self.assert_compile(
s,
"SELECT TOP 10 t.x, t.y FROM t WHERE t.x = :x_1 ORDER BY t.y",
checkparams={u'x_1': 5}
)
def test_limit_zero_using_top(self):
t = table('t', column('x', Integer), column('y', Integer))
s = select([t]).where(t.c.x==5).order_by(t.c.y).limit(0)
self.assert_compile(
s,
"SELECT TOP 0 t.x, t.y FROM t WHERE t.x = :x_1 ORDER BY t.y",
checkparams={u'x_1': 5}
)
def test_offset_using_window(self):
t = table('t', column('x', Integer), column('y', Integer))
s = select([t]).where(t.c.x==5).order_by(t.c.y).offset(20)
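# OFFSET has no direct TOP equivalent, so the dialect wraps the query in a ROW_NUMBER() OVER (ORDER BY ...) subquery and filters on the row number.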
# test that the select is not altered with subsequent compile
# calls
for i in xrange(2):
self.assert_compile(
s,
"SELECT anon_1.x, anon_1.y FROM (SELECT t.x AS x, t.y "
"AS y, ROW_NUMBER() OVER (ORDER BY t.y) AS "
"mssql_rn FROM t WHERE t.x = :x_1) AS "
"anon_1 WHERE mssql_rn > :mssql_rn_1",
checkparams={u'mssql_rn_1': 20, u'x_1': 5}
)
def test_limit_offset_using_window(self):
t = table('t', column('x', Integer), column('y', Integer))
s = select([t]).where(t.c.x==5).order_by(t.c.y).limit(10).offset(20)
self.assert_compile(
s,
"SELECT anon_1.x, anon_1.y "
"FROM (SELECT t.x AS x, t.y AS y, "
"ROW_NUMBER() OVER (ORDER BY t.y) AS mssql_rn "
"FROM t "
"WHERE t.x = :x_1) AS anon_1 "
"WHERE mssql_rn > :mssql_rn_1 AND mssql_rn <= :mssql_rn_2",
checkparams={u'mssql_rn_1': 20, u'mssql_rn_2': 30, u'x_1': 5}
)
def test_limit_offset_with_correlated_order_by(self):
t1 = table('t1', column('x', Integer), column('y', Integer))
t2 = table('t2', column('x', Integer), column('y', Integer))
order_by = select([t2.c.y]).where(t1.c.x == t2.c.x).as_scalar()
s = select([t1]).where(t1.c.x == 5).order_by(order_by) \
.limit(10).offset(20)
self.assert_compile(
s,
"SELECT anon_1.x, anon_1.y "
"FROM (SELECT t1.x AS x, t1.y AS y, "
"ROW_NUMBER() OVER (ORDER BY "
"(SELECT t2.y FROM t2 WHERE t1.x = t2.x)"
") AS mssql_rn "
"FROM t1 "
"WHERE t1.x = :x_1) AS anon_1 "
"WHERE mssql_rn > :mssql_rn_1 AND mssql_rn <= :mssql_rn_2",
checkparams={u'mssql_rn_1': 20, u'mssql_rn_2': 30, u'x_1': 5}
)
def test_limit_zero_offset_using_window(self):
t = table('t', column('x', Integer), column('y', Integer))
s = select([t]).where(t.c.x==5).order_by(t.c.y).limit(0).offset(0)
# render the LIMIT of zero, but not the OFFSET
# of zero, so produces TOP 0
self.assert_compile(
s,
"SELECT TOP 0 t.x, t.y FROM t "
"WHERE t.x = :x_1 ORDER BY t.y",
checkparams={u'x_1': 5}
)
def test_sequence_start_0(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, Sequence('', 0), primary_key=True))
self.assert_compile(schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL IDENTITY(0,1), "
"PRIMARY KEY (id))"
)
def test_sequence_non_primary_key(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, Sequence(''), primary_key=False))
self.assert_compile(schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL IDENTITY(1,1))"
)
def test_sequence_ignore_nullability(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, Sequence(''), nullable=True))
self.assert_compile(schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL IDENTITY(1,1))"
)
def test_index_clustering(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer))
idx = Index("foo", tbl.c.id, mssql_clustered=True)
self.assert_compile(schema.CreateIndex(idx),
"CREATE CLUSTERED INDEX foo ON test (id)"
)
def test_index_ordering(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('x', Integer), Column('y', Integer), Column('z', Integer))
idx = Index("foo", tbl.c.x.desc(), "y")
self.assert_compile(schema.CreateIndex(idx),
"CREATE INDEX foo ON test (x DESC, y)"
)
def test_index_extra_include_1(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('x', Integer), Column('y', Integer), Column('z', Integer))
idx = Index("foo", tbl.c.x, mssql_include=['y'])
self.assert_compile(schema.CreateIndex(idx),
"CREATE INDEX foo ON test (x) INCLUDE (y)"
)
def test_index_extra_include_2(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('x', Integer), Column('y', Integer), Column('z', Integer))
idx = Index("foo", tbl.c.x, mssql_include=[tbl.c.y])
self.assert_compile(schema.CreateIndex(idx),
"CREATE INDEX foo ON test (x) INCLUDE (y)"
)
class SchemaAliasingTest(fixtures.TestBase, AssertsCompiledSQL):
"""SQL server cannot reference schema-qualified tables in a SELECT statement, they
must be aliased.
"""
__dialect__ = mssql.dialect()
def setup(self):
metadata = MetaData()
self.t1 = table('t1',
column('a', Integer),
column('b', String),
column('c', String),
)
self.t2 = Table(
't2', metadata,
Column("a", Integer),
Column("b", Integer),
Column("c", Integer),
schema = 'schema'
)
def test_result_map(self):
s = self.t2.select()
c = s.compile(dialect=self.__dialect__)
assert self.t2.c.a in set(c.result_map['a'][1])
def test_result_map_use_labels(self):
s = self.t2.select(use_labels=True)
c = s.compile(dialect=self.__dialect__)
assert self.t2.c.a in set(c.result_map['schema_t2_a'][1])
def test_straight_select(self):
self.assert_compile(self.t2.select(),
"SELECT t2_1.a, t2_1.b, t2_1.c FROM [schema].t2 AS t2_1"
)
def test_straight_select_use_labels(self):
self.assert_compile(
self.t2.select(use_labels=True),
"SELECT t2_1.a AS schema_t2_a, t2_1.b AS schema_t2_b, "
"t2_1.c AS schema_t2_c FROM [schema].t2 AS t2_1"
)
def test_join_to_schema(self):
t1, t2 = self.t1, self.t2
self.assert_compile(
t1.join(t2, t1.c.a==t2.c.a).select(),
"SELECT t1.a, t1.b, t1.c, t2_1.a, t2_1.b, t2_1.c FROM t1 "
"JOIN [schema].t2 AS t2_1 ON t2_1.a = t1.a"
)
def test_union_schema_to_non(self):
t1, t2 = self.t1, self.t2
s = select([t2.c.a, t2.c.b]).apply_labels().\
union(
select([t1.c.a, t1.c.b]).apply_labels()
).alias().select()
self.assert_compile(
s,
"SELECT anon_1.schema_t2_a, anon_1.schema_t2_b FROM "
"(SELECT t2_1.a AS schema_t2_a, t2_1.b AS schema_t2_b "
"FROM [schema].t2 AS t2_1 UNION SELECT t1.a AS t1_a, "
"t1.b AS t1_b FROM t1) AS anon_1"
)
def test_column_subquery_to_alias(self):
a1 = self.t2.alias('a1')
s = select([self.t2, select([a1.c.a]).as_scalar()])
self.assert_compile(
s,
"SELECT t2_1.a, t2_1.b, t2_1.c, "
"(SELECT a1.a FROM [schema].t2 AS a1) "
"AS anon_1 FROM [schema].t2 AS t2_1"
)
class IdentityInsertTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = 'mssql'
__dialect__ = mssql.MSDialect()
@classmethod
def setup_class(cls):
global metadata, cattable
metadata = MetaData(testing.db)
cattable = Table('cattable', metadata,
Column('id', Integer),
Column('description', String(50)),
PrimaryKeyConstraint('id', name='PK_cattable'),
)
def setup(self):
metadata.create_all()
def teardown(self):
metadata.drop_all()
def test_compiled(self):
self.assert_compile(cattable.insert().values(id=9,
description='Python'),
'INSERT INTO cattable (id, description) '
'VALUES (:id, :description)')
def test_execute(self):
cattable.insert().values(id=9, description='Python').execute()
cats = cattable.select().order_by(cattable.c.id).execute()
eq_([(9, 'Python')], list(cats))
result = cattable.insert().values(description='PHP').execute()
eq_([10], result.inserted_primary_key)
lastcat = cattable.select().order_by(desc(cattable.c.id)).execute()
eq_((10, 'PHP'), lastcat.first())
def test_executemany(self):
cattable.insert().execute([{'id': 89, 'description': 'Python'},
{'id': 8, 'description': 'Ruby'},
{'id': 3, 'description': 'Perl'},
{'id': 1, 'description': 'Java'}])
cats = cattable.select().order_by(cattable.c.id).execute()
eq_([(1, 'Java'), (3, 'Perl'), (8, 'Ruby'), (89, 'Python')],
list(cats))
cattable.insert().execute([{'description': 'PHP'},
{'description': 'Smalltalk'}])
lastcats = \
cattable.select().order_by(desc(cattable.c.id)).limit(2).execute()
eq_([(91, 'Smalltalk'), (90, 'PHP')], list(lastcats))
class ReflectionTest(fixtures.TestBase, ComparesTables):
__only_on__ = 'mssql'
@testing.provide_metadata
def test_basic_reflection(self):
meta = self.metadata
users = Table(
'engine_users',
meta,
Column('user_id', types.INT, primary_key=True),
Column('user_name', types.VARCHAR(20), nullable=False),
Column('test1', types.CHAR(5), nullable=False),
Column('test2', types.Float(5), nullable=False),
Column('test3', types.Text),
Column('test4', types.Numeric, nullable=False),
Column('test5', types.DateTime),
Column('parent_user_id', types.Integer,
ForeignKey('engine_users.user_id')),
Column('test6', types.DateTime, nullable=False),
Column('test7', types.Text),
Column('test8', types.LargeBinary),
Column('test_passivedefault2', types.Integer,
server_default='5'),
Column('test9', types.BINARY(100)),
Column('test_numeric', types.Numeric()),
)
addresses = Table(
'engine_email_addresses',
meta,
Column('address_id', types.Integer, primary_key=True),
Column('remote_user_id', types.Integer,
ForeignKey(users.c.user_id)),
Column('email_address', types.String(20)),
)
meta.create_all()
meta2 = MetaData()
reflected_users = Table('engine_users', meta2,
autoload=True,
autoload_with=testing.db)
reflected_addresses = Table('engine_email_addresses',
meta2, autoload=True, autoload_with=testing.db)
self.assert_tables_equal(users, reflected_users)
self.assert_tables_equal(addresses, reflected_addresses)
@testing.provide_metadata
def test_identity(self):
metadata = self.metadata
table = Table(
'identity_test', metadata,
Column('col1', Integer, Sequence('fred', 2, 3), primary_key=True)
)
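# Sequence('fred', 2, 3) supplies the IDENTITY start/increment on MSSQL; reflection below should recover start == 2 and increment == 3.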
table.create()
meta2 = MetaData(testing.db)
table2 = Table('identity_test', meta2, autoload=True)
sequence = isinstance(table2.c['col1'].default, schema.Sequence) \
and table2.c['col1'].default
assert sequence.start == 2
assert sequence.increment == 3
@testing.emits_warning("Did not recognize")
@testing.provide_metadata
def test_skip_types(self):
metadata = self.metadata
testing.db.execute("""
create table foo (id integer primary key, data xml)
""")
t1 = Table('foo', metadata, autoload=True)
assert isinstance(t1.c.id.type, Integer)
assert isinstance(t1.c.data.type, types.NullType)
@testing.provide_metadata
def test_db_qualified_items(self):
metadata = self.metadata
Table('foo', metadata, Column('id', Integer, primary_key=True))
Table('bar', metadata,
Column('id', Integer, primary_key=True),
Column('foo_id', Integer, ForeignKey('foo.id', name="fkfoo"))
)
metadata.create_all()
dbname = testing.db.scalar("select db_name()")
owner = testing.db.scalar("SELECT user_name()")
inspector = inspect(testing.db)
bar_via_db = inspector.get_foreign_keys(
"bar", schema="%s.%s" % (dbname, owner))
eq_(
bar_via_db,
[{
'referred_table': 'foo',
'referred_columns': ['id'],
'referred_schema': 'test.dbo',
'name': 'fkfoo',
'constrained_columns': ['foo_id']}]
)
assert testing.db.has_table("bar", schema="test.dbo")
m2 = MetaData()
Table('bar', m2, schema="test.dbo", autoload=True,
autoload_with=testing.db)
eq_(m2.tables["test.dbo.foo"].schema, "test.dbo")
@testing.provide_metadata
def test_indexes_cols(self):
metadata = self.metadata
t1 = Table('t', metadata, Column('x', Integer), Column('y', Integer))
Index('foo', t1.c.x, t1.c.y)
metadata.create_all()
m2 = MetaData()
t2 = Table('t', m2, autoload=True, autoload_with=testing.db)
eq_(
set(list(t2.indexes)[0].columns),
set([t2.c['x'], t2.c.y])
)
@testing.provide_metadata
def test_indexes_cols_with_commas(self):
metadata = self.metadata
t1 = Table('t', metadata,
Column('x, col', Integer, key='x'),
Column('y', Integer)
)
Index('foo', t1.c.x, t1.c.y)
metadata.create_all()
m2 = MetaData()
t2 = Table('t', m2, autoload=True, autoload_with=testing.db)
eq_(
set(list(t2.indexes)[0].columns),
set([t2.c['x, col'], t2.c.y])
)
@testing.provide_metadata
def test_indexes_cols_with_spaces(self):
metadata = self.metadata
t1 = Table('t', metadata, Column('x col', Integer, key='x'),
Column('y', Integer))
Index('foo', t1.c.x, t1.c.y)
metadata.create_all()
m2 = MetaData()
t2 = Table('t', m2, autoload=True, autoload_with=testing.db)
eq_(
set(list(t2.indexes)[0].columns),
set([t2.c['x col'], t2.c.y])
)
class QueryUnicodeTest(fixtures.TestBase):
__only_on__ = 'mssql'
def test_convert_unicode(self):
meta = MetaData(testing.db)
t1 = Table('unitest_table', meta, Column('id', Integer,
primary_key=True), Column('descr',
mssql.MSText(convert_unicode=True)))
meta.create_all()
con = testing.db.connect()
# encode in UTF-8 (string object) because this is the default
# dialect encoding
con.execute(u"insert into unitest_table values ('bien u\
umang\xc3\xa9')".encode('UTF-8'))
try:
r = t1.select().execute().first()
assert isinstance(r[1], unicode), \
'%s is %s instead of unicode, working on %s' % (r[1],
type(r[1]), meta.bind)
finally:
meta.drop_all()
from sqlalchemy.testing.assertsql import ExactSQL
class QueryTest(testing.AssertsExecutionResults, fixtures.TestBase):
__only_on__ = 'mssql'
def test_fetchid_trigger(self):
"""
Verify identity return value on inserting to a trigger table.
MSSQL's OUTPUT INSERTED clause does not work for the
case of a table having an identity (autoincrement)
primary key column, and which also has a trigger configured
to fire upon each insert and subsequently perform an
insert into a different table.
SQLAlchemy's MSSQL dialect by default will attempt to
use an OUTPUT INSERTED clause, which in this case will
raise the following error:
ProgrammingError: (ProgrammingError) ('42000', 334,
"[Microsoft][SQL Server Native Client 10.0][SQL Server]The
target table 't1' of the DML statement cannot have any enabled
triggers if the statement contains an OUTPUT clause without
INTO clause.", 7748) 'INSERT INTO t1 (descr) OUTPUT inserted.id
VALUES (?)' ('hello',)
This test verifies a workaround, which is to rely on the
older SCOPE_IDENTITY() call, which still works for this scenario.
To enable the workaround, the Table must be instantiated
with the init parameter 'implicit_returning = False'.
"""
#todo: this same test needs to be tried in a multithreaded context
# with multiple threads inserting to the same table.
#todo: check whether this error also occurs with clients other
# than the SQL Server Native Client. Maybe an assert_raises
# test should be written.
meta = MetaData(testing.db)
t1 = Table('t1', meta,
Column('id', Integer, Sequence('fred', 100, 1),
primary_key=True),
Column('descr', String(200)),
# the following flag will prevent the
# MSSQLCompiler.returning_clause from getting called,
# though the ExecutionContext will still have a
# _select_lastrowid, so the SELECT SCOPE_IDENTITY() will
# hopefully be called instead.
implicit_returning = False
)
t2 = Table('t2', meta,
Column('id', Integer, Sequence('fred', 200, 1),
primary_key=True),
Column('descr', String(200)))
meta.create_all()
con = testing.db.connect()
con.execute("""create trigger paj on t1 for insert as
insert into t2 (descr) select descr from inserted""")
try:
tr = con.begin()
r = con.execute(t2.insert(), descr='hello')
self.assert_(r.inserted_primary_key == [200])
r = con.execute(t1.insert(), descr='hello')
self.assert_(r.inserted_primary_key == [100])
finally:
tr.commit()
con.execute("""drop trigger paj""")
meta.drop_all()
@testing.provide_metadata
def test_disable_scope_identity(self):
engine = engines.testing_engine(options={"use_scope_identity":False})
metadata = self.metadata
metadata.bind = engine
t1 = Table('t1', metadata,
Column('id', Integer, primary_key=True),
implicit_returning=False
)
metadata.create_all()
self.assert_sql_execution(
testing.db,
lambda: engine.execute(t1.insert()),
ExactSQL("INSERT INTO t1 DEFAULT VALUES"),
# we don't have an event for
# "SELECT @@IDENTITY" part here.
# this will be in 0.8 with #2459
)
assert not engine.dialect.use_scope_identity
def test_insertid_schema(self):
meta = MetaData(testing.db)
con = testing.db.connect()
con.execute('create schema paj')
tbl = Table('test', meta,
Column('id', Integer, primary_key=True), schema='paj')
tbl.create()
try:
tbl.insert().execute({'id':1})
finally:
tbl.drop()
con.execute('drop schema paj')
def test_returning_no_autoinc(self):
meta = MetaData(testing.db)
table = Table('t1', meta, Column('id', Integer,
primary_key=True), Column('data', String(50)))
table.create()
try:
result = table.insert().values(id=1,
data=func.lower('SomeString'
)).returning(table.c.id, table.c.data).execute()
eq_(result.fetchall(), [(1, 'somestring')])
finally:
# this will hang if the "SET IDENTITY_INSERT t1 OFF" occurs
# before the result is fetched
table.drop()
def test_delete_schema(self):
meta = MetaData(testing.db)
con = testing.db.connect()
con.execute('create schema paj')
tbl = Table('test', meta, Column('id', Integer,
primary_key=True), schema='paj')
tbl.create()
try:
tbl.insert().execute({'id': 1})
tbl.delete(tbl.c.id == 1).execute()
finally:
tbl.drop()
con.execute('drop schema paj')
def test_insertid_reserved(self):
meta = MetaData(testing.db)
table = Table(
'select', meta,
Column('col', Integer, primary_key=True)
)
table.create()
meta2 = MetaData(testing.db)
try:
table.insert().execute(col=7)
finally:
table.drop()
class Foo(object):
def __init__(self, **kw):
for k in kw:
setattr(self, k, kw[k])
class GenerativeQueryTest(fixtures.TestBase):
__only_on__ = 'mssql'
@classmethod
def setup_class(cls):
global foo, metadata
metadata = MetaData(testing.db)
foo = Table('foo', metadata,
Column('id', Integer, Sequence('foo_id_seq'),
primary_key=True),
Column('bar', Integer),
Column('range', Integer))
mapper(Foo, foo)
metadata.create_all()
sess = create_session(bind=testing.db)
for i in range(100):
sess.add(Foo(bar=i, range=i%10))
sess.flush()
@classmethod
def teardown_class(cls):
metadata.drop_all()
clear_mappers()
def test_slice_mssql(self):
sess = create_session(bind=testing.db)
query = sess.query(Foo)
orig = query.all()
assert list(query[:10]) == orig[:10]
assert list(query[:10]) == orig[:10]
class SchemaTest(fixtures.TestBase):
def setup(self):
t = Table('sometable', MetaData(),
Column('pk_column', Integer),
Column('test_column', String)
)
self.column = t.c.test_column
dialect = mssql.dialect()
self.ddl_compiler = dialect.ddl_compiler(dialect,
schema.CreateTable(t))
def _column_spec(self):
return self.ddl_compiler.get_column_specification(self.column)
def test_that_mssql_default_nullability_emits_null(self):
eq_("test_column VARCHAR(max) NULL", self._column_spec())
def test_that_mssql_none_nullability_does_not_emit_nullability(self):
self.column.nullable = None
eq_("test_column VARCHAR(max)", self._column_spec())
def test_that_mssql_specified_nullable_emits_null(self):
self.column.nullable = True
eq_("test_column VARCHAR(max) NULL", self._column_spec())
def test_that_mssql_specified_not_nullable_emits_not_null(self):
self.column.nullable = False
eq_("test_column VARCHAR(max) NOT NULL", self._column_spec())
def full_text_search_missing():
"""Test if full text search is not implemented and return False if
it is and True otherwise."""
try:
connection = testing.db.connect()
try:
connection.execute('CREATE FULLTEXT CATALOG Catalog AS '
'DEFAULT')
return False
except:
return True
finally:
connection.close()
class MatchTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = 'mssql'
__skip_if__ = full_text_search_missing,
@classmethod
def setup_class(cls):
global metadata, cattable, matchtable
metadata = MetaData(testing.db)
cattable = Table('cattable', metadata, Column('id', Integer),
Column('description', String(50)),
PrimaryKeyConstraint('id', name='PK_cattable'))
matchtable = Table(
'matchtable',
metadata,
Column('id', Integer),
Column('title', String(200)),
Column('category_id', Integer, ForeignKey('cattable.id')),
PrimaryKeyConstraint('id', name='PK_matchtable'),
)
DDL("""CREATE FULLTEXT INDEX
ON cattable (description)
KEY INDEX PK_cattable""").execute_at('after-create'
, matchtable)
DDL("""CREATE FULLTEXT INDEX
ON matchtable (title)
KEY INDEX PK_matchtable""").execute_at('after-create'
, matchtable)
metadata.create_all()
cattable.insert().execute([{'id': 1, 'description': 'Python'},
{'id': 2, 'description': 'Ruby'}])
matchtable.insert().execute([{'id': 1, 'title'
: 'Agile Web Development with Rails'
, 'category_id': 2}, {'id': 2,
'title': 'Dive Into Python',
'category_id': 1}, {'id': 3, 'title'
: "Programming Matz's Ruby",
'category_id': 2}, {'id': 4, 'title'
: 'The Definitive Guide to Django',
'category_id': 1}, {'id': 5, 'title'
: 'Python in a Nutshell',
'category_id': 1}])
DDL("WAITFOR DELAY '00:00:05'"
).execute(bind=engines.testing_engine())
@classmethod
def teardown_class(cls):
metadata.drop_all()
connection = testing.db.connect()
connection.execute("DROP FULLTEXT CATALOG Catalog")
connection.close()
def test_expression(self):
self.assert_compile(matchtable.c.title.match('somstr'),
'CONTAINS (matchtable.title, ?)')
def test_simple_match(self):
results = \
matchtable.select().where(matchtable.c.title.match('python'
)).order_by(matchtable.c.id).execute().fetchall()
eq_([2, 5], [r.id for r in results])
def test_simple_match_with_apostrophe(self):
results = \
matchtable.select().where(matchtable.c.title.match("Matz's"
)).execute().fetchall()
eq_([3], [r.id for r in results])
def test_simple_prefix_match(self):
results = \
matchtable.select().where(matchtable.c.title.match('"nut*"'
)).execute().fetchall()
eq_([5], [r.id for r in results])
def test_simple_inflectional_match(self):
results = \
matchtable.select().where(
matchtable.c.title.match('FORMSOF(INFLECTIONAL, "dives")'
)).execute().fetchall()
eq_([2], [r.id for r in results])
def test_or_match(self):
results1 = \
matchtable.select().where(or_(matchtable.c.title.match('nutshell'
), matchtable.c.title.match('ruby'
))).order_by(matchtable.c.id).execute().fetchall()
eq_([3, 5], [r.id for r in results1])
results2 = \
matchtable.select().where(
matchtable.c.title.match('nutshell OR ruby'
)).order_by(matchtable.c.id).execute().fetchall()
eq_([3, 5], [r.id for r in results2])
def test_and_match(self):
results1 = \
matchtable.select().where(and_(matchtable.c.title.match('python'
), matchtable.c.title.match('nutshell'
))).execute().fetchall()
eq_([5], [r.id for r in results1])
results2 = \
matchtable.select().where(
matchtable.c.title.match('python AND nutshell'
)).execute().fetchall()
eq_([5], [r.id for r in results2])
def test_match_across_joins(self):
results = matchtable.select().where(and_(cattable.c.id
== matchtable.c.category_id,
or_(cattable.c.description.match('Ruby'),
matchtable.c.title.match('nutshell'
)))).order_by(matchtable.c.id).execute().fetchall()
eq_([1, 3, 5], [r.id for r in results])
class ParseConnectTest(fixtures.TestBase):
def test_pyodbc_connect_dsn_trusted(self):
dialect = pyodbc.dialect()
u = url.make_url('mssql://mydsn')
connection = dialect.create_connect_args(u)
eq_([['dsn=mydsn;Trusted_Connection=Yes'], {}], connection)
def test_pyodbc_connect_old_style_dsn_trusted(self):
dialect = pyodbc.dialect()
u = url.make_url('mssql:///?dsn=mydsn')
connection = dialect.create_connect_args(u)
eq_([['dsn=mydsn;Trusted_Connection=Yes'], {}], connection)
def test_pyodbc_connect_dsn_non_trusted(self):
dialect = pyodbc.dialect()
u = url.make_url('mssql://username:password@mydsn')
connection = dialect.create_connect_args(u)
eq_([['dsn=mydsn;UID=username;PWD=password'], {}], connection)
def test_pyodbc_connect_dsn_extra(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql://username:password@mydsn/?LANGUAGE=us_'
'english&foo=bar')
connection = dialect.create_connect_args(u)
dsn_string = connection[0][0]
assert ";LANGUAGE=us_english" in dsn_string
assert ";foo=bar" in dsn_string
def test_pyodbc_connect(self):
dialect = pyodbc.dialect()
u = url.make_url('mssql://username:password@hostspec/database')
connection = dialect.create_connect_args(u)
eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UI'
'D=username;PWD=password'], {}], connection)
def test_pyodbc_connect_comma_port(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql://username:password@hostspec:12345/data'
'base')
connection = dialect.create_connect_args(u)
eq_([['DRIVER={SQL Server};Server=hostspec,12345;Database=datab'
'ase;UID=username;PWD=password'], {}], connection)
def test_pyodbc_connect_config_port(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql://username:password@hostspec/database?p'
'ort=12345')
connection = dialect.create_connect_args(u)
eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UI'
'D=username;PWD=password;port=12345'], {}], connection)
def test_pyodbc_extra_connect(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql://username:password@hostspec/database?L'
'ANGUAGE=us_english&foo=bar')
connection = dialect.create_connect_args(u)
eq_(connection[1], {})
eq_(connection[0][0]
in ('DRIVER={SQL Server};Server=hostspec;Database=database;'
'UID=username;PWD=password;foo=bar;LANGUAGE=us_english',
'DRIVER={SQL Server};Server=hostspec;Database=database;UID='
'username;PWD=password;LANGUAGE=us_english;foo=bar'), True)
def test_pyodbc_odbc_connect(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql:///?odbc_connect=DRIVER%3D%7BSQL+Server'
'%7D%3BServer%3Dhostspec%3BDatabase%3Ddatabase'
'%3BUID%3Dusername%3BPWD%3Dpassword')
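# odbc_connect supplies a complete, percent-encoded ODBC connection string; create_connect_args decodes it and passes it through unchanged.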
connection = dialect.create_connect_args(u)
eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UI'
'D=username;PWD=password'], {}], connection)
def test_pyodbc_odbc_connect_with_dsn(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql:///?odbc_connect=dsn%3Dmydsn%3BDatabase'
'%3Ddatabase%3BUID%3Dusername%3BPWD%3Dpassword'
)
connection = dialect.create_connect_args(u)
eq_([['dsn=mydsn;Database=database;UID=username;PWD=password'],
{}], connection)
def test_pyodbc_odbc_connect_ignores_other_values(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql://userdiff:passdiff@localhost/dbdiff?od'
'bc_connect=DRIVER%3D%7BSQL+Server%7D%3BServer'
'%3Dhostspec%3BDatabase%3Ddatabase%3BUID%3Duse'
'rname%3BPWD%3Dpassword')
connection = dialect.create_connect_args(u)
eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UI'
'D=username;PWD=password'], {}], connection)
def test_pymssql_port_setting(self):
dialect = pymssql.dialect()
u = \
url.make_url('mssql+pymssql://scott:tiger@somehost/test')
connection = dialect.create_connect_args(u)
eq_(
[[], {'host': 'somehost', 'password': 'tiger',
'user': 'scott', 'database': 'test'}], connection
)
u = \
url.make_url('mssql+pymssql://scott:tiger@somehost:5000/test')
connection = dialect.create_connect_args(u)
eq_(
[[], {'host': 'somehost:5000', 'password': 'tiger',
'user': 'scott', 'database': 'test'}], connection
)
def test_pymssql_disconnect(self):
dialect = pymssql.dialect()
for error in [
'Adaptive Server connection timed out',
'message 20003',
"Error 10054",
"Not connected to any MS SQL server",
"Connection is closed"
]:
eq_(dialect.is_disconnect(error, None, None), True)
eq_(dialect.is_disconnect("not an error", None, None), False)
@testing.only_on(['mssql+pyodbc', 'mssql+pymssql'],
"FreeTDS specific test")
def test_bad_freetds_warning(self):
engine = engines.testing_engine()
def _bad_version(connection):
return 95, 10, 255
engine.dialect._get_server_version_info = _bad_version
assert_raises_message(exc.SAWarning,
'Unrecognized server version info',
engine.connect)
class TimeTypeTest(fixtures.TestBase):
def test_result_processor_no_microseconds(self):
expected = datetime.time(12, 34, 56)
self._assert_result_processor(expected, '12:34:56')
def test_result_processor_too_many_microseconds(self):
# microsecond must be in 0..999999, should truncate (6 vs 7 digits)
expected = datetime.time(12, 34, 56, 123456)
self._assert_result_processor(expected, '12:34:56.1234567')
def _assert_result_processor(self, expected, value):
mssql_time_type = TIME()
result_processor = mssql_time_type.result_processor(None, None)
eq_(expected, result_processor(value))
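    # In other words, the MSSQL TIME result processor exercised above parses
    # 'HH:MM:SS[.fffffff]' strings and keeps only the first six fractional
    # digits so the value fits datetime.time's microsecond field.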
class TypeDDLTest(fixtures.TestBase):
def test_boolean(self):
"Exercise type specification for boolean type."
columns = [
# column type, args, kwargs, expected ddl
(Boolean, [], {},
'BIT'),
]
metadata = MetaData()
table_args = ['test_mssql_boolean', metadata]
for index, spec in enumerate(columns):
type_, args, kw, res = spec
table_args.append(
Column('c%s' % index, type_(*args, **kw), nullable=None))
boolean_table = Table(*table_args)
dialect = mssql.dialect()
gen = dialect.ddl_compiler(dialect, schema.CreateTable(boolean_table))
for col in boolean_table.c:
index = int(col.name[1:])
testing.eq_(gen.get_column_specification(col),
"%s %s" % (col.name, columns[index][3]))
self.assert_(repr(col))
def test_numeric(self):
"Exercise type specification and options for numeric types."
columns = [
# column type, args, kwargs, expected ddl
(types.NUMERIC, [], {},
'NUMERIC'),
(types.NUMERIC, [None], {},
'NUMERIC'),
(types.NUMERIC, [12, 4], {},
'NUMERIC(12, 4)'),
(types.Float, [], {},
'FLOAT'),
(types.Float, [None], {},
'FLOAT'),
(types.Float, [12], {},
'FLOAT(12)'),
(mssql.MSReal, [], {},
'REAL'),
(types.Integer, [], {},
'INTEGER'),
(types.BigInteger, [], {},
'BIGINT'),
(mssql.MSTinyInteger, [], {},
'TINYINT'),
(types.SmallInteger, [], {},
'SMALLINT'),
]
metadata = MetaData()
table_args = ['test_mssql_numeric', metadata]
for index, spec in enumerate(columns):
type_, args, kw, res = spec
table_args.append(
Column('c%s' % index, type_(*args, **kw), nullable=None))
numeric_table = Table(*table_args)
dialect = mssql.dialect()
gen = dialect.ddl_compiler(dialect, schema.CreateTable(numeric_table))
for col in numeric_table.c:
index = int(col.name[1:])
testing.eq_(gen.get_column_specification(col),
"%s %s" % (col.name, columns[index][3]))
self.assert_(repr(col))
def test_char(self):
"""Exercise COLLATE-ish options on string types."""
columns = [
(mssql.MSChar, [], {},
'CHAR'),
(mssql.MSChar, [1], {},
'CHAR(1)'),
(mssql.MSChar, [1], {'collation': 'Latin1_General_CI_AS'},
'CHAR(1) COLLATE Latin1_General_CI_AS'),
(mssql.MSNChar, [], {},
'NCHAR'),
(mssql.MSNChar, [1], {},
'NCHAR(1)'),
(mssql.MSNChar, [1], {'collation': 'Latin1_General_CI_AS'},
'NCHAR(1) COLLATE Latin1_General_CI_AS'),
(mssql.MSString, [], {},
'VARCHAR(max)'),
(mssql.MSString, [1], {},
'VARCHAR(1)'),
(mssql.MSString, [1], {'collation': 'Latin1_General_CI_AS'},
'VARCHAR(1) COLLATE Latin1_General_CI_AS'),
(mssql.MSNVarchar, [], {},
'NVARCHAR(max)'),
(mssql.MSNVarchar, [1], {},
'NVARCHAR(1)'),
(mssql.MSNVarchar, [1], {'collation': 'Latin1_General_CI_AS'},
'NVARCHAR(1) COLLATE Latin1_General_CI_AS'),
(mssql.MSText, [], {},
'TEXT'),
(mssql.MSText, [], {'collation': 'Latin1_General_CI_AS'},
'TEXT COLLATE Latin1_General_CI_AS'),
(mssql.MSNText, [], {},
'NTEXT'),
(mssql.MSNText, [], {'collation': 'Latin1_General_CI_AS'},
'NTEXT COLLATE Latin1_General_CI_AS'),
]
metadata = MetaData()
table_args = ['test_mssql_charset', metadata]
for index, spec in enumerate(columns):
type_, args, kw, res = spec
table_args.append(
Column('c%s' % index, type_(*args, **kw), nullable=None))
charset_table = Table(*table_args)
dialect = mssql.dialect()
gen = dialect.ddl_compiler(dialect, schema.CreateTable(charset_table))
for col in charset_table.c:
index = int(col.name[1:])
testing.eq_(gen.get_column_specification(col),
"%s %s" % (col.name, columns[index][3]))
self.assert_(repr(col))
def test_timestamp(self):
"""Exercise TIMESTAMP column."""
dialect = mssql.dialect()
metadata = MetaData()
spec, expected = (TIMESTAMP, 'TIMESTAMP')
t = Table('mssql_ts', metadata,
Column('id', Integer, primary_key=True),
Column('t', spec, nullable=None))
gen = dialect.ddl_compiler(dialect, schema.CreateTable(t))
testing.eq_(gen.get_column_specification(t.c.t), "t %s" % expected)
self.assert_(repr(t.c.t))
def test_money(self):
"""Exercise type specification for money types."""
columns = [(mssql.MSMoney, [], {}, 'MONEY'),
(mssql.MSSmallMoney, [], {}, 'SMALLMONEY')]
metadata = MetaData()
table_args = ['test_mssql_money', metadata]
for index, spec in enumerate(columns):
type_, args, kw, res = spec
table_args.append(Column('c%s' % index, type_(*args, **kw),
nullable=None))
money_table = Table(*table_args)
dialect = mssql.dialect()
gen = dialect.ddl_compiler(dialect,
schema.CreateTable(money_table))
for col in money_table.c:
index = int(col.name[1:])
testing.eq_(gen.get_column_specification(col), '%s %s'
% (col.name, columns[index][3]))
self.assert_(repr(col))
def test_binary(self):
"Exercise type specification for binary types."
columns = [
# column type, args, kwargs, expected ddl
(mssql.MSBinary, [], {},
'BINARY'),
(mssql.MSBinary, [10], {},
'BINARY(10)'),
(types.BINARY, [], {},
'BINARY'),
(types.BINARY, [10], {},
'BINARY(10)'),
(mssql.MSVarBinary, [], {},
'VARBINARY(max)'),
(mssql.MSVarBinary, [10], {},
'VARBINARY(10)'),
(types.VARBINARY, [10], {},
'VARBINARY(10)'),
(types.VARBINARY, [], {},
'VARBINARY(max)'),
(mssql.MSImage, [], {},
'IMAGE'),
(mssql.IMAGE, [], {},
'IMAGE'),
(types.LargeBinary, [], {},
'IMAGE'),
]
metadata = MetaData()
table_args = ['test_mssql_binary', metadata]
for index, spec in enumerate(columns):
type_, args, kw, res = spec
table_args.append(Column('c%s' % index, type_(*args, **kw),
nullable=None))
binary_table = Table(*table_args)
dialect = mssql.dialect()
gen = dialect.ddl_compiler(dialect,
schema.CreateTable(binary_table))
for col in binary_table.c:
index = int(col.name[1:])
testing.eq_(gen.get_column_specification(col), '%s %s'
% (col.name, columns[index][3]))
self.assert_(repr(col))
class TypeRoundTripTest(fixtures.TestBase, AssertsExecutionResults, ComparesTables):
__only_on__ = 'mssql'
@classmethod
def setup_class(cls):
global metadata
metadata = MetaData(testing.db)
def teardown(self):
metadata.drop_all()
@testing.fails_on_everything_except('mssql+pyodbc',
'this is some pyodbc-specific feature')
def test_decimal_notation(self):
numeric_table = Table('numeric_table', metadata, Column('id',
Integer, Sequence('numeric_id_seq',
optional=True), primary_key=True),
Column('numericcol',
Numeric(precision=38, scale=20,
asdecimal=True)))
metadata.create_all()
test_items = [decimal.Decimal(d) for d in (
'1500000.00000000000000000000',
'-1500000.00000000000000000000',
'1500000',
'0.0000000000000000002',
'0.2',
'-0.0000000000000000002',
'-2E-2',
'156666.458923543',
'-156666.458923543',
'1',
'-1',
'-1234',
'1234',
'2E-12',
'4E8',
'3E-6',
'3E-7',
'4.1',
'1E-1',
'1E-2',
'1E-3',
'1E-4',
'1E-5',
'1E-6',
'1E-7',
'1E-1',
'1E-8',
'0.2732E2',
'-0.2432E2',
'4.35656E2',
'-02452E-2',
'45125E-2',
'1234.58965E-2',
'1.521E+15',
'-1E-25',
'1E-25',
'1254E-25',
'-1203E-25',
'0',
'-0.00',
'-0',
'4585E12',
'000000000000000000012',
'000000000000.32E12',
'00000000000000.1E+12',
'000000000000.2E-32',
)]
for value in test_items:
numeric_table.insert().execute(numericcol=value)
for value in select([numeric_table.c.numericcol]).execute():
assert value[0] in test_items, "%r not in test_items" % value[0]
def test_float(self):
float_table = Table('float_table', metadata, Column('id',
Integer, Sequence('numeric_id_seq',
optional=True), primary_key=True),
Column('floatcol', Float()))
metadata.create_all()
try:
test_items = [float(d) for d in (
'1500000.00000000000000000000',
'-1500000.00000000000000000000',
'1500000',
'0.0000000000000000002',
'0.2',
'-0.0000000000000000002',
'156666.458923543',
'-156666.458923543',
'1',
'-1',
'1234',
'2E-12',
'4E8',
'3E-6',
'3E-7',
'4.1',
'1E-1',
'1E-2',
'1E-3',
'1E-4',
'1E-5',
'1E-6',
'1E-7',
'1E-8',
)]
for value in test_items:
float_table.insert().execute(floatcol=value)
except Exception, e:
raise e
# todo this should suppress warnings, but it does not
@emits_warning_on('mssql+mxodbc', r'.*does not have any indexes.*')
def test_dates(self):
"Exercise type specification for date types."
columns = [
# column type, args, kwargs, expected ddl
(mssql.MSDateTime, [], {},
'DATETIME', []),
(types.DATE, [], {},
'DATE', ['>=', (10,)]),
(types.Date, [], {},
'DATE', ['>=', (10,)]),
(types.Date, [], {},
'DATETIME', ['<', (10,)], mssql.MSDateTime),
(mssql.MSDate, [], {},
'DATE', ['>=', (10,)]),
(mssql.MSDate, [], {},
'DATETIME', ['<', (10,)], mssql.MSDateTime),
(types.TIME, [], {},
'TIME', ['>=', (10,)]),
(types.Time, [], {},
'TIME', ['>=', (10,)]),
(mssql.MSTime, [], {},
'TIME', ['>=', (10,)]),
(mssql.MSTime, [1], {},
'TIME(1)', ['>=', (10,)]),
(types.Time, [], {},
'DATETIME', ['<', (10,)], mssql.MSDateTime),
(mssql.MSTime, [], {},
'TIME', ['>=', (10,)]),
(mssql.MSSmallDateTime, [], {},
'SMALLDATETIME', []),
(mssql.MSDateTimeOffset, [], {},
'DATETIMEOFFSET', ['>=', (10,)]),
(mssql.MSDateTimeOffset, [1], {},
'DATETIMEOFFSET(1)', ['>=', (10,)]),
(mssql.MSDateTime2, [], {},
'DATETIME2', ['>=', (10,)]),
(mssql.MSDateTime2, [1], {},
'DATETIME2(1)', ['>=', (10,)]),
]
table_args = ['test_mssql_dates', metadata]
for index, spec in enumerate(columns):
type_, args, kw, res, requires = spec[0:5]
if requires and testing._is_excluded('mssql', *requires) \
or not requires:
c = Column('c%s' % index, type_(*args,
**kw), nullable=None)
testing.db.dialect.type_descriptor(c.type)
table_args.append(c)
dates_table = Table(*table_args)
gen = testing.db.dialect.ddl_compiler(testing.db.dialect,
schema.CreateTable(dates_table))
for col in dates_table.c:
index = int(col.name[1:])
testing.eq_(gen.get_column_specification(col), '%s %s'
% (col.name, columns[index][3]))
self.assert_(repr(col))
dates_table.create(checkfirst=True)
reflected_dates = Table('test_mssql_dates',
MetaData(testing.db), autoload=True)
for col in reflected_dates.c:
self.assert_types_base(col, dates_table.c[col.key])
def test_date_roundtrip(self):
t = Table('test_dates', metadata,
Column('id', Integer,
Sequence('datetest_id_seq', optional=True),
primary_key=True),
Column('adate', Date),
Column('atime', Time),
Column('adatetime', DateTime))
metadata.create_all()
d1 = datetime.date(2007, 10, 30)
t1 = datetime.time(11, 2, 32)
d2 = datetime.datetime(2007, 10, 30, 11, 2, 32)
t.insert().execute(adate=d1, adatetime=d2, atime=t1)
t.insert().execute(adate=d2, adatetime=d2, atime=d2)
x = t.select().execute().fetchall()[0]
self.assert_(x.adate.__class__ == datetime.date)
self.assert_(x.atime.__class__ == datetime.time)
self.assert_(x.adatetime.__class__ == datetime.datetime)
t.delete().execute()
t.insert().execute(adate=d1, adatetime=d2, atime=t1)
eq_(select([t.c.adate, t.c.atime, t.c.adatetime], t.c.adate
== d1).execute().fetchall(), [(d1, t1, d2)])
@emits_warning_on('mssql+mxodbc', r'.*does not have any indexes.*')
@testing.provide_metadata
def test_binary_reflection(self):
"Exercise type specification for binary types."
columns = [
# column type, args, kwargs, expected ddl
(mssql.MSBinary, [], {},
'BINARY'),
(mssql.MSBinary, [10], {},
'BINARY(10)'),
(types.BINARY, [], {},
'BINARY'),
(types.BINARY, [10], {},
'BINARY(10)'),
(mssql.MSVarBinary, [], {},
'VARBINARY(max)'),
(mssql.MSVarBinary, [10], {},
'VARBINARY(10)'),
(types.VARBINARY, [10], {},
'VARBINARY(10)'),
(types.VARBINARY, [], {},
'VARBINARY(max)'),
(mssql.MSImage, [], {},
'IMAGE'),
(mssql.IMAGE, [], {},
'IMAGE'),
(types.LargeBinary, [], {},
'IMAGE'),
]
metadata = self.metadata
table_args = ['test_mssql_binary', metadata]
for index, spec in enumerate(columns):
type_, args, kw, res = spec
table_args.append(Column('c%s' % index, type_(*args, **kw),
nullable=None))
binary_table = Table(*table_args)
metadata.create_all()
reflected_binary = Table('test_mssql_binary',
MetaData(testing.db), autoload=True)
for col in reflected_binary.c:
c1 = testing.db.dialect.type_descriptor(col.type).__class__
c2 = \
testing.db.dialect.type_descriptor(
binary_table.c[col.name].type).__class__
assert issubclass(c1, c2), '%r is not a subclass of %r' \
% (c1, c2)
if binary_table.c[col.name].type.length:
testing.eq_(col.type.length,
binary_table.c[col.name].type.length)
def test_autoincrement(self):
Table('ai_1', metadata,
Column('int_y', Integer, primary_key=True),
Column('int_n', Integer, DefaultClause('0'),
primary_key=True, autoincrement=False))
Table('ai_2', metadata,
Column('int_y', Integer, primary_key=True),
Column('int_n', Integer, DefaultClause('0'),
primary_key=True, autoincrement=False))
Table('ai_3', metadata,
Column('int_n', Integer, DefaultClause('0'),
primary_key=True, autoincrement=False),
Column('int_y', Integer, primary_key=True))
Table('ai_4', metadata,
Column('int_n', Integer, DefaultClause('0'),
primary_key=True, autoincrement=False),
Column('int_n2', Integer, DefaultClause('0'),
primary_key=True, autoincrement=False))
Table('ai_5', metadata,
Column('int_y', Integer, primary_key=True),
Column('int_n', Integer, DefaultClause('0'),
primary_key=True, autoincrement=False))
Table('ai_6', metadata,
Column('o1', String(1), DefaultClause('x'),
primary_key=True),
Column('int_y', Integer, primary_key=True))
Table('ai_7', metadata,
Column('o1', String(1), DefaultClause('x'),
primary_key=True),
Column('o2', String(1), DefaultClause('x'),
primary_key=True),
Column('int_y', Integer, primary_key=True))
Table('ai_8', metadata,
Column('o1', String(1), DefaultClause('x'),
primary_key=True),
Column('o2', String(1), DefaultClause('x'),
primary_key=True))
metadata.create_all()
table_names = ['ai_1', 'ai_2', 'ai_3', 'ai_4',
'ai_5', 'ai_6', 'ai_7', 'ai_8']
mr = MetaData(testing.db)
for name in table_names:
tbl = Table(name, mr, autoload=True)
tbl = metadata.tables[name]
for c in tbl.c:
if c.name.startswith('int_y'):
assert c.autoincrement, name
assert tbl._autoincrement_column is c, name
elif c.name.startswith('int_n'):
assert not c.autoincrement, name
assert tbl._autoincrement_column is not c, name
# mxodbc can't handle scope_identity() with DEFAULT VALUES
if testing.db.driver == 'mxodbc':
eng = \
[engines.testing_engine(options={'implicit_returning'
: True})]
else:
eng = \
[engines.testing_engine(options={'implicit_returning'
: False}),
engines.testing_engine(options={'implicit_returning'
: True})]
for counter, engine in enumerate(eng):
engine.execute(tbl.insert())
if 'int_y' in tbl.c:
assert engine.scalar(select([tbl.c.int_y])) \
== counter + 1
assert list(engine.execute(tbl.select()).first()).\
count(counter + 1) == 1
else:
assert 1 \
not in list(engine.execute(tbl.select()).first())
engine.execute(tbl.delete())
class MonkeyPatchedBinaryTest(fixtures.TestBase):
__only_on__ = 'mssql+pymssql'
def test_unicode(self):
module = __import__('pymssql')
result = module.Binary(u'foo')
eq_(result, u'foo')
def test_bytes(self):
module = __import__('pymssql')
input = b('\x80\x03]q\x00X\x03\x00\x00\x00oneq\x01a.')
expected_result = input
result = module.Binary(input)
eq_(result, expected_result)
class BinaryTest(fixtures.TestBase, AssertsExecutionResults):
"""Test the Binary and VarBinary types"""
__only_on__ = 'mssql'
@classmethod
def setup_class(cls):
global binary_table, MyPickleType
class MyPickleType(types.TypeDecorator):
impl = PickleType
def process_bind_param(self, value, dialect):
if value:
value.stuff = 'this is modified stuff'
return value
def process_result_value(self, value, dialect):
if value:
value.stuff = 'this is the right stuff'
return value
binary_table = Table(
'binary_table',
MetaData(testing.db),
Column('primary_id', Integer, Sequence('binary_id_seq',
optional=True), primary_key=True),
Column('data', mssql.MSVarBinary(8000)),
Column('data_image', mssql.MSImage),
Column('data_slice', types.BINARY(100)),
Column('misc', String(30)),
Column('pickled', PickleType),
Column('mypickle', MyPickleType),
)
binary_table.create()
def teardown(self):
binary_table.delete().execute()
@classmethod
def teardown_class(cls):
binary_table.drop()
def test_binary(self):
testobj1 = pickleable.Foo('im foo 1')
testobj2 = pickleable.Foo('im foo 2')
testobj3 = pickleable.Foo('im foo 3')
stream1 = self.load_stream('binary_data_one.dat')
stream2 = self.load_stream('binary_data_two.dat')
binary_table.insert().execute(
primary_id=1,
misc='binary_data_one.dat',
data=stream1,
data_image=stream1,
data_slice=stream1[0:100],
pickled=testobj1,
mypickle=testobj3,
)
binary_table.insert().execute(
primary_id=2,
misc='binary_data_two.dat',
data=stream2,
data_image=stream2,
data_slice=stream2[0:99],
pickled=testobj2,
)
# TODO: pyodbc does not seem to accept "None" for a VARBINARY
# column (data=None). error: [Microsoft][ODBC SQL Server
# Driver][SQL Server]Implicit conversion from data type varchar
# to varbinary is not allowed. Use the CONVERT function to run
# this query. (257) binary_table.insert().execute(primary_id=3,
# misc='binary_data_two.dat', data=None, data_image=None,
# data_slice=stream2[0:99], pickled=None)
binary_table.insert().execute(primary_id=3,
misc='binary_data_two.dat', data_image=None,
data_slice=stream2[0:99], pickled=None)
for stmt in \
binary_table.select(order_by=binary_table.c.primary_id), \
text('select * from binary_table order by '
'binary_table.primary_id',
typemap=dict(data=mssql.MSVarBinary(8000),
data_image=mssql.MSImage,
data_slice=types.BINARY(100), pickled=PickleType,
mypickle=MyPickleType), bind=testing.db):
l = stmt.execute().fetchall()
eq_(list(stream1), list(l[0]['data']))
paddedstream = list(stream1[0:100])
paddedstream.extend(['\x00'] * (100 - len(paddedstream)))
eq_(paddedstream, list(l[0]['data_slice']))
eq_(list(stream2), list(l[1]['data']))
eq_(list(stream2), list(l[1]['data_image']))
eq_(testobj1, l[0]['pickled'])
eq_(testobj2, l[1]['pickled'])
eq_(testobj3.moredata, l[0]['mypickle'].moredata)
eq_(l[0]['mypickle'].stuff, 'this is the right stuff')
def load_stream(self, name, len=3000):
fp = open(os.path.join(os.path.dirname(__file__), "..", name), 'rb')
stream = fp.read(len)
fp.close()
return stream
class InfoCoerceUnicodeTest(fixtures.TestBase):
def test_info_unicode_coercion(self):
from sqlalchemy.dialects.mssql.information_schema import CoerceUnicode
dialect = mssql.dialect()
value = CoerceUnicode().bind_processor(dialect)('a string')
assert isinstance(value, unicode)
class ReflectHugeViewTest(fixtures.TestBase):
__only_on__ = 'mssql'
def setup(self):
self.col_num = 150
self.metadata = MetaData(testing.db)
t = Table('base_table', self.metadata,
*[
Column("long_named_column_number_%d" % i, Integer)
for i in xrange(self.col_num)
]
)
self.view_str = view_str = \
"CREATE VIEW huge_named_view AS SELECT %s FROM base_table" % (
",".join("long_named_column_number_%d" % i
for i in xrange(self.col_num))
)
assert len(view_str) > 4000
event.listen(t, 'after_create', DDL(view_str) )
event.listen(t, 'before_drop', DDL("DROP VIEW huge_named_view") )
self.metadata.create_all()
def teardown(self):
self.metadata.drop_all()
def test_inspect_view_definition(self):
inspector = Inspector.from_engine(testing.db)
view_def = inspector.get_view_definition("huge_named_view")
eq_(view_def, self.view_str)
|
rclmenezes/sqlalchemy
|
test/dialect/test_mssql.py
|
Python
|
mit
| 80,949
|
[
"ASE"
] |
d8e36c745082e51bcf4a5af65931764ee76eb8586532cd7ea94f4552ce6e535c
|
"""
It is used to create several plots.
"""
import time
import copy
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.MonitoringSystem.private.DBUtils import DBUtils
from DIRAC.Core.Utilities.Plotting import gDataCache
from DIRAC.Core.Utilities.Plotting.Plots import (
generateNoDataPlot,
generateTimedStackedBarPlot,
generateQualityPlot,
generateCumulativePlot,
generatePiePlot,
generateStackedLinePlot,
)
class BasePlotter(DBUtils):
_EA_THUMBNAIL = "thumbnail"
_EA_WIDTH = "width"
_EA_HEIGHT = "height"
_EA_THB_WIDTH = "thbWidth"
_EA_THB_HEIGHT = "thbHeight"
_EA_PADDING = "figurePadding"
_EA_TITLE = "plotTitle"
_RATE_UNITS = {
"time": (
("seconds / s", 1, 24),
("hours / s", 3600, 24),
("days / s", 86400, 15),
("weeks / s", 86400 * 7, 10),
("months / s", 86400 * 30, 12),
("years / s", 86400 * 365, 1),
),
"cpupower": (("HS06", 1, 750), ("kHS06", 1000, 750), ("MHS06", 10**6, 1)),
"bytes": (
("MB / s", 10**6, 1000),
("GB / s", 10**9, 1000),
("TB / s", 10**12, 1000),
("PB / s", 10**15, 1),
),
"jobs": (
("jobs / hour", 1 / 3600.0, 1000),
("kjobs / hour", (10**3) / 3600.0, 1000),
("Mjobs / hour", (10**6) / 3600.0, 1),
),
"files": (
("files / hour", 1 / 3600.0, 1000),
("kfiles / hour", (10**3) / 3600.0, 1000),
("Mfiles / hour", (10**6) / 3600.0, 1),
),
}
_UNITS = {
"time": (
("seconds", 1, 24),
("hours", 3600, 24),
("days", 86400, 15),
("weeks", 86400 * 7, 10),
("months", 86400 * 30, 12),
("years", 86400 * 365, 1),
),
"cpupower": (
("HS06 hours", 3600, 24),
("HS06 days", 86400, 750),
("kHS06 days", 86400 * 1000, 750),
("MHS06 days", 86400 * 10**6, 1),
),
"bytes": (("MB", 10**6, 1000), ("GB", 10**9, 1000), ("TB", 10**12, 1000), ("PB", 10**15, 1)),
"jobs": (("jobs", 1, 1000), ("kjobs", 10**3, 1000), ("Mjobs", 10**6, 1)),
"files": (("files", 1, 1000), ("kfiles", 10**3, 1000), ("Mfiles", 10**6, 1)),
}
# To be defined in the derived classes
_typeKeyFields = []
_typeName = ""
    def __init__(self, db, setup, extraArgs=None):
        """c'tor
        :param self: self reference
        """
        super(BasePlotter, self).__init__(db, setup)
if isinstance(extraArgs, dict):
self._extraArgs = extraArgs
else:
self._extraArgs = {}
reportsRevMap = {}
for attr in dir(self):
if attr.startswith("_report"):
if attr.endswith("Name"):
reportId = attr[7:-4]
reportName = getattr(self, attr)
reportsRevMap[reportId] = reportName
else:
reportId = attr[7:]
if reportId not in reportsRevMap:
reportsRevMap[reportId] = reportId
self.__reportNameMapping = {}
for rId in reportsRevMap:
self.__reportNameMapping[reportsRevMap[rId]] = rId
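    # Illustration of the naming convention the loop above relies on
    # (hypothetical names, not part of DIRAC): a derived plotter defining
    #     _reportFooBarName = "Foo bar"
    #     def _reportFooBar(self, reportRequest): ...
    #     def _plotFooBar(self, filename, plotInfo, metadata): ...
    # ends up with __reportNameMapping == {"Foo bar": "FooBar"}.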
def generate(self, reportRequest):
"""
        It retrieves the data from the database and creates the plot.
:param dict reportRequest: contains the plot attributes
"""
reportHash = reportRequest["hash"]
reportName = reportRequest["reportName"]
if reportName in self.__reportNameMapping:
reportRequest["reportName"] = self.__reportNameMapping[reportName]
gLogger.info("Retrieving data for %s:%s" % (reportRequest["typeName"], reportRequest["reportName"]))
sT = time.time()
retVal = self.__retrieveReportData(reportRequest, reportHash)
reportGenerationTime = time.time() - sT
if not retVal["OK"]:
return retVal
if not reportRequest["generatePlot"]:
return retVal
reportData = retVal["Value"]
gLogger.info("Plotting data for %s:%s" % (reportRequest["typeName"], reportRequest["reportName"]))
sT = time.time()
retVal = self.__generatePlotForReport(reportRequest, reportHash, reportData)
plotGenerationTime = time.time() - sT
gLogger.verbose(
"Time for %s:%s - Report %.2f Plot %.2f (%.2f%% r/p)"
% (
reportRequest["typeName"],
reportRequest["reportName"],
reportGenerationTime,
plotGenerationTime,
((reportGenerationTime * 100 / plotGenerationTime) if plotGenerationTime else 0.0),
)
)
if not retVal["OK"]:
return retVal
plotDict = retVal["Value"]
if "retrieveReportData" in reportRequest["extraArgs"] and reportRequest["extraArgs"]["retrieveReportData"]:
plotDict["reportData"] = reportData
return S_OK(plotDict)
def plotsList(self):
"""
It returns the list of available plots.
"""
return sorted(self.__reportNameMapping)
def __retrieveReportData(self, reportRequest, reportHash):
"""
It uses the appropriate Plotter to retrieve the data from the database.
:param dict reportRequest: the dictionary which contains the conditions used to create the plot
:param str reportHash: it is the unique identifier used to cache a plot
        :return: dict S_OK/S_ERROR; if the data is found in the cache it is returned from there, otherwise it is retrieved from the database.
"""
funcName = "_report%s" % reportRequest["reportName"]
if not hasattr(self, funcName):
return S_ERROR("Report %s is not defined" % reportRequest["reportName"])
else:
funcObj = getattr(self, funcName)
return gDataCache.getReportData(reportRequest=reportRequest, reportHash=reportHash, dataFunc=funcObj)
def __generatePlotForReport(self, reportRequest, reportHash, reportData):
"""It creates the plot
:param dict reportRequest: contains the plot attributes
:param str reportHash: unique string which identify the plot
        :param dict reportData: contains the data used to generate the plot.
"""
funcName = "_plot%s" % reportRequest["reportName"]
try:
funcObj = getattr(self, funcName)
except Exception:
return S_ERROR("Plot function for report %s is not defined" % reportRequest["reportName"])
return gDataCache.getReportPlot(
reportRequest=reportRequest, reportHash=reportHash, reportData=reportData, plotFunc=funcObj
)
def _getTimedData(self, startTime, endTime, selectField, preCondDict, metadataDict=None):
"""
It retrieves the time series data from the ES database.
:param int startTime: epoch time
:param int endTime: epoch time
:param str selectField: the value that we want to plot
:param dict preCondDict: plot attributes
:param dict metadataDict: extra arguments used to create the plot.
"""
condDict = {}
if metadataDict is None:
metadataDict = {}
grouping = preCondDict["grouping"][0]
# Make safe selections
for keyword in self._typeKeyFields:
if keyword in preCondDict:
condDict[keyword] = preCondDict[keyword]
retVal = self._determineBucketSize(startTime, endTime)
if not retVal["OK"]:
return retVal
interval, granularity = retVal["Value"]
dynamicBucketing = metadataDict.get("DynamicBucketing", True)
# by default we use dynamic bucketing
if dynamicBucketing:
retVal = self._retrieveBucketedData(
self._typeName, startTime, endTime, interval, selectField, condDict, grouping, metadataDict
)
else:
retVal = self._retrieveAggregatedData(
self._typeName, startTime, endTime, interval, selectField, condDict, grouping, metadataDict
)
if not retVal["OK"]:
return retVal
dataDict = retVal["Value"]
return S_OK((dataDict, granularity))
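    # The tuple returned above is assumed to look like
    #     ({"<grouping value>": {bucketStartEpoch: value, ...}, ...}, granularityInSeconds)
    # e.g. ({"LCG.CERN.cern": {1600000000: 12.0, 1600003600: 7.5}}, 3600).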
def _getSummaryData(self, startTime, endTime, selectField, preCondDict, metadataDict=None):
"""
It returns the data used to create the pie chart plot.
:param int startTime: epoch time
:param int endTime: epoch time
        :param str selectField: the value that we want to plot
:param dict preCondDict: plot attributes
:param dict metadataDict: extra arguments used to create the plot.
"""
grouping = preCondDict["grouping"][0]
condDict = {}
# Make safe selections
for keyword in self._typeKeyFields:
if keyword in preCondDict:
condDict[keyword] = preCondDict[keyword]
retVal = self._determineBucketSize(startTime, endTime)
if not retVal["OK"]:
return retVal
interval, _ = retVal["Value"]
retVal = self._retrieveBucketedData(
typeName=self._typeName,
startTime=startTime,
endTime=endTime,
interval=interval,
selectField=selectField,
condDict=condDict,
grouping=grouping,
metadataDict=metadataDict,
)
if not retVal["OK"]:
return retVal
dataDict = retVal["Value"]
return S_OK(dataDict)
def _findSuitableRateUnit(self, dataDict, maxValue, unit):
"""
Returns the suitable unit for a given dataset.
"""
return self._findUnitMagic(dataDict, maxValue, unit, self._RATE_UNITS)
def _findSuitableUnit(self, dataDict, maxValue, unit):
"""
Returns the suitable unit for a given dataset.
"""
return self._findUnitMagic(dataDict, maxValue, unit, self._UNITS)
def _findUnitMagic(self, reportDataDict, maxValue, unit, selectedUnits):
"""
Returns the suitable unit for a given dataset.
"""
if unit not in selectedUnits:
raise AttributeError("%s is not a known rate unit" % unit)
baseUnitData = selectedUnits[unit][0]
if self._extraArgs.get("staticUnits"):
unitData = selectedUnits[unit][0]
else:
unitList = selectedUnits[unit]
unitIndex = -1
for _, unitDivFactor, unitThreshold in unitList:
unitIndex += 1
if maxValue / unitDivFactor < unitThreshold:
break
unitData = selectedUnits[unit][unitIndex]
# Apply divFactor to all units
graphDataDict, maxValue = self._divideByFactor(copy.deepcopy(reportDataDict), unitData[1])
if unitData == baseUnitData:
reportDataDict = graphDataDict
else:
reportDataDict, dummyMaxValue = self._divideByFactor(reportDataDict, baseUnitData[1])
return reportDataDict, graphDataDict, maxValue, unitData[0]
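    # Worked example (illustrative numbers): with unit="bytes" and
    # maxValue=5e9, the selection loop above skips "MB" (5e9 / 1e6 = 5000,
    # not < 1000) and picks "GB" (5e9 / 1e9 = 5 < 1000), so graphDataDict is
    # divided by 1e9 while reportDataDict is rescaled to the base unit, "MB".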
def __checkPlotMetadata(self, metadata):
"""It check the plot metadata arguments
:param dict metadata: contains the plot metadata
"""
if self._extraArgs.get(self._EA_WIDTH):
try:
metadata[self._EA_WIDTH] = min(1600, max(200, int(self._extraArgs[self._EA_WIDTH])))
except Exception:
pass
if self._EA_HEIGHT in self._extraArgs and self._extraArgs[self._EA_HEIGHT]:
try:
metadata[self._EA_HEIGHT] = min(1600, max(200, int(self._extraArgs[self._EA_HEIGHT])))
except Exception:
pass
if self._extraArgs.get(self._EA_TITLE):
metadata["title"] = self._extraArgs[self._EA_TITLE]
def __checkThumbnailMetadata(self, metadata):
"""checks the plot thumbnail data
:param dict metadata: contains the thumbnail data
"""
if self._EA_THUMBNAIL in self._extraArgs and self._extraArgs[self._EA_THUMBNAIL]:
thbMD = dict(metadata)
thbMD["legend"] = False
if self._EA_THB_HEIGHT in self._extraArgs:
thbMD[self._EA_HEIGHT] = self._extraArgs[self._EA_THB_HEIGHT]
else:
thbMD[self._EA_HEIGHT] = 125
if self._EA_THB_WIDTH in self._extraArgs:
thbMD[self._EA_WIDTH] = self._extraArgs[self._EA_THB_WIDTH]
else:
thbMD[self._EA_WIDTH] = 200
thbMD[self._EA_PADDING] = 20
for key in ("title", "ylabel", "xlabel"):
if key in thbMD:
del thbMD[key]
return thbMD
return False
def __plotData(self, filename, dataDict, metadata, funcToPlot):
"""It create the plot.
:param str filename: the name of the file which contains the plot
:param dict dataDict: data used to crate the plot
:param dict metadata: plot metadata
:param object funcToPlot: the method which create the plot using the appropriate method.
"""
self.__checkPlotMetadata(metadata)
if not dataDict:
funcToPlot = generateNoDataPlot
plotFileName = "%s.png" % filename
finalResult = funcToPlot(fileName=plotFileName, data=dataDict, metadata=metadata)
if not finalResult["OK"]:
return finalResult
thbMD = self.__checkThumbnailMetadata(metadata)
if not thbMD:
return S_OK({"plot": True, "thumbnail": False})
thbFilename = "%s.thb.png" % filename
retVal = funcToPlot(thbFilename, dataDict, thbMD)
if not retVal["OK"]:
return retVal
return S_OK({"plot": True, "thumbnail": True})
def _generateTimedStackedBarPlot(self, filename, dataDict, metadata):
"""
        It creates a timed stacked bar plot.
"""
return self.__plotData(filename, dataDict, metadata, generateTimedStackedBarPlot)
def _generateQualityPlot(self, filename, dataDict, metadata):
"""
        It creates a quality plot.
"""
return self.__plotData(filename, dataDict, metadata, generateQualityPlot)
def _generateCumulativePlot(self, filename, dataDict, metadata):
"""
It creates a cumulative plot
"""
return self.__plotData(filename, dataDict, metadata, generateCumulativePlot)
def _generatePiePlot(self, filename, dataDict, metadata):
"""
It creates a pie chart plot
"""
return self.__plotData(filename, dataDict, metadata, generatePiePlot)
def _generateStackedLinePlot(self, filename, dataDict, metadata):
"""
        It creates a stacked line plot.
"""
return self.__plotData(filename, dataDict, metadata, generateStackedLinePlot)
def _fillWithZero(self, granularity, startEpoch, endEpoch, dataDict):
"""
        Fill missing buckets with zeros.
- dataDict = { 'key' : { time1 : value, time2 : value... }, 'key2'.. }
"""
startBucketEpoch = startEpoch - startEpoch % granularity
for key in dataDict:
currentDict = dataDict[key]
for timeEpoch in range(int(startBucketEpoch), int(endEpoch), granularity):
if timeEpoch not in currentDict:
currentDict[timeEpoch] = 0
return dataDict
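    # Minimal sketch (hypothetical data) of the padding performed above: with
    # granularity=3600, startEpoch=0, endEpoch=7200,
    #     {"SiteA": {3600: 5}}  ->  {"SiteA": {0: 0, 3600: 5}}
    # the 7200 bucket is not added because range() stops before endEpoch.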
|
DIRACGrid/DIRAC
|
src/DIRAC/MonitoringSystem/private/Plotters/BasePlotter.py
|
Python
|
gpl-3.0
| 15,548
|
[
"DIRAC"
] |
657716eaa0c5bf54e20208c40c0adfc32124525a2a55f1c7a5f6fa68979397eb
|
import logging
import wx
from service.fit import Fit
from gui.bitmap_loader import BitmapLoader
import gui.globalEvents as GE
from gui.preferenceView import PreferenceView
from service.settings import EOSSettings
import gui.mainFrame
from wx.lib.intctrl import IntCtrl
logger = logging.getLogger(__name__)
class PFFittingEnginePref(PreferenceView):
title = "Fitting Engine"
def __init__(self):
self.dirtySettings = False
def refreshPanel(self, fit):
pass
# noinspection PyAttributeOutsideInit
def populatePanel(self, panel):
self.mainFrame = gui.mainFrame.MainFrame.getInstance()
mainSizer = wx.BoxSizer(wx.VERTICAL)
helpCursor = wx.Cursor(wx.CURSOR_QUESTION_ARROW)
self.engine_settings = EOSSettings.getInstance()
self.stTitle = wx.StaticText(panel, wx.ID_ANY, self.title, wx.DefaultPosition, wx.DefaultSize, 0)
self.stTitle.Wrap(-1)
self.stTitle.SetFont(wx.Font(12, 70, 90, 90, False, wx.EmptyString))
mainSizer.Add(self.stTitle, 0, wx.ALL, 5)
self.m_staticline1 = wx.StaticLine(panel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
mainSizer.Add(self.m_staticline1, 0, wx.EXPAND | wx.TOP | wx.BOTTOM, 5)
self.cbGlobalForceReload = wx.CheckBox(panel, wx.ID_ANY, "Factor in reload time when calculating capacitor usage, damage, and tank.",
wx.DefaultPosition, wx.DefaultSize, 0)
mainSizer.Add(self.cbGlobalForceReload, 0, wx.ALL | wx.EXPAND, 5)
self.cbStrictSkillLevels = wx.CheckBox(panel, wx.ID_ANY,
"Enforce strict skill level requirements",
wx.DefaultPosition, wx.DefaultSize, 0)
self.cbStrictSkillLevels.SetCursor(helpCursor)
self.cbStrictSkillLevels.SetToolTip(wx.ToolTip(
'When enabled, skills will check their dependencies\' requirements when their levels change and reset ' +
'skills that no longer meet the requirement.\neg: Setting Drones from level V to IV will reset the Heavy ' +
'Drone Operation skill, as that requires Drones V'))
mainSizer.Add(self.cbStrictSkillLevels, 0, wx.ALL | wx.EXPAND, 5)
self.cbUniversalAdaptiveArmorHardener = wx.CheckBox(panel, wx.ID_ANY,
"When damage profile is Uniform, set Reactive Armor " +
"Hardener to match (old behavior).",
wx.DefaultPosition, wx.DefaultSize, 0)
mainSizer.Add(self.cbUniversalAdaptiveArmorHardener, 0, wx.ALL | wx.EXPAND, 5)
spoolup_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.spool_up_label = wx.StaticText(panel, wx.ID_ANY, "Global Default Spoolup Percentage:", wx.DefaultPosition, wx.DefaultSize, 0)
self.spool_up_label.Wrap(-1)
self.spool_up_label.SetCursor(helpCursor)
self.spool_up_label.SetToolTip(
            wx.ToolTip('The amount of spoolup to use by default on modules which support it. Can be changed on a per-module basis'))
spoolup_sizer.Add(self.spool_up_label, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
self.spoolup_value = IntCtrl(panel, min=0, max=100, limited=True)
spoolup_sizer.Add(self.spoolup_value , 0, wx.ALL, 5)
mainSizer.Add(spoolup_sizer, 0, wx.ALL | wx.EXPAND, 0)
# Future code once new cap sim is implemented
'''
self.cbGlobalForceReactivationTimer = wx.CheckBox( panel, wx.ID_ANY, u"Factor in reactivation timer", wx.DefaultPosition, wx.DefaultSize, 0 )
mainSizer.Add( self.cbGlobalForceReactivationTimer, 0, wx.ALL|wx.EXPAND, 5 )
text = u" Ignores reactivation timer when calculating capacitor usage,\n damage, and tank."
self.cbGlobalForceReactivationTimerText = wx.StaticText( panel, wx.ID_ANY, text, wx.DefaultPosition, wx.DefaultSize, 0 )
self.cbGlobalForceReactivationTimerText.Wrap( -1 )
self.cbGlobalForceReactivationTimerText.SetFont( wx.Font( 10, 70, 90, 90, False, wx.EmptyString ) )
mainSizer.Add( self.cbGlobalForceReactivationTimerText, 0, wx.ALL, 5 )
'''
# Future code for mining laser crystal
'''
self.cbGlobalMiningSpecialtyCrystal = wx.CheckBox( panel, wx.ID_ANY, u"Factor in reactivation timer", wx.DefaultPosition, wx.DefaultSize, 0 )
mainSizer.Add( self.cbGlobalMiningSpecialtyCrystal, 0, wx.ALL|wx.EXPAND, 5 )
text = u" If enabled, displays the Specialty Crystal mining amount.\n This is the amount mined when using crystals and mining the matching asteroid."
self.cbGlobalMiningSpecialtyCrystalText = wx.StaticText( panel, wx.ID_ANY, text, wx.DefaultPosition, wx.DefaultSize, 0 )
self.cbGlobalMiningSpecialtyCrystalText.Wrap( -1 )
self.cbGlobalMiningSpecialtyCrystalText.SetFont( wx.Font( 10, 70, 90, 90, False, wx.EmptyString ) )
mainSizer.Add( self.cbGlobalMiningSpecialtyCrystalText, 0, wx.ALL, 5 )
'''
self.sFit = Fit.getInstance()
self.cbGlobalForceReload.SetValue(self.sFit.serviceFittingOptions["useGlobalForceReload"])
self.cbGlobalForceReload.Bind(wx.EVT_CHECKBOX, self.OnCBGlobalForceReloadStateChange)
self.cbStrictSkillLevels.SetValue(self.engine_settings.get("strictSkillLevels"))
self.cbStrictSkillLevels.Bind(wx.EVT_CHECKBOX, self.OnCBStrictSkillLevelsChange)
self.cbUniversalAdaptiveArmorHardener.SetValue(self.engine_settings.get("useStaticAdaptiveArmorHardener"))
self.cbUniversalAdaptiveArmorHardener.Bind(wx.EVT_CHECKBOX, self.OnCBUniversalAdaptiveArmorHardenerChange)
self.spoolup_value.SetValue(int(self.engine_settings.get("globalDefaultSpoolupPercentage") * 100))
self.spoolup_value.Bind(wx.lib.intctrl.EVT_INT, self.OnSpoolupChange)
panel.SetSizer(mainSizer)
panel.Layout()
def OnSpoolupChange(self, event):
self.engine_settings.set("globalDefaultSpoolupPercentage", self.spoolup_value.GetValue() / 100)
def OnCBGlobalForceReloadStateChange(self, event):
self.sFit.serviceFittingOptions["useGlobalForceReload"] = self.cbGlobalForceReload.GetValue()
fitID = self.mainFrame.getActiveFit()
self.sFit.refreshFit(fitID)
wx.PostEvent(self.mainFrame, GE.FitChanged(fitID=fitID))
def OnCBStrictSkillLevelsChange(self, event):
self.engine_settings.set("strictSkillLevels", self.cbStrictSkillLevels.GetValue())
def OnCBUniversalAdaptiveArmorHardenerChange(self, event):
self.engine_settings.set("useStaticAdaptiveArmorHardener", self.cbUniversalAdaptiveArmorHardener.GetValue())
def getImage(self):
return BitmapLoader.getBitmap("settings_fitting", "gui")
def OnWindowLeave(self, event):
# We don't want to do anything when they leave,
# but in the future we might.
pass
PFFittingEnginePref.register()
|
blitzmann/Pyfa
|
gui/builtinPreferenceViews/pyfaEnginePreferences.py
|
Python
|
gpl-3.0
| 7,074
|
[
"CRYSTAL"
] |
08a14e9cfa707694d17ce82e589742e3ffbaf739bd9ff9ace6acfc43b69a62a0
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import copy
import numpy as np
from scipy.stats import linregress
from matplotlib import cm
import itertools
import warnings
from pymatgen.core.structure import Structure, Composition
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.core.surface import Slab
from pymatgen.analysis.wulff import WulffShape
from pymatgen import MPRester
from pymatgen.analysis.phase_diagram import PhaseDiagram
from pymatgen import Element
from pymatgen.util.plotting import pretty_plot
__author__ = "Richard Tran"
__copyright__ = "Copyright 2014, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Richard Tran"
__email__ = "rit001@eng.ucsd.edu"
__date__ = "8/24/17"
class SurfaceEnergyAnalyzer(object):
"""
A class used for analyzing the surface energies of a material of a given
material_id. By default, this will use entries calculated from the
Materials Project to obtain chemical potential and bulk energy. As a
result, the difference in VASP parameters between the user's entry
(vasprun_dict) and the parameters used by Materials Project, may lead
to a rough estimate of the surface energy. For best results, it is
    recommended that the user calculate all decomposition components first,
and insert the results into their own database as a pymatgen-db entry
and use those entries instead (custom_entries). In addition, this code
will only use one bulk entry to calculate surface energy. Ideally, to
get the most accurate surface energy, the user should compare their
slab energy to the energy of the oriented unit cell with both calculations
    containing consistent k-points to avoid convergence problems as the
slab size is varied. See:
Sun, W.; Ceder, G. Efficient creation and convergence of surface slabs,
Surface Science, 2013, 617, 53–59, doi:10.1016/j.susc.2013.05.016.
and
Rogal, J., & Reuter, K. (2007). Ab Initio Atomistic Thermodynamics for
Surfaces : A Primer. Experiment, Modeling and Simulation of Gas-Surface
Interactions for Reactive Flows in Hypersonic Flights, 2–1 – 2–18.
.. attribute:: ref_element
    All chemical potentials can be written in terms of the range of chemical
potential of this element which will be used to calculate surface energy.
.. attribute:: mprester
Materials project rester for querying entries from the materials project.
Requires user MAPIKEY.
.. attribute:: ucell_entry
Materials Project entry of the material of the slab.
.. attribute:: x
Reduced amount composition of decomposed compound A in the bulk.
.. attribute:: y
Reduced amount composition of ref_element in the bulk.
.. attribute:: gbulk
Gibbs free energy of the bulk per formula unit
.. attribute:: chempot_range
List of the min and max chemical potential of ref_element.
.. attribute:: e_of_element
        Energy per atom of ground state ref_element, e.g. if ref_element=O,
        then e_of_element=1/2*E_O2.
.. attribute:: vasprun_dict
Dictionary containing a list of Vaspruns for slab calculations as
items and the corresponding Miller index of the slab as the key
"""
def __init__(self, material_id, vasprun_dict, ref_element,
exclude_ids=[], custom_entries=[], mapi_key=None):
"""
Analyzes surface energies and Wulff shape of a particular
material using the chemical potential.
Args:
material_id (str): Materials Project material_id (a string,
e.g., mp-1234).
vasprun_dict (dict): Dictionary containing a list of Vaspruns
for slab calculations as items and the corresponding Miller
index of the slab as the key.
eg. vasprun_dict = {(1,1,1): [vasprun_111_1, vasprun_111_2,
vasprun_111_3], (1,1,0): [vasprun_111_1, vasprun_111_2], ...}
            ref_element: element to be considered as the independent
                variable. E.g., if you want to show the stability
                ranges of all Li-Co-O phases wrt uLi
exclude_ids (list of material_ids): List of material_ids
to exclude when obtaining the decomposition components
to calculate the chemical potential
custom_entries (list of pymatgen-db type entries): List of
user specified pymatgen-db type entries to use in finding
decomposition components for the chemical potential
mapi_key (str): Materials Project API key for accessing the
MP database via MPRester
"""
self.ref_element = ref_element
self.mprester = MPRester(mapi_key) if mapi_key else MPRester()
self.ucell_entry = \
self.mprester.get_entry_by_material_id(material_id,
inc_structure=True,
property_data=
["formation_energy_per_atom"])
ucell = self.ucell_entry.structure
# Get x and y, the number of species in a formula unit of the bulk
reduced_comp = ucell.composition.reduced_composition.as_dict()
if len(reduced_comp.keys()) == 1:
x = y = reduced_comp[ucell[0].species_string]
else:
for el in reduced_comp.keys():
if self.ref_element == el:
y = reduced_comp[el]
else:
x = reduced_comp[el]
# Calculate Gibbs free energy of the bulk per unit formula
gbulk = self.ucell_entry.energy /\
(len([site for site in ucell
if site.species_string == self.ref_element]) / y)
entries = [entry for entry in
self.mprester.get_entries_in_chemsys(list(reduced_comp.keys()),
property_data=["e_above_hull",
"material_id"])
if entry.data["e_above_hull"] == 0 and
entry.data["material_id"] not in exclude_ids] \
if not custom_entries else custom_entries
pd = PhaseDiagram(entries)
chempot_ranges = pd.get_chempot_range_map([Element(self.ref_element)])
        # If no chemical potential is found, we return u=0, e.g.
        # for an elemental system, the relative u of Cu for Cu is 0
chempot_range = [chempot_ranges[entry] for entry in chempot_ranges.keys()
if entry.composition ==
self.ucell_entry.composition][0][0]._coords if \
chempot_ranges else [[0,0], [0,0]]
e_of_element = [entry.energy_per_atom for entry in
entries if str(entry.composition.reduced_composition)
== self.ref_element + "1"][0]
self.x = x
self.y = y
self.gbulk = gbulk
chempot_range = list(chempot_range)
self.chempot_range = sorted([chempot_range[0][0], chempot_range[1][0]])
self.e_of_element = e_of_element
self.vasprun_dict = vasprun_dict
def calculate_gamma(self, vasprun):
"""
Calculates the surface energy for a single slab.
Args:
vasprun (Vasprun): A Vasprun object
        Returns (list): The surface energies evaluated at the minimum and
            maximum chemical potential of ref_element, i.e. at the two ends
            of self.chempot_range.
"""
reduced_comp = self.ucell_entry.composition.reduced_composition.as_dict()
# Get the composition in the slab
slab = vasprun.final_structure
comp = slab.composition.as_dict()
if len(reduced_comp.keys()) == 1:
Ny = comp[self.ucell_entry.structure[0].species_string]
Nx = Ny
else:
for el in reduced_comp.keys():
if self.ref_element == el:
Ny = comp[el]
else:
Nx = comp[el]
# Calculate surface area
m = slab.lattice.matrix
A = np.linalg.norm(np.cross(m[0], m[1]))
# calculate the surface energy for the max and min chemical potential
return [(1 / (2 * A)) * (vasprun.final_energy - (Nx / self.x)
* self.gbulk - (Ny - (self.y / self.x) * Nx)
* (delu + self.e_of_element))
for delu in self.chempot_range]
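    # Written out, the expression above is
    #     gamma(delta_mu) = 1 / (2 * A) * (E_slab - (Nx / x) * g_bulk
    #                       - (Ny - (y / x) * Nx) * (delta_mu + E_ref))
    # i.e. the slab energy minus the equivalent amount of bulk, with any
    # excess or deficit of ref_element charged to its chemical potential;
    # the 1/(2A) factor accounts for the slab's two surfaces.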
def wulff_shape_from_chempot(self, chempot, symprec=1e-5):
"""
Method to get the Wulff shape at a specific chemical potential.
Args:
chempot (float): The chemical potential the Wulff Shape exist in.
"""
        # Check if the user-provided chemical potential is within the
        # predetermined range of chemical potential. If not, raise a warning
if not max(self.chempot_range) >= chempot >= min(self.chempot_range):
warnings.warn("The provided chemical potential is outside the range "
"of chemical potential (%s to %s). The resulting Wulff "
"shape might not be reasonable." %(min(self.chempot_range),
max(self.chempot_range)))
latt = SpacegroupAnalyzer(self.ucell_entry.structure).\
get_conventional_standard_structure().lattice
miller_list = self.vasprun_dict.keys()
e_surf_list = []
for hkl in miller_list:
# At each possible configuration, we calculate surface energy as a
# function of u and take the lowest surface energy (corresponds to
# the most stable slab termination at that particular u)
surf_e_range_list = [self.calculate_gamma(vasprun)
for vasprun in self.vasprun_dict[hkl]]
e_list = []
for e_range in surf_e_range_list:
slope, intercept = self.get_slope_and_intercept(e_range)
e_list.append(slope * chempot + intercept)
e_surf_list.append(min(e_list))
return WulffShape(latt, miller_list, e_surf_list, symprec=symprec)
def wulff_shape_dict(self, symprec=1e-5, at_intersections=False):
"""
As the surface energy is a function of chemical potential, so too is the
        Wulff shape. This method generates a dictionary of Wulff shapes at
certain chemical potentials where a facet goes through a transition.
Returns a dict, eg. {chempot1: WulffShape1, chempot2: WulffShape2}
Args:
symprec (float): for recp_operation, default is 1e-5.
at_intersections (bool): Whether to generate a Wulff shape for each
intersection of surface energy for a specific facet (eg. at the
point where a (111) stoichiometric surface energy plot intersects
with the (111) nonstoichiometric plot) or to just generate two
Wulff shapes, one at the min and max chemical potential.
"""
# First lets get the Wulff shape at the
# minimum and maximum chemical potential
wulff_dict = {self.chempot_range[0]: \
self.wulff_shape_from_chempot(self.chempot_range[0],
symprec=symprec),
self.chempot_range[1]: \
self.wulff_shape_from_chempot(self.chempot_range[1],
symprec=symprec)}
# Now we get the Wulff shape each time a facet changes its configuration
# (ie, adsorption coverage, stoichiometric to nonstoichiometric, etc)
if at_intersections:
# Get all values of chemical potential where an intersection occurs
u_at_intersection = [self.get_intersections(hkl)[0] for hkl in
self.vasprun_dict.keys()
if self.get_intersections(hkl)]
# Get a Wulff shape for each intersection. The change in the Wulff shape
# will vary if the rate of change in surface energy for any facet changes
for u in u_at_intersection:
wulff_dict[u] = self.wulff_shape_from_chempot(u, symprec=symprec)
return wulff_dict
def get_slope_and_intercept(self, surf_e_pair):
"""
Returns the slope and intercept of the surface
energy vs chemical potential line
Args:
surf_e_pair ([e_at_min_u, e_at_max_u]): The surface energy at the
minimum chemical potential and maximum chemical potential
"""
slope, intercept, r_value, p_value, std_err = \
linregress(self.chempot_range, surf_e_pair)
slope = 0 if str(slope) == 'nan' else slope
intercept = surf_e_pair[0] if str(intercept) == 'nan' else intercept
return slope, intercept
def get_intersections(self, miller_index):
"""
        Returns all intersections for a specific facet. Useful for
finding when the configuration of a particular facet changes.
Args:
miller_index ((h, k, l)): Miller index of the facet we
are interested in
"""
# First lets calculate the range of surface
# energies for all terminations of a specific facet
all_se_ranges = [self.calculate_gamma(vasprun) for vasprun
in self.vasprun_dict[miller_index]]
if len(all_se_ranges) == 1:
return []
# Now get all possible intersection coordinates for each pair of lines
intersections = []
for pair_ranges in itertools.combinations(all_se_ranges, 2):
slope1, intercept1 = self.get_slope_and_intercept(pair_ranges[0])
slope2, intercept2 = self.get_slope_and_intercept(pair_ranges[1])
# Calculate the intersection coordinates
u = (intercept1-intercept2)/(slope2-slope1)
# if the intersection is beyond the chemical potential
# range or if the lines are parallel, we ignore it
if slope1-slope2 == 0 or u < min(self.chempot_range) \
or u > max(self.chempot_range):
continue
intersections.append([u, slope1 * u + intercept1])
return sorted(intersections, key=lambda ints: ints[0])
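    # Worked example (arbitrary numbers): lines gamma1 = 0.10 * u + 1.0 and
    # gamma2 = -0.05 * u + 0.7 cross at
    #     u = (intercept1 - intercept2) / (slope2 - slope1)
    #       = (1.0 - 0.7) / (-0.05 - 0.10) = -2.0
    # which is kept only if it lies within self.chempot_range.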
def area_frac_vs_chempot_plot(self, cmap=cm.jet, at_intersections=False,
increments=10):
"""
Plots the change in the area contribution of
each facet as a function of chemical potential.
Args:
cmap (cm): A matplotlib colormap object, defaults to jet.
at_intersections (bool): Whether to generate a Wulff shape for each
intersection of surface energy for a specific facet (eg. at the
point where a (111) stoichiometric surface energy plot intersects
with the (111) nonstoichiometric plot) or to just generate two
Wulff shapes, one at the min and max chemical potential.
            increments (int): Number of data points between min/max or points
                of intersection. Defaults to 10 points.
"""
# Choose unique colors for each facet
f = [int(i) for i in np.linspace(0, 255, len(self.vasprun_dict.keys()))]
# Get all points of min/max chempot and intersections
chempot_intersections = []
chempot_intersections.extend(self.chempot_range)
for hkl in self.vasprun_dict.keys():
chempot_intersections.extend([ints[0] for ints in
self.get_intersections(hkl)])
chempot_intersections = sorted(chempot_intersections)
# Get all chempots
if at_intersections:
all_chempots = []
for i, intersection in enumerate(chempot_intersections):
if i < len(chempot_intersections)-1:
all_chempots.extend(np.linspace(intersection,
chempot_intersections[i+1],
increments))
else:
all_chempots = np.linspace(min(self.chempot_range),
max(self.chempot_range), increments)
# initialize a dictionary of lists of fractional areas for each hkl
hkl_area_dict = {}
for hkl in self.vasprun_dict.keys():
hkl_area_dict[hkl] = []
# Get plot points for each Miller index
for u in all_chempots:
wulffshape = self.wulff_shape_from_chempot(u)
for hkl in wulffshape.area_fraction_dict.keys():
hkl_area_dict[hkl].append(wulffshape.area_fraction_dict[hkl])
# Plot the area fraction vs chemical potential for each facet
plt = pretty_plot()
for i, hkl in enumerate(self.vasprun_dict.keys()):
# Ignore any facets that never show up on the
# Wulff shape regardless of chemical potential
if all([a == 0 for a in hkl_area_dict[hkl]]):
continue
else:
plt.plot(all_chempots, hkl_area_dict[hkl],
'--', color=cmap(f[i]), label=str(hkl))
# Make the figure look nice
plt.ylim([0,1])
plt.xlim(self.chempot_range)
plt.ylabel(r"Fractional area $A^{Wulff}_{hkl}/A^{Wulff}$")
plt.xlabel(r"Chemical potential $\Delta\mu_{%s}$ (eV)" %(self.ref_element))
plt.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
return plt
def chempot_vs_gamma_plot(self, cmap=cm.jet, show_unstable_points=False):
"""
Plots the surface energy of all facets as a function of chemical potential.
Each facet will be associated with its own distinct colors. Dashed lines
will represent stoichiometries different from that of the mpid's compound.
Args:
cmap (cm): A matplotlib colormap object, defaults to jet.
show_unstable_points (bool): For each facet, there may be various
terminations or stoichiometries and the relative stability of
these different slabs may change with chemical potential. This
option will only plot the most stable surface energy for a
given chemical potential.
"""
plt = pretty_plot()
# Choose unique colors for each facet
f = [int(i) for i in np.linspace(0, 255, sum([len(vaspruns) for vaspruns in
self.vasprun_dict.values()]))]
i, already_labelled, colors = 0, [], []
for hkl in self.vasprun_dict.keys():
for vasprun in self.vasprun_dict[hkl]:
slab = vasprun.final_structure
# Generate a label for the type of slab
label = str(hkl)
# use dashed lines for slabs that are not stoichiometric
# wrt bulk. Label with formula if nonstoichiometric
if slab.composition.reduced_composition != \
self.ucell_entry.composition.reduced_composition:
mark = '--'
label += " %s" % (slab.composition.reduced_composition)
else:
mark = '-'
# label the chemical environment at the surface if different from the bulk.
# First get the surface sites, then get the reduced composition at the surface
# s = vasprun.final_structure
# ucell = SpacegroupAnalyzer(self.ucell_entry.structure).\
# get_conventional_standard_structure()
# slab = Slab(s.lattice, s.species, s.frac_coords, hkl, ucell, 0, None)
# surf_comp = slab.surface_composition()
#
# if surf_comp.reduced_composition != ucell.composition.reduced_composition:
# label += " %s" %(surf_comp.reduced_composition)
if label in already_labelled:
c = colors[already_labelled.index(label)]
label = None
else:
already_labelled.append(label)
c = cmap(f[i])
colors.append(c)
se_range = self.calculate_gamma(vasprun)
plt.plot(self.chempot_range, se_range, mark, color=c, label=label)
i += 1
# Make the figure look nice
axes = plt.gca()
ylim = axes.get_ylim()
plt.ylim(ylim)
plt.xlim(self.chempot_range)
plt.ylabel(r"Surface energy (eV/$\AA$)")
plt.xlabel(r"Chemical potential $\Delta\mu_{%s}$ (eV)" %(self.ref_element))
plt.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
return plt
def broken_bond_vs_gamma(self):
return
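# Hedged usage sketch (requires a Materials Project API key and local
# vasprun.xml files; the material id, element and paths below are only
# placeholders, not shipped data):
#
#     from pymatgen.io.vasp.outputs import Vasprun
#     vasprun_dict = {(1, 1, 1): [Vasprun("slab_111/vasprun.xml")],
#                     (1, 0, 0): [Vasprun("slab_100/vasprun.xml")]}
#     analyzer = SurfaceEnergyAnalyzer("mp-135", vasprun_dict, "Li")
#     analyzer.chempot_vs_gamma_plot().show()
#     wulff = analyzer.wulff_shape_from_chempot(min(analyzer.chempot_range))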
|
setten/pymatgen
|
pymatgen/analysis/surface_analysis.py
|
Python
|
mit
| 21,488
|
[
"VASP",
"pymatgen"
] |
58100e48c361e5c223591b3f1d9a10c8cf71fe07c03e86cbb44c7b856698e883
|
#!/usr/bin/env python
# -*- Mode: Python; py-indent-offset: 4 -*-
# vim: tabstop=4 shiftwidth=4 expandtab
#
# Copyright (C) 2010 Red Hat, Inc., John (J5) Palmieri <johnp@redhat.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
title = "Combo boxes"
description = """
The ComboBox widget allows the user to select one option out of a list.
The ComboBoxEntry additionally allows the user to enter a value
that is not in the list of options.
How the options are displayed is controlled by cell renderers.
"""
# See FIXME's
is_fully_bound = False
from gi.repository import Gtk, Gdk, GdkPixbuf, GLib, GObject
(PIXBUF_COL,
TEXT_COL) = range(2)
class MaskEntry(Gtk.Entry):
__gtype_name__ = 'MaskEntry'
def __init__(self, mask=None):
self.mask = mask
super(MaskEntry, self).__init__()
self.connect('changed', self.changed_cb)
self.error_color = Gdk.RGBA()
self.error_color.red = 1.0
self.error_color.green = 0.9
self.error_color.blue = 0.9
self.error_color.alpha = 1.0
# workaround since override_color doesn't accept None yet
style_ctx = self.get_style_context()
self.normal_color = style_ctx.get_color(0)
def set_background(self):
if self.mask:
if not GLib.regex_match_simple(self.mask,
self.get_text(), 0, 0):
self.override_color(0, self.error_color)
return
self.override_color(0, self.normal_color)
def changed_cb(self, entry):
self.set_background()
class ComboboxApp:
def __init__(self, demoapp):
self.demoapp = demoapp
self.window = Gtk.Window()
self.window.set_title('Combo boxes')
self.window.set_border_width(10)
self.window.connect('destroy', lambda w: Gtk.main_quit())
vbox = Gtk.VBox(homogeneous=False, spacing=2)
self.window.add(vbox)
frame = Gtk.Frame(label='Some stock icons')
vbox.pack_start(frame, False, False, 0)
box = Gtk.VBox(homogeneous=False, spacing=0)
box.set_border_width(5)
frame.add(box)
model = self.create_stock_icon_store()
combo = Gtk.ComboBox(model=model)
box.add(combo)
renderer = Gtk.CellRendererPixbuf()
combo.pack_start(renderer, False)
# FIXME: override set_attributes
combo.add_attribute(renderer, 'pixbuf', PIXBUF_COL)
combo.set_cell_data_func(renderer, self.set_sensitive, None)
renderer = Gtk.CellRendererText()
combo.pack_start(renderer, True)
combo.add_attribute(renderer, 'text', TEXT_COL)
combo.set_cell_data_func(renderer, self.set_sensitive, None)
combo.set_row_separator_func(self.is_separator, None)
combo.set_active(0)
# a combobox demonstrating trees
frame = Gtk.Frame(label='Where are we ?')
vbox.pack_start(frame, False, False, 0)
box = Gtk.VBox(homogeneous=False, spacing=0)
box.set_border_width(5)
frame.add(box)
model = self.create_capital_store()
combo = Gtk.ComboBox(model=model)
box.add(combo)
renderer = Gtk.CellRendererText()
combo.pack_start(renderer, True)
combo.add_attribute(renderer, 'text', 0)
combo.set_cell_data_func(renderer, self.is_capital_sensitive, None)
# FIXME: make new_from_indices work
# make constructor take list or string of indices
path = Gtk.TreePath.new_from_string('0:8')
treeiter = model.get_iter(path)
combo.set_active_iter(treeiter)
# A GtkComboBoxEntry with validation.
frame = Gtk.Frame(label='Editable')
vbox.pack_start(frame, False, False, 0)
box = Gtk.VBox(homogeneous=False, spacing=0)
box.set_border_width(5)
frame.add(box)
combo = Gtk.ComboBoxText.new_with_entry()
self.fill_combo_entry(combo)
box.add(combo)
entry = MaskEntry(mask='^([0-9]*|One|Two|2\302\275|Three)$')
Gtk.Container.remove(combo, combo.get_child())
combo.add(entry)
# A combobox with string IDs
frame = Gtk.Frame(label='String IDs')
vbox.pack_start(frame, False, False, 0)
box = Gtk.VBox(homogeneous=False, spacing=0)
box.set_border_width(5)
frame.add(box)
# FIXME: model is not setup when constructing Gtk.ComboBoxText()
# so we call new() - Gtk should fix this to setup the model
# in __init__, not in the constructor
combo = Gtk.ComboBoxText.new()
combo.append('never', 'Not visible')
combo.append('when-active', 'Visible when active')
combo.append('always', 'Always visible')
box.add(combo)
entry = Gtk.Entry()
# FIXME: a bug in PyGObject does not allow us to access dynamic
# methods on GObject.Object, so bind properties the hard way
# GObject.Object.bind_property(combo, 'active-id',
# entry, 'text',
# GObject.BindingFlags.BIDIRECTIONAL)
self.combo_notify_id = \
combo.connect('notify::active-id',
self.combo_active_id_changed, entry)
self.entry_notify_id = \
entry.connect('notify::text',
self.entry_text_changed, combo)
box.add(entry)
self.window.show_all()
def combo_active_id_changed(self, combo, pspec, entry):
entry.disconnect(self.entry_notify_id)
entry.set_text(combo.get_property('active-id'))
self.entry_notify_id = \
entry.connect('notify::text',
self.entry_text_changed, combo)
def entry_text_changed(self, entry, pspec, combo):
combo.disconnect(self.combo_notify_id)
combo.set_property('active-id', entry.get_text())
self.combo_notify_id = \
combo.connect('notify::active-id',
self.combo_active_id_changed, entry)
def strip_underscore(self, s):
return s.replace('_', '')
def create_stock_icon_store(self):
stock_id = (Gtk.STOCK_DIALOG_WARNING,
Gtk.STOCK_STOP,
Gtk.STOCK_NEW,
Gtk.STOCK_CLEAR,
None,
Gtk.STOCK_OPEN)
cellview = Gtk.CellView()
store = Gtk.ListStore(GdkPixbuf.Pixbuf, str)
for id in stock_id:
if id is not None:
pixbuf = cellview.render_icon(id, Gtk.IconSize.BUTTON, None)
item = Gtk.stock_lookup(id)
label = self.strip_underscore(item.label)
store.append((pixbuf, label))
else:
store.append((None, 'separator'))
return store
def set_sensitive(self, cell_layout, cell, tree_model, treeiter, data):
"""
A GtkCellLayoutDataFunc that demonstrates how one can control
sensitivity of rows. This particular function does nothing
useful and just makes the second row insensitive.
"""
path = tree_model.get_path(treeiter)
indices = path.get_indices()
sensitive = not(indices[0] == 1)
cell.set_property('sensitive', sensitive)
def is_separator(self, model, treeiter, data):
"""
A GtkTreeViewRowSeparatorFunc that demonstrates how rows can be
rendered as separators. This particular function does nothing
useful and just turns the fourth row into a separator.
"""
path = model.get_path(treeiter)
indices = path.get_indices()
result = (indices[0] == 4)
return result
def create_capital_store(self):
capitals = (
{'group': 'A - B', 'capital': None},
{'group': None, 'capital': 'Albany'},
{'group': None, 'capital': 'Annapolis'},
{'group': None, 'capital': 'Atlanta'},
{'group': None, 'capital': 'Augusta'},
{'group': None, 'capital': 'Austin'},
{'group': None, 'capital': 'Baton Rouge'},
{'group': None, 'capital': 'Bismarck'},
{'group': None, 'capital': 'Boise'},
{'group': None, 'capital': 'Boston'},
{'group': 'C - D', 'capital': None},
{'group': None, 'capital': 'Carson City'},
{'group': None, 'capital': 'Charleston'},
{'group': None, 'capital': 'Cheyenne'},
{'group': None, 'capital': 'Columbia'},
{'group': None, 'capital': 'Columbus'},
{'group': None, 'capital': 'Concord'},
{'group': None, 'capital': 'Denver'},
{'group': None, 'capital': 'Des Moines'},
{'group': None, 'capital': 'Dover'},
{'group': 'E - J', 'capital': None},
{'group': None, 'capital': 'Frankfort'},
{'group': None, 'capital': 'Harrisburg'},
{'group': None, 'capital': 'Hartford'},
{'group': None, 'capital': 'Helena'},
{'group': None, 'capital': 'Honolulu'},
{'group': None, 'capital': 'Indianapolis'},
{'group': None, 'capital': 'Jackson'},
{'group': None, 'capital': 'Jefferson City'},
{'group': None, 'capital': 'Juneau'},
{'group': 'K - O', 'capital': None},
{'group': None, 'capital': 'Lansing'},
{'group': None, 'capital': 'Lincoln'},
{'group': None, 'capital': 'Little Rock'},
{'group': None, 'capital': 'Madison'},
{'group': None, 'capital': 'Montgomery'},
{'group': None, 'capital': 'Montpelier'},
{'group': None, 'capital': 'Nashville'},
{'group': None, 'capital': 'Oklahoma City'},
{'group': None, 'capital': 'Olympia'},
{'group': 'P - S', 'capital': None},
{'group': None, 'capital': 'Phoenix'},
{'group': None, 'capital': 'Pierre'},
{'group': None, 'capital': 'Providence'},
{'group': None, 'capital': 'Raleigh'},
{'group': None, 'capital': 'Richmond'},
{'group': None, 'capital': 'Sacramento'},
{'group': None, 'capital': 'Salem'},
{'group': None, 'capital': 'Salt Lake City'},
{'group': None, 'capital': 'Santa Fe'},
{'group': None, 'capital': 'Springfield'},
{'group': None, 'capital': 'St. Paul'},
{'group': 'T - Z', 'capital': None},
{'group': None, 'capital': 'Tallahassee'},
{'group': None, 'capital': 'Topeka'},
{'group': None, 'capital': 'Trenton'}
)
parent = None
store = Gtk.TreeStore(str)
for item in capitals:
if item['group']:
parent = store.append(None, (item['group'],))
elif item['capital']:
store.append(parent, (item['capital'],))
return store
def is_capital_sensitive(self, cell_layout, cell, tree_model, treeiter, data):
sensitive = not tree_model.iter_has_child(treeiter)
cell.set_property('sensitive', sensitive)
def fill_combo_entry(self, entry):
entry.append_text('One')
entry.append_text('Two')
entry.append_text('2\302\275')
entry.append_text('Three')
def main(demoapp=None):
app = ComboboxApp(demoapp)
Gtk.main()
if __name__ == '__main__':
main()
|
nzjrs/pygobject
|
demos/gtk-demo/demos/combobox.py
|
Python
|
lgpl-2.1
| 12,201
|
[
"COLUMBUS"
] |
985b7f86f7c7c10f022f994a6f9a0f6add08d2b3337ee3365b82df023ce45c64
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Simple script that tests CytoscapeGraphDrawer.
This script is kept separate from the unit tests as it is very
hard to test for the correctness of CytoscapeGraphDrawer without
a working instance of Cytoscape.
Prerequisites for running this test:
1. Start Cytoscape
2. Activate the Cytoscape RPC plugin, listening at port 9000
"""
from igraph import Graph
from igraph.drawing.graph import CytoscapeGraphDrawer
def test():
g = Graph.GRG(100, 0.2)
### Adding network attributes
g["name"] = "Network name"
g["version"] = 5
g["obsolete"] = False
g["density"] = g.density()
### Adding vertex attributes
# String attribute
g.vs["name"] = ["Node %d" % (i+1) for i in xrange(g.vcount())]
# Integer attribute
g.vs["degree"] = g.degree()
# Float attribute
g.vs["pagerank"] = g.pagerank()
# Boolean attribute
g.vs["even"] = [i % 2 for i in xrange(g.vcount())]
# Mixed attribute
g.vs["mixed"] = ["abc", 123, None, 1.0] * ((g.vcount()+3) / 4)
# Special attribute with Hungarian accents
g.vs[0]["name"] = u"árvíztűrő tükörfúrógép ÁRVÍZTŰRŐ TÜKÖRFÚRÓGÉP"
### Adding edge attributes
# String attribute
g.es["name"] = ["Edge %d -- %d" % edge.tuple for edge in g.es]
# Integer attribute
g.es["multiplicity"] = g.count_multiple()
# Float attribute
g.es["betweenness"] = g.edge_betweenness()
# Boolean attribute
g.es["even"] = [i % 2 for i in xrange(g.ecount())]
# Mixed attribute
g.es["mixed"] = [u"yay", 123, None, 0.7] * ((g.ecount()+3) / 4)
# Sending graph
drawer = CytoscapeGraphDrawer()
drawer.draw(g, layout="fr")
# Fetching graph
g2 = drawer.fetch()
del g2.vs["hiddenLabel"]
del g2.es["interaction"]
# Check isomorphism
result = g2.isomorphic(g)
if not result:
raise ValueError("g2 not isomorphic to g")
# Check the graph attributes
if set(g.attributes()) != set(g2.attributes()):
raise ValueError("Graph attribute name set mismatch")
for attr_name in g.attributes():
if g[attr_name] != g2[attr_name]:
raise ValueError("Graph attribute mismatch for %r" % attr_name)
# Check the vertex attribute names
if set(g.vertex_attributes()) != set(g2.vertex_attributes()):
raise ValueError("Vertex attribute name set mismatch")
# Check the edge attribute names
if set(g.edge_attributes()) != set(g2.edge_attributes()):
raise ValueError("Edge attribute name set mismatch")
if __name__ == "__main__":
test()
|
nickeubank/python-igraph
|
test/cytoscape_test.py
|
Python
|
gpl-2.0
| 2,627
|
[
"Cytoscape"
] |
daaaecfdfabebd0c71b68d4fe6e738c56c1a1ace26557b07ce56567acd55b6f8
|
import os
from sfepy import data_dir
from sfepy.base.base import debug, nm
from sfepy.homogenization.micmac import get_homog_coefs_linear
from sfepy.homogenization.recovery import save_recovery_region, recover_micro_hook
def post_process( out, pb, state, extend = False ):
from sfepy.base.base import Struct
if isinstance( state, dict ):
pass
else:
stress = pb.evaluate('de_cauchy_stress.i1.Omega( solid.D, u )')
strain = pb.evaluate('de_cauchy_strain.i1.Omega( u )')
out['cauchy_strain'] = Struct( name = 'output_data',
mode = 'cell', data = strain,
dofs = None )
out['cauchy_stress'] = Struct( name = 'output_data',
mode = 'cell', data = stress,
dofs = None )
if pb.conf.options.get_default_attr('recover_micro', False):
rname = pb.conf.options.recovery_region
region = pb.domain.regions[rname]
filename = os.path.join(os.path.dirname(pb.get_output_name()),
'recovery_region.vtk')
save_recovery_region(pb, rname, filename=filename);
rstrain = pb.evaluate('de_cauchy_strain.i1.%s( u )' % rname)
recover_micro_hook( pb.conf.options.micro_filename,
region, {'strain' : rstrain} )
return out
def get_elements(coors, domain=None):
return {0 : nm.arange(50, domain.shape.n_el, 100)}
functions = {
'get_elements' : (get_elements,),
'get_homog' : (lambda ts, coors, mode = None, region = None, ig = None:
get_homog_coefs_linear( ts, coors, mode, region, ig,
micro_filename = options['micro_filename'] ), )
}
filename_mesh = data_dir + '/meshes/3d/cylinder.mesh'
regions = {
'Omega' : ('all', {}),
'Left' : ('nodes in (x < 0.001)', {}),
'Right' : ('nodes in (x > 0.099)', {}),
'Recovery' : ('elements by get_elements', {}),
}
materials = {
'solid' : 'get_homog',
}
fields = {
'3_displacement': ('real', 3, 'Omega', 1),
}
integrals = {
'i1' : ('v', 'gauss_o1_d3'),
}
variables = {
'u' : ('unknown field', '3_displacement', 0),
'v' : ('test field', '3_displacement', 'u'),
}
ebcs = {
'Fixed' : ('Left', {'u.all' : 0.0}),
'PerturbedSurface' : ('Right', {'u.0' : 0.02, 'u.1' : 0.0, 'u.2' : 0.0}),
}
equations = {
'balance_of_forces' :
"""dw_lin_elastic.i1.Omega( solid.D, v, u ) = 0""",
}
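# Editor's note (not part of the original example): dw_lin_elastic is sfepy's
# linear elastic weak form, roughly int_Omega D_ijkl e_ij(v) e_kl(u) dOmega = 0,
# with the homogenized stiffness D supplied through the 'get_homog' material
# function defined above.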
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton',
{ 'i_max' : 1,
'eps_a' : 1e-6,
'problem' : 'nonlinear'}),
}
fe = {
'chunk_size' : 10000
}
micro_filename = data_dir \
+ '/examples/homogenization/linear_homogenization_up.py'
options = {
'nls' : 'newton',
'ls' : 'ls',
'output_dir' : 'output',
'post_process_hook' : 'post_process',
'output_prefix' : 'macro:',
'recover_micro': True,
'recovery_region' : 'Recovery',
'micro_filename' : micro_filename,
}
|
olivierverdier/sfepy
|
examples/homogenization/linear_elastic_mM.py
|
Python
|
bsd-3-clause
| 3,181
|
[
"VTK"
] |
14fefedb258944ac895d0e732fe38d3f6c93792e6a1768a9c747e3086741fa0e
|
#!/usr/bin/python
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2010-2013 Francois Beaune, Jupiter Jazz Limited
# Copyright (c) 2014-2017 Francois Beaune, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import print_function
from distutils import archive_util, dir_util
from stat import *
from subprocess import *
from xml.etree.ElementTree import ElementTree
import glob
import os
import platform
import re
import shutil
import stat
import subprocess
import sys
import time
import traceback
import zipfile
#--------------------------------------------------------------------------------------------------
# Constants.
#--------------------------------------------------------------------------------------------------
VERSION = "2.4.7"
SETTINGS_FILENAME = "appleseed.package.configuration.xml"
#--------------------------------------------------------------------------------------------------
# Utility functions.
#--------------------------------------------------------------------------------------------------
def info(message):
print(" " + message)
def progress(message):
print(" " + message + "...")
def fatal(message):
print("Fatal: " + message + ". Aborting.")
if sys.exc_info()[0]:
print(traceback.format_exc())
sys.exit(1)
def exe(filepath):
return filepath + ".exe" if os.name == "nt" else filepath
def safe_delete_file(path):
try:
if os.path.exists(path):
os.remove(path)
except OSError:
fatal("Failed to delete file '" + path + "'")
def on_rmtree_error(func, path, exc_info):
# path contains the path of the file that couldn't be removed.
# Let's just assume that it's read-only and unlink it.
os.chmod(path, stat.S_IWRITE)
os.unlink(path)
def safe_delete_directory(path):
Attempts = 10
for attempt in range(Attempts):
try:
if os.path.exists(path):
shutil.rmtree(path, onerror=on_rmtree_error)
return
except OSError:
if attempt < Attempts - 1:
time.sleep(0.5)
else:
fatal("Failed to delete directory '" + path + "'")
def safe_make_directory(path):
if not os.path.isdir(path):
os.makedirs(path)
def pushd(path):
old_path = os.getcwd()
os.chdir(path)
return old_path
def extract_zip_file(zip_path, output_path):
zf = zipfile.ZipFile(zip_path)
zf.extractall(output_path)
zf.close()
def copy_glob(input_pattern, output_path):
for input_file in glob.glob(input_pattern):
shutil.copy(input_file, output_path)
def make_writable(filepath):
os.chmod(filepath, S_IRUSR | S_IWUSR)
#--------------------------------------------------------------------------------------------------
# Settings.
#--------------------------------------------------------------------------------------------------
class Settings:
def load(self):
print("Loading settings from " + SETTINGS_FILENAME + "...")
tree = ElementTree()
try:
tree.parse(SETTINGS_FILENAME)
except IOError:
fatal("Failed to load configuration file '" + SETTINGS_FILENAME + "'")
self.load_values(tree)
self.print_summary()
def load_values(self, tree):
self.platform = self.__get_required(tree, "platform")
self.configuration = self.__get_required(tree, "configuration")
self.appleseed_path = self.__get_required(tree, "appleseed_path")
self.appleseed_headers_path = self.__get_required(tree, "appleseed_headers_path")
self.qt_runtime_path = self.__get_required(tree, "qt_runtime_path")
self.platform_runtime_path = self.__get_required(tree, "platform_runtime_path")
self.package_output_path = self.__get_required(tree, "package_output_path")
def print_summary(self):
print("")
print(" Platform: " + self.platform)
print(" Configuration: " + self.configuration)
print(" Path to appleseed: " + self.appleseed_path)
print(" Path to appleseed headers: " + self.appleseed_headers_path)
print(" Path to Qt runtime: " + self.qt_runtime_path)
if os.name == "nt":
print(" Path to platform runtime: " + self.platform_runtime_path)
print(" Output directory: " + self.package_output_path)
print("")
def __get_required(self, tree, key):
value = tree.findtext(key)
if value is None:
fatal("Missing value \"{0}\" in configuration file".format(key))
return value
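# Editor's sketch (not shipped with appleseed): the settings file is a flat
# XML document whose child element names match the keys read above. The root
# element name below is hypothetical; only the child names come from the code.
#
#   <Settings>
#       <platform>linux</platform>
#       <configuration>Release</configuration>
#       <appleseed_path>/path/to/appleseed</appleseed_path>
#       <package_output_path>/path/to/output</package_output_path>
#   </Settings>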
#--------------------------------------------------------------------------------------------------
# Package information.
#--------------------------------------------------------------------------------------------------
class PackageInfo:
def __init__(self, settings):
self.settings = settings
def load(self):
print("Loading package information...")
self.retrieve_git_tag()
self.build_package_path()
self.print_summary()
def retrieve_git_tag(self):
old_path = pushd(self.settings.appleseed_path)
self.version = Popen("git describe --long", stdout=PIPE, shell=True).stdout.read().strip()
os.chdir(old_path)
def build_package_path(self):
package_name = "appleseed-" + self.version + "-" + self.settings.platform + ".zip"
self.package_path = os.path.join(self.settings.package_output_path, self.version, package_name)
def print_summary(self):
print("")
print(" Version: " + self.version)
print(" Package path: " + self.package_path)
print("")
#--------------------------------------------------------------------------------------------------
# Base package builder.
#--------------------------------------------------------------------------------------------------
class PackageBuilder:
def __init__(self, settings, package_info):
self.settings = settings
self.package_info = package_info
def build_package(self):
print("Building package:")
print("")
self.orchestrate()
print("")
print("The package was successfully built.")
def orchestrate(self):
self.remove_leftovers()
self.retrieve_sandbox_from_git_repository()
self.deploy_sandbox_to_stage()
self.cleanup_stage()
self.add_local_binaries_to_stage()
self.add_local_libraries_to_stage()
self.add_headers_to_stage()
self.add_shaders_to_stage()
self.add_scripts_to_stage()
self.add_local_schema_files_to_stage()
self.add_text_files_to_stage()
self.add_dummy_files_into_empty_directories()
self.disable_system_qt_plugins()
self.alter_stage()
self.build_final_zip_file()
self.remove_stage()
def remove_leftovers(self):
progress("Removing leftovers from previous invocations")
safe_delete_directory("appleseed")
safe_delete_file("sandbox.zip")
safe_delete_file(self.package_info.package_path)
def retrieve_sandbox_from_git_repository(self):
progress("Retrieving sandbox from Git repository")
old_path = pushd(os.path.join(self.settings.appleseed_path, "sandbox"))
self.run("git archive --format=zip --output=" + os.path.join(old_path, "sandbox.zip") + " --worktree-attributes HEAD")
os.chdir(old_path)
def deploy_sandbox_to_stage(self):
progress("Deploying sandbox to staging directory")
extract_zip_file("sandbox.zip", "appleseed/")
safe_delete_file("sandbox.zip")
def cleanup_stage(self):
progress("Cleaning up staging directory")
# Remove API reference documentation.
safe_delete_directory("appleseed/documentation/apireference")
# Remove the test suite.
safe_delete_directory("appleseed/tests/test scenes")
# Remove voluminous unit tests/benchmarks data.
safe_delete_file("appleseed/tests/unit benchmarks/inputs/test_knn_particles.bin")
safe_delete_file("appleseed/tests/unit benchmarks/inputs/test_knn_photons.bin")
# Remove the devkit which we ship separately.
safe_delete_directory("appleseed/extras/devkit")
def add_local_binaries_to_stage(self):
progress("Adding local binaries to staging directory")
safe_make_directory("appleseed/bin")
dir_util.copy_tree(os.path.join(self.settings.appleseed_path, "sandbox/bin", self.settings.configuration), "appleseed/bin/")
shutil.copy(os.path.join(self.settings.appleseed_path, "sandbox/bin", exe("maketx")), "appleseed/bin/")
shutil.copy(os.path.join(self.settings.appleseed_path, "sandbox/bin", exe("oslc")), "appleseed/bin/")
shutil.copy(os.path.join(self.settings.appleseed_path, "sandbox/bin", exe("oslinfo")), "appleseed/bin/")
def add_local_libraries_to_stage(self):
progress("Adding local libraries to staging directory")
safe_make_directory("appleseed/lib")
dir_util.copy_tree(os.path.join(self.settings.appleseed_path, "sandbox/lib", self.settings.configuration), "appleseed/lib/")
#
# This method is used by the Mac and Linux package builders.
# It requires the following members to be defined:
#
# self.shared_lib_ext
# self.get_dependencies_for_file()
#
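# Editor's sketch (not part of appleseed): the minimum a hypothetical Unix
# builder subclass would have to provide before calling this helper.
#
#   class SomeUnixPackageBuilder(PackageBuilder):
#       def __init__(self, settings, package_info):
#           PackageBuilder.__init__(self, settings, package_info)
#           self.shared_lib_ext = ".so"
#       def get_dependencies_for_file(self, filename):
#           return set()  # resolve shared-library paths for 'filename' here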
def add_unix_dependencies_to_stage(self):
# Get shared libs needed by binaries.
bin_libs = set()
for dirpath, dirnames, filenames in os.walk("appleseed/bin"):
for filename in filenames:
ext = os.path.splitext(filename)[1]
if ext != ".py" and ext != ".conf":
libs = self.get_dependencies_for_file(os.path.join("appleseed/bin", filename))
bin_libs = bin_libs.union(libs)
# Get shared libs needed by appleseed.python.
for dirpath, dirnames, filenames in os.walk("appleseed/lib"):
appleseedpython_shared_lib = "_appleseedpython" + self.shared_lib_ext
if appleseedpython_shared_lib in filenames:
libs = self.get_dependencies_for_file(os.path.join(dirpath, appleseedpython_shared_lib))
bin_libs = bin_libs.union(libs)
# Get shared libs needed by libraries.
lib_libs = set()
for lib in bin_libs:
libs = self.get_dependencies_for_file(lib)
lib_libs = lib_libs.union(libs)
all_libs = bin_libs.union(lib_libs)
if False:
# Print dependencies.
info(" Dependencies:")
for lib in all_libs:
info(" " + lib)
# Copy needed libs to lib directory.
dest_dir = os.path.join("appleseed", "lib/")
for lib in all_libs:
# The library might already exist, but without writing rights.
lib_name = os.path.basename(lib)
dest_path = os.path.join(dest_dir, lib_name)
if not os.path.exists(dest_path):
progress(" Copying {0} to {1}".format(lib, dest_dir))
try:
shutil.copy(lib, dest_dir)
make_writable(dest_path)
except IOError:
info("WARNING: could not copy {0} to {1}".format(lib, dest_dir))
def add_headers_to_stage(self):
progress("Adding headers to staging directory")
# appleseed headers.
safe_make_directory("appleseed/include")
ignore_files = shutil.ignore_patterns("*.cpp", "*.c", "*.xsd", "snprintf", "version.h.in")
shutil.copytree(os.path.join(self.settings.appleseed_headers_path, "foundation"), "appleseed/include/foundation", ignore=ignore_files)
shutil.copytree(os.path.join(self.settings.appleseed_headers_path, "main"), "appleseed/include/main", ignore=ignore_files)
shutil.copytree(os.path.join(self.settings.appleseed_headers_path, "renderer"), "appleseed/include/renderer", ignore=ignore_files)
def add_shaders_to_stage(self):
progress("Adding shaders to staging directory")
shutil.rmtree("appleseed/shaders")
shutil.copytree(os.path.join(self.settings.appleseed_path, "sandbox/shaders"), "appleseed/shaders")
def add_scripts_to_stage(self):
progress("Adding scripts to staging directory")
shutil.copy("convertmany.py", "appleseed/bin/")
shutil.copy("rendermany.py", "appleseed/bin/")
shutil.copy("updatemany.py", "appleseed/bin/")
shutil.copy("rendernode.py", "appleseed/bin/")
shutil.copy("rendermanager.py", "appleseed/bin/")
shutil.copy("mitsuba2appleseed.py", "appleseed/bin/")
def add_local_schema_files_to_stage(self):
progress("Adding local schema files to staging directory")
safe_make_directory("appleseed/schemas")
copy_glob(os.path.join(self.settings.appleseed_path, "sandbox/schemas/*.xsd"), "appleseed/schemas/")
def add_text_files_to_stage(self):
progress("Adding LICENSE.txt and README.md files")
shutil.copy(os.path.join(self.settings.appleseed_path, "LICENSE.txt"), "appleseed/")
shutil.copy(os.path.join(self.settings.appleseed_path, "README.md"), "appleseed/")
def add_dummy_files_into_empty_directories(self):
progress("Adding dummy files to preserve empty directories")
for dirpath, dirnames, filenames in os.walk("."):
if len(dirnames) == 0 and len(filenames) == 0:
self.create_preserve_file(dirpath)
def disable_system_qt_plugins(self):
progress("Disabling system's Qt plugins")
with open("appleseed/bin/qt.conf", "w") as f:
pass
def create_preserve_file(self, path):
with open(os.path.join(path, "preserve.txt"), "w") as f:
f.write("This file allows to preserve this otherwise empty directory.\n")
# This method is overridden in the platform-specific builders below.
def alter_stage(self):
return
def build_final_zip_file(self):
progress("Building final zip file from staging directory")
package_base_path = os.path.splitext(self.package_info.package_path)[0]
archive_util.make_zipfile(package_base_path, "appleseed")
def remove_stage(self):
progress("Deleting staging directory")
safe_delete_directory("appleseed")
def run(self, cmdline):
info("Running command line: {0}".format(cmdline))
os.system(cmdline)
def run_subprocess(self, cmdline):
p = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
return p.returncode, out, err
#--------------------------------------------------------------------------------------------------
# Windows package builder.
#--------------------------------------------------------------------------------------------------
class WindowsPackageBuilder(PackageBuilder):
def alter_stage(self):
self.add_dependencies_to_stage()
def add_dependencies_to_stage(self):
progress("Windows-specific: Adding dependencies to staging directory")
self.copy_qt_framework("QtCore")
self.copy_qt_framework("QtGui")
copy_glob(os.path.join(self.settings.platform_runtime_path, "*"), "appleseed/bin/")
def copy_qt_framework(self, framework_name):
src_filepath = os.path.join(self.settings.qt_runtime_path, framework_name + "4" + ".dll")
dst_path = os.path.join("appleseed", "bin")
shutil.copy(src_filepath, dst_path)
#--------------------------------------------------------------------------------------------------
# Mac package builder.
#--------------------------------------------------------------------------------------------------
class MacPackageBuilder(PackageBuilder):
def __init__(self, settings, package_info):
PackageBuilder.__init__(self, settings, package_info)
self.shared_lib_ext = ".dylib"
self.system_libs_prefixes = ["/System/Library/", "/usr/lib/libcurl", "/usr/lib/libc++",
"/usr/lib/libbz2", "/usr/lib/libSystem", "usr/lib/libz",
"/usr/lib/libncurses", "/usr/lib/libobjc.A.dylib"]
def alter_stage(self):
safe_delete_file("appleseed/bin/.DS_Store")
self.add_dependencies_to_stage()
self.fixup_binaries()
self.create_qt_conf_file()
os.rename("appleseed/bin/appleseed.studio", "appleseed/bin/appleseed-studio")
def add_dependencies_to_stage(self):
progress("Mac-specific: Adding dependencies to staging directory")
self.add_unix_dependencies_to_stage()
self.copy_qt_framework("QtCore")
self.copy_qt_framework("QtGui")
self.copy_qt_resources("QtGui")
self.copy_qt_framework("QtOpenGL")
def copy_qt_framework(self, framework_name):
framework_dir = framework_name + ".framework"
src_filepath = os.path.join(self.settings.qt_runtime_path, framework_dir, "Versions", "4", framework_name)
dest_path = os.path.join("appleseed", "lib", framework_dir, "Versions", "4")
safe_make_directory(dest_path)
shutil.copy(src_filepath, dest_path)
make_writable(os.path.join(dest_path, framework_name))
def copy_qt_resources(self, framework_name):
framework_dir = framework_name + ".framework"
src_path = os.path.join(self.settings.qt_runtime_path, framework_dir, "Versions", "4", "Resources")
dest_path = os.path.join("appleseed", "lib", framework_dir, "Resources")
shutil.copytree(src_path, dest_path)
def fixup_binaries(self):
progress("Mac-specific: Fixing up binaries")
self.set_libraries_ids()
self.set_qt_framework_ids()
self.change_library_paths_in_libraries()
self.change_library_paths_in_executables()
self.change_qt_framework_paths_in_qt_frameworks()
def set_libraries_ids(self):
for dirpath, dirnames, filenames in os.walk("appleseed/lib"):
for filename in filenames:
if os.path.splitext(filename)[1] == ".dylib":
lib_path = os.path.join(dirpath, filename)
self.set_library_id(lib_path, filename)
def set_qt_framework_ids(self):
self.set_library_id("appleseed/lib/QtCore.framework/Versions/4/QtCore", "QtCore.framework/Versions/4/QtCore")
self.set_library_id("appleseed/lib/QtGui.framework/Versions/4/QtGui", "QtGui.framework/Versions/4/QtGui")
self.set_library_id("appleseed/lib/QtOpenGL.framework/Versions/4/QtOpenGL", "QtOpenGL.framework/Versions/4/QtOpenGL")
def change_library_paths_in_libraries(self):
for dirpath, dirnames, filenames in os.walk("appleseed/lib"):
for filename in filenames:
ext = os.path.splitext(filename)[1]
if ext == ".dylib" or ext == ".so":
lib_path = os.path.join(dirpath, filename)
self.change_library_paths_in_binary(lib_path)
self.change_qt_framework_paths_in_binary(lib_path)
def change_library_paths_in_executables(self):
for dirpath, dirnames, filenames in os.walk("appleseed/bin"):
for filename in filenames:
ext = os.path.splitext(filename)[1]
if ext != ".py" and ext != ".conf":
exe_path = os.path.join(dirpath, filename)
self.change_library_paths_in_binary(exe_path)
self.change_qt_framework_paths_in_binary(exe_path)
# Can be used on executables and dynamic libraries.
def change_library_paths_in_binary(self, bin_path):
progress("Patching {0}".format(bin_path))
bin_dir = os.path.dirname(bin_path)
path_to_appleseed_lib = os.path.relpath("appleseed/lib/", bin_dir)
for lib_path in self.get_dependencies_for_file(bin_path, fix_paths=False):
lib_name = os.path.basename(lib_path)
self.change_library_path(bin_path, lib_path, "@loader_path/{0}/{1}".format(path_to_appleseed_lib, lib_name))
# Can be used on executables and dynamic libraries.
def change_qt_framework_paths_in_binary(self, bin_path):
for fwk_path in self.get_qt_frameworks_for_file(bin_path):
fwk_name = re.search(r"(Qt.*)\.framework", fwk_path).group(1)
self.change_library_path(bin_path, fwk_path, "@executable_path/../lib/{0}.framework/Versions/4/{0}".format(fwk_name))
def change_qt_framework_paths_in_qt_frameworks(self):
self.change_qt_framework_paths_in_binary("appleseed/lib/QtCore.framework/Versions/4/QtCore")
self.change_qt_framework_paths_in_binary("appleseed/lib/QtGui.framework/Versions/4/QtGui")
self.change_qt_framework_paths_in_binary("appleseed/lib/QtOpenGL.framework/Versions/4/QtOpenGL")
def set_library_id(self, target, name):
self.run('install_name_tool -id "{0}" {1}'.format(name, target))
def change_library_path(self, target, old, new):
self.run('install_name_tool -change "{0}" "{1}" {2}'.format(old, new, target))
def get_dependencies_for_file(self, filename, fix_paths=True):
returncode, out, err = self.run_subprocess(["otool", "-L", filename])
if returncode != 0:
fatal("Failed to invoke otool(1) to get dependencies for {0}: {1}".format(filename, err))
libs = set()
for line in out.split("\n")[1:]: # skip the first line
line = line.strip()
# Ignore empty lines.
if len(line) == 0:
continue
# Parse the line.
m = re.match(r"(.*) \(compatibility version .*, current version .*\)", line)
if not m:
fatal("Failed to parse line from otool(1) output: " + line)
lib = m.group(1)
# Ignore libs relative to @loader_path.
if "@loader_path" in lib:
continue
# Ignore system libs.
if self.is_system_lib(lib):
continue
# Ignore Qt frameworks.
if re.search(r"Qt.*\.framework", lib):
continue
if fix_paths:
# Optionally search for libraries in other places.
if not os.path.exists(lib):
candidate = os.path.join("/usr/local/lib/", lib)
if os.path.exists(candidate):
lib = candidate
libs.add(lib)
if False:
info("Dependencies for file {0}:".format(filename))
for lib in libs:
info(" {0}".format(lib))
return libs
def get_qt_frameworks_for_file(self, filename, fix_paths=True):
returncode, out, err = self.run_subprocess(["otool", "-L", filename])
if returncode != 0:
fatal("Failed to invoke otool(1) to get dependencies for {0}: {1}".format(filename, err))
libs = set()
for line in out.split("\n")[1:]: # skip the first line
line = line.strip()
# Ignore empty lines.
if len(line) == 0:
continue
# Parse the line.
m = re.match(r"(.*) \(compatibility version .*, current version .*\)", line)
if not m:
fatal("Failed to parse line from otool(1) output: " + line)
lib = m.group(1)
if re.search(r"Qt.*\.framework", lib):
libs.add(lib)
return libs
def is_system_lib(self, lib):
for prefix in self.system_libs_prefixes:
if lib.startswith(prefix):
return True
return False
def create_qt_conf_file(self):
safe_make_directory("appleseed/bin/Contents/Resources")
open("appleseed/bin/Contents/Resources/qt.conf", "w").close()
#--------------------------------------------------------------------------------------------------
# Linux package builder.
#--------------------------------------------------------------------------------------------------
class LinuxPackageBuilder(PackageBuilder):
def __init__(self, settings, package_info):
PackageBuilder.__init__(self, settings, package_info)
self.shared_lib_ext = ".so"
self.system_libs_prefixes = ["linux", "librt", "libpthread", "libGL", "libX", "libselinux",
"libICE", "libSM", "libdl", "libm.so", "libgcc", "libc.so",
"/lib64/ld-linux-", "libstdc++", "libxcb", "libdrm", "libnsl",
"libuuid", "libgthread", "libglib", "libgobject", "libglapi",
"libffi", "libfontconfig", "libutil", "libpython",
"libxshmfence.so"]
def alter_stage(self):
self.make_executable(os.path.join("appleseed/bin", "maketx"))
self.make_executable(os.path.join("appleseed/bin", "oslc"))
self.make_executable(os.path.join("appleseed/bin", "oslinfo"))
self.add_dependencies_to_stage()
self.set_runtime_paths_on_binaries()
self.clear_runtime_paths_on_libraries()
def make_executable(self, filepath):
mode = os.stat(filepath)[ST_MODE]
mode |= S_IXUSR | S_IXGRP | S_IXOTH
os.chmod(filepath, mode)
def add_dependencies_to_stage(self):
progress("Linux-specific: Adding dependencies to staging directory")
self.add_unix_dependencies_to_stage()
def set_runtime_paths_on_binaries(self):
progress("Linux-specific: Setting runtime paths on binaries")
for dirpath, dirnames, filenames in os.walk("appleseed/bin"):
for filename in filenames:
ext = os.path.splitext(filename)[1]
if ext != ".py" and ext != ".conf":
self.run("chrpath -r \$ORIGIN/../lib " + os.path.join("appleseed/bin", filename))
def clear_runtime_paths_on_libraries(self):
progress("Linux-specific: Clearing runtime paths on libraries")
for dirpath, dirnames, filenames in os.walk("appleseed/lib"):
for filename in filenames:
if os.path.splitext(filename)[1] == ".so":
self.run("chrpath -d " + os.path.join(dirpath, filename))
def get_dependencies_for_file(self, filename):
returncode, out, err = self.run_subprocess(["ldd", filename])
if returncode != 0:
fatal("Failed to invoke ldd(1) to get dependencies for {0}: {1}".format(filename, err))
libs = set()
for line in out.split("\n"):
line = line.strip()
# Ignore empty lines.
if len(line) == 0:
continue
# Ignore system libs.
if self.is_system_lib(line):
continue
libs.add(line.split()[2])
return libs
def is_system_lib(self, lib):
for prefix in self.system_libs_prefixes:
if lib.startswith(prefix):
return True
return False
#--------------------------------------------------------------------------------------------------
# Entry point.
#--------------------------------------------------------------------------------------------------
def main():
print("appleseed.package version " + VERSION)
print("")
print("IMPORTANT:")
print("")
print(" - You may need to run this tool with sudo on Linux and macOS")
print(" - Make sure there are no obsolete binaries in sandbox/bin")
print("")
settings = Settings()
package_info = PackageInfo(settings)
settings.load()
package_info.load()
if os.name == "nt":
package_builder = WindowsPackageBuilder(settings, package_info)
elif os.name == "posix" and platform.mac_ver()[0] != "":
package_builder = MacPackageBuilder(settings, package_info)
elif os.name == "posix" and platform.mac_ver()[0] == "":
package_builder = LinuxPackageBuilder(settings, package_info)
else:
fatal("Unsupported platform: " + os.name)
package_builder.build_package()
if __name__ == '__main__':
main()
|
Aakash1312/appleseed
|
scripts/appleseed.package.py
|
Python
|
mit
| 29,530
|
[
"VisIt"
] |
1b103c4b2e54e9109377c8cb939525742957f555a2d4fc6952684eb659700da7
|
# ******************************************************************************
# Copyright 2014-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""
Convolution layer tests
"""
from builtins import zip
import numpy as np
from neon import NervanaObject
from neon.layers import Sequential, Conv, MergeSum, SkipNode, Activation
from neon.initializers.initializer import Gaussian, IdentityInit
from neon.transforms import Rectlin
from utils import allclose_with_out
try:
from neon.backends.nervanamkl import NervanaMKL
except ImportError:
# stub out the class
class NervanaMKL(object):
pass
init1 = Gaussian(scale=0.01)
relu = Rectlin()
batch_size = 64
def conv_params(fsize, nfm, stride=1, relu=True):
return dict(fshape=(fsize, fsize, nfm), strides=stride, padding=(1 if fsize > 1 else 0),
activation=(Rectlin() if relu else None),
init=init1,
batch_norm=True)
def id_params(nfm):
return dict(fshape=(1, 1, nfm), strides=2, padding=0, activation=None, init=IdentityInit())
def identity_skip(nfm, stride=1):
mainpath = [Conv(**conv_params(3, nfm, stride=stride)),
Conv(**conv_params(3, nfm, relu=False))]
sidepath = [SkipNode() if stride == 1 else Conv(**id_params(nfm))]
module = [MergeSum([mainpath, sidepath]),
Activation(Rectlin())]
return module
def projection_skip(nfm, stride=1):
mainpath = [Conv(**conv_params(3, nfm, stride=stride)),
Conv(**conv_params(3, nfm, relu=False))]
sidepath = [SkipNode() if stride == 1 else Conv(**conv_params(1, nfm, stride, relu=False))]
module = [MergeSum([mainpath, sidepath]),
Activation(Rectlin())]
return module
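# Editor's note (not part of the original test): identity_skip routes the
# shortcut through a SkipNode (or a 1x1 identity-initialized conv when
# striding), while projection_skip uses a learned 1x1 convolution, mirroring
# the identity and projection shortcut variants of residual networks.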
def module_factory_copy(ref_module, modfunc, nfm, stride=1, name="i"):
mm = modfunc(nfm, stride)
for branch_copy, branch_ref in zip(mm[0].layers, ref_module[0].layers):
for ll, lr in zip(branch_copy.layers, branch_ref.layers):
if ll.has_params:
ll.set_params(lr.get_params_serialize())
return (mm[0].layers[0].layers, mm[0].layers[1].layers)
def test_skip_noupsample(backend_default):
be = NervanaObject.be
be.bsz = 64
mergesum_test_config(be, modfunc=identity_skip, use_stride=1)
def test_skip_upsample(backend_default):
be = NervanaObject.be
be.bsz = 64
mergesum_test_config(be, modfunc=identity_skip, use_stride=2)
def test_proj_upsample(backend_default):
be = NervanaObject.be
be.bsz = 64
mergesum_test_config(be, modfunc=projection_skip, use_stride=2)
def mergesum_test_config(be, modfunc, use_stride=1):
l1 = Conv(**conv_params(3, 16))
neon_layer = modfunc(16, use_stride)
inshape = (16, 32, 32)
insize = np.prod(inshape)
inpa = np.random.random((insize, batch_size))
neon_seq = Sequential([l1] + neon_layer)
neon_seq.configure(inshape)
inp = be.array(inpa)
neon_seq.allocate()
# neon_layer.layers[0].prev_layer = True
neon_seq.allocate_deltas()
neon_out = neon_seq.fprop(inp).get()
# Now make the reference pathways:
p1, p2 = module_factory_copy(neon_layer, modfunc, 16, use_stride)
l11 = Conv(**conv_params(3, 16))
l12 = Conv(**conv_params(3, 16))
for ll in (l11, l12):
for lcopy, lref in zip(ll, l1):
if lcopy.has_params:
lcopy.set_params(lref.get_params_serialize())
path1 = Sequential([l11] + p1)
path2 = Sequential([l12] + p2)
for ll in (path1, path2):
ll.configure(inshape)
ll.allocate()
ll.allocate_deltas()
o1 = path1.fprop(inp)
o2 = path2.fprop(inp)
# convert mkl buffer to cpu for following cpu execution
be.convert_data(o1, False)
be.convert_data(o2, False)
neon_out_ref = be.empty_like(o1)
neon_out_ref[:] = be.maximum(o1 + o2, 0)
# need to have bsum false for this test to be valid
assert allclose_with_out(neon_out_ref.get(), neon_out, rtol=0)
erra = np.random.random(neon_out.shape)
err = be.array(erra)
ebr = neon_seq.layers[-1].bprop(err)
ebr = neon_seq.layers[-2].bprop(ebr)
trunk_neon = ebr.get()
err = be.array(erra)
err[:] = be.greater(neon_out_ref, 0) * err
pstart = len(l1)
eb1 = err
for l in reversed(path1.layers[pstart:]):
eb1 = l.bprop(eb1)
eb2 = err
for l in reversed(path2.layers[pstart:]):
eb2 = l.bprop(eb2)
be.convert_data(eb1, False)
be.convert_data(eb2, False)
err_ref = be.empty_like(eb1)
err_ref[:] = eb1 + eb2
assert allclose_with_out(err_ref.get(), trunk_neon, rtol=0)
if __name__ == '__main__':
test_skip_noupsample()
|
NervanaSystems/neon
|
tests/test_mergesum_layer.py
|
Python
|
apache-2.0
| 5,273
|
[
"Gaussian"
] |
008d4051d7221e5e14b3654e298696f6f23bf9c2c485254e248561fb421878e7
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Run bridge unit tests for cclib."""
import sys
import unittest
sys.path.insert(1, "bridge")
if sys.version_info[0] == 3:
if sys.version_info[1] >= 6:
from .bridge.testpsi4 import *
from .bridge.testpyscf import *
from .bridge.testhorton import Horton3Test
if sys.version_info[1] >= 5:
from .bridge.testase import *
if sys.version_info[1] >= 4:
from .bridge.testbiopython import *
from .bridge.testpyquante import pyquante2Test
from .bridge.testopenbabel import *
if sys.version_info[0] == 2:
from .bridge.testhorton import Horton2Test
from .bridge.testpyquante import PyquanteTest
if __name__ == "__main__":
unittest.main()
|
cclib/cclib
|
test/test_bridge.py
|
Python
|
bsd-3-clause
| 899
|
[
"cclib"
] |
71c69db7b6065d82ca382a63f6e93daf4720446d43b5de2a3d1f9507a5ffe1d6
|
#!/usr/bin/python
#
# Copyright (C) 2015, Jaguar Land Rover
#
# This program is licensed under the terms and conditions of the
# Mozilla Public License, version 2.0. The full text of the
# Mozilla Public License is at https://www.mozilla.org/MPL/2.0/
#
#
# Register a service specified by command line with an RVI node.
# Print out a message when the service gets invoked.
#
import sys
from rvilib import RVI
import getopt
def usage():
print "Usage:", sys.argv[0], "[-n <rvi_url>] <service_name>"
print " <rvi_url> URL of Service Edge on a local RVI node."
print " Default: http://localhost:8801"
print " <service_name> URL of Service to register"
print
print "The RVI Service Edge URL can be found in"
print "[backend,vehicle].config as"
print "env -> rvi -> components -> service_edge -> url"
print
print "The Service Edge URL is also logged as a notice when the"
print "RVI node is started."
print
print "Example: ./rvi_service.py -n http://rvi1.nginfotpdx.net:8801 /test/some_service"
sys.exit(255)
#
# Our general handler, registered with rvi.register_service() below.
#
# You can also explicitly name the arguments, but then
# the sender has to match the argument names.
# For example:
# rvi_call.py http://localhost:8801 jlr.com/vin/test a=1 b=2 c=3 ->
# def service(a,b,c)
#
def service_invoked(**args):
print
print "Service invoked!"
print "args:", args
print
sys.stdout.write("Press enter to quit: ")
sys.stdout.flush()
return ['ok']
def services_available(**args):
print
print "Services available!"
print "args:", args
print
sys.stdout.write("Press enter to quit: ")
sys.stdout.flush()
return ['ok']
def services_unavailable(**args):
print
print "Services unavailable!"
print "args:", args
print
sys.stdout.write("Press enter to quit: ")
sys.stdout.flush()
return ['ok']
#
# Check that we have the correct arguments
#
opts, args= getopt.getopt(sys.argv[1:], "n:")
rvi_node_url = "http://localhost:8801"
for o, a in opts:
if o == "-n":
rvi_node_url = a
else:
usage()
if len(args) != 1:
usage()
service_name = args[0]
# Setup a connection to the local RVI node
rvi = RVI(rvi_node_url)
# Starting the thread that handles incoming calls is
# not really necessary since register_service will do it for us.
rvi.start_serve_thread()
rvi.set_services_available_callback(services_available)
rvi.set_services_unavailable_callback(services_unavailable)
# Register our service and invoke 'service_invoked' if we
# get an incoming JSON-RPC call to it from the RVI node
#
full_service_name = rvi.register_service(service_name, service_invoked)
print "RVI General Service."
print "RVI node URL: ", rvi_node_url
print "Service: ", full_service_name
raw_input('Press enter to quit: ')
rvi.unregister_service(service_name)
rvi.shutdown()
sys.exit(0)
|
lillialexis/rvi_core
|
python/rvi_service.py
|
Python
|
mpl-2.0
| 3,035
|
[
"Jaguar"
] |
041332edb5067b984405af2b7767cc63af761069a275f2683ed37f4a9df34466
|
# -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <vincent.michel@inria.fr>
# Minor fixes by Fabian Pedregosa
# Amit Aides <amitibo@tx.technion.ac.il>
# Yehuda Finkelstein <yehudaf@tx.technion.ac.il>
# Lars Buitinck
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.special import logsumexp
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array, deprecated
from .utils.extmath import safe_sparse_dot
from .utils.multiclass import _check_partial_fit_first_call
from .utils.validation import check_is_fitted, check_non_negative, column_or_1d
from .utils.validation import _check_sample_weight
from .utils.validation import _deprecate_positional_args
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB', 'ComplementNB',
'CategoricalNB']
class _BaseNB(ClassifierMixin, BaseEstimator, metaclass=ABCMeta):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape (n_samples, n_classes).
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
@abstractmethod
def _check_X(self, X):
"""To be overridden in subclasses with the actual checks."""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : ndarray of shape (n_samples,)
Predicted target values for X
"""
check_is_fitted(self)
X = self._check_X(X)
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
check_is_fitted(self)
X = self._check_X(X)
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
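# Editor's note: the subtraction above is Bayes' rule in log space,
#   log P(c | x) = log P(c) + log P(x | c) - logsumexp_k(log P(k) + log P(x | k)),
# evaluated row by row via ``jll - logsumexp(jll, axis=1)``.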
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(_BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via :meth:`partial_fit`.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Parameters
----------
priors : array-like of shape (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
var_smoothing : float, default=1e-9
Portion of the largest variance of all features that is added to
variances for calculation stability.
.. versionadded:: 0.20
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
number of training samples observed in each class.
class_prior_ : ndarray of shape (n_classes,)
probability of each class.
classes_ : ndarray of shape (n_classes,)
class labels known to the classifier
epsilon_ : float
absolute additive value to variances
sigma_ : ndarray of shape (n_classes, n_features)
variance of each feature per class
theta_ : ndarray of shape (n_classes, n_features)
mean of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
@_deprecate_positional_args
def __init__(self, *, priors=None, var_smoothing=1e-9):
self.priors = priors
self.var_smoothing = var_smoothing
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Gaussian Naive Bayes supports fitting with *sample_weight*.
Returns
-------
self : object
"""
X, y = self._validate_data(X, y)
y = column_or_1d(y, warn=True)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
def _check_X(self, X):
return check_array(X)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like of shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like of shape (number of Gaussians,)
Variances for Gaussians in original set.
X : array-like of shape (n_samples, n_features)
New data points on which to update the running mean and variance.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like of shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like of shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_new * n_past / n_total) * (mu - new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
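# Editor's illustration (not part of scikit-learn): folding a second batch
# into existing statistics reproduces the mean/variance of the concatenated
# data, e.g.
#
#   >>> import numpy as np
#   >>> old = np.array([[1.], [2.], [3.]])
#   >>> new = np.array([[10.], [11.]])
#   >>> mu, var = GaussianNB._update_mean_variance(
#   ...     len(old), old.mean(axis=0), old.var(axis=0), new)
#   >>> both = np.vstack([old, new])
#   >>> np.allclose(mu, both.mean(axis=0)) and np.allclose(var, both.var(axis=0))
#   True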
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Returns
-------
self : object
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
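# Hedged usage sketch for the out-of-core pattern described above; the chunk
# iterator and `all_classes` below are hypothetical, not part of sklearn.
#
#     gnb = GaussianNB()
#     for i, (X_chunk, y_chunk) in enumerate(iter_chunks(X_large, y_large)):
#         gnb.partial_fit(X_chunk, y_chunk,
#                         classes=all_classes if i == 0 else None)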
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit : bool, default=False
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
X, y = check_X_y(X, y)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
# boost the variance by epsilon, a small fraction of the standard
# deviation of the largest dimension.
self.epsilon_ = self.var_smoothing * np.var(X, axis=0).max()
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
# Initialise the class prior
# Take into account the priors
if self.priors is not None:
priors = np.asarray(self.priors)
# Check that the provided priors match the number of classes
if len(priors) != n_classes:
raise ValueError('Number of priors must match number of'
' classes.')
# Check that the sum is 1
if not np.isclose(priors.sum(), 1.0):
raise ValueError('The sum of the priors should be 1.')
# Check that the priors are non-negative
if (priors < 0).any():
raise ValueError('Priors must be non-negative.')
self.class_prior_ = priors
else:
# Initialize the priors to zeros for each class
self.class_prior_ = np.zeros(len(self.classes_),
dtype=np.float64)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= self.epsilon_
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = np.in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(unique_y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += self.epsilon_
# Update only if no priors were provided
if self.priors is None:
# Empirical prior, with sample_weight taken into account
self.class_prior_ = self.class_count_ / self.class_count_.sum()
return self
def _joint_log_likelihood(self, X):
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
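# For reference, the loop above evaluates, for each class c and sample x,
#   log P(x, y=c) = log P(y=c)
#                   - 0.5 * sum_j [ log(2*pi*sigma_{c,j})
#                                   + (x_j - theta_{c,j})**2 / sigma_{c,j} ]
# i.e. independent per-feature Gaussian log densities summed over features
# plus the class log prior; _BaseNB.predict takes the argmax over classes.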
_ALPHA_MIN = 1e-10
class _BaseDiscreteNB(_BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per _BaseNB
"""
def _check_X(self, X):
return check_array(X, accept_sparse='csr')
def _check_X_y(self, X, y):
return self._validate_data(X, y, accept_sparse='csr')
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
with warnings.catch_warnings():
# silence the warning when count is 0 because class was not yet
# observed
warnings.simplefilter("ignore", RuntimeWarning)
log_class_count = np.log(self.class_count_)
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (log_class_count -
np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.full(n_classes, -np.log(n_classes))
def _check_alpha(self):
if np.min(self.alpha) < 0:
raise ValueError('Smoothing parameter alpha = %.1e. '
'alpha should be > 0.' % np.min(self.alpha))
if isinstance(self.alpha, np.ndarray):
if not self.alpha.shape[0] == self.n_features_:
raise ValueError("alpha should be a scalar or a numpy array "
"with shape [n_features]")
if np.min(self.alpha) < _ALPHA_MIN:
warnings.warn('alpha too small will result in numeric errors, '
'setting alpha = %.1e' % _ALPHA_MIN)
return np.maximum(self.alpha, _ALPHA_MIN)
return self.alpha
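# Note: although the docstrings describe alpha as a float, the validation
# above also accepts a per-feature array of shape (n_features,); values
# below _ALPHA_MIN are clipped with a warning. Hedged sketch, using
# MultinomialNB as one concrete subclass and hypothetical data X_counts, y:
#
#     clf = MultinomialNB(alpha=np.full(20, 0.5))
#     clf.fit(X_counts, y)     # X_counts must have 20 features here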
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
X, y = self._check_X_y(X, y)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self._init_counters(n_effective_classes, n_features)
self.n_features_ = n_features
elif n_features != self.n_features_:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.n_features_))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64, copy=False)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
sample_weight = np.atleast_2d(sample_weight)
Y *= sample_weight.T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior to any call to predict[_[log_]proba]
# to avoid computing the smoothed log probabilities at each call to
# partial_fit
alpha = self._check_alpha()
self._update_feature_log_prob(alpha)
self._update_class_log_prior(class_prior=class_prior)
return self
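# Consequence of the OPTIM note above: feature_log_prob_ and
# class_log_prior_ are refreshed on every partial_fit call, so the model is
# always ready to predict between chunks, but each call pays the smoothing
# cost; feeding fewer, larger chunks amortises that overhead.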
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
X, y = self._check_X_y(X, y)
_, n_features = X.shape
self.n_features_ = n_features
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
if sample_weight is not None:
Y = Y.astype(np.float64, copy=False)
sample_weight = _check_sample_weight(sample_weight, X)
sample_weight = np.atleast_2d(sample_weight)
Y *= sample_weight.T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self._init_counters(n_effective_classes, n_features)
self._count(X, Y)
alpha = self._check_alpha()
self._update_feature_log_prob(alpha)
self._update_class_log_prior(class_prior=class_prior)
return self
def _init_counters(self, n_effective_classes, n_features):
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
# mypy error: Decorated property not supported
@deprecated("Attribute coef_ was deprecated in " # type: ignore
"version 0.24 and will be removed in 0.26.")
@property
def coef_(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
# mypy error: Decorated property not supported
@deprecated("Attribute intercept_ was deprecated in " # type: ignore
"version 0.24 and will be removed in 0.26.")
@property
def intercept_(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
def _more_tags(self):
return {'poor_score': True}
class MultinomialNB(_BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : bool, default=True
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes, )
Smoothed empirical log probability for each class.
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
coef_ : ndarray of shape (n_classes, n_features)
Mirrors ``feature_log_prob_`` for interpreting `MultinomialNB`
as a linear model.
.. deprecated:: 0.24
``coef_`` is deprecated in 0.24 and will be removed in 0.26.
feature_count_ : ndarray of shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
feature_log_prob_ : ndarray of shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
intercept_ : ndarray of shape (n_classes,)
Mirrors ``class_log_prior_`` for interpreting `MultinomialNB`
as a linear model.
.. deprecated:: 0.24
``intercept_`` is deprecated in 0.24 and will be removed in 0.26.
n_features_ : int
Number of features of each sample.
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB()
>>> print(clf.predict(X[2:3]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
https://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
@_deprecate_positional_args
def __init__(self, *, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _more_tags(self):
return {'requires_positive_X': True}
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
check_non_negative(X, "MultinomialNB (input X)")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
return (safe_sparse_dot(X, self.feature_log_prob_.T) +
self.class_log_prior_)
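# For reference, the decision rule implemented by MultinomialNB above is
#   argmax_c [ log P(y=c) + sum_j x_j * log(theta_{c,j}) ]
# where, for scalar alpha, theta_{c,j} = (N_{c,j} + alpha) / (N_c + alpha * n_features)
# is the smoothed per-class feature probability from _update_feature_log_prob.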
class ComplementNB(_BaseDiscreteNB):
"""The Complement Naive Bayes classifier described in Rennie et al. (2003).
The Complement Naive Bayes classifier was designed to correct the "severe
assumptions" made by the standard Multinomial Naive Bayes classifier. It is
particularly suited for imbalanced data sets.
Read more in the :ref:`User Guide <complement_naive_bayes>`.
.. versionadded:: 0.20
Parameters
----------
alpha : float, default=1.0
Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing).
fit_prior : bool, default=True
Only used in edge case with a single class in the training set.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. Not used.
norm : bool, default=False
Whether or not a second normalization of the weights is performed. The
default behavior mirrors the implementations found in Mahout and Weka,
which do not follow the full algorithm described in Table 9 of the
paper.
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
Smoothed empirical log probability for each class. Only used in edge
case with a single class in the training set.
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
coef_ : ndarray of shape (n_classes, n_features)
Mirrors ``feature_log_prob_`` for interpreting `ComplementNB`
as a linear model.
.. deprecated:: 0.24
``coef_`` is deprecated in 0.24 and will be removed in 0.26.
feature_all_ : ndarray of shape (n_features,)
Number of samples encountered for each feature during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : ndarray of shape (n_classes, n_features)
Number of samples encountered for each (class, feature) during fitting.
This value is weighted by the sample weight when provided.
feature_log_prob_ : ndarray of shape (n_classes, n_features)
Empirical weights for class complements.
intercept_ : ndarray of shape (n_classes,)
Mirrors ``class_log_prior_`` for interpreting `ComplementNB`
as a linear model.
.. deprecated:: 0.24
``intercept_`` is deprecated in 0.24 and will be removed in 0.26.
n_features_ : int
Number of features of each sample.
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import ComplementNB
>>> clf = ComplementNB()
>>> clf.fit(X, y)
ComplementNB()
>>> print(clf.predict(X[2:3]))
[3]
References
----------
Rennie, J. D., Shih, L., Teevan, J., & Karger, D. R. (2003).
Tackling the poor assumptions of naive bayes text classifiers. In ICML
(Vol. 3, pp. 616-623).
https://people.csail.mit.edu/jrennie/papers/icml03-nb.pdf
"""
@_deprecate_positional_args
def __init__(self, *, alpha=1.0, fit_prior=True, class_prior=None,
norm=False):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
self.norm = norm
def _more_tags(self):
return {'requires_positive_X': True}
def _count(self, X, Y):
"""Count feature occurrences."""
check_non_negative(X, "ComplementNB (input X)")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
self.feature_all_ = self.feature_count_.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and compute the weights."""
comp_count = self.feature_all_ + alpha - self.feature_count_
logged = np.log(comp_count / comp_count.sum(axis=1, keepdims=True))
# _BaseNB.predict uses argmax, but ComplementNB operates with argmin.
if self.norm:
summed = logged.sum(axis=1, keepdims=True)
feature_log_prob = logged / summed
else:
feature_log_prob = -logged
self.feature_log_prob_ = feature_log_prob
def _joint_log_likelihood(self, X):
"""Calculate the class scores for the samples in X."""
jll = safe_sparse_dot(X, self.feature_log_prob_.T)
if len(self.classes_) == 1:
jll += self.class_log_prior_
return jll
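# For reference, ComplementNB._update_feature_log_prob above implements the
# weighting from Rennie et al. (2003): per-feature counts are pooled over all
# classes except c (feature_all_ - feature_count_[c]), smoothed with alpha and
# normalised; the sign flip (or, with norm=True, division by the per-class sum
# of the logs) makes the argmax taken by _BaseNB.predict equivalent to the
# paper's argmin.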
class BernoulliNB(_BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, default=0.0
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : bool, default=True
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_count_ : ndarray of shape (n_classes)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes)
Log probability of each class (smoothed).
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
coef_ : ndarray of shape (n_classes, n_features)
Mirrors ``feature_log_prob_`` for interpreting `BernoulliNB`
as a linear model.
feature_count_ : ndarray of shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
feature_log_prob_ : ndarray of shape (n_classes, n_features)
Empirical log probability of features given a class, P(x_i|y).
intercept_ : ndarray of shape (n_classes,)
Mirrors ``class_log_prior_`` for interpreting `BernoulliNB`
as a linear model.
n_features_ : int
Number of features of each sample.
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB()
>>> print(clf.predict(X[2:3]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
@_deprecate_positional_args
def __init__(self, *, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _check_X(self, X):
X = super()._check_X(X)
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
return X
def _check_X_y(self, X, y):
X, y = super()._check_X_y(X, y)
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
return X, y
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + alpha
smoothed_cc = self.class_count_ + alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
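# For reference, the algebra behind the neg_prob trick above: with binary
# features x_j and p_j = P(x_j = 1 | y),
#   sum_j [ x_j*log(p_j) + (1 - x_j)*log(1 - p_j) ]
#     = sum_j x_j*(log(p_j) - log(1 - p_j)) + sum_j log(1 - p_j)
# which is exactly X @ (feature_log_prob_ - neg_prob).T plus the per-class
# constant neg_prob.sum(axis=1), so absent features also contribute evidence.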
class CategoricalNB(_BaseDiscreteNB):
"""Naive Bayes classifier for categorical features
The categorical Naive Bayes classifier is suitable for classification with
discrete features that are categorically distributed. The categories of
each feature are drawn from a categorical distribution.
Read more in the :ref:`User Guide <categorical_naive_bayes>`.
Parameters
----------
alpha : float, default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : bool, default=True
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
category_count_ : list of arrays of shape (n_features,)
Holds arrays of shape (n_classes, n_categories of respective feature)
for each feature. Each array provides the number of samples
encountered for each class and category of the specific feature.
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
Smoothed empirical log probability for each class.
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
feature_log_prob_ : list of arrays of shape (n_features,)
Holds arrays of shape (n_classes, n_categories of respective feature)
for each feature. Each array provides the empirical log probability
of categories given the respective feature and class, ``P(x_i|y)``.
n_features_ : int
Number of features of each sample.
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import CategoricalNB
>>> clf = CategoricalNB()
>>> clf.fit(X, y)
CategoricalNB()
>>> print(clf.predict(X[2:3]))
[3]
"""
@_deprecate_positional_args
def __init__(self, *, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features. Here, each feature of X is
assumed to be from a different categorical distribution.
It is further assumed that all categories of each feature are
represented by the numbers 0, ..., n - 1, where n refers to the
total number of categories for the given feature. This can, for
instance, be achieved with the help of OrdinalEncoder.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
return super().fit(X, y, sample_weight=sample_weight)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features. Here, each feature of X is
assumed to be from a different categorical distribution.
It is further assumed that all categories of each feature are
represented by the numbers 0, ..., n - 1, where n refers to the
total number of categories for the given feature. This can, for
instance, be achieved with the help of OrdinalEncoder.
y : array-like of shape (n_samples)
Target values.
classes : array-like of shape (n_classes), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
return super().partial_fit(X, y, classes,
sample_weight=sample_weight)
def _more_tags(self):
return {'requires_positive_X': True}
def _check_X(self, X):
X = check_array(X, dtype='int', accept_sparse=False,
force_all_finite=True)
check_non_negative(X, "CategoricalNB (input X)")
return X
def _check_X_y(self, X, y):
X, y = self._validate_data(X, y, dtype='int', accept_sparse=False,
force_all_finite=True)
check_non_negative(X, "CategoricalNB (input X)")
return X, y
def _init_counters(self, n_effective_classes, n_features):
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.category_count_ = [np.zeros((n_effective_classes, 0))
for _ in range(n_features)]
def _count(self, X, Y):
def _update_cat_count_dims(cat_count, highest_feature):
diff = highest_feature + 1 - cat_count.shape[1]
if diff > 0:
# we append a column full of zeros for each new category
return np.pad(cat_count, [(0, 0), (0, diff)], 'constant')
return cat_count
def _update_cat_count(X_feature, Y, cat_count, n_classes):
for j in range(n_classes):
mask = Y[:, j].astype(bool)
if Y.dtype.type == np.int64:
weights = None
else:
weights = Y[mask, j]
counts = np.bincount(X_feature[mask], weights=weights)
indices = np.nonzero(counts)[0]
cat_count[j, indices] += counts[indices]
self.class_count_ += Y.sum(axis=0)
for i in range(self.n_features_):
X_feature = X[:, i]
self.category_count_[i] = _update_cat_count_dims(
self.category_count_[i], X_feature.max())
_update_cat_count(X_feature, Y,
self.category_count_[i],
self.class_count_.shape[0])
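# Note: category_count_[i] is widened on the fly (zero columns appended by
# _update_cat_count_dims) whenever a later batch contains a category index
# not seen before for feature i, so partial_fit batches need not share the
# same category range. Hedged illustration (shapes only):
#
#     clf = CategoricalNB()
#     clf.partial_fit([[0], [1]], [0, 1], classes=[0, 1])
#     clf.category_count_[0].shape   # (2, 2): categories {0, 1} seen so far
#     clf.partial_fit([[3]], [0])
#     clf.category_count_[0].shape   # (2, 4): padded up to category index 3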
def _update_feature_log_prob(self, alpha):
feature_log_prob = []
for i in range(self.n_features_):
smoothed_cat_count = self.category_count_[i] + alpha
smoothed_class_count = smoothed_cat_count.sum(axis=1)
feature_log_prob.append(
np.log(smoothed_cat_count) -
np.log(smoothed_class_count.reshape(-1, 1)))
self.feature_log_prob_ = feature_log_prob
def _joint_log_likelihood(self, X):
if not X.shape[1] == self.n_features_:
raise ValueError("Expected input with %d features, got %d instead"
% (self.n_features_, X.shape[1]))
jll = np.zeros((X.shape[0], self.class_count_.shape[0]))
for i in range(self.n_features_):
indices = X[:, i]
jll += self.feature_log_prob_[i][:, indices].T
total_ll = jll + self.class_log_prior_
return total_ll
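# Hedged end-to-end sketch of the OrdinalEncoder pairing mentioned in the
# CategoricalNB docstrings; X_raw, y and X_new below are hypothetical data.
#
#     from sklearn.preprocessing import OrdinalEncoder
#     enc = OrdinalEncoder()
#     X_enc = enc.fit_transform(X_raw)      # categories mapped to 0, ..., n-1
#     clf = CategoricalNB().fit(X_enc, y)
#     y_pred = clf.predict(enc.transform(X_new))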
|
bnaul/scikit-learn
|
sklearn/naive_bayes.py
|
Python
|
bsd-3-clause
| 46,904
|
[
"Gaussian"
] |
9ab6c8e119eb74a0e124176451551efd712a400815f710a4f2f817440da1c61e
|